// SPDX-License-Identifier: GPL-2.0-only
/*
 * Aspeed 24XX/25XX I2C Controller.
 *
 * Copyright (C) 2012-2017 ASPEED Technology Inc.
 * Copyright 2017 IBM Corporation
 * Copyright 2017 Google, Inc.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>

/* I2C Register */
#define ASPEED_I2C_FUN_CTRL_REG 0x00
#define ASPEED_I2C_AC_TIMING_REG1 0x04
#define ASPEED_I2C_AC_TIMING_REG2 0x08
#define ASPEED_I2C_INTR_CTRL_REG 0x0c
#define ASPEED_I2C_INTR_STS_REG 0x10
#define ASPEED_I2C_CMD_REG 0x14
#define ASPEED_I2C_DEV_ADDR_REG 0x18
#define ASPEED_I2C_BYTE_BUF_REG 0x20

/* Global Register Definition */
/* 0x00 : I2C Interrupt Status Register */
/* 0x08 : I2C Interrupt Target Assignment */

/* Device Register Definition */
/* 0x00 : I2CD Function Control Register */
#define ASPEED_I2CD_MULTI_MASTER_DIS BIT(15)
#define ASPEED_I2CD_SDA_DRIVE_1T_EN BIT(8)
#define ASPEED_I2CD_M_SDA_DRIVE_1T_EN BIT(7)
#define ASPEED_I2CD_M_HIGH_SPEED_EN BIT(6)
#define ASPEED_I2CD_SLAVE_EN BIT(1)
#define ASPEED_I2CD_MASTER_EN BIT(0)

/* 0x04 : I2CD Clock and AC Timing Control Register #1 */
#define ASPEED_I2CD_TIME_TBUF_MASK GENMASK(31, 28)
#define ASPEED_I2CD_TIME_THDSTA_MASK GENMASK(27, 24)
#define ASPEED_I2CD_TIME_TACST_MASK GENMASK(23, 20)
#define ASPEED_I2CD_TIME_SCL_HIGH_SHIFT 16
#define ASPEED_I2CD_TIME_SCL_HIGH_MASK GENMASK(19, 16)
#define ASPEED_I2CD_TIME_SCL_LOW_SHIFT 12
#define ASPEED_I2CD_TIME_SCL_LOW_MASK GENMASK(15, 12)
#define ASPEED_I2CD_TIME_BASE_DIVISOR_MASK GENMASK(3, 0)
#define ASPEED_I2CD_TIME_SCL_REG_MAX GENMASK(3, 0)
/* 0x08 : I2CD Clock and AC Timing Control Register #2 */
#define ASPEED_NO_TIMEOUT_CTRL 0

/* 0x0c : I2CD Interrupt Control Register &
 * 0x10 : I2CD Interrupt Status Register
 *
 * These share bit definitions, so use the same values for the enable &
 * status bits.
 */
#define ASPEED_I2CD_INTR_RECV_MASK 0xf000ffff
#define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT BIT(14)
#define ASPEED_I2CD_INTR_BUS_RECOVER_DONE BIT(13)
#define ASPEED_I2CD_INTR_SLAVE_MATCH BIT(7)
#define ASPEED_I2CD_INTR_SCL_TIMEOUT BIT(6)
#define ASPEED_I2CD_INTR_ABNORMAL BIT(5)
#define ASPEED_I2CD_INTR_NORMAL_STOP BIT(4)
#define ASPEED_I2CD_INTR_ARBIT_LOSS BIT(3)
#define ASPEED_I2CD_INTR_RX_DONE BIT(2)
#define ASPEED_I2CD_INTR_TX_NAK BIT(1)
#define ASPEED_I2CD_INTR_TX_ACK BIT(0)
#define ASPEED_I2CD_INTR_MASTER_ERRORS \
	(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \
	 ASPEED_I2CD_INTR_SCL_TIMEOUT | \
	 ASPEED_I2CD_INTR_ABNORMAL | \
	 ASPEED_I2CD_INTR_ARBIT_LOSS)
#define ASPEED_I2CD_INTR_ALL \
	(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \
	 ASPEED_I2CD_INTR_BUS_RECOVER_DONE | \
	 ASPEED_I2CD_INTR_SCL_TIMEOUT | \
	 ASPEED_I2CD_INTR_ABNORMAL | \
	 ASPEED_I2CD_INTR_NORMAL_STOP | \
	 ASPEED_I2CD_INTR_ARBIT_LOSS | \
	 ASPEED_I2CD_INTR_RX_DONE | \
	 ASPEED_I2CD_INTR_TX_NAK | \
	 ASPEED_I2CD_INTR_TX_ACK)

/* 0x14 : I2CD Command/Status Register */
#define ASPEED_I2CD_SCL_LINE_STS BIT(18)
#define ASPEED_I2CD_SDA_LINE_STS BIT(17)
#define ASPEED_I2CD_BUS_BUSY_STS BIT(16)
#define ASPEED_I2CD_BUS_RECOVER_CMD BIT(11)

/* Command Bit */
#define ASPEED_I2CD_M_STOP_CMD BIT(5)
#define ASPEED_I2CD_M_S_RX_CMD_LAST BIT(4)
#define ASPEED_I2CD_M_RX_CMD BIT(3)
#define ASPEED_I2CD_S_TX_CMD BIT(2)
#define ASPEED_I2CD_M_TX_CMD BIT(1)
#define ASPEED_I2CD_M_START_CMD BIT(0)
#define ASPEED_I2CD_MASTER_CMDS_MASK \
	(ASPEED_I2CD_M_STOP_CMD | \
	 ASPEED_I2CD_M_S_RX_CMD_LAST | \
	 ASPEED_I2CD_M_RX_CMD | \
	 ASPEED_I2CD_M_TX_CMD | \
	 ASPEED_I2CD_M_START_CMD)

/* 0x18 : I2CD Slave Device Address Register */
#define ASPEED_I2CD_DEV_ADDR_MASK GENMASK(6, 0)

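/*
 * Master transfer state machine. PENDING is only entered when the slave
 * backend (CONFIG_I2C_SLAVE) is active: a requested master transfer is held
 * back until the in-progress slave session finishes and the bus goes idle,
 * at which point the start is reissued from the interrupt handler.
 */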
enum aspeed_i2c_master_state {
	ASPEED_I2C_MASTER_INACTIVE,
	ASPEED_I2C_MASTER_PENDING,
	ASPEED_I2C_MASTER_START,
	ASPEED_I2C_MASTER_TX_FIRST,
	ASPEED_I2C_MASTER_TX,
	ASPEED_I2C_MASTER_RX_FIRST,
	ASPEED_I2C_MASTER_RX,
	ASPEED_I2C_MASTER_STOP,
};

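/*
 * Slave state machine, driven by the interrupt handler: START is entered on
 * an address match, the READ/WRITE states track the direction requested by
 * the remote master, and each state maps onto a corresponding
 * i2c_slave_event() call to the registered slave backend.
 */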
enum aspeed_i2c_slave_state {
	ASPEED_I2C_SLAVE_INACTIVE,
	ASPEED_I2C_SLAVE_START,
	ASPEED_I2C_SLAVE_READ_REQUESTED,
	ASPEED_I2C_SLAVE_READ_PROCESSED,
	ASPEED_I2C_SLAVE_WRITE_REQUESTED,
	ASPEED_I2C_SLAVE_WRITE_RECEIVED,
	ASPEED_I2C_SLAVE_STOP,
};

struct aspeed_i2c_bus {
	struct i2c_adapter adap;
	struct device *dev;
	void __iomem *base;
	struct reset_control *rst;
	/* Synchronizes I/O mem access to base. */
	spinlock_t lock;
	struct completion cmd_complete;
	u32 (*get_clk_reg_val)(struct device *dev,
			       u32 divisor);
	unsigned long parent_clk_frequency;
	u32 bus_frequency;
	/* Transaction state. */
	enum aspeed_i2c_master_state master_state;
	struct i2c_msg *msgs;
	size_t buf_index;
	size_t msgs_index;
	size_t msgs_count;
	bool send_stop;
	int cmd_err;
	/* Protected only by i2c_lock_bus */
	int master_xfer_result;
	/* Multi-master */
	bool multi_master;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	struct i2c_client *slave;
	enum aspeed_i2c_slave_state slave_state;
#endif /* CONFIG_I2C_SLAVE */
};

static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);

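/*
 * Try to free a hung bus: if SDA is released but SCL is stuck low, issue a
 * STOP; if SDA is stuck low, issue the controller's bus recovery command,
 * which toggles SCL until SDA is released. If either attempt times out or
 * fails, fall back to a full controller reset via aspeed_i2c_reset().
 */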
static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
{
	unsigned long time_left, flags;
	int ret = 0;
	u32 command;

	spin_lock_irqsave(&bus->lock, flags);
	command = readl(bus->base + ASPEED_I2C_CMD_REG);

	if (command & ASPEED_I2CD_SDA_LINE_STS) {
		/* Bus is idle: no recovery needed. */
		if (command & ASPEED_I2CD_SCL_LINE_STS)
			goto out;
		dev_dbg(bus->dev, "SCL hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SCL_LINE_STS))
			goto reset_out;
	/* Bus error. */
	} else {
		dev_dbg(bus->dev, "SDA hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		/* Writes 1 to 8 SCL clock cycles until SDA is released. */
		writel(ASPEED_I2CD_BUS_RECOVER_CMD,
		       bus->base + ASPEED_I2C_CMD_REG);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SDA_LINE_STS))
			goto reset_out;
	}

out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;

reset_out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return aspeed_i2c_reset(bus);
}

#if IS_ENABLED(CONFIG_I2C_SLAVE)
static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 command, irq_handled = 0;
	struct i2c_client *slave = bus->slave;
	u8 value;

	if (!slave)
		return 0;

	command = readl(bus->base + ASPEED_I2C_CMD_REG);

	/* Slave was requested, restart state machine. */
	if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
		irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
		bus->slave_state = ASPEED_I2C_SLAVE_START;
	}

	/* Slave is not currently active, irq was for someone else. */
	if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
		return irq_handled;

	dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
		irq_status, command);

	/* Slave was sent something. */
	if (irq_status & ASPEED_I2CD_INTR_RX_DONE) {
		value = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		/* Handle address frame. */
		if (bus->slave_state == ASPEED_I2C_SLAVE_START) {
			if (value & 0x1)
				bus->slave_state =
					ASPEED_I2C_SLAVE_READ_REQUESTED;
			else
				bus->slave_state =
					ASPEED_I2C_SLAVE_WRITE_REQUESTED;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
	}

	/* Slave was asked to stop. */
	if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
		irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}
	if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
	    bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
		irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}

	switch (bus->slave_state) {
	case ASPEED_I2C_SLAVE_READ_REQUESTED:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
			dev_err(bus->dev, "Unexpected ACK on read request.\n");
		bus->slave_state = ASPEED_I2C_SLAVE_READ_PROCESSED;
		i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_READ_PROCESSED:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev,
				"Expected ACK after processed read.\n");
			break;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_WRITE_REQUESTED:
		bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED;
		i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
		break;
	case ASPEED_I2C_SLAVE_WRITE_RECEIVED:
		i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
		break;
	case ASPEED_I2C_SLAVE_STOP:
		i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
		break;
	case ASPEED_I2C_SLAVE_START:
		/* Slave was just started. Waiting for the next event. */
		break;
	default:
		dev_err(bus->dev, "unknown slave_state: %d\n",
			bus->slave_state);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
		break;
	}

	return irq_handled;
}
#endif /* CONFIG_I2C_SLAVE */

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
{
	u32 command = ASPEED_I2CD_M_START_CMD | ASPEED_I2CD_M_TX_CMD;
	struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
	u8 slave_addr = i2c_8bit_addr_from_msg(msg);

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * If a master command is requested in the middle of a slave session,
	 * set the master state to 'pending'; the command will be started once
	 * the bus returns to the idle state.
	 */
	if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
		bus->master_state = ASPEED_I2C_MASTER_PENDING;
		return;
	}
#endif /* CONFIG_I2C_SLAVE */

	bus->master_state = ASPEED_I2C_MASTER_START;
	bus->buf_index = 0;

	if (msg->flags & I2C_M_RD) {
		command |= ASPEED_I2CD_M_RX_CMD;
		/* Need to let the hardware know to NACK after RX. */
		if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN))
			command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
	}

	writel(slave_addr, bus->base + ASPEED_I2C_BYTE_BUF_REG);
	writel(command, bus->base + ASPEED_I2C_CMD_REG);
}

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
{
	bus->master_state = ASPEED_I2C_MASTER_STOP;
	writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
}

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
{
	if (bus->msgs_index + 1 < bus->msgs_count) {
		bus->msgs_index++;
		aspeed_i2c_do_start(bus);
	} else {
		aspeed_i2c_do_stop(bus);
	}
}

static int aspeed_i2c_is_irq_error(u32 irq_status)
{
	if (irq_status & ASPEED_I2CD_INTR_ARBIT_LOSS)
		return -EAGAIN;
	if (irq_status & (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |
			  ASPEED_I2CD_INTR_SCL_TIMEOUT))
		return -EBUSY;
	if (irq_status & (ASPEED_I2CD_INTR_ABNORMAL))
		return -EPROTO;

	return 0;
}

static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 irq_handled = 0, command = 0;
	struct i2c_msg *msg;
	u8 recv_byte;
	int ret;

	if (irq_status & ASPEED_I2CD_INTR_BUS_RECOVER_DONE) {
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		irq_handled |= ASPEED_I2CD_INTR_BUS_RECOVER_DONE;
		goto out_complete;
	}

	/*
	 * We encountered an interrupt that reports an error: the hardware
	 * should clear the command queue, effectively taking us back to the
	 * INACTIVE state.
	 */
	ret = aspeed_i2c_is_irq_error(irq_status);
	if (ret) {
		dev_dbg(bus->dev, "received error interrupt: 0x%08x\n",
			irq_status);
		irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
		if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
			bus->cmd_err = ret;
			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
			goto out_complete;
		}
	}

	/* Master is not currently active, irq was for someone else. */
	if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
	    bus->master_state == ASPEED_I2C_MASTER_PENDING)
		goto out_no_complete;

	/* We are in an invalid state; reset bus to a known state. */
	if (!bus->msgs) {
		dev_err(bus->dev, "bus in unknown state. irq_status: 0x%x\n",
			irq_status);
		bus->cmd_err = -EIO;
		if (bus->master_state != ASPEED_I2C_MASTER_STOP &&
		    bus->master_state != ASPEED_I2C_MASTER_INACTIVE)
			aspeed_i2c_do_stop(bus);
		goto out_no_complete;
	}
	msg = &bus->msgs[bus->msgs_index];

	/*
	 * START is a special case because we still have to handle a subsequent
	 * TX or RX immediately after we handle it, so we handle it here and
	 * then update the state and handle the new state below.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_START) {
#if IS_ENABLED(CONFIG_I2C_SLAVE)
		/*
		 * If a peer master starts a transfer immediately after we
		 * queue a master command, clear the queued command and mark
		 * the master state 'pending'. Pending commands are restarted
		 * in software once the bus is idle again, instead of relying
		 * on the hardware command queue, which keeps the pending-case
		 * handling simple.
		 */
		if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
			writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
				~ASPEED_I2CD_MASTER_CMDS_MASK,
			       bus->base + ASPEED_I2C_CMD_REG);
			bus->master_state = ASPEED_I2C_MASTER_PENDING;
			dev_dbg(bus->dev,
				"master goes pending due to a slave start\n");
			goto out_no_complete;
		}
#endif /* CONFIG_I2C_SLAVE */
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_NAK))) {
				bus->cmd_err = -ENXIO;
				bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
				goto out_complete;
			}
			pr_devel("no slave present at %02x\n", msg->addr);
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			bus->cmd_err = -ENXIO;
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		if (msg->len == 0) { /* SMBUS_QUICK */
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		if (msg->flags & I2C_M_RD)
			bus->master_state = ASPEED_I2C_MASTER_RX_FIRST;
		else
			bus->master_state = ASPEED_I2C_MASTER_TX_FIRST;
	}

	switch (bus->master_state) {
	case ASPEED_I2C_MASTER_TX:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) {
			dev_dbg(bus->dev, "slave NACKed TX\n");
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			goto error_and_stop;
		} else if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev, "slave failed to ACK TX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		fallthrough;
	case ASPEED_I2C_MASTER_TX_FIRST:
		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_TX;
			writel(msg->buf[bus->buf_index++],
			       bus->base + ASPEED_I2C_BYTE_BUF_REG);
			writel(ASPEED_I2CD_M_TX_CMD,
			       bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_RX_FIRST:
		/* RX may not have completed yet (only address cycle) */
		if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE))
			goto out_no_complete;
		fallthrough;
	case ASPEED_I2C_MASTER_RX:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) {
			dev_err(bus->dev, "master failed to RX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;

		recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		msg->buf[bus->buf_index++] = recv_byte;

		if (msg->flags & I2C_M_RECV_LEN) {
			if (unlikely(recv_byte > I2C_SMBUS_BLOCK_MAX)) {
				bus->cmd_err = -EPROTO;
				aspeed_i2c_do_stop(bus);
				goto out_no_complete;
			}
			msg->len = recv_byte +
				((msg->flags & I2C_CLIENT_PEC) ? 2 : 1);
			msg->flags &= ~I2C_M_RECV_LEN;
		}

		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_RX;
			command = ASPEED_I2CD_M_RX_CMD;
			if (bus->buf_index + 1 == msg->len)
				command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
			writel(command, bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_STOP:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))) {
			dev_err(bus->dev,
				"master failed to STOP. irq_status:0x%x\n",
				irq_status);
			bus->cmd_err = -EIO;
			/* Do not STOP as we have already tried. */
		} else {
			irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		}

		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		goto out_complete;
	case ASPEED_I2C_MASTER_INACTIVE:
		dev_err(bus->dev,
			"master received interrupt 0x%08x, but is inactive\n",
			irq_status);
		bus->cmd_err = -EIO;
		/* Do not STOP as we should be inactive. */
		goto out_complete;
	default:
		WARN(1, "unknown master state\n");
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		bus->cmd_err = -EINVAL;
		goto out_complete;
	}
error_and_stop:
	bus->cmd_err = -EIO;
	aspeed_i2c_do_stop(bus);
	goto out_no_complete;
out_complete:
	bus->msgs = NULL;
	if (bus->cmd_err)
		bus->master_xfer_result = bus->cmd_err;
	else
		bus->master_xfer_result = bus->msgs_index + 1;
	complete(&bus->cmd_complete);
out_no_complete:
	return irq_handled;
}

static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
{
	struct aspeed_i2c_bus *bus = dev_id;
	u32 irq_received, irq_remaining, irq_handled;

	spin_lock(&bus->lock);
	irq_received = readl(bus->base + ASPEED_I2C_INTR_STS_REG);
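	/*
	 * Ack ordering (an interpretation of the handling below, not taken
	 * from the datasheet): everything except RX_DONE is acknowledged up
	 * front, while RX_DONE is only acknowledged at the end of the
	 * handler, once the received byte has been consumed, so that the
	 * controller does not start on the next byte while the byte buffer
	 * is still in use. The read back after each ack makes sure the write
	 * has reached the controller before proceeding.
	 */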
	/* Ack all interrupts except for Rx done */
	writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
	       bus->base + ASPEED_I2C_INTR_STS_REG);
	readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	irq_received &= ASPEED_I2CD_INTR_RECV_MASK;
	irq_remaining = irq_received;

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * In most cases, interrupt bits will be set one by one, although
	 * multiple interrupt bits could be set at the same time. It's also
	 * possible that master interrupt bits could be set along with slave
	 * interrupt bits. Each case needs to be handled using corresponding
	 * handlers depending on the current state.
	 */
	if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE &&
	    bus->master_state != ASPEED_I2C_MASTER_PENDING) {
		irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_slave_irq(bus, irq_remaining);
	} else {
		irq_handled = aspeed_i2c_slave_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_master_irq(bus,
							     irq_remaining);
	}

	/*
	 * Start a pending master command here if a slave operation has
	 * completed.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
	    bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
		aspeed_i2c_do_start(bus);
#else
	irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
#endif /* CONFIG_I2C_SLAVE */

	irq_remaining &= ~irq_handled;
	if (irq_remaining)
		dev_err(bus->dev,
			"irq handled != irq. expected 0x%08x, but was 0x%08x\n",
			irq_received, irq_handled);

	/* Ack Rx done */
	if (irq_received & ASPEED_I2CD_INTR_RX_DONE) {
		writel(ASPEED_I2CD_INTR_RX_DONE,
		       bus->base + ASPEED_I2C_INTR_STS_REG);
		readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	}
	spin_unlock(&bus->lock);
	return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
}

static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
				  struct i2c_msg *msgs, int num)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(adap);
	unsigned long time_left, flags;

	spin_lock_irqsave(&bus->lock, flags);
	bus->cmd_err = 0;

	/* If bus is busy in a single master environment, attempt recovery. */
	if (!bus->multi_master &&
	    (readl(bus->base + ASPEED_I2C_CMD_REG) &
	     ASPEED_I2CD_BUS_BUSY_STS)) {
		int ret;

		spin_unlock_irqrestore(&bus->lock, flags);
		ret = aspeed_i2c_recover_bus(bus);
		if (ret)
			return ret;
		spin_lock_irqsave(&bus->lock, flags);
	}

	bus->cmd_err = 0;
	bus->msgs = msgs;
	bus->msgs_index = 0;
	bus->msgs_count = num;

	reinit_completion(&bus->cmd_complete);
	aspeed_i2c_do_start(bus);
	spin_unlock_irqrestore(&bus->lock, flags);

	time_left = wait_for_completion_timeout(&bus->cmd_complete,
						bus->adap.timeout);

	if (time_left == 0) {
		/*
		 * If timed out and the bus is still busy in a multi-master
		 * environment, attempt recovery here.
		 */
		if (bus->multi_master &&
		    (readl(bus->base + ASPEED_I2C_CMD_REG) &
		     ASPEED_I2CD_BUS_BUSY_STS))
			aspeed_i2c_recover_bus(bus);

		/*
		 * If timed out and the state is still pending, drop the pending
		 * master command.
		 */
		spin_lock_irqsave(&bus->lock, flags);
		if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		spin_unlock_irqrestore(&bus->lock, flags);

		return -ETIMEDOUT;
	}

	return bus->master_xfer_result;
}

static u32 aspeed_i2c_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
}

#if IS_ENABLED(CONFIG_I2C_SLAVE)
/* precondition: bus.lock has been acquired. */
static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
{
	u32 addr_reg_val, func_ctrl_reg_val;

	/*
	 * Set slave addr. Reserved bits can all safely be written with zeros
	 * on all of ast2[456]00, so zero everything else to ensure we only
	 * enable a single slave address (ast2500 has two, ast2600 has three,
	 * the enable bits for which are also in this register) so that we don't
	 * end up with additional phantom devices responding on the bus.
	 */
	addr_reg_val = slave_addr & ASPEED_I2CD_DEV_ADDR_MASK;
	writel(addr_reg_val, bus->base + ASPEED_I2C_DEV_ADDR_REG);

	/* Turn on slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
}

static int aspeed_i2c_reg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	__aspeed_i2c_reg_slave(bus, client->addr);

	bus->slave = client;
	bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}

static int aspeed_i2c_unreg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	u32 func_ctrl_reg_val;
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (!bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	/* Turn off slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val &= ~ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	bus->slave = NULL;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}
#endif /* CONFIG_I2C_SLAVE */

static const struct i2c_algorithm aspeed_i2c_algo = {
	.master_xfer = aspeed_i2c_master_xfer,
	.functionality = aspeed_i2c_functionality,
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	.reg_slave = aspeed_i2c_reg_slave,
	.unreg_slave = aspeed_i2c_unreg_slave,
#endif /* CONFIG_I2C_SLAVE */
};

static u32 aspeed_i2c_get_clk_reg_val(struct device *dev,
				      u32 clk_high_low_mask,
				      u32 divisor)
{
	u32 base_clk_divisor, clk_high_low_max, clk_high, clk_low, tmp;

	/*
	 * SCL_high and SCL_low represent a value 1 greater than what is stored
	 * since a zero divider is meaningless. Thus, the max value each can
	 * store is every bit set + 1. Since SCL_high and SCL_low are added
	 * together (see below), the max value of both is the max value of one
	 * of them times two.
	 */
	clk_high_low_max = (clk_high_low_mask + 1) * 2;

	/*
	 * The actual clock frequency of SCL is:
	 *	SCL_freq = APB_freq / (base_freq * (SCL_high + SCL_low))
	 *		 = APB_freq / divisor
	 * where base_freq is a programmable clock divider; its value is
	 *	base_freq = 1 << base_clk_divisor
	 * SCL_high is the number of base_freq clock cycles that SCL stays high
	 * and SCL_low is the number of base_freq clock cycles that SCL stays
	 * low for a period of SCL.
	 * The actual register has a minimum SCL_high and SCL_low of 1; thus,
	 * the stored values start counting at zero. So
	 *	SCL_high = clk_high + 1
	 *	SCL_low = clk_low + 1
	 * Thus,
	 *	SCL_freq = APB_freq /
	 *		((1 << base_clk_divisor) * (clk_high + 1 + clk_low + 1))
	 * The documentation recommends clk_high >= clk_high_max / 2 and
	 * clk_low >= clk_low_max / 2 - 1 when possible; this last constraint
	 * gives us the following solution:
	 */
	base_clk_divisor = divisor > clk_high_low_max ?
			ilog2((divisor - 1) / clk_high_low_max) + 1 : 0;

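	/*
	 * Worked example (clock values are illustrative, not from any
	 * datasheet): with a 48 MHz APB clock and a 400 kHz bus, divisor is
	 * 120. With the 4-bit fields of the 25xx (clk_high_low_max = 32),
	 * base_clk_divisor becomes 2, i.e. a 12 MHz base clock; tmp is then
	 * 30, split into clk_high = clk_low = 15 and stored as 14/14, so SCL
	 * runs at 12 MHz / (15 + 15) = 400 kHz.
	 */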
	if (base_clk_divisor > ASPEED_I2CD_TIME_BASE_DIVISOR_MASK) {
		base_clk_divisor = ASPEED_I2CD_TIME_BASE_DIVISOR_MASK;
		clk_low = clk_high_low_mask;
		clk_high = clk_high_low_mask;
		dev_err(dev,
			"clamping clock divider: divider requested, %u, is greater than largest possible divider, %u.\n",
			divisor, (1 << base_clk_divisor) * clk_high_low_max);
	} else {
		tmp = (divisor + (1 << base_clk_divisor) - 1)
				>> base_clk_divisor;
		clk_low = tmp / 2;
		clk_high = tmp - clk_low;

		if (clk_high)
			clk_high--;

		if (clk_low)
			clk_low--;
	}

	return ((clk_high << ASPEED_I2CD_TIME_SCL_HIGH_SHIFT)
		& ASPEED_I2CD_TIME_SCL_HIGH_MASK)
		| ((clk_low << ASPEED_I2CD_TIME_SCL_LOW_SHIFT)
		   & ASPEED_I2CD_TIME_SCL_LOW_MASK)
		| (base_clk_divisor
		   & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK);
}

static u32 aspeed_i2c_24xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 3 bits wide, so each can hold a max
	 * value of 8 giving a clk_high_low_max of 16.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(2, 0), divisor);
}

static u32 aspeed_i2c_25xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 4 bits wide, so each can hold a max
	 * value of 16 giving a clk_high_low_max of 32.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(3, 0), divisor);
}

/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus)
{
	u32 divisor, clk_reg_val;

	divisor = DIV_ROUND_UP(bus->parent_clk_frequency, bus->bus_frequency);
	clk_reg_val = readl(bus->base + ASPEED_I2C_AC_TIMING_REG1);
	clk_reg_val &= (ASPEED_I2CD_TIME_TBUF_MASK |
			ASPEED_I2CD_TIME_THDSTA_MASK |
			ASPEED_I2CD_TIME_TACST_MASK);
	clk_reg_val |= bus->get_clk_reg_val(bus->dev, divisor);
	writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1);
	writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2);

	return 0;
}

/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init(struct aspeed_i2c_bus *bus,
			   struct platform_device *pdev)
{
	u32 fun_ctrl_reg = ASPEED_I2CD_MASTER_EN;
	int ret;

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	ret = aspeed_i2c_init_clk(bus);
	if (ret < 0)
		return ret;

	if (of_property_read_bool(pdev->dev.of_node, "multi-master"))
		bus->multi_master = true;
	else
		fun_ctrl_reg |= ASPEED_I2CD_MULTI_MASTER_DIS;

	/* Enable Master Mode */
	writel(readl(bus->base + ASPEED_I2C_FUN_CTRL_REG) | fun_ctrl_reg,
	       bus->base + ASPEED_I2C_FUN_CTRL_REG);

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/* If slave has already been registered, re-enable it. */
	if (bus->slave)
		__aspeed_i2c_reg_slave(bus, bus->slave->addr);
#endif /* CONFIG_I2C_SLAVE */

	/* Set interrupt generation of I2C controller */
	writel(ASPEED_I2CD_INTR_ALL, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	return 0;
}

static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus)
{
	struct platform_device *pdev = to_platform_device(bus->dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable and ack all interrupts. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);

	ret = aspeed_i2c_init(bus, pdev);

	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;
}

static const struct of_device_id aspeed_i2c_bus_of_table[] = {
	{
		.compatible = "aspeed,ast2400-i2c-bus",
		.data = aspeed_i2c_24xx_get_clk_reg_val,
	},
	{
		.compatible = "aspeed,ast2500-i2c-bus",
		.data = aspeed_i2c_25xx_get_clk_reg_val,
	},
	{
		.compatible = "aspeed,ast2600-i2c-bus",
		.data = aspeed_i2c_25xx_get_clk_reg_val,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table);

static int aspeed_i2c_probe_bus(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct aspeed_i2c_bus *bus;
	struct clk *parent_clk;
	struct resource *res;
	int irq, ret;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bus->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(bus->base))
		return PTR_ERR(bus->base);

	parent_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(parent_clk))
		return PTR_ERR(parent_clk);
	bus->parent_clk_frequency = clk_get_rate(parent_clk);
	/* We just need the clock rate; we don't actually use the clk object. */
	devm_clk_put(&pdev->dev, parent_clk);

	bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
	if (IS_ERR(bus->rst)) {
		dev_err(&pdev->dev,
			"missing or invalid reset controller device tree entry\n");
		return PTR_ERR(bus->rst);
	}
	reset_control_deassert(bus->rst);

	ret = of_property_read_u32(pdev->dev.of_node,
				   "bus-frequency", &bus->bus_frequency);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Could not read bus-frequency property\n");
		bus->bus_frequency = I2C_MAX_STANDARD_MODE_FREQ;
	}

	match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node);
	if (!match)
		bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
	else
		bus->get_clk_reg_val = (u32 (*)(struct device *, u32))
				match->data;

	/* Initialize the I2C adapter */
	spin_lock_init(&bus->lock);
	init_completion(&bus->cmd_complete);
	bus->adap.owner = THIS_MODULE;
	bus->adap.retries = 0;
	bus->adap.algo = &aspeed_i2c_algo;
	bus->adap.dev.parent = &pdev->dev;
	bus->adap.dev.of_node = pdev->dev.of_node;
	strlcpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
	i2c_set_adapdata(&bus->adap, bus);

	bus->dev = &pdev->dev;

	/* Clean up any left over interrupt state. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);
	/*
	 * bus.lock does not need to be held because the interrupt handler has
	 * not been enabled yet.
	 */
	ret = aspeed_i2c_init(bus, pdev);
	if (ret < 0)
		return ret;

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	ret = devm_request_irq(&pdev->dev, irq, aspeed_i2c_bus_irq,
			       0, dev_name(&pdev->dev), bus);
	if (ret < 0)
		return ret;

	ret = i2c_add_adapter(&bus->adap);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, bus);

	dev_info(bus->dev, "i2c bus %d registered, irq %d\n",
		 bus->adap.nr, irq);

	return 0;
}

static int aspeed_i2c_remove_bus(struct platform_device *pdev)
{
	struct aspeed_i2c_bus *bus = platform_get_drvdata(pdev);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	spin_unlock_irqrestore(&bus->lock, flags);

	reset_control_assert(bus->rst);

	i2c_del_adapter(&bus->adap);

	return 0;
}

static struct platform_driver aspeed_i2c_bus_driver = {
	.probe = aspeed_i2c_probe_bus,
	.remove = aspeed_i2c_remove_bus,
	.driver = {
		.name = "aspeed-i2c-bus",
		.of_match_table = aspeed_i2c_bus_of_table,
	},
};
module_platform_driver(aspeed_i2c_bus_driver);

MODULE_AUTHOR("Brendan Higgins <brendanhiggins@google.com>");
MODULE_DESCRIPTION("Aspeed I2C Bus Driver");
MODULE_LICENSE("GPL v2");