1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Driver for Broadcom BRCMSTB, NSP, NS2, Cygnus SPI Controllers
4 *
5 * Copyright 2016 Broadcom
6 */
7
8 #include <linux/clk.h>
9 #include <linux/delay.h>
10 #include <linux/device.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/ioport.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/of.h>
18 #include <linux/of_irq.h>
19 #include <linux/platform_device.h>
20 #include <linux/slab.h>
21 #include <linux/spi/spi.h>
22 #include <linux/spi/spi-mem.h>
23 #include <linux/sysfs.h>
24 #include <linux/types.h>
25 #include "spi-bcm-qspi.h"
26
27 #define DRIVER_NAME "bcm_qspi"
28
29
30 /* BSPI register offsets */
31 #define BSPI_REVISION_ID 0x000
32 #define BSPI_SCRATCH 0x004
33 #define BSPI_MAST_N_BOOT_CTRL 0x008
34 #define BSPI_BUSY_STATUS 0x00c
35 #define BSPI_INTR_STATUS 0x010
36 #define BSPI_B0_STATUS 0x014
37 #define BSPI_B0_CTRL 0x018
38 #define BSPI_B1_STATUS 0x01c
39 #define BSPI_B1_CTRL 0x020
40 #define BSPI_STRAP_OVERRIDE_CTRL 0x024
41 #define BSPI_FLEX_MODE_ENABLE 0x028
42 #define BSPI_BITS_PER_CYCLE 0x02c
43 #define BSPI_BITS_PER_PHASE 0x030
44 #define BSPI_CMD_AND_MODE_BYTE 0x034
45 #define BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038
46 #define BSPI_BSPI_XOR_VALUE 0x03c
47 #define BSPI_BSPI_XOR_ENABLE 0x040
48 #define BSPI_BSPI_PIO_MODE_ENABLE 0x044
49 #define BSPI_BSPI_PIO_IODIR 0x048
50 #define BSPI_BSPI_PIO_DATA 0x04c
51
52 /* RAF register offsets */
53 #define BSPI_RAF_START_ADDR 0x100
54 #define BSPI_RAF_NUM_WORDS 0x104
55 #define BSPI_RAF_CTRL 0x108
56 #define BSPI_RAF_FULLNESS 0x10c
57 #define BSPI_RAF_WATERMARK 0x110
58 #define BSPI_RAF_STATUS 0x114
59 #define BSPI_RAF_READ_DATA 0x118
60 #define BSPI_RAF_WORD_CNT 0x11c
61 #define BSPI_RAF_CURR_ADDR 0x120
62
63 /* Override mode masks */
64 #define BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE BIT(0)
65 #define BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL BIT(1)
66 #define BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE BIT(2)
67 #define BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD BIT(3)
68 #define BSPI_STRAP_OVERRIDE_CTRL_ENDAIN_MODE BIT(4)
69
70 #define BSPI_ADDRLEN_3BYTES 3
71 #define BSPI_ADDRLEN_4BYTES 4
72
73 #define BSPI_RAF_STATUS_FIFO_EMPTY_MASK BIT(1)
74
75 #define BSPI_RAF_CTRL_START_MASK BIT(0)
76 #define BSPI_RAF_CTRL_CLEAR_MASK BIT(1)
77
78 #define BSPI_BPP_MODE_SELECT_MASK BIT(8)
79 #define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
80
81 #define BSPI_READ_LENGTH 256
82
83 /* MSPI register offsets */
84 #define MSPI_SPCR0_LSB 0x000
85 #define MSPI_SPCR0_MSB 0x004
86 #define MSPI_SPCR0_MSB_CPHA BIT(0)
87 #define MSPI_SPCR0_MSB_CPOL BIT(1)
88 #define MSPI_SPCR0_MSB_BITS_SHIFT 0x2
89 #define MSPI_SPCR1_LSB 0x008
90 #define MSPI_SPCR1_MSB 0x00c
91 #define MSPI_NEWQP 0x010
92 #define MSPI_ENDQP 0x014
93 #define MSPI_SPCR2 0x018
94 #define MSPI_MSPI_STATUS 0x020
95 #define MSPI_CPTQP 0x024
96 #define MSPI_SPCR3 0x028
97 #define MSPI_REV 0x02c
98 #define MSPI_TXRAM 0x040
99 #define MSPI_RXRAM 0x0c0
100 #define MSPI_CDRAM 0x140
101 #define MSPI_WRITE_LOCK 0x180
102
103 #define MSPI_MASTER_BIT BIT(7)
104
105 #define MSPI_NUM_CDRAM 16
106 #define MSPI_CDRAM_OUTP BIT(8)
107 #define MSPI_CDRAM_CONT_BIT BIT(7)
108 #define MSPI_CDRAM_BITSE_BIT BIT(6)
109 #define MSPI_CDRAM_DT_BIT BIT(5)
110 #define MSPI_CDRAM_PCS 0xf
111
112 #define MSPI_SPCR2_SPE BIT(6)
113 #define MSPI_SPCR2_CONT_AFTER_CMD BIT(7)
114
115 #define MSPI_SPCR3_FASTBR BIT(0)
116 #define MSPI_SPCR3_FASTDT BIT(1)
117 #define MSPI_SPCR3_SYSCLKSEL_MASK GENMASK(11, 10)
118 #define MSPI_SPCR3_SYSCLKSEL_27 (MSPI_SPCR3_SYSCLKSEL_MASK & \
119 ~(BIT(10) | BIT(11)))
120 #define MSPI_SPCR3_SYSCLKSEL_108 (MSPI_SPCR3_SYSCLKSEL_MASK & \
121 BIT(11))
122 #define MSPI_SPCR3_TXRXDAM_MASK GENMASK(4, 2)
123 #define MSPI_SPCR3_DAM_8BYTE 0
124 #define MSPI_SPCR3_DAM_16BYTE (BIT(2) | BIT(4))
125 #define MSPI_SPCR3_DAM_32BYTE (BIT(3) | BIT(5))
126 #define MSPI_SPCR3_HALFDUPLEX BIT(6)
127 #define MSPI_SPCR3_HDOUTTYPE BIT(7)
128 #define MSPI_SPCR3_DATA_REG_SZ BIT(8)
129 #define MSPI_SPCR3_CPHARX BIT(9)
130
131 #define MSPI_MSPI_STATUS_SPIF BIT(0)
132
133 #define INTR_BASE_BIT_SHIFT 0x02
134 #define INTR_COUNT 0x07
135
136 #define NUM_CHIPSELECT 4
137 #define QSPI_SPBR_MAX 255U
138 #define MSPI_BASE_FREQ 27000000UL
139
140 #define OPCODE_DIOR 0xBB
141 #define OPCODE_QIOR 0xEB
142 #define OPCODE_DIOR_4B 0xBC
143 #define OPCODE_QIOR_4B 0xEC
144
145 #define MAX_CMD_SIZE 6
146
147 #define ADDR_4MB_MASK GENMASK(22, 0)
148
149 /* stop at end of transfer, no other reason */
150 #define TRANS_STATUS_BREAK_NONE 0
151 /* stop at end of spi_message */
152 #define TRANS_STATUS_BREAK_EOM 1
153 /* stop at end of spi_transfer if delay */
154 #define TRANS_STATUS_BREAK_DELAY 2
155 /* stop at end of spi_transfer if cs_change */
156 #define TRANS_STATUS_BREAK_CS_CHANGE 4
157 /* stop if we run out of bytes */
158 #define TRANS_STATUS_BREAK_NO_BYTES 8
159
160 /* events that make us stop filling TX slots */
161 #define TRANS_STATUS_BREAK_TX (TRANS_STATUS_BREAK_EOM | \
162 TRANS_STATUS_BREAK_DELAY | \
163 TRANS_STATUS_BREAK_CS_CHANGE)
164
165 /* events that make us deassert CS */
166 #define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM | \
167 TRANS_STATUS_BREAK_CS_CHANGE)
168
169 /*
170 * Used for writing and reading data in the right order
171 * to TXRAM and RXRAM when used as 32-bit registers respectively
172 */
173 #define swap4bytes(__val) \
174 ((((__val) >> 24) & 0x000000FF) | (((__val) >> 8) & 0x0000FF00) | \
175 (((__val) << 8) & 0x00FF0000) | (((__val) << 24) & 0xFF000000))
176
/* SPI transfer parameters cached per device and written to hardware */
struct bcm_qspi_parms {
	u32 speed_hz;		/* requested SCK rate in Hz */
	u8 mode;		/* SPI mode flags (CPHA/CPOL/...) */
	u8 bits_per_word;
};
182
/* last BSPI read configuration, cached to avoid reprogramming */
struct bcm_xfer_mode {
	bool flex_mode;		/* flex mode vs strap-override mode */
	unsigned int width;	/* data lanes: single/dual/quad */
	unsigned int addrlen;	/* address length in bytes (3 or 4) */
	unsigned int hp;	/* high-performance (multi-bit addr/mode) */
};
189
/* index into bcm_qspi::base[] for each register window */
enum base_type {
	MSPI,
	BSPI,
	CHIP_SELECT,	/* optional external CS mux register */
	BASEMAX,
};
196
/* how the controller's interrupts are wired on this SoC */
enum irq_source {
	SINGLE_L2,	/* dedicated per-cause L2 interrupts */
	MUXED_L1,	/* all causes muxed onto one L1 line */
};
201
/* static description of one interrupt source handled by the driver */
struct bcm_qspi_irq {
	const char *irq_name;
	const irq_handler_t irq_handler;
	int irq_source;		/* enum irq_source */
	u32 mask;		/* status/enable bit(s) for this cause */
};
208
/* cookie passed to each registered irq handler */
struct bcm_qspi_dev_id {
	const struct bcm_qspi_irq *irqp;
	void *dev;		/* owning struct bcm_qspi */
};
213
214
/* progress cursor within the current spi_message */
struct qspi_trans {
	struct spi_transfer *trans;	/* transfer being worked on */
	int byte;			/* bytes of it already consumed */
	bool mspi_last_trans;		/* last MSPI transfer of message */
};
220
/* per-controller driver state */
struct bcm_qspi {
	struct platform_device *pdev;
	struct spi_master *master;
	struct clk *clk;
	u32 base_clk;			/* MSPI input clock in Hz */
	u32 max_speed_hz;
	void __iomem *base[BASEMAX];	/* register windows, see enum base_type */

	/* Some SoCs provide custom interrupt status register(s) */
	struct bcm_qspi_soc_intc *soc_intc;

	struct bcm_qspi_parms last_parms;	/* parms last written to hw */
	struct qspi_trans  trans_pos;		/* cursor into current message */
	int curr_cs;				/* currently selected chip select */
	int bspi_maj_rev;
	int bspi_min_rev;
	int bspi_enabled;			/* BSPI owns the flash interface */
	const struct spi_mem_op *bspi_rf_op;	/* in-flight BSPI read op */
	u32 bspi_rf_op_idx;			/* word index into rx buffer */
	u32 bspi_rf_op_len;			/* bytes still to read */
	u32 bspi_rf_op_status;
	struct bcm_xfer_mode xfer_mode;		/* cached BSPI read config */
	u32 s3_strap_override_ctrl;		/* saved across S3 suspend */
	bool bspi_mode;				/* controller has a BSPI block */
	bool big_endian;			/* register endianness */
	int num_irqs;
	struct bcm_qspi_dev_id *dev_ids;
	struct completion mspi_done;
	struct completion bspi_done;
	u8 mspi_maj_rev;
	u8 mspi_min_rev;
	bool mspi_spcr3_sysclk;			/* SPCR3 sysclk sel supported */
};
254
/* true when this controller instance has a BSPI block (flash read path) */
static inline bool has_bspi(struct bcm_qspi *qspi)
{
	return qspi->bspi_mode;
}
259
260 /* hardware supports spcr3 and fast baud-rate */
bcm_qspi_has_fastbr(struct bcm_qspi * qspi)261 static inline bool bcm_qspi_has_fastbr(struct bcm_qspi *qspi)
262 {
263 if (!has_bspi(qspi) &&
264 ((qspi->mspi_maj_rev >= 1) &&
265 (qspi->mspi_min_rev >= 5)))
266 return true;
267
268 return false;
269 }
270
271 /* hardware supports sys clk 108Mhz */
bcm_qspi_has_sysclk_108(struct bcm_qspi * qspi)272 static inline bool bcm_qspi_has_sysclk_108(struct bcm_qspi *qspi)
273 {
274 if (!has_bspi(qspi) && (qspi->mspi_spcr3_sysclk ||
275 ((qspi->mspi_maj_rev >= 1) &&
276 (qspi->mspi_min_rev >= 6))))
277 return true;
278
279 return false;
280 }
281
/* smallest legal SPBR divider value for this controller */
static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi)
{
	if (!bcm_qspi_has_fastbr(qspi))
		return 8;

	/* fast baud-rate: 108 MHz sysclk needs at least 4, else 1 */
	return bcm_qspi_has_sysclk_108(qspi) ? 4 : 1;
}
289
/* Read a qspi controller register, honouring register endianness */
static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
				unsigned int offset)
{
	return bcm_qspi_readl(qspi->big_endian, qspi->base[type] + offset);
}
296
/* Write a qspi controller register, honouring register endianness */
static inline void bcm_qspi_write(struct bcm_qspi *qspi, enum base_type type,
				  unsigned int offset, unsigned int data)
{
	bcm_qspi_writel(qspi->big_endian, data, qspi->base[type] + offset);
}
303
304 /* BSPI helpers */
bcm_qspi_bspi_busy_poll(struct bcm_qspi * qspi)305 static int bcm_qspi_bspi_busy_poll(struct bcm_qspi *qspi)
306 {
307 int i;
308
309 /* this should normally finish within 10us */
310 for (i = 0; i < 1000; i++) {
311 if (!(bcm_qspi_read(qspi, BSPI, BSPI_BUSY_STATUS) & 1))
312 return 0;
313 udelay(1);
314 }
315 dev_warn(&qspi->pdev->dev, "timeout waiting for !busy_status\n");
316 return -EIO;
317 }
318
bcm_qspi_bspi_ver_three(struct bcm_qspi * qspi)319 static inline bool bcm_qspi_bspi_ver_three(struct bcm_qspi *qspi)
320 {
321 if (qspi->bspi_maj_rev < 4)
322 return true;
323 return false;
324 }
325
/* invalidate both BSPI prefetch buffers (B0/B1) once the block is idle */
static void bcm_qspi_bspi_flush_prefetch_buffers(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	/* Force rising edge for the b0/b1 'flush' field */
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
}
335
/* non-zero when the RAF (linear read) FIFO has no data to drain */
static int bcm_qspi_bspi_lr_is_fifo_empty(struct bcm_qspi *qspi)
{
	return (bcm_qspi_read(qspi, BSPI, BSPI_RAF_STATUS) &
				BSPI_RAF_STATUS_FIFO_EMPTY_MASK);
}
341
/* pop one 32-bit word from the RAF FIFO, in host byte order */
static inline u32 bcm_qspi_bspi_lr_read_fifo(struct bcm_qspi *qspi)
{
	u32 data = bcm_qspi_read(qspi, BSPI, BSPI_RAF_READ_DATA);

	/* BSPI v3 LR is LE only, convert data to host endianness */
	if (bcm_qspi_bspi_ver_three(qspi))
		data = le32_to_cpu(data);

	return data;
}
352
/* wait for BSPI idle, then kick off the programmed RAF read session */
static inline void bcm_qspi_bspi_lr_start(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_START_MASK);
}
359
/* abort/clear the current RAF session and flush the prefetch buffers */
static inline void bcm_qspi_bspi_lr_clear(struct bcm_qspi *qspi)
{
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_CLEAR_MASK);
	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
}
366
/*
 * Drain the RAF FIFO into the rx buffer of the in-flight spi-mem read.
 * Whole 32-bit words are stored directly while the buffer is 4-byte
 * aligned and at least 4 bytes remain; the tail is copied out byte by
 * byte in little-endian word order.
 */
static void bcm_qspi_bspi_lr_data_read(struct bcm_qspi *qspi)
{
	u32 *buf = (u32 *)qspi->bspi_rf_op->data.buf.in;
	u32 data = 0;

	dev_dbg(&qspi->pdev->dev, "xfer %p rx %p rxlen %d\n", qspi->bspi_rf_op,
		qspi->bspi_rf_op->data.buf.in, qspi->bspi_rf_op_len);
	while (!bcm_qspi_bspi_lr_is_fifo_empty(qspi)) {
		data = bcm_qspi_bspi_lr_read_fifo(qspi);
		if (likely(qspi->bspi_rf_op_len >= 4) &&
		    IS_ALIGNED((uintptr_t)buf, 4)) {
			buf[qspi->bspi_rf_op_idx++] = data;
			qspi->bspi_rf_op_len -= 4;
		} else {
			/* tail: copy out the remaining (<4) bytes one at a time */
			u8 *cbuf = (u8 *)&buf[qspi->bspi_rf_op_idx];

			data = cpu_to_le32(data);
			while (qspi->bspi_rf_op_len) {
				*cbuf++ = (u8)data;
				data >>= 8;
				qspi->bspi_rf_op_len--;
			}
		}
	}
}
393
/*
 * Program the BSPI flex-mode registers: flex mode is turned off while
 * bits-per-cycle, bits-per-phase and the command/mode byte are updated,
 * then set to @flex_mode (0 leaves it disabled).
 */
static void bcm_qspi_bspi_set_xfer_params(struct bcm_qspi *qspi, u8 cmd_byte,
					  int bpp, int bpc, int flex_mode)
{
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_CYCLE, bpc);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_PHASE, bpp);
	bcm_qspi_write(qspi, BSPI, BSPI_CMD_AND_MODE_BYTE, cmd_byte);
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, flex_mode);
}
403
bcm_qspi_bspi_set_flex_mode(struct bcm_qspi * qspi,const struct spi_mem_op * op,int hp)404 static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
405 const struct spi_mem_op *op, int hp)
406 {
407 int bpc = 0, bpp = 0;
408 u8 command = op->cmd.opcode;
409 int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
410 int addrlen = op->addr.nbytes;
411 int flex_mode = 1;
412
413 dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
414 width, addrlen, hp);
415
416 if (addrlen == BSPI_ADDRLEN_4BYTES)
417 bpp = BSPI_BPP_ADDR_SELECT_MASK;
418
419 if (op->dummy.nbytes)
420 bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
421
422 switch (width) {
423 case SPI_NBITS_SINGLE:
424 if (addrlen == BSPI_ADDRLEN_3BYTES)
425 /* default mode, does not need flex_cmd */
426 flex_mode = 0;
427 break;
428 case SPI_NBITS_DUAL:
429 bpc = 0x00000001;
430 if (hp) {
431 bpc |= 0x00010100; /* address and mode are 2-bit */
432 bpp = BSPI_BPP_MODE_SELECT_MASK;
433 }
434 break;
435 case SPI_NBITS_QUAD:
436 bpc = 0x00000002;
437 if (hp) {
438 bpc |= 0x00020200; /* address and mode are 4-bit */
439 bpp |= BSPI_BPP_MODE_SELECT_MASK;
440 }
441 break;
442 default:
443 return -EINVAL;
444 }
445
446 bcm_qspi_bspi_set_xfer_params(qspi, command, bpp, bpc, flex_mode);
447
448 return 0;
449 }
450
bcm_qspi_bspi_set_override(struct bcm_qspi * qspi,const struct spi_mem_op * op,int hp)451 static int bcm_qspi_bspi_set_override(struct bcm_qspi *qspi,
452 const struct spi_mem_op *op, int hp)
453 {
454 int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
455 int addrlen = op->addr.nbytes;
456 u32 data = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
457
458 dev_dbg(&qspi->pdev->dev, "set override mode w %x addrlen %x hp %d\n",
459 width, addrlen, hp);
460
461 switch (width) {
462 case SPI_NBITS_SINGLE:
463 /* clear quad/dual mode */
464 data &= ~(BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD |
465 BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL);
466 break;
467 case SPI_NBITS_QUAD:
468 /* clear dual mode and set quad mode */
469 data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
470 data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
471 break;
472 case SPI_NBITS_DUAL:
473 /* clear quad mode set dual mode */
474 data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
475 data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
476 break;
477 default:
478 return -EINVAL;
479 }
480
481 if (addrlen == BSPI_ADDRLEN_4BYTES)
482 /* set 4byte mode*/
483 data |= BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
484 else
485 /* clear 4 byte mode */
486 data &= ~BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
487
488 /* set the override mode */
489 data |= BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
490 bcm_qspi_write(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL, data);
491 bcm_qspi_bspi_set_xfer_params(qspi, op->cmd.opcode, 0, 0, 0);
492
493 return 0;
494 }
495
/*
 * Choose how BSPI will perform the read: flex mode (default), or the
 * strap-override path when the override bit is already active on a
 * >= v4 core.  Caches the resulting width/addrlen/hp in xfer_mode.
 */
static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
				  const struct spi_mem_op *op, int hp)
{
	int error = 0;
	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
	int addrlen = op->addr.nbytes;

	/* default mode */
	qspi->xfer_mode.flex_mode = true;

	if (!bcm_qspi_bspi_ver_three(qspi)) {
		u32 val, mask;

		val = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
		mask = BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
		if (val & mask || qspi->s3_strap_override_ctrl & mask) {
			/* override already active (now or saved at S3) */
			qspi->xfer_mode.flex_mode = false;
			bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
			error = bcm_qspi_bspi_set_override(qspi, op, hp);
		}
	}

	if (qspi->xfer_mode.flex_mode)
		error = bcm_qspi_bspi_set_flex_mode(qspi, op, hp);

	if (error) {
		dev_warn(&qspi->pdev->dev,
			 "INVALID COMBINATION: width=%d addrlen=%d hp=%d\n",
			 width, addrlen, hp);
	} else if (qspi->xfer_mode.width != width ||
		   qspi->xfer_mode.addrlen != addrlen ||
		   qspi->xfer_mode.hp != hp) {
		/* remember the new combination and log it once */
		qspi->xfer_mode.width = width;
		qspi->xfer_mode.addrlen = addrlen;
		qspi->xfer_mode.hp = hp;
		dev_dbg(&qspi->pdev->dev,
			"cs:%d %d-lane output, %d-byte address%s\n",
			qspi->curr_cs,
			qspi->xfer_mode.width,
			qspi->xfer_mode.addrlen,
			qspi->xfer_mode.hp != -1 ? ", hp mode" : "");
	}

	return error;
}
541
/* hand the flash interface back to BSPI (clear MAST_N_BOOT) */
static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 1;
	/* nothing to do if BSPI already owns the interface */
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1) == 0)
		return;

	/* discard stale prefetch data before switching ownership */
	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
	udelay(1);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 0);
	udelay(1);
}
556
/* give the flash interface to MSPI (set MAST_N_BOOT) */
static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 0;
	/* nothing to do if MSPI already owns the interface */
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1))
		return;

	/* wait until BSPI is idle before taking the bus away from it */
	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 1);
	udelay(1);
}
570
/* route the controller to chip select @cs and remember it in curr_cs */
static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
{
	u32 rd = 0;
	u32 wr = 0;

	/* optional external CS selection register; not all SoCs have one */
	if (qspi->base[CHIP_SELECT]) {
		rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
		wr = (rd & ~0xff) | (1 << cs);
		/* skip the write (and settle delay) if nothing changes */
		if (rd == wr)
			return;
		bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
		usleep_range(10, 20);
	}

	dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
	qspi->curr_cs = cs;
}
588
589 /* MSPI helpers */
/*
 * Program MSPI clock and framing registers (SPCR0, SPCR1, SPCR3) from
 * @xp, then cache @xp in qspi->last_parms.
 */
static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
				  const struct bcm_qspi_parms *xp)
{
	u32 spcr, spbr = 0;

	if (!qspi->mspi_maj_rev)
		/* legacy controller */
		spcr = MSPI_MASTER_BIT;
	else
		spcr = 0;

	/*
	 * Bits per transfer.  BITS determines the number of data bits
	 * transferred if the command control bit (BITSE of a
	 * CDRAM Register) is equal to 1.
	 * If CDRAM BITSE is equal to 0, 8 data bits are transferred
	 * regardless.
	 */
	if (xp->bits_per_word != 16 && xp->bits_per_word != 64)
		spcr |= xp->bits_per_word << MSPI_SPCR0_MSB_BITS_SHIFT;

	spcr |= xp->mode & (MSPI_SPCR0_MSB_CPHA | MSPI_SPCR0_MSB_CPOL);
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);

	/* SPCR3 exists only on cores with fast baud-rate support */
	if (bcm_qspi_has_fastbr(qspi)) {
		spcr = 0;

		/* enable fastbr */
		spcr |=	MSPI_SPCR3_FASTBR;

		if (xp->mode & SPI_3WIRE)
			spcr |= MSPI_SPCR3_HALFDUPLEX |  MSPI_SPCR3_HDOUTTYPE;

		if (bcm_qspi_has_sysclk_108(qspi)) {
			/* SYSCLK_108 */
			spcr |= MSPI_SPCR3_SYSCLKSEL_108;
			qspi->base_clk = MSPI_BASE_FREQ * 4;
		}

		if (xp->bits_per_word > 16) {
			/* data_reg_size 1 (64bit) */
			spcr |=	MSPI_SPCR3_DATA_REG_SZ;
			/* TxRx RAM data access mode 2 for 32B and set fastdt */
			spcr |=	MSPI_SPCR3_DAM_32BYTE  | MSPI_SPCR3_FASTDT;
			/*
			 *  Set length of delay after transfer
			 *  DTL from 0(256) to 1
			 */
			bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 1);
		} else {
			/* data_reg_size[8] = 0 */
			spcr &=	~(MSPI_SPCR3_DATA_REG_SZ);

			/*
			 * TxRx RAM access mode 8B
			 * and disable fastdt
			 */
			spcr &= ~(MSPI_SPCR3_DAM_32BYTE);
		}
		bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr);
	}

	/* baud: SCK = base_clk / (2 * SPBR), clamped to the legal range */
	if (xp->speed_hz)
		spbr = qspi->base_clk / (2 * xp->speed_hz);

	spbr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spbr);

	qspi->last_parms = *xp;
}
660
bcm_qspi_update_parms(struct bcm_qspi * qspi,struct spi_device * spi,struct spi_transfer * trans)661 static void bcm_qspi_update_parms(struct bcm_qspi *qspi,
662 struct spi_device *spi,
663 struct spi_transfer *trans)
664 {
665 struct bcm_qspi_parms xp;
666
667 xp.speed_hz = trans->speed_hz;
668 xp.bits_per_word = trans->bits_per_word;
669 xp.mode = spi->mode;
670
671 bcm_qspi_hw_set_parms(qspi, &xp);
672 }
673
bcm_qspi_setup(struct spi_device * spi)674 static int bcm_qspi_setup(struct spi_device *spi)
675 {
676 struct bcm_qspi_parms *xp;
677
678 if (spi->bits_per_word > 64)
679 return -EINVAL;
680
681 xp = spi_get_ctldata(spi);
682 if (!xp) {
683 xp = kzalloc(sizeof(*xp), GFP_KERNEL);
684 if (!xp)
685 return -ENOMEM;
686 spi_set_ctldata(spi, xp);
687 }
688 xp->speed_hz = spi->max_speed_hz;
689 xp->mode = spi->mode;
690
691 if (spi->bits_per_word)
692 xp->bits_per_word = spi->bits_per_word;
693 else
694 xp->bits_per_word = 8;
695
696 return 0;
697 }
698
bcm_qspi_mspi_transfer_is_last(struct bcm_qspi * qspi,struct qspi_trans * qt)699 static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
700 struct qspi_trans *qt)
701 {
702 if (qt->mspi_last_trans &&
703 spi_transfer_is_last(qspi->master, qt->trans))
704 return true;
705 else
706 return false;
707 }
708
update_qspi_trans_byte_count(struct bcm_qspi * qspi,struct qspi_trans * qt,int flags)709 static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
710 struct qspi_trans *qt, int flags)
711 {
712 int ret = TRANS_STATUS_BREAK_NONE;
713
714 /* count the last transferred bytes */
715 if (qt->trans->bits_per_word <= 8)
716 qt->byte++;
717 else if (qt->trans->bits_per_word <= 16)
718 qt->byte += 2;
719 else if (qt->trans->bits_per_word <= 32)
720 qt->byte += 4;
721 else if (qt->trans->bits_per_word <= 64)
722 qt->byte += 8;
723
724 if (qt->byte >= qt->trans->len) {
725 /* we're at the end of the spi_transfer */
726 /* in TX mode, need to pause for a delay or CS change */
727 if (qt->trans->delay.value &&
728 (flags & TRANS_STATUS_BREAK_DELAY))
729 ret |= TRANS_STATUS_BREAK_DELAY;
730 if (qt->trans->cs_change &&
731 (flags & TRANS_STATUS_BREAK_CS_CHANGE))
732 ret |= TRANS_STATUS_BREAK_CS_CHANGE;
733
734 if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
735 ret |= TRANS_STATUS_BREAK_EOM;
736 else
737 ret |= TRANS_STATUS_BREAK_NO_BYTES;
738
739 qt->trans = NULL;
740 }
741
742 dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n",
743 qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret);
744 return ret;
745 }
746
read_rxram_slot_u8(struct bcm_qspi * qspi,int slot)747 static inline u8 read_rxram_slot_u8(struct bcm_qspi *qspi, int slot)
748 {
749 u32 slot_offset = MSPI_RXRAM + (slot << 3) + 0x4;
750
751 /* mask out reserved bits */
752 return bcm_qspi_read(qspi, MSPI, slot_offset) & 0xff;
753 }
754
read_rxram_slot_u16(struct bcm_qspi * qspi,int slot)755 static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
756 {
757 u32 reg_offset = MSPI_RXRAM;
758 u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
759 u32 msb_offset = reg_offset + (slot << 3);
760
761 return (bcm_qspi_read(qspi, MSPI, lsb_offset) & 0xff) |
762 ((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
763 }
764
read_rxram_slot_u32(struct bcm_qspi * qspi,int slot)765 static inline u32 read_rxram_slot_u32(struct bcm_qspi *qspi, int slot)
766 {
767 u32 reg_offset = MSPI_RXRAM;
768 u32 offset = reg_offset + (slot << 3);
769 u32 val;
770
771 val = bcm_qspi_read(qspi, MSPI, offset);
772 val = swap4bytes(val);
773
774 return val;
775 }
776
read_rxram_slot_u64(struct bcm_qspi * qspi,int slot)777 static inline u64 read_rxram_slot_u64(struct bcm_qspi *qspi, int slot)
778 {
779 u32 reg_offset = MSPI_RXRAM;
780 u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
781 u32 msb_offset = reg_offset + (slot << 3);
782 u32 msb, lsb;
783
784 msb = bcm_qspi_read(qspi, MSPI, msb_offset);
785 msb = swap4bytes(msb);
786 lsb = bcm_qspi_read(qspi, MSPI, lsb_offset);
787 lsb = swap4bytes(lsb);
788
789 return ((u64)msb << 32 | lsb);
790 }
791
/*
 * Copy @slots completed RX RAM slots into the rx_buf of the transfer
 * tracked by qspi->trans_pos, advancing the byte cursor as it goes.
 * A NULL rx_buf (write-only transfer) discards the received data.
 */
static void read_from_hw(struct bcm_qspi *qspi, int slots)
{
	struct qspi_trans tp;
	int slot;

	bcm_qspi_disable_bspi(qspi);

	if (slots > MSPI_NUM_CDRAM) {
		/* should never happen */
		dev_err(&qspi->pdev->dev, "%s: too many slots!\n", __func__);
		return;
	}

	/* work on a local copy; commit the cursor only when done */
	tp = qspi->trans_pos;

	for (slot = 0; slot < slots; slot++) {
		if (tp.trans->bits_per_word <= 8) {
			u8 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
			dev_dbg(&qspi->pdev->dev, "RD %02x\n",
				buf ? buf[tp.byte] : 0x0);
		} else if (tp.trans->bits_per_word <= 16) {
			u16 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
								       slot);
			dev_dbg(&qspi->pdev->dev, "RD %04x\n",
				buf ? buf[tp.byte / 2] : 0x0);
		} else if (tp.trans->bits_per_word <= 32) {
			u32 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte / 4] = read_rxram_slot_u32(qspi,
								       slot);
			dev_dbg(&qspi->pdev->dev, "RD %08x\n",
				buf ? buf[tp.byte / 4] : 0x0);

		} else if (tp.trans->bits_per_word <= 64) {
			u64 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte / 8] = read_rxram_slot_u64(qspi,
								       slot);
			dev_dbg(&qspi->pdev->dev, "RD %llx\n",
				buf ? buf[tp.byte / 8] : 0x0);


		}

		update_qspi_trans_byte_count(qspi, &tp,
					     TRANS_STATUS_BREAK_NONE);
	}

	qspi->trans_pos = tp;
}
850
static inline void write_txram_slot_u8(struct bcm_qspi *qspi, int slot,
				       u8 val)
{
	/* only the low byte of the slot's MSB word carries data */
	bcm_qspi_write(qspi, MSPI, MSPI_TXRAM + (slot << 3), val);
}
859
static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
					u16 val)
{
	u32 base = MSPI_TXRAM + (slot << 3);

	/* high byte goes to the MSB word, low byte to the LSB word */
	bcm_qspi_write(qspi, MSPI, base, val >> 8);
	bcm_qspi_write(qspi, MSPI, base + 0x4, val & 0xff);
}
870
static inline void write_txram_slot_u32(struct bcm_qspi *qspi, int slot,
					u32 val)
{
	/* one word per slot, stored byte-swapped relative to host order */
	bcm_qspi_write(qspi, MSPI, MSPI_TXRAM + (slot << 3), swap4bytes(val));
}
879
static inline void write_txram_slot_u64(struct bcm_qspi *qspi, int slot,
					u64 val)
{
	u32 base = MSPI_TXRAM + (slot << 3);
	u32 hi = upper_32_bits(val);
	u32 lo = lower_32_bits(val);

	/* MSB word first, then LSB word; both byte-swapped in RAM */
	bcm_qspi_write(qspi, MSPI, base, swap4bytes(hi));
	bcm_qspi_write(qspi, MSPI, base + 0x4, swap4bytes(lo));
}
892
/* read the command RAM entry for @slot (CDRAM entries are 4 bytes apart) */
static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
{
	return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
}
897
/* write the command RAM entry for @slot (CDRAM entries are 4 bytes apart) */
static inline void write_cdram_slot(struct bcm_qspi *qspi, int slot, u32 val)
{
	bcm_qspi_write(qspi, MSPI, (MSPI_CDRAM + (slot << 2)), val);
}
902
903 /* Return number of slots written */
write_to_hw(struct bcm_qspi * qspi,struct spi_device * spi)904 static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
905 {
906 struct qspi_trans tp;
907 int slot = 0, tstatus = 0;
908 u32 mspi_cdram = 0;
909
910 bcm_qspi_disable_bspi(qspi);
911 tp = qspi->trans_pos;
912 bcm_qspi_update_parms(qspi, spi, tp.trans);
913
914 /* Run until end of transfer or reached the max data */
915 while (!tstatus && slot < MSPI_NUM_CDRAM) {
916 mspi_cdram = MSPI_CDRAM_CONT_BIT;
917 if (tp.trans->bits_per_word <= 8) {
918 const u8 *buf = tp.trans->tx_buf;
919 u8 val = buf ? buf[tp.byte] : 0x00;
920
921 write_txram_slot_u8(qspi, slot, val);
922 dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
923 } else if (tp.trans->bits_per_word <= 16) {
924 const u16 *buf = tp.trans->tx_buf;
925 u16 val = buf ? buf[tp.byte / 2] : 0x0000;
926
927 write_txram_slot_u16(qspi, slot, val);
928 dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
929 } else if (tp.trans->bits_per_word <= 32) {
930 const u32 *buf = tp.trans->tx_buf;
931 u32 val = buf ? buf[tp.byte/4] : 0x0;
932
933 write_txram_slot_u32(qspi, slot, val);
934 dev_dbg(&qspi->pdev->dev, "WR %08x\n", val);
935 } else if (tp.trans->bits_per_word <= 64) {
936 const u64 *buf = tp.trans->tx_buf;
937 u64 val = (buf ? buf[tp.byte/8] : 0x0);
938
939 /* use the length of delay from SPCR1_LSB */
940 if (bcm_qspi_has_fastbr(qspi))
941 mspi_cdram |= MSPI_CDRAM_DT_BIT;
942
943 write_txram_slot_u64(qspi, slot, val);
944 dev_dbg(&qspi->pdev->dev, "WR %llx\n", val);
945 }
946
947 mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
948 MSPI_CDRAM_BITSE_BIT);
949
950 /* set 3wrire halfduplex mode data from master to slave */
951 if ((spi->mode & SPI_3WIRE) && tp.trans->tx_buf)
952 mspi_cdram |= MSPI_CDRAM_OUTP;
953
954 if (has_bspi(qspi))
955 mspi_cdram &= ~1;
956 else
957 mspi_cdram |= (~(1 << spi->chip_select) &
958 MSPI_CDRAM_PCS);
959
960 write_cdram_slot(qspi, slot, mspi_cdram);
961
962 tstatus = update_qspi_trans_byte_count(qspi, &tp,
963 TRANS_STATUS_BREAK_TX);
964 slot++;
965 }
966
967 if (!slot) {
968 dev_err(&qspi->pdev->dev, "%s: no data to send?", __func__);
969 goto done;
970 }
971
972 dev_dbg(&qspi->pdev->dev, "submitting %d slots\n", slot);
973 bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
974 bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1);
975
976 /*
977 * case 1) EOM =1, cs_change =0: SSb inactive
978 * case 2) EOM =1, cs_change =1: SSb stay active
979 * case 3) EOM =0, cs_change =0: SSb stay active
980 * case 4) EOM =0, cs_change =1: SSb inactive
981 */
982 if (((tstatus & TRANS_STATUS_BREAK_DESELECT)
983 == TRANS_STATUS_BREAK_CS_CHANGE) ||
984 ((tstatus & TRANS_STATUS_BREAK_DESELECT)
985 == TRANS_STATUS_BREAK_EOM)) {
986 mspi_cdram = read_cdram_slot(qspi, slot - 1) &
987 ~MSPI_CDRAM_CONT_BIT;
988 write_cdram_slot(qspi, slot - 1, mspi_cdram);
989 }
990
991 if (has_bspi(qspi))
992 bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 1);
993
994 /* Must flush previous writes before starting MSPI operation */
995 mb();
996 /* Set cont | spe | spifie */
997 bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0xe0);
998
999 done:
1000 return slot;
1001 }
1002
/*
 * Execute a spi-mem read through the BSPI RAF engine, splitting it into
 * chunks of at most BSPI_READ_LENGTH bytes and waiting for a completion
 * (signalled from the BSPI irq path) per chunk.
 * Return 0 on success, -EIO for an unsupported v3 4-byte address, or
 * -ETIMEDOUT if a chunk never completes.
 */
static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
				     const struct spi_mem_op *op)
{
	struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
	u32 addr = 0, len, rdlen, len_words, from = 0;
	int ret = 0;
	unsigned long timeo = msecs_to_jiffies(100);
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;

	/* BSPI v3 cannot address beyond 3 bytes */
	if (bcm_qspi_bspi_ver_three(qspi))
		if (op->addr.nbytes == BSPI_ADDRLEN_4BYTES)
			return -EIO;

	from = op->addr.val;
	if (!spi->cs_gpiod)
		bcm_qspi_chip_select(qspi, spi->chip_select);
	bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);

	/*
	 * when using flex mode we need to send
	 * the upper address byte to bspi
	 */
	if (!bcm_qspi_bspi_ver_three(qspi)) {
		addr = from & 0xff000000;
		bcm_qspi_write(qspi, BSPI,
			       BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
	}

	if (!qspi->xfer_mode.flex_mode)
		addr = from;
	else
		addr = from & 0x00ffffff;

	/* NOTE(review): v3 appears to remap into a fixed 16MB window */
	if (bcm_qspi_bspi_ver_three(qspi) == true)
		addr = (addr + 0xc00000) & 0xffffff;

	/*
	 * read into the entire buffer by breaking the reads
	 * into RAF buffer read lengths
	 */
	len = op->data.nbytes;
	qspi->bspi_rf_op_idx = 0;

	do {
		if (len > BSPI_READ_LENGTH)
			rdlen = BSPI_READ_LENGTH;
		else
			rdlen = len;

		reinit_completion(&qspi->bspi_done);
		bcm_qspi_enable_bspi(qspi);
		len_words = (rdlen + 3) >> 2;
		/* publish chunk state consumed by the irq/FIFO-drain path */
		qspi->bspi_rf_op = op;
		qspi->bspi_rf_op_status = 0;
		qspi->bspi_rf_op_len = rdlen;
		dev_dbg(&qspi->pdev->dev,
			"bspi xfr addr 0x%x len 0x%x", addr, rdlen);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
		if (qspi->soc_intc) {
			/*
			 * clear soc MSPI and BSPI interrupts and enable
			 * BSPI interrupts.
			 */
			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
			soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
		}

		/* Must flush previous writes before starting BSPI operation */
		mb();
		bcm_qspi_bspi_lr_start(qspi);
		if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
			dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
			ret = -ETIMEDOUT;
			break;
		}

		/* set msg return length */
		addr += rdlen;
		len -= rdlen;
	} while (len);

	return ret;
}
1088
bcm_qspi_transfer_one(struct spi_master * master,struct spi_device * spi,struct spi_transfer * trans)1089 static int bcm_qspi_transfer_one(struct spi_master *master,
1090 struct spi_device *spi,
1091 struct spi_transfer *trans)
1092 {
1093 struct bcm_qspi *qspi = spi_master_get_devdata(master);
1094 int slots;
1095 unsigned long timeo = msecs_to_jiffies(100);
1096
1097 if (!spi->cs_gpiod)
1098 bcm_qspi_chip_select(qspi, spi->chip_select);
1099 qspi->trans_pos.trans = trans;
1100 qspi->trans_pos.byte = 0;
1101
1102 while (qspi->trans_pos.byte < trans->len) {
1103 reinit_completion(&qspi->mspi_done);
1104
1105 slots = write_to_hw(qspi, spi);
1106 if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
1107 dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
1108 return -ETIMEDOUT;
1109 }
1110
1111 read_from_hw(qspi, slots);
1112 }
1113 bcm_qspi_enable_bspi(qspi);
1114
1115 return 0;
1116 }
1117
bcm_qspi_mspi_exec_mem_op(struct spi_device * spi,const struct spi_mem_op * op)1118 static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
1119 const struct spi_mem_op *op)
1120 {
1121 struct spi_master *master = spi->master;
1122 struct bcm_qspi *qspi = spi_master_get_devdata(master);
1123 struct spi_transfer t[2];
1124 u8 cmd[6] = { };
1125 int ret, i;
1126
1127 memset(cmd, 0, sizeof(cmd));
1128 memset(t, 0, sizeof(t));
1129
1130 /* tx */
1131 /* opcode is in cmd[0] */
1132 cmd[0] = op->cmd.opcode;
1133 for (i = 0; i < op->addr.nbytes; i++)
1134 cmd[1 + i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
1135
1136 t[0].tx_buf = cmd;
1137 t[0].len = op->addr.nbytes + op->dummy.nbytes + 1;
1138 t[0].bits_per_word = spi->bits_per_word;
1139 t[0].tx_nbits = op->cmd.buswidth;
1140 /* lets mspi know that this is not last transfer */
1141 qspi->trans_pos.mspi_last_trans = false;
1142 ret = bcm_qspi_transfer_one(master, spi, &t[0]);
1143
1144 /* rx */
1145 qspi->trans_pos.mspi_last_trans = true;
1146 if (!ret) {
1147 /* rx */
1148 t[1].rx_buf = op->data.buf.in;
1149 t[1].len = op->data.nbytes;
1150 t[1].rx_nbits = op->data.buswidth;
1151 t[1].bits_per_word = spi->bits_per_word;
1152 ret = bcm_qspi_transfer_one(master, spi, &t[1]);
1153 }
1154
1155 return ret;
1156 }
1157
bcm_qspi_exec_mem_op(struct spi_mem * mem,const struct spi_mem_op * op)1158 static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
1159 const struct spi_mem_op *op)
1160 {
1161 struct spi_device *spi = mem->spi;
1162 struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
1163 int ret = 0;
1164 bool mspi_read = false;
1165 u32 addr = 0, len;
1166 u_char *buf;
1167
1168 if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 ||
1169 op->data.dir != SPI_MEM_DATA_IN)
1170 return -ENOTSUPP;
1171
1172 buf = op->data.buf.in;
1173 addr = op->addr.val;
1174 len = op->data.nbytes;
1175
1176 if (bcm_qspi_bspi_ver_three(qspi) == true) {
1177 /*
1178 * The address coming into this function is a raw flash offset.
1179 * But for BSPI <= V3, we need to convert it to a remapped BSPI
1180 * address. If it crosses a 4MB boundary, just revert back to
1181 * using MSPI.
1182 */
1183 addr = (addr + 0xc00000) & 0xffffff;
1184
1185 if ((~ADDR_4MB_MASK & addr) ^
1186 (~ADDR_4MB_MASK & (addr + len - 1)))
1187 mspi_read = true;
1188 }
1189
1190 /* non-aligned and very short transfers are handled by MSPI */
1191 if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
1192 len < 4)
1193 mspi_read = true;
1194
1195 if (mspi_read)
1196 return bcm_qspi_mspi_exec_mem_op(spi, op);
1197
1198 ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
1199
1200 if (!ret)
1201 ret = bcm_qspi_bspi_exec_mem_op(spi, op);
1202
1203 return ret;
1204 }
1205
/*
 * bcm_qspi_cleanup() - spi_master cleanup hook.
 *
 * Frees the per-device parms stored in the SPI device's controller
 * data (kfree(NULL) is a no-op, so an unset ctldata is fine).
 */
static void bcm_qspi_cleanup(struct spi_device *spi)
{
	kfree(spi_get_ctldata(spi));
}
1212
bcm_qspi_mspi_l2_isr(int irq,void * dev_id)1213 static irqreturn_t bcm_qspi_mspi_l2_isr(int irq, void *dev_id)
1214 {
1215 struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1216 struct bcm_qspi *qspi = qspi_dev_id->dev;
1217 u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);
1218
1219 if (status & MSPI_MSPI_STATUS_SPIF) {
1220 struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1221 /* clear interrupt */
1222 status &= ~MSPI_MSPI_STATUS_SPIF;
1223 bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status);
1224 if (qspi->soc_intc)
1225 soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_DONE);
1226 complete(&qspi->mspi_done);
1227 return IRQ_HANDLED;
1228 }
1229
1230 return IRQ_NONE;
1231 }
1232
/*
 * bcm_qspi_bspi_lr_l2_isr() - BSPI linear-read L2 interrupt handler.
 *
 * Services both "fullness reached" and "session done" interrupts (see
 * qspi_irq_tab): drains the RAF FIFO into the current spi-mem op's
 * buffer and, once the requested length has been read, tears down the
 * session and completes bspi_done for bcm_qspi_bspi_exec_mem_op().
 */
static irqreturn_t bcm_qspi_bspi_lr_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
	/* default to the mask of the L2 source that fired */
	u32 status = qspi_dev_id->irqp->mask;

	if (qspi->bspi_enabled && qspi->bspi_rf_op) {
		/* drain available RAF words into the op buffer */
		bcm_qspi_bspi_lr_data_read(qspi);
		if (qspi->bspi_rf_op_len == 0) {
			/* whole chunk read: end the session */
			qspi->bspi_rf_op = NULL;
			if (qspi->soc_intc) {
				/* disable soc BSPI interrupt */
				soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE,
							   false);
				/* indicate done */
				status = INTR_BSPI_LR_SESSION_DONE_MASK;
			}

			/* on error abort the session, else flush prefetch */
			if (qspi->bspi_rf_op_status)
				bcm_qspi_bspi_lr_clear(qspi);
			else
				bcm_qspi_bspi_flush_prefetch_buffers(qspi);
		}

		if (qspi->soc_intc)
			/* clear soc BSPI interrupt */
			soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_DONE);
	}

	/* only wake the waiter on session-done with nothing left to read */
	status &= INTR_BSPI_LR_SESSION_DONE_MASK;
	if (qspi->bspi_enabled && status && qspi->bspi_rf_op_len == 0)
		complete(&qspi->bspi_done);

	return IRQ_HANDLED;
}
1269
bcm_qspi_bspi_lr_err_l2_isr(int irq,void * dev_id)1270 static irqreturn_t bcm_qspi_bspi_lr_err_l2_isr(int irq, void *dev_id)
1271 {
1272 struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1273 struct bcm_qspi *qspi = qspi_dev_id->dev;
1274 struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1275
1276 dev_err(&qspi->pdev->dev, "BSPI INT error\n");
1277 qspi->bspi_rf_op_status = -EIO;
1278 if (qspi->soc_intc)
1279 /* clear soc interrupt */
1280 soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_ERR);
1281
1282 complete(&qspi->bspi_done);
1283 return IRQ_HANDLED;
1284 }
1285
bcm_qspi_l1_isr(int irq,void * dev_id)1286 static irqreturn_t bcm_qspi_l1_isr(int irq, void *dev_id)
1287 {
1288 struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1289 struct bcm_qspi *qspi = qspi_dev_id->dev;
1290 struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1291 irqreturn_t ret = IRQ_NONE;
1292
1293 if (soc_intc) {
1294 u32 status = soc_intc->bcm_qspi_get_int_status(soc_intc);
1295
1296 if (status & MSPI_DONE)
1297 ret = bcm_qspi_mspi_l2_isr(irq, dev_id);
1298 else if (status & BSPI_DONE)
1299 ret = bcm_qspi_bspi_lr_l2_isr(irq, dev_id);
1300 else if (status & BSPI_ERR)
1301 ret = bcm_qspi_bspi_lr_err_l2_isr(irq, dev_id);
1302 }
1303
1304 return ret;
1305 }
1306
/*
 * Table of interrupt sources the driver may request.  Each entry is
 * either a dedicated L2 line looked up by name, or (last entry) the
 * single muxed L1 line used with a SoC interrupt controller.
 */
static const struct bcm_qspi_irq qspi_irq_tab[] = {
	{
		.irq_name = "spi_lr_fullness_reached",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_FULLNESS_REACHED_MASK,
	},
	{
		.irq_name = "spi_lr_session_aborted",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_ABORTED_MASK,
	},
	{
		.irq_name = "spi_lr_impatient",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_IMPATIENT_MASK,
	},
	{
		.irq_name = "spi_lr_session_done",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_DONE_MASK,
	},
#ifdef QSPI_INT_DEBUG
	/* this interrupt is for debug purposes only, dont request irq */
	{
		.irq_name = "spi_lr_overread",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_OVERREAD_MASK,
	},
#endif
	{
		.irq_name = "mspi_done",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_DONE_MASK,
	},
	{
		.irq_name = "mspi_halted",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_HALTED_MASK,
	},
	{
		/* single muxed L1 interrupt source */
		.irq_name = "spi_l1_intr",
		.irq_handler = bcm_qspi_l1_isr,
		.irq_source = MUXED_L1,
		.mask = QSPI_INTERRUPTS_ALL,
	},
};
1354
bcm_qspi_bspi_init(struct bcm_qspi * qspi)1355 static void bcm_qspi_bspi_init(struct bcm_qspi *qspi)
1356 {
1357 u32 val = 0;
1358
1359 val = bcm_qspi_read(qspi, BSPI, BSPI_REVISION_ID);
1360 qspi->bspi_maj_rev = (val >> 8) & 0xff;
1361 qspi->bspi_min_rev = val & 0xff;
1362 if (!(bcm_qspi_bspi_ver_three(qspi))) {
1363 /* Force mapping of BSPI address -> flash offset */
1364 bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_VALUE, 0);
1365 bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_ENABLE, 1);
1366 }
1367 qspi->bspi_enabled = 1;
1368 bcm_qspi_disable_bspi(qspi);
1369 bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
1370 bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
1371 }
1372
bcm_qspi_hw_init(struct bcm_qspi * qspi)1373 static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
1374 {
1375 struct bcm_qspi_parms parms;
1376
1377 bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 0);
1378 bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_MSB, 0);
1379 bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
1380 bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, 0);
1381 bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0x20);
1382
1383 parms.mode = SPI_MODE_3;
1384 parms.bits_per_word = 8;
1385 parms.speed_hz = qspi->max_speed_hz;
1386 bcm_qspi_hw_set_parms(qspi, &parms);
1387
1388 if (has_bspi(qspi))
1389 bcm_qspi_bspi_init(qspi);
1390 }
1391
/*
 * bcm_qspi_hw_uninit() - quiesce the controller.
 *
 * Clears SPCR2 (stopping MSPI operation/interrupt generation),
 * releases the BSPI write lock where applicable, and acknowledges any
 * pending MSPI status so a later init starts clean.
 */
static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
{
	/* snapshot status before we start clearing things */
	u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);

	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
	if (has_bspi(qspi))
		bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);

	/* clear interrupt */
	bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status & ~1);
}
1403
/* spi-mem operations: only exec_op (flash reads) is accelerated */
static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
	.exec_op = bcm_qspi_exec_mem_op,
};
1407
/* Per-compatible hardware capability flags, selected via of_match data */
struct bcm_qspi_data {
	bool has_mspi_rev;	/* controller exposes a readable MSPI_REV register */
	bool has_spcr3_sysclk;	/* SPCR3 sysclk control present (newer revs) */
};

/* oldest controllers: no MSPI_REV, no SPCR3 */
static const struct bcm_qspi_data bcm_qspi_no_rev_data = {
	.has_mspi_rev = false,
	.has_spcr3_sysclk = false,
};

/* has MSPI_REV but no SPCR3 sysclk */
static const struct bcm_qspi_data bcm_qspi_rev_data = {
	.has_mspi_rev = true,
	.has_spcr3_sysclk = false,
};

/* newest controllers: MSPI_REV and SPCR3 sysclk */
static const struct bcm_qspi_data bcm_qspi_spcr3_data = {
	.has_mspi_rev = true,
	.has_spcr3_sysclk = true,
};
1427
/* Device-tree match table; .data selects the capability flags above */
static const struct of_device_id bcm_qspi_of_match[] = {
	{
		.compatible = "brcm,spi-bcm7445-qspi",
		.data = &bcm_qspi_rev_data,

	},
	{
		.compatible = "brcm,spi-bcm-qspi",
		.data = &bcm_qspi_no_rev_data,
	},
	{
		.compatible = "brcm,spi-bcm7216-qspi",
		.data = &bcm_qspi_spcr3_data,
	},
	{
		.compatible = "brcm,spi-bcm7278-qspi",
		.data = &bcm_qspi_spcr3_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
1449
/*
 * bcm_qspi_probe() - common probe routine, called by the SoC-specific
 * platform drivers.
 * @pdev:     this controller's platform device
 * @soc_intc: optional SoC interrupt-controller hooks; NULL when the L2
 *            interrupts are wired directly
 *
 * Maps the MSPI/BSPI/chip-select register blocks, enables the optional
 * clock, requests interrupts per qspi_irq_tab, initializes the
 * hardware and registers the SPI master.
 *
 * Return: 0 on success, negative errno on failure.
 */
int bcm_qspi_probe(struct platform_device *pdev,
		   struct bcm_qspi_soc_intc *soc_intc)
{
	const struct of_device_id *of_id = NULL;
	const struct bcm_qspi_data *data;
	struct device *dev = &pdev->dev;
	struct bcm_qspi *qspi;
	struct spi_master *master;
	struct resource *res;
	int irq, ret = 0, num_ints = 0;
	u32 val;
	u32 rev = 0;
	const char *name = NULL;
	int num_irqs = ARRAY_SIZE(qspi_irq_tab);

	/* We only support device-tree instantiation */
	if (!dev->of_node)
		return -ENODEV;

	of_id = of_match_node(bcm_qspi_of_match, dev->of_node);
	if (!of_id)
		return -ENODEV;

	data = of_id->data;

	/* devm-managed: no explicit spi_master_put() needed on errors */
	master = devm_spi_alloc_master(dev, sizeof(struct bcm_qspi));
	if (!master) {
		dev_err(dev, "error allocating spi_master\n");
		return -ENOMEM;
	}

	qspi = spi_master_get_devdata(master);

	/* the clock is optional; qspi->clk is NULL when absent */
	qspi->clk = devm_clk_get_optional(&pdev->dev, NULL);
	if (IS_ERR(qspi->clk))
		return PTR_ERR(qspi->clk);

	qspi->pdev = pdev;
	qspi->trans_pos.trans = NULL;
	qspi->trans_pos.byte = 0;
	qspi->trans_pos.mspi_last_trans = true;
	qspi->master = master;

	master->bus_num = -1;
	master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD |
				SPI_3WIRE;
	master->setup = bcm_qspi_setup;
	master->transfer_one = bcm_qspi_transfer_one;
	master->mem_ops = &bcm_qspi_mem_ops;
	master->cleanup = bcm_qspi_cleanup;
	master->dev.of_node = dev->of_node;
	master->num_chipselect = NUM_CHIPSELECT;
	master->use_gpio_descriptors = true;

	qspi->big_endian = of_device_is_big_endian(dev->of_node);

	/* optional DT override of the default chip-select count */
	if (!of_property_read_u32(dev->of_node, "num-cs", &val))
		master->num_chipselect = val;

	/* MSPI register block: "hif_mspi" on STB parts, "mspi" elsewhere */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hif_mspi");
	if (!res)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "mspi");

	if (res) {
		qspi->base[MSPI] = devm_ioremap_resource(dev, res);
		if (IS_ERR(qspi->base[MSPI]))
			return PTR_ERR(qspi->base[MSPI]);
	} else {
		/*
		 * NOTE(review): probe "succeeds" without an MSPI register
		 * block and never registers a master — confirm this is
		 * intentional rather than a missing -ENODEV.
		 */
		return 0;
	}

	/* BSPI block is optional; without it we run in plain MSPI mode */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
	if (res) {
		qspi->base[BSPI] = devm_ioremap_resource(dev, res);
		if (IS_ERR(qspi->base[BSPI]))
			return PTR_ERR(qspi->base[BSPI]);
		qspi->bspi_mode = true;
	} else {
		qspi->bspi_mode = false;
	}

	dev_info(dev, "using %smspi mode\n", qspi->bspi_mode ? "bspi-" : "");

	/* optional separate chip-select register block */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
	if (res) {
		qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
		if (IS_ERR(qspi->base[CHIP_SELECT]))
			return PTR_ERR(qspi->base[CHIP_SELECT]);
	}

	qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
				GFP_KERNEL);
	if (!qspi->dev_ids)
		return -ENOMEM;

	/*
	 * Some SoCs integrate spi controller (e.g., its interrupt bits)
	 * in specific ways
	 */
	if (soc_intc) {
		qspi->soc_intc = soc_intc;
		soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
	} else {
		qspi->soc_intc = NULL;
	}

	if (qspi->clk) {
		ret = clk_prepare_enable(qspi->clk);
		if (ret) {
			dev_err(dev, "failed to prepare clock\n");
			goto qspi_probe_err;
		}
		qspi->base_clk = clk_get_rate(qspi->clk);
	} else {
		/* no clock provider: fall back to the fixed MSPI base rate */
		qspi->base_clk = MSPI_BASE_FREQ;
	}

	if (data->has_mspi_rev) {
		rev = bcm_qspi_read(qspi, MSPI, MSPI_REV);
		/* some older revs do not have a MSPI_REV register */
		if ((rev & 0xff) == 0xff)
			rev = 0;
	}

	qspi->mspi_maj_rev = (rev >> 4) & 0xf;
	qspi->mspi_min_rev = rev & 0xf;
	qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk;

	qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);

	/*
	 * On SW resets it is possible to have the mask still enabled
	 * Need to disable the mask and clear the status while we init
	 */
	bcm_qspi_hw_uninit(qspi);

	for (val = 0; val < num_irqs; val++) {
		irq = -1;
		name = qspi_irq_tab[val].irq_name;
		if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
			/* get the l2 interrupts */
			irq = platform_get_irq_byname_optional(pdev, name);
		} else if (!num_ints && soc_intc) {
			/* all mspi, bspi intrs muxed to one L1 intr */
			irq = platform_get_irq(pdev, 0);
		}

		if (irq >= 0) {
			ret = devm_request_irq(&pdev->dev, irq,
					       qspi_irq_tab[val].irq_handler, 0,
					       name,
					       &qspi->dev_ids[val]);
			if (ret < 0) {
				dev_err(&pdev->dev, "IRQ %s not found\n", name);
				goto qspi_unprepare_err;
			}

			qspi->dev_ids[val].dev = qspi;
			qspi->dev_ids[val].irqp = &qspi_irq_tab[val];
			num_ints++;
			dev_dbg(&pdev->dev, "registered IRQ %s %d\n",
				qspi_irq_tab[val].irq_name,
				irq);
		}
	}

	if (!num_ints) {
		dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
		ret = -EINVAL;
		goto qspi_unprepare_err;
	}

	bcm_qspi_hw_init(qspi);
	init_completion(&qspi->mspi_done);
	init_completion(&qspi->bspi_done);
	qspi->curr_cs = -1;

	platform_set_drvdata(pdev, qspi);

	/* force the first transfer to program width/addrlen/hp */
	qspi->xfer_mode.width = -1;
	qspi->xfer_mode.addrlen = -1;
	qspi->xfer_mode.hp = -1;

	ret = spi_register_master(master);
	if (ret < 0) {
		dev_err(dev, "can't register master\n");
		goto qspi_reg_err;
	}

	return 0;

qspi_reg_err:
	bcm_qspi_hw_uninit(qspi);
qspi_unprepare_err:
	clk_disable_unprepare(qspi->clk);
qspi_probe_err:
	kfree(qspi->dev_ids);
	return ret;
}
/* probe function to be called by SoC specific platform driver probe */
EXPORT_SYMBOL_GPL(bcm_qspi_probe);
1652
/*
 * bcm_qspi_remove() - common remove routine; tears down in reverse
 * order of bcm_qspi_probe().
 *
 * Return: always 0.
 */
int bcm_qspi_remove(struct platform_device *pdev)
{
	struct bcm_qspi *qspi = platform_get_drvdata(pdev);

	spi_unregister_master(qspi->master);
	bcm_qspi_hw_uninit(qspi);
	clk_disable_unprepare(qspi->clk);
	kfree(qspi->dev_ids);

	return 0;
}
/* function to be called by SoC specific platform driver remove() */
EXPORT_SYMBOL_GPL(bcm_qspi_remove);
1666
bcm_qspi_suspend(struct device * dev)1667 static int __maybe_unused bcm_qspi_suspend(struct device *dev)
1668 {
1669 struct bcm_qspi *qspi = dev_get_drvdata(dev);
1670
1671 /* store the override strap value */
1672 if (!bcm_qspi_bspi_ver_three(qspi))
1673 qspi->s3_strap_override_ctrl =
1674 bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
1675
1676 spi_master_suspend(qspi->master);
1677 clk_disable_unprepare(qspi->clk);
1678 bcm_qspi_hw_uninit(qspi);
1679
1680 return 0;
1681 };
1682
bcm_qspi_resume(struct device * dev)1683 static int __maybe_unused bcm_qspi_resume(struct device *dev)
1684 {
1685 struct bcm_qspi *qspi = dev_get_drvdata(dev);
1686 int ret = 0;
1687
1688 bcm_qspi_hw_init(qspi);
1689 bcm_qspi_chip_select(qspi, qspi->curr_cs);
1690 if (qspi->soc_intc)
1691 /* enable MSPI interrupt */
1692 qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE,
1693 true);
1694
1695 ret = clk_prepare_enable(qspi->clk);
1696 if (!ret)
1697 spi_master_resume(qspi->master);
1698
1699 return ret;
1700 }
1701
/* Exported PM ops; referenced by the SoC-specific platform drivers */
SIMPLE_DEV_PM_OPS(bcm_qspi_pm_ops, bcm_qspi_suspend, bcm_qspi_resume);

/* pm_ops to be called by SoC specific platform driver */
EXPORT_SYMBOL_GPL(bcm_qspi_pm_ops);

MODULE_AUTHOR("Kamal Dasu");
MODULE_DESCRIPTION("Broadcom QSPI driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
1711