1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
5 *
6 * Copyright (C) 2005, Intec Automation Inc.
7 * Copyright (C) 2014, Freescale Semiconductor, Inc.
8 */
9
10 #include <linux/err.h>
11 #include <linux/errno.h>
12 #include <linux/delay.h>
13 #include <linux/device.h>
14 #include <linux/math64.h>
15 #include <linux/module.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/spi-nor.h>
18 #include <linux/mutex.h>
19 #include <linux/of_platform.h>
20 #include <linux/sched/task_stack.h>
21 #include <linux/sizes.h>
22 #include <linux/slab.h>
23 #include <linux/spi/flash.h>
24
25 #include "core.h"
26
27 /* Define max times to check status register before we give up. */
28
29 /*
30 * For everything but full-chip erase; probably could be much smaller, but kept
31 * around for safety for now
32 */
33 #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
34
35 /*
36 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
37 * for larger flash
38 */
39 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
40
41 #define SPI_NOR_MAX_ADDR_NBYTES 4
42
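/*
 * Assumption (not shown in this file excerpt): the software-reset sleep
 * bounds below are in microseconds, intended for a usleep_range() delay
 * between the reset-enable and reset commands.
 */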
43 #define SPI_NOR_SRST_SLEEP_MIN 200
44 #define SPI_NOR_SRST_SLEEP_MAX 400
45
46 /**
47 * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
48 * extension type.
49 * @nor: pointer to a 'struct spi_nor'
 * @op:	pointer to the 'struct spi_mem_op' whose command opcode is used to
 *	derive the extension.
52 *
53 * Right now, only "repeat" and "invert" are supported.
54 *
55 * Return: The opcode extension.
56 */
static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
			      const struct spi_mem_op *op)
59 {
60 switch (nor->cmd_ext_type) {
61 case SPI_NOR_EXT_INVERT:
62 return ~op->cmd.opcode;
63
64 case SPI_NOR_EXT_REPEAT:
65 return op->cmd.opcode;
66
67 default:
68 dev_err(nor->dev, "Unknown command extension type\n");
69 return 0;
70 }
71 }
72
73 /**
74 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
75 * @nor: pointer to a 'struct spi_nor'
76 * @op: pointer to the 'struct spi_mem_op' whose properties
77 * need to be initialized.
78 * @proto: the protocol from which the properties need to be set.
79 */
void spi_nor_spimem_setup_op(const struct spi_nor *nor,
			     struct spi_mem_op *op,
			     const enum spi_nor_protocol proto)
83 {
84 u8 ext;
85
86 op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);
87
88 if (op->addr.nbytes)
89 op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
90
91 if (op->dummy.nbytes)
92 op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
93
94 if (op->data.nbytes)
95 op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
96
97 if (spi_nor_protocol_is_dtr(proto)) {
98 /*
99 * SPIMEM supports mixed DTR modes, but right now we can only
100 * have all phases either DTR or STR. IOW, SPIMEM can have
101 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
102 * phases to either DTR or STR.
103 */
104 op->cmd.dtr = true;
105 op->addr.dtr = true;
106 op->dummy.dtr = true;
107 op->data.dtr = true;
108
109 /* 2 bytes per clock cycle in DTR mode. */
110 op->dummy.nbytes *= 2;
111
112 ext = spi_nor_get_cmd_ext(nor, op);
113 op->cmd.opcode = (op->cmd.opcode << 8) | ext;
114 op->cmd.nbytes = 2;
115 }
116 }
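/*
 * Illustrative example (not tied to a specific flash datasheet): with opcode
 * 0x05 and SPI_NOR_EXT_INVERT, the extension is ~0x05 = 0xFA and the two-byte
 * command sent on the bus is 0x05 0xFA; with SPI_NOR_EXT_REPEAT it would be
 * 0x05 0x05.
 */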
117
118 /**
119 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
120 * transfer
121 * @nor: pointer to 'struct spi_nor'
122 * @op: pointer to 'struct spi_mem_op' template for transfer
123 *
124 * If we have to use the bounce buffer, the data field in @op will be updated.
125 *
126 * Return: true if the bounce buffer is needed, false if not
127 */
static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
129 {
130 /* op->data.buf.in occupies the same memory as op->data.buf.out */
131 if (object_is_on_stack(op->data.buf.in) ||
132 !virt_addr_valid(op->data.buf.in)) {
133 if (op->data.nbytes > nor->bouncebuf_size)
134 op->data.nbytes = nor->bouncebuf_size;
135 op->data.buf.in = nor->bouncebuf;
136 return true;
137 }
138
139 return false;
140 }
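/*
 * Note: when the bounce buffer is used, op->data.nbytes may have been clamped
 * to nor->bouncebuf_size, so the resulting transfer can be shorter than the
 * caller requested.
 */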
141
142 /**
143 * spi_nor_spimem_exec_op() - execute a memory operation
144 * @nor: pointer to 'struct spi_nor'
145 * @op: pointer to 'struct spi_mem_op' template for transfer
146 *
147 * Return: 0 on success, -error otherwise.
148 */
static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
150 {
151 int error;
152
153 error = spi_mem_adjust_op_size(nor->spimem, op);
154 if (error)
155 return error;
156
157 return spi_mem_exec_op(nor->spimem, op);
158 }
159
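/*
 * The legacy controller_ops hooks do not support DTR operations, so the
 * wrappers below return -EOPNOTSUPP whenever a DTR register protocol is
 * selected.
 */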
int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
				    u8 *buf, size_t len)
162 {
163 if (spi_nor_protocol_is_dtr(nor->reg_proto))
164 return -EOPNOTSUPP;
165
166 return nor->controller_ops->read_reg(nor, opcode, buf, len);
167 }
168
int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
				     const u8 *buf, size_t len)
171 {
172 if (spi_nor_protocol_is_dtr(nor->reg_proto))
173 return -EOPNOTSUPP;
174
175 return nor->controller_ops->write_reg(nor, opcode, buf, len);
176 }
177
static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
179 {
180 if (spi_nor_protocol_is_dtr(nor->reg_proto))
181 return -EOPNOTSUPP;
182
183 return nor->controller_ops->erase(nor, offs);
184 }
185
186 /**
187 * spi_nor_spimem_read_data() - read data from flash's memory region via
188 * spi-mem
189 * @nor: pointer to 'struct spi_nor'
190 * @from: offset to read from
191 * @len: number of bytes to read
192 * @buf: pointer to dst buffer
193 *
194 * Return: number of bytes read successfully, -errno otherwise
195 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
198 {
199 struct spi_mem_op op =
200 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
201 SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
202 SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
203 SPI_MEM_OP_DATA_IN(len, buf, 0));
204 bool usebouncebuf;
205 ssize_t nbytes;
206 int error;
207
208 spi_nor_spimem_setup_op(nor, &op, nor->read_proto);
209
210 /* convert the dummy cycles to the number of bytes */
211 op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
212 if (spi_nor_protocol_is_dtr(nor->read_proto))
213 op.dummy.nbytes *= 2;
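	/*
	 * Illustrative example: a read with 8 dummy cycles on a x4 bus needs
	 * 8 * 4 / 8 = 4 dummy bytes, doubled to 8 when the protocol is DTR.
	 */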
214
215 usebouncebuf = spi_nor_spimem_bounce(nor, &op);
216
217 if (nor->dirmap.rdesc) {
218 nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
219 op.data.nbytes, op.data.buf.in);
220 } else {
221 error = spi_nor_spimem_exec_op(nor, &op);
222 if (error)
223 return error;
224 nbytes = op.data.nbytes;
225 }
226
227 if (usebouncebuf && nbytes > 0)
228 memcpy(buf, op.data.buf.in, nbytes);
229
230 return nbytes;
231 }
232
233 /**
234 * spi_nor_read_data() - read data from flash memory
235 * @nor: pointer to 'struct spi_nor'
236 * @from: offset to read from
237 * @len: number of bytes to read
238 * @buf: pointer to dst buffer
239 *
240 * Return: number of bytes read successfully, -errno otherwise
241 */
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
243 {
244 if (nor->spimem)
245 return spi_nor_spimem_read_data(nor, from, len, buf);
246
247 return nor->controller_ops->read(nor, from, len, buf);
248 }
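/*
 * Note: spi_nor_read_data() may return fewer bytes than requested (for
 * instance when the bounce buffer limits the transfer size), so callers such
 * as spi_nor_read() loop until the full length has been read.
 */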
249
250 /**
251 * spi_nor_spimem_write_data() - write data to flash memory via
252 * spi-mem
253 * @nor: pointer to 'struct spi_nor'
254 * @to: offset to write to
255 * @len: number of bytes to write
256 * @buf: pointer to src buffer
257 *
258 * Return: number of bytes written successfully, -errno otherwise
259 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
262 {
263 struct spi_mem_op op =
264 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
265 SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
266 SPI_MEM_OP_NO_DUMMY,
267 SPI_MEM_OP_DATA_OUT(len, buf, 0));
268 ssize_t nbytes;
269 int error;
270
271 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
272 op.addr.nbytes = 0;
273
274 spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
275
276 if (spi_nor_spimem_bounce(nor, &op))
277 memcpy(nor->bouncebuf, buf, op.data.nbytes);
278
279 if (nor->dirmap.wdesc) {
280 nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
281 op.data.nbytes, op.data.buf.out);
282 } else {
283 error = spi_nor_spimem_exec_op(nor, &op);
284 if (error)
285 return error;
286 nbytes = op.data.nbytes;
287 }
288
289 return nbytes;
290 }
291
292 /**
293 * spi_nor_write_data() - write data to flash memory
294 * @nor: pointer to 'struct spi_nor'
295 * @to: offset to write to
296 * @len: number of bytes to write
297 * @buf: pointer to src buffer
298 *
299 * Return: number of bytes written successfully, -errno otherwise
300 */
ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
			   const u8 *buf)
303 {
304 if (nor->spimem)
305 return spi_nor_spimem_write_data(nor, to, len, buf);
306
307 return nor->controller_ops->write(nor, to, len, buf);
308 }
309
310 /**
311 * spi_nor_read_any_reg() - read any register from flash memory, nonvolatile or
312 * volatile.
313 * @nor: pointer to 'struct spi_nor'.
314 * @op: SPI memory operation. op->data.buf must be DMA-able.
315 * @proto: SPI protocol to use for the register operation.
316 *
317 * Return: zero on success, -errno otherwise
318 */
int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op,
			 enum spi_nor_protocol proto)
321 {
322 if (!nor->spimem)
323 return -EOPNOTSUPP;
324
325 spi_nor_spimem_setup_op(nor, op, proto);
326 return spi_nor_spimem_exec_op(nor, op);
327 }
328
329 /**
330 * spi_nor_write_any_volatile_reg() - write any volatile register to flash
331 * memory.
332 * @nor: pointer to 'struct spi_nor'
333 * @op: SPI memory operation. op->data.buf must be DMA-able.
334 * @proto: SPI protocol to use for the register operation.
335 *
 * Writing volatile registers is instant according to some manufacturers
 * (Cypress, Micron) and does not need any status polling.
338 *
339 * Return: zero on success, -errno otherwise
340 */
int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op,
				   enum spi_nor_protocol proto)
343 {
344 int ret;
345
346 if (!nor->spimem)
347 return -EOPNOTSUPP;
348
349 ret = spi_nor_write_enable(nor);
350 if (ret)
351 return ret;
352 spi_nor_spimem_setup_op(nor, op, proto);
353 return spi_nor_spimem_exec_op(nor, op);
354 }
355
356 /**
357 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
358 * @nor: pointer to 'struct spi_nor'.
359 *
360 * Return: 0 on success, -errno otherwise.
361 */
int spi_nor_write_enable(struct spi_nor *nor)
363 {
364 int ret;
365
366 if (nor->spimem) {
367 struct spi_mem_op op = SPI_NOR_WREN_OP;
368
369 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
370
371 ret = spi_mem_exec_op(nor->spimem, &op);
372 } else {
373 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
374 NULL, 0);
375 }
376
377 if (ret)
378 dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
379
380 return ret;
381 }
382
383 /**
384 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
385 * @nor: pointer to 'struct spi_nor'.
386 *
387 * Return: 0 on success, -errno otherwise.
388 */
int spi_nor_write_disable(struct spi_nor *nor)
390 {
391 int ret;
392
393 if (nor->spimem) {
394 struct spi_mem_op op = SPI_NOR_WRDI_OP;
395
396 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
397
398 ret = spi_mem_exec_op(nor->spimem, &op);
399 } else {
400 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
401 NULL, 0);
402 }
403
404 if (ret)
405 dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
406
407 return ret;
408 }
409
410 /**
411 * spi_nor_read_id() - Read the JEDEC ID.
412 * @nor: pointer to 'struct spi_nor'.
413 * @naddr: number of address bytes to send. Can be zero if the operation
414 * does not need to send an address.
415 * @ndummy: number of dummy bytes to send after an opcode or address. Can
416 * be zero if the operation does not require dummy bytes.
417 * @id: pointer to a DMA-able buffer where the value of the JEDEC ID
418 * will be written.
419 * @proto: the SPI protocol for register operation.
420 *
421 * Return: 0 on success, -errno otherwise.
422 */
int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id,
		    enum spi_nor_protocol proto)
425 {
426 int ret;
427
428 if (nor->spimem) {
429 struct spi_mem_op op =
430 SPI_NOR_READID_OP(naddr, ndummy, id, SPI_NOR_MAX_ID_LEN);
431
432 spi_nor_spimem_setup_op(nor, &op, proto);
433 ret = spi_mem_exec_op(nor->spimem, &op);
434 } else {
435 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
436 SPI_NOR_MAX_ID_LEN);
437 }
438 return ret;
439 }
440
441 /**
442 * spi_nor_read_sr() - Read the Status Register.
443 * @nor: pointer to 'struct spi_nor'.
444 * @sr: pointer to a DMA-able buffer where the value of the
445 * Status Register will be written. Should be at least 2 bytes.
446 *
447 * Return: 0 on success, -errno otherwise.
448 */
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
450 {
451 int ret;
452
453 if (nor->spimem) {
454 struct spi_mem_op op = SPI_NOR_RDSR_OP(sr);
455
456 if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
457 op.addr.nbytes = nor->params->rdsr_addr_nbytes;
458 op.dummy.nbytes = nor->params->rdsr_dummy;
459 /*
460 * We don't want to read only one byte in DTR mode. So,
461 * read 2 and then discard the second byte.
462 */
463 op.data.nbytes = 2;
464 }
465
466 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
467
468 ret = spi_mem_exec_op(nor->spimem, &op);
469 } else {
470 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
471 1);
472 }
473
474 if (ret)
475 dev_dbg(nor->dev, "error %d reading SR\n", ret);
476
477 return ret;
478 }
479
480 /**
481 * spi_nor_read_cr() - Read the Configuration Register using the
482 * SPINOR_OP_RDCR (35h) command.
483 * @nor: pointer to 'struct spi_nor'
484 * @cr: pointer to a DMA-able buffer where the value of the
485 * Configuration Register will be written.
486 *
487 * Return: 0 on success, -errno otherwise.
488 */
int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
490 {
491 int ret;
492
493 if (nor->spimem) {
494 struct spi_mem_op op = SPI_NOR_RDCR_OP(cr);
495
496 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
497
498 ret = spi_mem_exec_op(nor->spimem, &op);
499 } else {
500 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
501 1);
502 }
503
504 if (ret)
505 dev_dbg(nor->dev, "error %d reading CR\n", ret);
506
507 return ret;
508 }
509
510 /**
511 * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
512 * @nor: pointer to 'struct spi_nor'.
513 * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
514 * address mode.
515 *
516 * Return: 0 on success, -errno otherwise.
517 */
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
519 {
520 int ret;
521
522 if (nor->spimem) {
523 struct spi_mem_op op = SPI_NOR_EN4B_EX4B_OP(enable);
524
525 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
526
527 ret = spi_mem_exec_op(nor->spimem, &op);
528 } else {
529 ret = spi_nor_controller_ops_write_reg(nor,
530 enable ? SPINOR_OP_EN4B :
531 SPINOR_OP_EX4B,
532 NULL, 0);
533 }
534
535 if (ret)
536 dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
537
538 return ret;
539 }
540
541 /**
542 * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
543 * flashes.
544 * @nor: pointer to 'struct spi_nor'.
545 * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
546 * address mode.
547 *
548 * Return: 0 on success, -errno otherwise.
549 */
static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
551 {
552 int ret;
553
554 nor->bouncebuf[0] = enable << 7;
555
556 if (nor->spimem) {
557 struct spi_mem_op op = SPI_NOR_BRWR_OP(nor->bouncebuf);
558
559 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
560
561 ret = spi_mem_exec_op(nor->spimem, &op);
562 } else {
563 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
564 nor->bouncebuf, 1);
565 }
566
567 if (ret)
568 dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
569
570 return ret;
571 }
572
573 /**
574 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
575 * for new commands.
576 * @nor: pointer to 'struct spi_nor'.
577 *
578 * Return: 1 if ready, 0 if not ready, -errno on errors.
579 */
int spi_nor_sr_ready(struct spi_nor *nor)
581 {
582 int ret;
583
584 ret = spi_nor_read_sr(nor, nor->bouncebuf);
585 if (ret)
586 return ret;
587
588 return !(nor->bouncebuf[0] & SR_WIP);
589 }
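/*
 * Illustrative example: SR = 0x03 (WIP and WEL set) means the flash is still
 * busy, so spi_nor_sr_ready() returns 0; SR = 0x00 or 0x02 means the Write In
 * Progress bit is clear and the function returns 1.
 */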
590
591 /**
592 * spi_nor_ready() - Query the flash to see if it is ready for new commands.
593 * @nor: pointer to 'struct spi_nor'.
594 *
595 * Return: 1 if ready, 0 if not ready, -errno on errors.
596 */
static int spi_nor_ready(struct spi_nor *nor)
598 {
599 /* Flashes might override the standard routine. */
600 if (nor->params->ready)
601 return nor->params->ready(nor);
602
603 return spi_nor_sr_ready(nor);
604 }
605
606 /**
 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
 * Status Register until the flash is ready, or the timeout occurs.
609 * @nor: pointer to "struct spi_nor".
610 * @timeout_jiffies: jiffies to wait until timeout.
611 *
612 * Return: 0 on success, -errno otherwise.
613 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
616 {
617 unsigned long deadline;
618 int timeout = 0, ret;
619
620 deadline = jiffies + timeout_jiffies;
621
622 while (!timeout) {
623 if (time_after_eq(jiffies, deadline))
624 timeout = 1;
625
626 ret = spi_nor_ready(nor);
627 if (ret < 0)
628 return ret;
629 if (ret)
630 return 0;
631
632 cond_resched();
633 }
634
635 dev_dbg(nor->dev, "flash operation timed out\n");
636
637 return -ETIMEDOUT;
638 }
639
640 /**
 * spi_nor_wait_till_ready() - Wait a predefined amount of time for the flash
 * to be ready, or until the timeout occurs.
643 * @nor: pointer to "struct spi_nor".
644 *
645 * Return: 0 on success, -errno otherwise.
646 */
int spi_nor_wait_till_ready(struct spi_nor *nor)
648 {
649 return spi_nor_wait_till_ready_with_timeout(nor,
650 DEFAULT_READY_WAIT_JIFFIES);
651 }
652
653 /**
654 * spi_nor_global_block_unlock() - Unlock Global Block Protection.
655 * @nor: pointer to 'struct spi_nor'.
656 *
657 * Return: 0 on success, -errno otherwise.
658 */
int spi_nor_global_block_unlock(struct spi_nor *nor)
660 {
661 int ret;
662
663 ret = spi_nor_write_enable(nor);
664 if (ret)
665 return ret;
666
667 if (nor->spimem) {
668 struct spi_mem_op op = SPI_NOR_GBULK_OP;
669
670 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
671
672 ret = spi_mem_exec_op(nor->spimem, &op);
673 } else {
674 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
675 NULL, 0);
676 }
677
678 if (ret) {
679 dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
680 return ret;
681 }
682
683 return spi_nor_wait_till_ready(nor);
684 }
685
686 /**
687 * spi_nor_write_sr() - Write the Status Register.
688 * @nor: pointer to 'struct spi_nor'.
689 * @sr: pointer to DMA-able buffer to write to the Status Register.
690 * @len: number of bytes to write to the Status Register.
691 *
692 * Return: 0 on success, -errno otherwise.
693 */
int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
695 {
696 int ret;
697
698 ret = spi_nor_write_enable(nor);
699 if (ret)
700 return ret;
701
702 if (nor->spimem) {
703 struct spi_mem_op op = SPI_NOR_WRSR_OP(sr, len);
704
705 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
706
707 ret = spi_mem_exec_op(nor->spimem, &op);
708 } else {
709 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
710 len);
711 }
712
713 if (ret) {
714 dev_dbg(nor->dev, "error %d writing SR\n", ret);
715 return ret;
716 }
717
718 return spi_nor_wait_till_ready(nor);
719 }
720
721 /**
 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
 * ensure that the byte written matches the value read back.
724 * @nor: pointer to a 'struct spi_nor'.
725 * @sr1: byte value to be written to the Status Register.
726 *
727 * Return: 0 on success, -errno otherwise.
728 */
static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
730 {
731 int ret;
732
733 nor->bouncebuf[0] = sr1;
734
735 ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
736 if (ret)
737 return ret;
738
739 ret = spi_nor_read_sr(nor, nor->bouncebuf);
740 if (ret)
741 return ret;
742
743 if (nor->bouncebuf[0] != sr1) {
744 dev_dbg(nor->dev, "SR1: read back test failed\n");
745 return -EIO;
746 }
747
748 return 0;
749 }
750
751 /**
 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
 * Status Register 2 in one shot. Ensure that the byte written in the Status
 * Register 1 matches the value read back, and that the 16-bit write did not
 * affect what was already in the Status Register 2.
756 * @nor: pointer to a 'struct spi_nor'.
757 * @sr1: byte value to be written to the Status Register 1.
758 *
759 * Return: 0 on success, -errno otherwise.
760 */
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
762 {
763 int ret;
764 u8 *sr_cr = nor->bouncebuf;
765 u8 cr_written;
766
767 /* Make sure we don't overwrite the contents of Status Register 2. */
768 if (!(nor->flags & SNOR_F_NO_READ_CR)) {
769 ret = spi_nor_read_cr(nor, &sr_cr[1]);
770 if (ret)
771 return ret;
772 } else if (nor->params->quad_enable) {
773 /*
774 * If the Status Register 2 Read command (35h) is not
775 * supported, we should at least be sure we don't
776 * change the value of the SR2 Quad Enable bit.
777 *
778 * We can safely assume that when the Quad Enable method is
779 * set, the value of the QE bit is one, as a consequence of the
780 * nor->params->quad_enable() call.
781 *
782 * We can safely assume that the Quad Enable bit is present in
783 * the Status Register 2 at BIT(1). According to the JESD216
784 * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
785 * Write Status (01h) command is available just for the cases
786 * in which the QE bit is described in SR2 at BIT(1).
787 */
788 sr_cr[1] = SR2_QUAD_EN_BIT1;
789 } else {
790 sr_cr[1] = 0;
791 }
792
793 sr_cr[0] = sr1;
794
795 ret = spi_nor_write_sr(nor, sr_cr, 2);
796 if (ret)
797 return ret;
798
799 ret = spi_nor_read_sr(nor, sr_cr);
800 if (ret)
801 return ret;
802
803 if (sr1 != sr_cr[0]) {
804 dev_dbg(nor->dev, "SR: Read back test failed\n");
805 return -EIO;
806 }
807
808 if (nor->flags & SNOR_F_NO_READ_CR)
809 return 0;
810
811 cr_written = sr_cr[1];
812
813 ret = spi_nor_read_cr(nor, &sr_cr[1]);
814 if (ret)
815 return ret;
816
817 if (cr_written != sr_cr[1]) {
818 dev_dbg(nor->dev, "CR: read back test failed\n");
819 return -EIO;
820 }
821
822 return 0;
823 }
824
825 /**
 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
 * Configuration Register in one shot. Ensure that the byte written in the
 * Configuration Register matches the value read back, and that the 16-bit
 * write did not affect what was already in the Status Register 1.
830 * @nor: pointer to a 'struct spi_nor'.
831 * @cr: byte value to be written to the Configuration Register.
832 *
833 * Return: 0 on success, -errno otherwise.
834 */
int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
836 {
837 int ret;
838 u8 *sr_cr = nor->bouncebuf;
839 u8 sr_written;
840
841 /* Keep the current value of the Status Register 1. */
842 ret = spi_nor_read_sr(nor, sr_cr);
843 if (ret)
844 return ret;
845
846 sr_cr[1] = cr;
847
848 ret = spi_nor_write_sr(nor, sr_cr, 2);
849 if (ret)
850 return ret;
851
852 sr_written = sr_cr[0];
853
854 ret = spi_nor_read_sr(nor, sr_cr);
855 if (ret)
856 return ret;
857
858 if (sr_written != sr_cr[0]) {
859 dev_dbg(nor->dev, "SR: Read back test failed\n");
860 return -EIO;
861 }
862
863 if (nor->flags & SNOR_F_NO_READ_CR)
864 return 0;
865
866 ret = spi_nor_read_cr(nor, &sr_cr[1]);
867 if (ret)
868 return ret;
869
870 if (cr != sr_cr[1]) {
871 dev_dbg(nor->dev, "CR: read back test failed\n");
872 return -EIO;
873 }
874
875 return 0;
876 }
877
878 /**
 * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
 * the byte written matches the value read back, without affecting other bits
 * in Status Registers 1 and 2.
882 * @nor: pointer to a 'struct spi_nor'.
883 * @sr1: byte value to be written to the Status Register.
884 *
885 * Return: 0 on success, -errno otherwise.
886 */
int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
888 {
889 if (nor->flags & SNOR_F_HAS_16BIT_SR)
890 return spi_nor_write_16bit_sr_and_check(nor, sr1);
891
892 return spi_nor_write_sr1_and_check(nor, sr1);
893 }
894
895 /**
896 * spi_nor_write_sr2() - Write the Status Register 2 using the
897 * SPINOR_OP_WRSR2 (3eh) command.
898 * @nor: pointer to 'struct spi_nor'.
899 * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
900 *
901 * Return: 0 on success, -errno otherwise.
902 */
static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
904 {
905 int ret;
906
907 ret = spi_nor_write_enable(nor);
908 if (ret)
909 return ret;
910
911 if (nor->spimem) {
912 struct spi_mem_op op = SPI_NOR_WRSR2_OP(sr2);
913
914 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
915
916 ret = spi_mem_exec_op(nor->spimem, &op);
917 } else {
918 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
919 sr2, 1);
920 }
921
922 if (ret) {
923 dev_dbg(nor->dev, "error %d writing SR2\n", ret);
924 return ret;
925 }
926
927 return spi_nor_wait_till_ready(nor);
928 }
929
930 /**
931 * spi_nor_read_sr2() - Read the Status Register 2 using the
932 * SPINOR_OP_RDSR2 (3fh) command.
933 * @nor: pointer to 'struct spi_nor'.
934 * @sr2: pointer to DMA-able buffer where the value of the
935 * Status Register 2 will be written.
936 *
937 * Return: 0 on success, -errno otherwise.
938 */
static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
940 {
941 int ret;
942
943 if (nor->spimem) {
944 struct spi_mem_op op = SPI_NOR_RDSR2_OP(sr2);
945
946 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
947
948 ret = spi_mem_exec_op(nor->spimem, &op);
949 } else {
950 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
951 1);
952 }
953
954 if (ret)
955 dev_dbg(nor->dev, "error %d reading SR2\n", ret);
956
957 return ret;
958 }
959
960 /**
961 * spi_nor_erase_chip() - Erase the entire flash memory.
962 * @nor: pointer to 'struct spi_nor'.
963 *
964 * Return: 0 on success, -errno otherwise.
965 */
static int spi_nor_erase_chip(struct spi_nor *nor)
967 {
968 int ret;
969
970 dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
971
972 if (nor->spimem) {
973 struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP;
974
975 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
976
977 ret = spi_mem_exec_op(nor->spimem, &op);
978 } else {
979 ret = spi_nor_controller_ops_write_reg(nor,
980 SPINOR_OP_CHIP_ERASE,
981 NULL, 0);
982 }
983
984 if (ret)
985 dev_dbg(nor->dev, "error %d erasing chip\n", ret);
986
987 return ret;
988 }
989
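/**
 * spi_nor_convert_opcode() - look up the 4-byte address variant of an opcode.
 * @opcode:	the 3-byte address opcode to convert.
 * @table:	array of {3-byte opcode, 4-byte opcode} pairs.
 * @size:	number of entries in @table.
 *
 * Return: the converted opcode, or @opcode unchanged if no match is found.
 */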
static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
991 {
992 size_t i;
993
994 for (i = 0; i < size; i++)
995 if (table[i][0] == opcode)
996 return table[i][1];
997
998 /* No conversion found, keep input op code. */
999 return opcode;
1000 }
1001
u8 spi_nor_convert_3to4_read(u8 opcode)
1003 {
1004 static const u8 spi_nor_3to4_read[][2] = {
1005 { SPINOR_OP_READ, SPINOR_OP_READ_4B },
1006 { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
1007 { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
1008 { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
1009 { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
1010 { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
1011 { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
1012 { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
1013
1014 { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
1015 { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
1016 { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
1017 };
1018
1019 return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
1020 ARRAY_SIZE(spi_nor_3to4_read));
1021 }
1022
static u8 spi_nor_convert_3to4_program(u8 opcode)
1024 {
1025 static const u8 spi_nor_3to4_program[][2] = {
1026 { SPINOR_OP_PP, SPINOR_OP_PP_4B },
1027 { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
1028 { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
1029 { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
1030 { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
1031 };
1032
1033 return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
1034 ARRAY_SIZE(spi_nor_3to4_program));
1035 }
1036
static u8 spi_nor_convert_3to4_erase(u8 opcode)
1038 {
1039 static const u8 spi_nor_3to4_erase[][2] = {
1040 { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
1041 { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
1042 { SPINOR_OP_SE, SPINOR_OP_SE_4B },
1043 };
1044
1045 return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
1046 ARRAY_SIZE(spi_nor_3to4_erase));
1047 }
1048
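/**
 * spi_nor_has_uniform_erase() - check if the flash uses a single, uniform
 *                               erase type over its whole size.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: true if a uniform erase type is set in the erase map, false
 * otherwise.
 */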
static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
1050 {
1051 return !!nor->params->erase_map.uniform_erase_type;
1052 }
1053
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
1055 {
1056 nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
1057 nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
1058 nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
1059
1060 if (!spi_nor_has_uniform_erase(nor)) {
1061 struct spi_nor_erase_map *map = &nor->params->erase_map;
1062 struct spi_nor_erase_type *erase;
1063 int i;
1064
1065 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
1066 erase = &map->erase_type[i];
1067 erase->opcode =
1068 spi_nor_convert_3to4_erase(erase->opcode);
1069 }
1070 }
1071 }
1072
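/**
 * spi_nor_lock_and_prep() - lock the flash and, if the controller provides a
 *                           ->prepare() hook, prepare it for an access.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno if the controller's ->prepare() hook fails.
 */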
int spi_nor_lock_and_prep(struct spi_nor *nor)
1074 {
1075 int ret = 0;
1076
1077 mutex_lock(&nor->lock);
1078
1079 if (nor->controller_ops && nor->controller_ops->prepare) {
1080 ret = nor->controller_ops->prepare(nor);
1081 if (ret) {
1082 mutex_unlock(&nor->lock);
1083 return ret;
1084 }
1085 }
1086 return ret;
1087 }
1088
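/**
 * spi_nor_unlock_and_unprep() - undo the effects of spi_nor_lock_and_prep().
 * @nor:	pointer to 'struct spi_nor'.
 */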
void spi_nor_unlock_and_unprep(struct spi_nor *nor)
1090 {
1091 if (nor->controller_ops && nor->controller_ops->unprepare)
1092 nor->controller_ops->unprepare(nor);
1093 mutex_unlock(&nor->lock);
1094 }
1095
static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
1097 {
1098 if (!nor->params->convert_addr)
1099 return addr;
1100
1101 return nor->params->convert_addr(nor, addr);
1102 }
1103
1104 /*
1105 * Initiate the erasure of a single sector
1106 */
int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
1108 {
1109 int i;
1110
1111 addr = spi_nor_convert_addr(nor, addr);
1112
1113 if (nor->spimem) {
1114 struct spi_mem_op op =
1115 SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
1116 nor->addr_nbytes, addr);
1117
1118 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
1119
1120 return spi_mem_exec_op(nor->spimem, &op);
1121 } else if (nor->controller_ops->erase) {
1122 return spi_nor_controller_ops_erase(nor, addr);
1123 }
1124
1125 /*
1126 * Default implementation, if driver doesn't have a specialized HW
1127 * control
1128 */
1129 for (i = nor->addr_nbytes - 1; i >= 0; i--) {
1130 nor->bouncebuf[i] = addr & 0xff;
1131 addr >>= 8;
1132 }
1133
1134 return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
1135 nor->bouncebuf, nor->addr_nbytes);
1136 }
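/*
 * Illustrative example for the default erase path above: with addr_nbytes = 3
 * and addr = 0x123456, bouncebuf is filled with {0x12, 0x34, 0x56}, i.e. the
 * address is sent most significant byte first after the erase opcode.
 */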
1137
1138 /**
 * spi_nor_div_by_erase_size() - divide an offset by the erase size, returning
 *				 the quotient and remainder
1140 * @erase: pointer to a structure that describes a SPI NOR erase type
1141 * @dividend: dividend value
1142 * @remainder: pointer to u32 remainder (will be updated)
1143 *
1144 * Return: the result of the division
1145 */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
				     u64 dividend, u32 *remainder)
1148 {
1149 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
1150 *remainder = (u32)dividend & erase->size_mask;
1151 return dividend >> erase->size_shift;
1152 }
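/*
 * Illustrative example: a 64 KiB erase type has size_shift = 16 and
 * size_mask = 0xffff, so spi_nor_div_by_erase_size(erase, 0x30000, &rem)
 * returns 3 with rem = 0, while 0x31000 returns 3 with rem = 0x1000.
 */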
1153
1154 /**
1155 * spi_nor_find_best_erase_type() - find the best erase type for the given
1156 * offset in the serial flash memory and the
1157 * number of bytes to erase. The region in
1158 * which the address fits is expected to be
1159 * provided.
1160 * @map: the erase map of the SPI NOR
1161 * @region: pointer to a structure that describes a SPI NOR erase region
1162 * @addr: offset in the serial flash memory
1163 * @len: number of bytes to erase
1164 *
1165 * Return: a pointer to the best fitted erase type, NULL otherwise.
1166 */
1167 static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
1171 {
1172 const struct spi_nor_erase_type *erase;
1173 u32 rem;
1174 int i;
1175 u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
1176
1177 /*
1178 * Erase types are ordered by size, with the smallest erase type at
1179 * index 0.
1180 */
1181 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
1182 /* Does the erase region support the tested erase type? */
1183 if (!(erase_mask & BIT(i)))
1184 continue;
1185
1186 erase = &map->erase_type[i];
1187 if (!erase->size)
1188 continue;
1189
1190 /* Alignment is not mandatory for overlaid regions */
1191 if (region->offset & SNOR_OVERLAID_REGION &&
1192 region->size <= len)
1193 return erase;
1194
1195 /* Don't erase more than what the user has asked for. */
1196 if (erase->size > len)
1197 continue;
1198
1199 spi_nor_div_by_erase_size(erase, addr, &rem);
1200 if (!rem)
1201 return erase;
1202 }
1203
1204 return NULL;
1205 }
1206
static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
1208 {
1209 return region->offset & SNOR_LAST_REGION;
1210 }
1211
static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
1213 {
1214 return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
1215 }
1216
1217 /**
1218 * spi_nor_region_next() - get the next spi nor region
1219 * @region: pointer to a structure that describes a SPI NOR erase region
1220 *
1221 * Return: the next spi nor region or NULL if last region.
1222 */
1223 struct spi_nor_erase_region *
spi_nor_region_next(struct spi_nor_erase_region *region)
1225 {
1226 if (spi_nor_region_is_last(region))
1227 return NULL;
1228 region++;
1229 return region;
1230 }
1231
1232 /**
1233 * spi_nor_find_erase_region() - find the region of the serial flash memory in
1234 * which the offset fits
1235 * @map: the erase map of the SPI NOR
1236 * @addr: offset in the serial flash memory
1237 *
1238 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
1239 * otherwise.
1240 */
1241 static struct spi_nor_erase_region *
spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
1243 {
1244 struct spi_nor_erase_region *region = map->regions;
1245 u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1246 u64 region_end = region_start + region->size;
1247
1248 while (addr < region_start || addr >= region_end) {
1249 region = spi_nor_region_next(region);
1250 if (!region)
1251 return ERR_PTR(-EINVAL);
1252
1253 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1254 region_end = region_start + region->size;
1255 }
1256
1257 return region;
1258 }
1259
1260 /**
1261 * spi_nor_init_erase_cmd() - initialize an erase command
1262 * @region: pointer to a structure that describes a SPI NOR erase region
1263 * @erase: pointer to a structure that describes a SPI NOR erase type
1264 *
1265 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
1266 * otherwise.
1267 */
1268 static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
		       const struct spi_nor_erase_type *erase)
1271 {
1272 struct spi_nor_erase_command *cmd;
1273
1274 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1275 if (!cmd)
1276 return ERR_PTR(-ENOMEM);
1277
1278 INIT_LIST_HEAD(&cmd->list);
1279 cmd->opcode = erase->opcode;
1280 cmd->count = 1;
1281
1282 if (region->offset & SNOR_OVERLAID_REGION)
1283 cmd->size = region->size;
1284 else
1285 cmd->size = erase->size;
1286
1287 return cmd;
1288 }
1289
1290 /**
1291 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
1292 * @erase_list: list of erase commands
1293 */
static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1295 {
1296 struct spi_nor_erase_command *cmd, *next;
1297
1298 list_for_each_entry_safe(cmd, next, erase_list, list) {
1299 list_del(&cmd->list);
1300 kfree(cmd);
1301 }
1302 }
1303
1304 /**
1305 * spi_nor_init_erase_cmd_list() - initialize erase command list
1306 * @nor: pointer to a 'struct spi_nor'
1307 * @erase_list: list of erase commands to be executed once we validate that the
1308 * erase can be performed
1309 * @addr: offset in the serial flash memory
1310 * @len: number of bytes to erase
1311 *
1312 * Builds the list of best fitted erase commands and verifies if the erase can
1313 * be performed.
1314 *
1315 * Return: 0 on success, -errno otherwise.
1316 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
1320 {
1321 const struct spi_nor_erase_map *map = &nor->params->erase_map;
1322 const struct spi_nor_erase_type *erase, *prev_erase = NULL;
1323 struct spi_nor_erase_region *region;
1324 struct spi_nor_erase_command *cmd = NULL;
1325 u64 region_end;
1326 int ret = -EINVAL;
1327
1328 region = spi_nor_find_erase_region(map, addr);
1329 if (IS_ERR(region))
1330 return PTR_ERR(region);
1331
1332 region_end = spi_nor_region_end(region);
1333
1334 while (len) {
1335 erase = spi_nor_find_best_erase_type(map, region, addr, len);
1336 if (!erase)
1337 goto destroy_erase_cmd_list;
1338
1339 if (prev_erase != erase ||
1340 erase->size != cmd->size ||
1341 region->offset & SNOR_OVERLAID_REGION) {
1342 cmd = spi_nor_init_erase_cmd(region, erase);
1343 if (IS_ERR(cmd)) {
1344 ret = PTR_ERR(cmd);
1345 goto destroy_erase_cmd_list;
1346 }
1347
1348 list_add_tail(&cmd->list, erase_list);
1349 } else {
1350 cmd->count++;
1351 }
1352
1353 addr += cmd->size;
1354 len -= cmd->size;
1355
1356 if (len && addr >= region_end) {
1357 region = spi_nor_region_next(region);
1358 if (!region)
1359 goto destroy_erase_cmd_list;
1360 region_end = spi_nor_region_end(region);
1361 }
1362
1363 prev_erase = erase;
1364 }
1365
1366 return 0;
1367
1368 destroy_erase_cmd_list:
1369 spi_nor_destroy_erase_cmd_list(erase_list);
1370 return ret;
1371 }
1372
1373 /**
1374 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
1375 * @nor: pointer to a 'struct spi_nor'
1376 * @addr: offset in the serial flash memory
1377 * @len: number of bytes to erase
1378 *
1379 * Build a list of best fitted erase commands and execute it once we validate
1380 * that the erase can be performed.
1381 *
1382 * Return: 0 on success, -errno otherwise.
1383 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
1385 {
1386 LIST_HEAD(erase_list);
1387 struct spi_nor_erase_command *cmd, *next;
1388 int ret;
1389
1390 ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
1391 if (ret)
1392 return ret;
1393
1394 list_for_each_entry_safe(cmd, next, &erase_list, list) {
1395 nor->erase_opcode = cmd->opcode;
1396 while (cmd->count) {
1397 dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
1398 cmd->size, cmd->opcode, cmd->count);
1399
1400 ret = spi_nor_write_enable(nor);
1401 if (ret)
1402 goto destroy_erase_cmd_list;
1403
1404 ret = spi_nor_erase_sector(nor, addr);
1405 if (ret)
1406 goto destroy_erase_cmd_list;
1407
1408 ret = spi_nor_wait_till_ready(nor);
1409 if (ret)
1410 goto destroy_erase_cmd_list;
1411
1412 addr += cmd->size;
1413 cmd->count--;
1414 }
1415 list_del(&cmd->list);
1416 kfree(cmd);
1417 }
1418
1419 return 0;
1420
1421 destroy_erase_cmd_list:
1422 spi_nor_destroy_erase_cmd_list(&erase_list);
1423 return ret;
1424 }
1425
1426 /*
 * Erase an address range on the NOR chip. The address range may span one or
 * more erase sectors. Return an error if there is a problem erasing.
1429 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
1431 {
1432 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1433 u32 addr, len;
1434 uint32_t rem;
1435 int ret;
1436
1437 dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
1438 (long long)instr->len);
1439
1440 if (spi_nor_has_uniform_erase(nor)) {
1441 div_u64_rem(instr->len, mtd->erasesize, &rem);
1442 if (rem)
1443 return -EINVAL;
1444 }
1445
1446 addr = instr->addr;
1447 len = instr->len;
1448
1449 ret = spi_nor_lock_and_prep(nor);
1450 if (ret)
1451 return ret;
1452
1453 /* whole-chip erase? */
1454 if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
1455 unsigned long timeout;
1456
1457 ret = spi_nor_write_enable(nor);
1458 if (ret)
1459 goto erase_err;
1460
1461 ret = spi_nor_erase_chip(nor);
1462 if (ret)
1463 goto erase_err;
1464
1465 /*
1466 * Scale the timeout linearly with the size of the flash, with
1467 * a minimum calibrated to an old 2MB flash. We could try to
1468 * pull these from CFI/SFDP, but these values should be good
1469 * enough for now.
1470 */
1471 timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
1472 CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
1473 (unsigned long)(mtd->size / SZ_2M));
1474 ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
1475 if (ret)
1476 goto erase_err;
1477
1478 /* REVISIT in some cases we could speed up erasing large regions
1479 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
1480 * to use "small sector erase", but that's not always optimal.
1481 */
1482
1483 /* "sector"-at-a-time erase */
1484 } else if (spi_nor_has_uniform_erase(nor)) {
1485 while (len) {
1486 ret = spi_nor_write_enable(nor);
1487 if (ret)
1488 goto erase_err;
1489
1490 ret = spi_nor_erase_sector(nor, addr);
1491 if (ret)
1492 goto erase_err;
1493
1494 ret = spi_nor_wait_till_ready(nor);
1495 if (ret)
1496 goto erase_err;
1497
1498 addr += mtd->erasesize;
1499 len -= mtd->erasesize;
1500 }
1501
1502 /* erase multiple sectors */
1503 } else {
1504 ret = spi_nor_erase_multi_sectors(nor, addr, len);
1505 if (ret)
1506 goto erase_err;
1507 }
1508
1509 ret = spi_nor_write_disable(nor);
1510
1511 erase_err:
1512 spi_nor_unlock_and_unprep(nor);
1513
1514 return ret;
1515 }
1516
1517 /**
1518 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
1519 * Register 1.
1520 * @nor: pointer to a 'struct spi_nor'
1521 *
 * Bit 6 of the Status Register 1 is the QE bit for Macronix-like QSPI memories.
1523 *
1524 * Return: 0 on success, -errno otherwise.
1525 */
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
1527 {
1528 int ret;
1529
1530 ret = spi_nor_read_sr(nor, nor->bouncebuf);
1531 if (ret)
1532 return ret;
1533
1534 if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
1535 return 0;
1536
1537 nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
1538
1539 return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
1540 }
1541
1542 /**
1543 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
1544 * Register 2.
1545 * @nor: pointer to a 'struct spi_nor'.
1546 *
 * Bit 1 of the Status Register 2 is the QE bit for Spansion-like QSPI memories.
1548 *
1549 * Return: 0 on success, -errno otherwise.
1550 */
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
1552 {
1553 int ret;
1554
1555 if (nor->flags & SNOR_F_NO_READ_CR)
1556 return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
1557
1558 ret = spi_nor_read_cr(nor, nor->bouncebuf);
1559 if (ret)
1560 return ret;
1561
1562 if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
1563 return 0;
1564
1565 nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
1566
1567 return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
1568 }
1569
1570 /**
1571 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
1572 * @nor: pointer to a 'struct spi_nor'
1573 *
1574 * Set the Quad Enable (QE) bit in the Status Register 2.
1575 *
1576 * This is one of the procedures to set the QE bit described in the SFDP
1577 * (JESD216 rev B) specification but no manufacturer using this procedure has
1578 * been identified yet, hence the name of the function.
1579 *
1580 * Return: 0 on success, -errno otherwise.
1581 */
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
1583 {
1584 u8 *sr2 = nor->bouncebuf;
1585 int ret;
1586 u8 sr2_written;
1587
1588 /* Check current Quad Enable bit value. */
1589 ret = spi_nor_read_sr2(nor, sr2);
1590 if (ret)
1591 return ret;
1592 if (*sr2 & SR2_QUAD_EN_BIT7)
1593 return 0;
1594
1595 /* Update the Quad Enable bit. */
1596 *sr2 |= SR2_QUAD_EN_BIT7;
1597
1598 ret = spi_nor_write_sr2(nor, sr2);
1599 if (ret)
1600 return ret;
1601
1602 sr2_written = *sr2;
1603
1604 /* Read back and check it. */
1605 ret = spi_nor_read_sr2(nor, sr2);
1606 if (ret)
1607 return ret;
1608
1609 if (*sr2 != sr2_written) {
1610 dev_dbg(nor->dev, "SR2: Read back test failed\n");
1611 return -EIO;
1612 }
1613
1614 return 0;
1615 }
1616
1617 static const struct spi_nor_manufacturer *manufacturers[] = {
1618 &spi_nor_atmel,
1619 &spi_nor_catalyst,
1620 &spi_nor_eon,
1621 &spi_nor_esmt,
1622 &spi_nor_everspin,
1623 &spi_nor_fujitsu,
1624 &spi_nor_gigadevice,
1625 &spi_nor_intel,
1626 &spi_nor_issi,
1627 &spi_nor_macronix,
1628 &spi_nor_micron,
1629 &spi_nor_st,
1630 &spi_nor_spansion,
1631 &spi_nor_sst,
1632 &spi_nor_winbond,
1633 &spi_nor_xilinx,
1634 &spi_nor_xmc,
1635 };
1636
1637 static const struct flash_info spi_nor_generic_flash = {
1638 .name = "spi-nor-generic",
1639 /*
1640 * JESD216 rev A doesn't specify the page size, therefore we need a
1641 * sane default.
1642 */
1643 .page_size = 256,
1644 .parse_sfdp = true,
1645 };
1646
static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
						 const u8 *id)
1649 {
1650 const struct flash_info *part;
1651 unsigned int i, j;
1652
1653 for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
1654 for (j = 0; j < manufacturers[i]->nparts; j++) {
1655 part = &manufacturers[i]->parts[j];
1656 if (part->id_len &&
1657 !memcmp(part->id, id, part->id_len)) {
1658 nor->manufacturer = manufacturers[i];
1659 return part;
1660 }
1661 }
1662 }
1663
1664 return NULL;
1665 }
1666
static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
1668 {
1669 const struct flash_info *info;
1670 u8 *id = nor->bouncebuf;
1671 int ret;
1672
1673 ret = spi_nor_read_id(nor, 0, 0, id, nor->reg_proto);
1674 if (ret) {
1675 dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
1676 return ERR_PTR(ret);
1677 }
1678
1679 /* Cache the complete flash ID. */
1680 nor->id = devm_kmemdup(nor->dev, id, SPI_NOR_MAX_ID_LEN, GFP_KERNEL);
1681 if (!nor->id)
1682 return ERR_PTR(-ENOMEM);
1683
1684 info = spi_nor_match_id(nor, id);
1685
1686 /* Fallback to a generic flash described only by its SFDP data. */
1687 if (!info) {
1688 ret = spi_nor_check_sfdp_signature(nor);
1689 if (!ret)
1690 info = &spi_nor_generic_flash;
1691 }
1692
1693 if (!info) {
1694 dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
1695 SPI_NOR_MAX_ID_LEN, id);
1696 return ERR_PTR(-ENODEV);
1697 }
1698 return info;
1699 }
1700
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
1703 {
1704 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1705 ssize_t ret;
1706
1707 dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
1708
1709 ret = spi_nor_lock_and_prep(nor);
1710 if (ret)
1711 return ret;
1712
1713 while (len) {
1714 loff_t addr = from;
1715
1716 addr = spi_nor_convert_addr(nor, addr);
1717
1718 ret = spi_nor_read_data(nor, addr, len, buf);
1719 if (ret == 0) {
1720 /* We shouldn't see 0-length reads */
1721 ret = -EIO;
1722 goto read_err;
1723 }
1724 if (ret < 0)
1725 goto read_err;
1726
1727 WARN_ON(ret > len);
1728 *retlen += ret;
1729 buf += ret;
1730 from += ret;
1731 len -= ret;
1732 }
1733 ret = 0;
1734
1735 read_err:
1736 spi_nor_unlock_and_unprep(nor);
1737 return ret;
1738 }
1739
1740 /*
1741 * Write an address range to the nor chip. Data must be written in
1742 * FLASH_PAGESIZE chunks. The address range may be any size provided
1743 * it is within the physical boundaries.
1744 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
1747 {
1748 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1749 size_t page_offset, page_remain, i;
1750 ssize_t ret;
1751 u32 page_size = nor->params->page_size;
1752
1753 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
1754
1755 ret = spi_nor_lock_and_prep(nor);
1756 if (ret)
1757 return ret;
1758
1759 for (i = 0; i < len; ) {
1760 ssize_t written;
1761 loff_t addr = to + i;
1762
1763 /*
1764 * If page_size is a power of two, the offset can be quickly
 * calculated with an AND operation. Otherwise we need a
 * modulus operation (more expensive).
1767 */
1768 if (is_power_of_2(page_size)) {
1769 page_offset = addr & (page_size - 1);
1770 } else {
1771 uint64_t aux = addr;
1772
1773 page_offset = do_div(aux, page_size);
1774 }
1775 /* the size of data remaining on the first page */
1776 page_remain = min_t(size_t, page_size - page_offset, len - i);
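		/*
		 * Illustrative example: with page_size = 256 and addr = 0x1234,
		 * page_offset = 0x34, so at most 256 - 0x34 = 204 bytes are
		 * programmed before the next page boundary.
		 */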
1777
1778 addr = spi_nor_convert_addr(nor, addr);
1779
1780 ret = spi_nor_write_enable(nor);
1781 if (ret)
1782 goto write_err;
1783
1784 ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
1785 if (ret < 0)
1786 goto write_err;
1787 written = ret;
1788
1789 ret = spi_nor_wait_till_ready(nor);
1790 if (ret)
1791 goto write_err;
1792 *retlen += written;
1793 i += written;
1794 }
1795
1796 write_err:
1797 spi_nor_unlock_and_unprep(nor);
1798 return ret;
1799 }
1800
static int spi_nor_check(struct spi_nor *nor)
1802 {
1803 if (!nor->dev ||
1804 (!nor->spimem && !nor->controller_ops) ||
1805 (!nor->spimem && nor->controller_ops &&
1806 (!nor->controller_ops->read ||
1807 !nor->controller_ops->write ||
1808 !nor->controller_ops->read_reg ||
1809 !nor->controller_ops->write_reg))) {
1810 pr_err("spi-nor: please fill all the necessary fields!\n");
1811 return -EINVAL;
1812 }
1813
1814 if (nor->spimem && nor->controller_ops) {
1815 dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
1816 return -EINVAL;
1817 }
1818
1819 return 0;
1820 }
1821
1822 void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
			  u8 num_mode_clocks,
			  u8 num_wait_states,
			  u8 opcode,
			  enum spi_nor_protocol proto)
1828 {
1829 read->num_mode_clocks = num_mode_clocks;
1830 read->num_wait_states = num_wait_states;
1831 read->opcode = opcode;
1832 read->proto = proto;
1833 }
1834
void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
			     enum spi_nor_protocol proto)
1837 {
1838 pp->opcode = opcode;
1839 pp->proto = proto;
1840 }
1841
static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
1843 {
1844 size_t i;
1845
1846 for (i = 0; i < size; i++)
1847 if (table[i][0] == (int)hwcaps)
1848 return table[i][1];
1849
1850 return -EINVAL;
1851 }
1852
int spi_nor_hwcaps_read2cmd(u32 hwcaps)
1854 {
1855 static const int hwcaps_read2cmd[][2] = {
1856 { SNOR_HWCAPS_READ, SNOR_CMD_READ },
1857 { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
1858 { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
1859 { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
1860 { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
1861 { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
1862 { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
1863 { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
1864 { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
1865 { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
1866 { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
1867 { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
1868 { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
1869 { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
1870 { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
1871 { SNOR_HWCAPS_READ_8_8_8_DTR, SNOR_CMD_READ_8_8_8_DTR },
1872 };
1873
1874 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
1875 ARRAY_SIZE(hwcaps_read2cmd));
1876 }
1877
int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
1879 {
1880 static const int hwcaps_pp2cmd[][2] = {
1881 { SNOR_HWCAPS_PP, SNOR_CMD_PP },
1882 { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
1883 { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
1884 { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
1885 { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
1886 { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
1887 { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
1888 { SNOR_HWCAPS_PP_8_8_8_DTR, SNOR_CMD_PP_8_8_8_DTR },
1889 };
1890
1891 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
1892 ARRAY_SIZE(hwcaps_pp2cmd));
1893 }
1894
1895 /**
1896 * spi_nor_spimem_check_op() - check if the operation is supported
1897 * by the controller
1898 * @nor: pointer to a 'struct spi_nor'
1899 * @op: pointer to op template to be checked
1900 *
1901 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
1902 */
1903 static int spi_nor_spimem_check_op(struct spi_nor *nor,
1904 struct spi_mem_op *op)
1905 {
1906 /*
1907 * First test with 4 address bytes. The opcode itself might
1908 * be a 3B addressing opcode but we don't care, because
1909 * the SPI controller implementation should not check the opcode,
1910 * only the sequence.
1911 */
1912 op->addr.nbytes = 4;
1913 if (!spi_mem_supports_op(nor->spimem, op)) {
1914 if (nor->params->size > SZ_16M)
1915 return -EOPNOTSUPP;
1916
1917 /* If flash size <= 16MB, 3 address bytes are sufficient */
1918 op->addr.nbytes = 3;
1919 if (!spi_mem_supports_op(nor->spimem, op))
1920 return -EOPNOTSUPP;
1921 }
1922
1923 return 0;
1924 }
1925
1926 /**
1927 * spi_nor_spimem_check_readop() - check if the read op is supported
1928 * by the controller
1929 * @nor: pointer to a 'struct spi_nor'
1930 * @read: pointer to op template to be checked
1931 *
1932 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
1933 */
1934 static int spi_nor_spimem_check_readop(struct spi_nor *nor,
1935 const struct spi_nor_read_command *read)
1936 {
1937 struct spi_mem_op op = SPI_NOR_READ_OP(read->opcode);
1938
1939 spi_nor_spimem_setup_op(nor, &op, read->proto);
1940
1941 /* convert the dummy cycles to the number of bytes */
1942 op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
1943 op.dummy.buswidth / 8;
1944 if (spi_nor_protocol_is_dtr(nor->read_proto))
1945 op.dummy.nbytes *= 2;
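/*
 * For illustration: a 1-1-4 read defined with 8 dummy clocks on a x4 bus
 * converts to 8 * 4 / 8 = 4 dummy bytes above; a DTR protocol doubles
 * that, since two bits per line are transferred on every clock cycle.
 */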
1946
1947 return spi_nor_spimem_check_op(nor, &op);
1948 }
1949
1950 /**
1951 * spi_nor_spimem_check_pp() - check if the page program op is supported
1952 * by the controller
1953 * @nor: pointer to a 'struct spi_nor'
1954 * @pp: pointer to op template to be checked
1955 *
1956 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
1957 */
1958 static int spi_nor_spimem_check_pp(struct spi_nor *nor,
1959 const struct spi_nor_pp_command *pp)
1960 {
1961 struct spi_mem_op op = SPI_NOR_PP_OP(pp->opcode);
1962
1963 spi_nor_spimem_setup_op(nor, &op, pp->proto);
1964
1965 return spi_nor_spimem_check_op(nor, &op);
1966 }
1967
1968 /**
1969 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
1970 * based on SPI controller capabilities
1971 * @nor: pointer to a 'struct spi_nor'
1972 * @hwcaps: pointer to resulting capabilities after adjusting
1973 * according to controller and flash's capability
1974 */
1975 static void
1976 spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
1977 {
1978 struct spi_nor_flash_parameter *params = nor->params;
1979 unsigned int cap;
1980
1981 /* X-X-X modes are not supported yet, mask them all. */
1982 *hwcaps &= ~SNOR_HWCAPS_X_X_X;
1983
1984 /*
1985 * If the reset line is broken, we do not want to enter a stateful
1986 * mode.
1987 */
1988 if (nor->flags & SNOR_F_BROKEN_RESET)
1989 *hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);
1990
1991 for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
1992 int rdidx, ppidx;
1993
1994 if (!(*hwcaps & BIT(cap)))
1995 continue;
1996
1997 rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
1998 if (rdidx >= 0 &&
1999 spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
2000 *hwcaps &= ~BIT(cap);
2001
2002 ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
2003 if (ppidx < 0)
2004 continue;
2005
2006 if (spi_nor_spimem_check_pp(nor,
2007 &params->page_programs[ppidx]))
2008 *hwcaps &= ~BIT(cap);
2009 }
2010 }
2011
2012 /**
2013 * spi_nor_set_erase_type() - set a SPI NOR erase type
2014 * @erase: pointer to a structure that describes a SPI NOR erase type
2015 * @size: the size of the sector/block erased by the erase type
2016 * @opcode: the SPI command op code to erase the sector/block
2017 */
2018 void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
2019 u8 opcode)
2020 {
2021 erase->size = size;
2022 erase->opcode = opcode;
2023 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
2024 erase->size_shift = ffs(erase->size) - 1;
2025 erase->size_mask = (1 << erase->size_shift) - 1;
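/*
 * For illustration: a 64 KiB (0x10000) erase type ends up with
 * size_shift = 16 and size_mask = 0xffff, so "addr & size_mask" gives the
 * offset within a sector and "addr >> size_shift" the sector index.
 */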
2026 }
2027
2028 /**
2029 * spi_nor_mask_erase_type() - mask out a SPI NOR erase type
2030 * @erase: pointer to a structure that describes a SPI NOR erase type
2031 */
2032 void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
2033 {
2034 erase->size = 0;
2035 }
2036
2037 /**
2038 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
2039 * @map: the erase map of the SPI NOR
2040 * @erase_mask: bitmask encoding erase types that can erase the entire
2041 * flash memory
2042 * @flash_size: the spi nor flash memory size
2043 */
2044 void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
2045 u8 erase_mask, u64 flash_size)
2046 {
2047 /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
2048 map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
2049 SNOR_LAST_REGION;
2050 map->uniform_region.size = flash_size;
2051 map->regions = &map->uniform_region;
2052 map->uniform_erase_type = erase_mask;
2053 }
2054
2055 int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2056 const struct sfdp_parameter_header *bfpt_header,
2057 const struct sfdp_bfpt *bfpt)
2058 {
2059 int ret;
2060
2061 if (nor->manufacturer && nor->manufacturer->fixups &&
2062 nor->manufacturer->fixups->post_bfpt) {
2063 ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
2064 bfpt);
2065 if (ret)
2066 return ret;
2067 }
2068
2069 if (nor->info->fixups && nor->info->fixups->post_bfpt)
2070 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt);
2071
2072 return 0;
2073 }
2074
2075 static int spi_nor_select_read(struct spi_nor *nor,
2076 u32 shared_hwcaps)
2077 {
2078 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
2079 const struct spi_nor_read_command *read;
2080
2081 if (best_match < 0)
2082 return -EINVAL;
2083
2084 cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
2085 if (cmd < 0)
2086 return -EINVAL;
2087
2088 read = &nor->params->reads[cmd];
2089 nor->read_opcode = read->opcode;
2090 nor->read_proto = read->proto;
2091
2092 /*
2093 * In the SPI NOR framework, we don't need to distinguish between
2094 * mode clock cycles and wait state clock cycles.
2095 * Indeed, the value of the mode clock cycles is used by a QSPI
2096 * flash memory to know whether it should enter or leave its 0-4-4
2097 * (Continuous Read / XIP) mode.
2098 * eXecution In Place is out of the scope of the mtd sub-system.
2099 * Hence we choose to merge both mode and wait state clock cycles
2100 * into the so called dummy clock cycles.
2101 */
2102 nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
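/*
 * For illustration (hypothetical values): a read command defined with
 * 2 mode clocks and 4 wait states ends up with read_dummy = 6 dummy
 * clock cycles here.
 */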
2103 return 0;
2104 }
2105
2106 static int spi_nor_select_pp(struct spi_nor *nor,
2107 u32 shared_hwcaps)
2108 {
2109 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
2110 const struct spi_nor_pp_command *pp;
2111
2112 if (best_match < 0)
2113 return -EINVAL;
2114
2115 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
2116 if (cmd < 0)
2117 return -EINVAL;
2118
2119 pp = &nor->params->page_programs[cmd];
2120 nor->program_opcode = pp->opcode;
2121 nor->write_proto = pp->proto;
2122 return 0;
2123 }
2124
2125 /**
2126 * spi_nor_select_uniform_erase() - select optimum uniform erase type
2127 * @map: the erase map of the SPI NOR
2128 * @wanted_size: the erase type size to search for. Contains the value of
2129 * info->sector_size, the "small sector" size in case
2130 * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined or 0 if
2131 * there is no information about the sector size. The
2132 * latter is the case if the flash parameters are parsed
2133 * solely by SFDP; the largest supported erase type is
2134 * then selected.
2135 *
2136 * Once the optimum uniform sector erase command is found, disable all the
2137 * others.
2138 *
2139 * Return: pointer to erase type on success, NULL otherwise.
2140 */
2141 static const struct spi_nor_erase_type *
2142 spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
2143 const u32 wanted_size)
2144 {
2145 const struct spi_nor_erase_type *tested_erase, *erase = NULL;
2146 int i;
2147 u8 uniform_erase_type = map->uniform_erase_type;
2148
2149 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2150 if (!(uniform_erase_type & BIT(i)))
2151 continue;
2152
2153 tested_erase = &map->erase_type[i];
2154
2155 /* Skip masked erase types. */
2156 if (!tested_erase->size)
2157 continue;
2158
2159 /*
2160 * If the current erase size matches the wanted size, stop here:
2161 * we have found the right uniform Sector Erase command.
2162 */
2163 if (tested_erase->size == wanted_size) {
2164 erase = tested_erase;
2165 break;
2166 }
2167
2168 /*
2169 * Otherwise, the current erase size is still a valid candidate.
2170 * Select the biggest valid candidate.
2171 */
2172 if (!erase && tested_erase->size)
2173 erase = tested_erase;
2174 /* keep iterating to find the wanted_size */
2175 }
2176
2177 if (!erase)
2178 return NULL;
2179
2180 /* Disable all other Sector Erase commands. */
2181 map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
2182 map->uniform_erase_type |= BIT(erase - map->erase_type);
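/*
 * Note: "erase - map->erase_type" is the index of the selected erase type
 * in the erase_type[] array, so only that type's bit remains set.
 */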
2183 return erase;
2184 }
2185
2186 static int spi_nor_select_erase(struct spi_nor *nor)
2187 {
2188 struct spi_nor_erase_map *map = &nor->params->erase_map;
2189 const struct spi_nor_erase_type *erase = NULL;
2190 struct mtd_info *mtd = &nor->mtd;
2191 u32 wanted_size = nor->info->sector_size;
2192 int i;
2193
2194 /*
2195 * The previous implementation handling Sector Erase commands assumed
2196 * that the SPI flash memory has a uniform layout and then used only one
2197 * of the supported erase sizes for all Sector Erase commands.
2198 * So to be backward compatible, the new implementation also tries to
2199 * manage the SPI flash memory as uniform with a single erase sector
2200 * size, when possible.
2201 */
2202 #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
2203 /* prefer "small sector" erase if possible */
2204 wanted_size = 4096u;
2205 #endif
2206
2207 if (spi_nor_has_uniform_erase(nor)) {
2208 erase = spi_nor_select_uniform_erase(map, wanted_size);
2209 if (!erase)
2210 return -EINVAL;
2211 nor->erase_opcode = erase->opcode;
2212 mtd->erasesize = erase->size;
2213 return 0;
2214 }
2215
2216 /*
2217 * For non-uniform SPI flash memory, set mtd->erasesize to the
2218 * maximum erase sector size. No need to set nor->erase_opcode.
2219 */
2220 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2221 if (map->erase_type[i].size) {
2222 erase = &map->erase_type[i];
2223 break;
2224 }
2225 }
2226
2227 if (!erase)
2228 return -EINVAL;
2229
2230 mtd->erasesize = erase->size;
2231 return 0;
2232 }
2233
2234 static int spi_nor_default_setup(struct spi_nor *nor,
2235 const struct spi_nor_hwcaps *hwcaps)
2236 {
2237 struct spi_nor_flash_parameter *params = nor->params;
2238 u32 ignored_mask, shared_mask;
2239 int err;
2240
2241 /*
2242 * Keep only the hardware capabilities supported by both the SPI
2243 * controller and the SPI flash memory.
2244 */
2245 shared_mask = hwcaps->mask & params->hwcaps.mask;
2246
2247 if (nor->spimem) {
2248 /*
2249 * When called from spi_nor_probe(), all caps are set and we
2250 * need to discard some of them based on what the SPI
2251 * controller actually supports (using spi_mem_supports_op()).
2252 */
2253 spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
2254 } else {
2255 /*
2256 * SPI n-n-n protocols are not supported when the SPI
2257 * controller directly implements the spi_nor interface.
2258 * Yet another reason to switch to spi-mem.
2259 */
2260 ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
2261 if (shared_mask & ignored_mask) {
2262 dev_dbg(nor->dev,
2263 "SPI n-n-n protocols are not supported.\n");
2264 shared_mask &= ~ignored_mask;
2265 }
2266 }
2267
2268 /* Select the (Fast) Read command. */
2269 err = spi_nor_select_read(nor, shared_mask);
2270 if (err) {
2271 dev_dbg(nor->dev,
2272 "can't select read settings supported by both the SPI controller and memory.\n");
2273 return err;
2274 }
2275
2276 /* Select the Page Program command. */
2277 err = spi_nor_select_pp(nor, shared_mask);
2278 if (err) {
2279 dev_dbg(nor->dev,
2280 "can't select write settings supported by both the SPI controller and memory.\n");
2281 return err;
2282 }
2283
2284 /* Select the Sector Erase command. */
2285 err = spi_nor_select_erase(nor);
2286 if (err) {
2287 dev_dbg(nor->dev,
2288 "can't select erase settings supported by both the SPI controller and memory.\n");
2289 return err;
2290 }
2291
2292 return 0;
2293 }
2294
2295 static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
2296 {
2297 if (nor->params->addr_nbytes) {
2298 nor->addr_nbytes = nor->params->addr_nbytes;
2299 } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
2300 /*
2301 * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
2302 * in this protocol an odd addr_nbytes cannot be used because
2303 * then the address phase would only span a cycle and a half.
2304 * Half a cycle would be left over. We would then have to start
2305 * the dummy phase in the middle of a cycle and so too the data
2306 * phase, and we would end the transaction with half a cycle left
2307 * over.
2308 *
2309 * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
2310 * avoid this situation.
2311 */
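/*
 * For illustration: 3 address bytes in 8D-8D-8D would take
 * 24 bits / (8 lines * 2 edges per cycle) = 1.5 clock cycles,
 * whereas 4 bytes take exactly 2 cycles.
 */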
2312 nor->addr_nbytes = 4;
2313 } else if (nor->info->addr_nbytes) {
2314 nor->addr_nbytes = nor->info->addr_nbytes;
2315 } else {
2316 nor->addr_nbytes = 3;
2317 }
2318
2319 if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
2320 /* enable 4-byte addressing if the device exceeds 16MiB */
2321 nor->addr_nbytes = 4;
2322 }
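/*
 * For illustration: a 32 MiB part cannot be fully addressed with 3 bytes
 * (2^24 = 16 MiB), hence the bump to 4 address bytes above.
 */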
2323
2324 if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
2325 dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
2326 nor->addr_nbytes);
2327 return -EINVAL;
2328 }
2329
2330 /* Set 4byte opcodes when possible. */
2331 if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
2332 !(nor->flags & SNOR_F_HAS_4BAIT))
2333 spi_nor_set_4byte_opcodes(nor);
2334
2335 return 0;
2336 }
2337
2338 static int spi_nor_setup(struct spi_nor *nor,
2339 const struct spi_nor_hwcaps *hwcaps)
2340 {
2341 int ret;
2342
2343 if (nor->params->setup)
2344 ret = nor->params->setup(nor, hwcaps);
2345 else
2346 ret = spi_nor_default_setup(nor, hwcaps);
2347 if (ret)
2348 return ret;
2349
2350 return spi_nor_set_addr_nbytes(nor);
2351 }
2352
2353 /**
2354 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
2355 * settings based on MFR register and ->default_init() hook.
2356 * @nor: pointer to a 'struct spi_nor'.
2357 */
2358 static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
2359 {
2360 if (nor->manufacturer && nor->manufacturer->fixups &&
2361 nor->manufacturer->fixups->default_init)
2362 nor->manufacturer->fixups->default_init(nor);
2363
2364 if (nor->info->fixups && nor->info->fixups->default_init)
2365 nor->info->fixups->default_init(nor);
2366 }
2367
2368 /**
2369 * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and
2370 * settings based on nor->info->no_sfdp_flags. This method should be called only by
2371 * flashes that do not define SFDP tables. If the flash supports SFDP but the
2372 * information is wrong and the settings from this function can not be retrieved
2373 * by parsing SFDP, one should instead use the fixup hooks and update the wrong
2374 * bits.
2375 * @nor: pointer to a 'struct spi_nor'.
2376 */
2377 static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
2378 {
2379 struct spi_nor_flash_parameter *params = nor->params;
2380 struct spi_nor_erase_map *map = &params->erase_map;
2381 const u8 no_sfdp_flags = nor->info->no_sfdp_flags;
2382 u8 i, erase_mask;
2383
2384 if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
2385 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
2386 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
2387 0, 8, SPINOR_OP_READ_1_1_2,
2388 SNOR_PROTO_1_1_2);
2389 }
2390
2391 if (no_sfdp_flags & SPI_NOR_QUAD_READ) {
2392 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
2393 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
2394 0, 8, SPINOR_OP_READ_1_1_4,
2395 SNOR_PROTO_1_1_4);
2396 }
2397
2398 if (no_sfdp_flags & SPI_NOR_OCTAL_READ) {
2399 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
2400 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
2401 0, 8, SPINOR_OP_READ_1_1_8,
2402 SNOR_PROTO_1_1_8);
2403 }
2404
2405 if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) {
2406 params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
2407 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
2408 0, 20, SPINOR_OP_READ_FAST,
2409 SNOR_PROTO_8_8_8_DTR);
2410 }
2411
2412 if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) {
2413 params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
2414 /*
2415 * Since xSPI Page Program opcode is backward compatible with
2416 * Legacy SPI, use Legacy SPI opcode there as well.
2417 */
2418 spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
2419 SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
2420 }
2421
2422 /*
2423 * Sector Erase settings. Sort Erase Types in ascending order, with the
2424 * smallest erase size starting at BIT(0).
2425 */
2426 erase_mask = 0;
2427 i = 0;
2428 if (no_sfdp_flags & SECT_4K) {
2429 erase_mask |= BIT(i);
2430 spi_nor_set_erase_type(&map->erase_type[i], 4096u,
2431 SPINOR_OP_BE_4K);
2432 i++;
2433 }
2434 erase_mask |= BIT(i);
2435 spi_nor_set_erase_type(&map->erase_type[i], nor->info->sector_size,
2436 SPINOR_OP_SE);
2437 spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
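/*
 * For illustration (hypothetical part): a flash declaring SECT_4K with a
 * 64 KiB sector_size ends up with erase_type[0] = 4 KiB (opcode 0x20),
 * erase_type[1] = 64 KiB (opcode 0xd8) and erase_mask = BIT(0) | BIT(1).
 */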
2438 }
2439
2440 /**
2441 * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined
2442 * in the JESD216 SFDP standard, thus can not be retrieved when parsing SFDP.
2443 * @nor: pointer to a 'struct spi_nor'
2444 */
2445 static void spi_nor_init_flags(struct spi_nor *nor)
2446 {
2447 struct device_node *np = spi_nor_get_flash_node(nor);
2448 const u16 flags = nor->info->flags;
2449
2450 if (of_property_read_bool(np, "broken-flash-reset"))
2451 nor->flags |= SNOR_F_BROKEN_RESET;
2452
2453 if (flags & SPI_NOR_SWP_IS_VOLATILE)
2454 nor->flags |= SNOR_F_SWP_IS_VOLATILE;
2455
2456 if (flags & SPI_NOR_HAS_LOCK)
2457 nor->flags |= SNOR_F_HAS_LOCK;
2458
2459 if (flags & SPI_NOR_HAS_TB) {
2460 nor->flags |= SNOR_F_HAS_SR_TB;
2461 if (flags & SPI_NOR_TB_SR_BIT6)
2462 nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
2463 }
2464
2465 if (flags & SPI_NOR_4BIT_BP) {
2466 nor->flags |= SNOR_F_HAS_4BIT_BP;
2467 if (flags & SPI_NOR_BP3_SR_BIT6)
2468 nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
2469 }
2470
2471 if (flags & NO_CHIP_ERASE)
2472 nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
2473 }
2474
2475 /**
2476 * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that can not
2477 * be discovered by SFDP for this particular flash because the SFDP table that
2478 * indicates this support is not defined in the flash. In case the table for
2479 * this support is defined but has wrong values, one should instead use a
2480 * post_sfdp() hook to set the SNOR_F equivalent flag.
2481 * @nor: pointer to a 'struct spi_nor'
2482 */
2483 static void spi_nor_init_fixup_flags(struct spi_nor *nor)
2484 {
2485 const u8 fixup_flags = nor->info->fixup_flags;
2486
2487 if (fixup_flags & SPI_NOR_4B_OPCODES)
2488 nor->flags |= SNOR_F_4B_OPCODES;
2489
2490 if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE)
2491 nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
2492 }
2493
2494 /**
2495 * spi_nor_late_init_params() - Late initialization of default flash parameters.
2496 * @nor: pointer to a 'struct spi_nor'
2497 *
2498 * Used to initialize flash parameters that are not declared in the JESD216
2499 * SFDP standard, or where SFDP tables are not defined at all.
2500 * Will replace the spi_nor_manufacturer_init_params() method.
2501 */
2502 static void spi_nor_late_init_params(struct spi_nor *nor)
2503 {
2504 if (nor->manufacturer && nor->manufacturer->fixups &&
2505 nor->manufacturer->fixups->late_init)
2506 nor->manufacturer->fixups->late_init(nor);
2507
2508 if (nor->info->fixups && nor->info->fixups->late_init)
2509 nor->info->fixups->late_init(nor);
2510
2511 spi_nor_init_flags(nor);
2512 spi_nor_init_fixup_flags(nor);
2513
2514 /*
2515 * NOR protection support. When locking_ops are not provided, we pick
2516 * the default ones.
2517 */
2518 if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
2519 spi_nor_init_default_locking_ops(nor);
2520 }
2521
2522 /**
2523 * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash
2524 * parameters and settings based on JESD216 SFDP standard.
2525 * @nor: pointer to a 'struct spi_nor'.
2526 *
2527 * The method has a roll-back mechanism: in case the SFDP parsing fails, the
2528 * legacy flash parameters and settings will be restored.
2529 */
2530 static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
2531 {
2532 struct spi_nor_flash_parameter sfdp_params;
2533
2534 memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
2535
2536 if (spi_nor_parse_sfdp(nor)) {
2537 memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
2538 nor->flags &= ~SNOR_F_4B_OPCODES;
2539 }
2540 }
2541
2542 /**
2543 * spi_nor_init_params_deprecated() - Deprecated way of initializing flash
2544 * parameters and settings.
2545 * @nor: pointer to a 'struct spi_nor'.
2546 *
2547 * The method assumes that the flash doesn't support SFDP, so it initializes
2548 * the flash parameters in spi_nor_no_sfdp_init_params(), which can later be
2549 * overwritten when parsing SFDP, if supported.
2550 */
2551 static void spi_nor_init_params_deprecated(struct spi_nor *nor)
2552 {
2553 spi_nor_no_sfdp_init_params(nor);
2554
2555 spi_nor_manufacturer_init_params(nor);
2556
2557 if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ |
2558 SPI_NOR_QUAD_READ |
2559 SPI_NOR_OCTAL_READ |
2560 SPI_NOR_OCTAL_DTR_READ))
2561 spi_nor_sfdp_init_params_deprecated(nor);
2562 }
2563
2564 /**
2565 * spi_nor_init_default_params() - Default initialization of flash parameters
2566 * and settings. Done for all flashes, regardless of whether they define SFDP tables
2567 * or not.
2568 * @nor: pointer to a 'struct spi_nor'.
2569 */
2570 static void spi_nor_init_default_params(struct spi_nor *nor)
2571 {
2572 struct spi_nor_flash_parameter *params = nor->params;
2573 const struct flash_info *info = nor->info;
2574 struct device_node *np = spi_nor_get_flash_node(nor);
2575
2576 params->quad_enable = spi_nor_sr2_bit1_quad_enable;
2577 params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
2578 params->otp.org = &info->otp_org;
2579
2580 /* Default to 16-bit Write Status (01h) Command */
2581 nor->flags |= SNOR_F_HAS_16BIT_SR;
2582
2583 /* Set SPI NOR sizes. */
2584 params->writesize = 1;
2585 params->size = (u64)info->sector_size * info->n_sectors;
2586 params->page_size = info->page_size;
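/*
 * For illustration (hypothetical part): 256 sectors of 64 KiB give
 * params->size = 16 MiB, typically with a 256-byte page size.
 */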
2587
2588 if (!(info->flags & SPI_NOR_NO_FR)) {
2589 /* Default to Fast Read for DT and non-DT platform devices. */
2590 params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
2591
2592 /* Mask out Fast Read if not requested at DT instantiation. */
2593 if (np && !of_property_read_bool(np, "m25p,fast-read"))
2594 params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
2595 }
2596
2597 /* (Fast) Read settings. */
2598 params->hwcaps.mask |= SNOR_HWCAPS_READ;
2599 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
2600 0, 0, SPINOR_OP_READ,
2601 SNOR_PROTO_1_1_1);
2602
2603 if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
2604 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
2605 0, 8, SPINOR_OP_READ_FAST,
2606 SNOR_PROTO_1_1_1);
2607 /* Page Program settings. */
2608 params->hwcaps.mask |= SNOR_HWCAPS_PP;
2609 spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
2610 SPINOR_OP_PP, SNOR_PROTO_1_1_1);
2611
2612 if (info->flags & SPI_NOR_QUAD_PP) {
2613 params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
2614 spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
2615 SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
2616 }
2617 }
2618
2619 /**
2620 * spi_nor_init_params() - Initialize the flash's parameters and settings.
2621 * @nor: pointer to a 'struct spi_nor'.
2622 *
2623 * The flash parameters and settings are initialized based on a sequence of
2624 * calls that are ordered by priority:
2625 *
2626 * 1/ Default flash parameters initialization. The initializations are done
2627 * based on nor->info data:
2628 * spi_nor_init_default_params()
2629 *
2630 * which can be overwritten by:
2631 * 2/ Manufacturer flash parameters initialization. The initializations are
2632 * done based on MFR register, or when the decisions can not be done solely
2633 * based on MFR, by using specific flash_info tweaks, ->default_init():
2634 * spi_nor_manufacturer_init_params()
2635 *
2636 * which can be overwritten by:
2637 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
2638 * should be more accurate than the above.
2639 * spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params()
2640 *
2641 * Please note that there is a ->post_bfpt() fixup hook that can overwrite
2642 * the flash parameters and settings immediately after parsing the Basic
2643 * Flash Parameter Table.
2644 * spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed.
2645 * It is used to tweak various flash parameters when information provided
2646 * by the SFDP tables is wrong.
2647 *
2648 * which can be overwritten by:
2649 * 4/ Late flash parameters initialization, used to initialize flash
2650 * parameters that are not declared in the JESD216 SFDP standard, or where SFDP
2651 * tables are not defined at all.
2652 * spi_nor_late_init_params()
2653 *
2654 * Return: 0 on success, -errno otherwise.
2655 */
2656 static int spi_nor_init_params(struct spi_nor *nor)
2657 {
2658 int ret;
2659
2660 nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
2661 if (!nor->params)
2662 return -ENOMEM;
2663
2664 spi_nor_init_default_params(nor);
2665
2666 if (nor->info->parse_sfdp) {
2667 ret = spi_nor_parse_sfdp(nor);
2668 if (ret) {
2669 dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
2670 return ret;
2671 }
2672 } else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) {
2673 spi_nor_no_sfdp_init_params(nor);
2674 } else {
2675 spi_nor_init_params_deprecated(nor);
2676 }
2677
2678 spi_nor_late_init_params(nor);
2679
2680 return 0;
2681 }
2682
2683 /**
 * spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed
2684 * @nor: pointer to a 'struct spi_nor'
2685 * @enable: whether to enable or disable Octal DTR
2686 *
2687 * Return: 0 on success, -errno otherwise.
2688 */
2689 static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
2690 {
2691 int ret;
2692
2693 if (!nor->params->octal_dtr_enable)
2694 return 0;
2695
2696 if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
2697 nor->write_proto == SNOR_PROTO_8_8_8_DTR))
2698 return 0;
2699
2700 if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
2701 return 0;
2702
2703 ret = nor->params->octal_dtr_enable(nor, enable);
2704 if (ret)
2705 return ret;
2706
2707 if (enable)
2708 nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
2709 else
2710 nor->reg_proto = SNOR_PROTO_1_1_1;
2711
2712 return 0;
2713 }
2714
2715 /**
2716 * spi_nor_quad_enable() - enable Quad I/O if needed.
2717 * @nor: pointer to a 'struct spi_nor'
2718 *
2719 * Return: 0 on success, -errno otherwise.
2720 */
2721 static int spi_nor_quad_enable(struct spi_nor *nor)
2722 {
2723 if (!nor->params->quad_enable)
2724 return 0;
2725
2726 if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
2727 spi_nor_get_protocol_width(nor->write_proto) == 4))
2728 return 0;
2729
2730 return nor->params->quad_enable(nor);
2731 }
2732
2733 static int spi_nor_init(struct spi_nor *nor)
2734 {
2735 int err;
2736
2737 err = spi_nor_octal_dtr_enable(nor, true);
2738 if (err) {
2739 dev_dbg(nor->dev, "octal mode not supported\n");
2740 return err;
2741 }
2742
2743 err = spi_nor_quad_enable(nor);
2744 if (err) {
2745 dev_dbg(nor->dev, "quad mode not supported\n");
2746 return err;
2747 }
2748
2749 /*
2750 * Some SPI NOR flashes are write protected by default after a power-on
2751 * reset cycle, in order to avoid inadvertent writes during power-up.
2752 * Backward compatibility imposes to unlock the entire flash memory
2753 * array at power-up by default. Depending on the kernel configuration
2754 * (1) do nothing, (2) always unlock the entire flash array or (3)
2755 * unlock the entire flash array only when the software write
2756 * protection bits are volatile. The latter is indicated by
2757 * SNOR_F_SWP_IS_VOLATILE.
2758 */
2759 if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
2760 (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
2761 nor->flags & SNOR_F_SWP_IS_VOLATILE))
2762 spi_nor_try_unlock_all(nor);
2763
2764 if (nor->addr_nbytes == 4 &&
2765 nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
2766 !(nor->flags & SNOR_F_4B_OPCODES)) {
2767 /*
2768 * If the RESET# pin isn't hooked up properly, or the system
2769 * otherwise doesn't perform a reset command in the boot
2770 * sequence, it's impossible to 100% protect against unexpected
2771 * reboots (e.g., crashes). Warn the user (or hopefully, system
2772 * designer) that this is bad.
2773 */
2774 WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
2775 "enabling reset hack; may not recover from unexpected reboots\n");
2776 err = nor->params->set_4byte_addr_mode(nor, true);
2777 if (err && err != -ENOTSUPP)
2778 return err;
2779 }
2780
2781 return 0;
2782 }
2783
2784 /**
2785 * spi_nor_soft_reset() - Perform a software reset
2786 * @nor: pointer to 'struct spi_nor'
2787 *
2788 * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets
2789 * the device to its power-on-reset state. This is useful when the software has
2790 * made some changes to device (volatile) registers and needs to reset it before
2791 * shutting down, for example.
2792 *
2793 * Not every flash supports this sequence. The same set of opcodes might be used
2794 * for some other operation on a flash that does not support this. Support for
2795 * this sequence can be discovered via SFDP in the BFPT table.
2796 *
2797 * Return: 0 on success, -errno otherwise.
2798 */
2799 static void spi_nor_soft_reset(struct spi_nor *nor)
2800 {
2801 struct spi_mem_op op;
2802 int ret;
2803
2804 op = (struct spi_mem_op)SPINOR_SRSTEN_OP;
2805
2806 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
2807
2808 ret = spi_mem_exec_op(nor->spimem, &op);
2809 if (ret) {
2810 dev_warn(nor->dev, "Software reset failed: %d\n", ret);
2811 return;
2812 }
2813
2814 op = (struct spi_mem_op)SPINOR_SRST_OP;
2815
2816 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
2817
2818 ret = spi_mem_exec_op(nor->spimem, &op);
2819 if (ret) {
2820 dev_warn(nor->dev, "Software reset failed: %d\n", ret);
2821 return;
2822 }
2823
2824 /*
2825 * Software Reset is not instant, and the delay varies from flash to
2826 * flash. Looking at a few flashes, most range somewhere below 100
2827 * microseconds. So, sleep for a range of 200-400 us.
2828 */
2829 usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
2830 }
2831
2832 /* mtd suspend handler */
2833 static int spi_nor_suspend(struct mtd_info *mtd)
2834 {
2835 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2836 int ret;
2837
2838 /* Disable octal DTR mode if we enabled it. */
2839 ret = spi_nor_octal_dtr_enable(nor, false);
2840 if (ret)
2841 dev_err(nor->dev, "suspend() failed\n");
2842
2843 return ret;
2844 }
2845
2846 /* mtd resume handler */
2847 static void spi_nor_resume(struct mtd_info *mtd)
2848 {
2849 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2850 struct device *dev = nor->dev;
2851 int ret;
2852
2853 /* re-initialize the nor chip */
2854 ret = spi_nor_init(nor);
2855 if (ret)
2856 dev_err(dev, "resume() failed\n");
2857 }
2858
2859 static int spi_nor_get_device(struct mtd_info *mtd)
2860 {
2861 struct mtd_info *master = mtd_get_master(mtd);
2862 struct spi_nor *nor = mtd_to_spi_nor(master);
2863 struct device *dev;
2864
2865 if (nor->spimem)
2866 dev = nor->spimem->spi->controller->dev.parent;
2867 else
2868 dev = nor->dev;
2869
2870 if (!try_module_get(dev->driver->owner))
2871 return -ENODEV;
2872
2873 return 0;
2874 }
2875
2876 static void spi_nor_put_device(struct mtd_info *mtd)
2877 {
2878 struct mtd_info *master = mtd_get_master(mtd);
2879 struct spi_nor *nor = mtd_to_spi_nor(master);
2880 struct device *dev;
2881
2882 if (nor->spimem)
2883 dev = nor->spimem->spi->controller->dev.parent;
2884 else
2885 dev = nor->dev;
2886
2887 module_put(dev->driver->owner);
2888 }
2889
2890 void spi_nor_restore(struct spi_nor *nor)
2891 {
2892 int ret;
2893
2894 /* restore the addressing mode */
2895 if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
2896 nor->flags & SNOR_F_BROKEN_RESET) {
2897 ret = nor->params->set_4byte_addr_mode(nor, false);
2898 if (ret)
2899 /*
2900 * Do not stop the execution in the hope that the flash
2901 * will default to the 3-byte address mode after the
2902 * software reset.
2903 */
2904 dev_err(nor->dev, "Failed to exit 4-byte address mode, err = %d\n", ret);
2905 }
2906
2907 if (nor->flags & SNOR_F_SOFT_RESET)
2908 spi_nor_soft_reset(nor);
2909 }
2910 EXPORT_SYMBOL_GPL(spi_nor_restore);
2911
2912 static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
2913 const char *name)
2914 {
2915 unsigned int i, j;
2916
2917 for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
2918 for (j = 0; j < manufacturers[i]->nparts; j++) {
2919 if (!strcmp(name, manufacturers[i]->parts[j].name)) {
2920 nor->manufacturer = manufacturers[i];
2921 return &manufacturers[i]->parts[j];
2922 }
2923 }
2924 }
2925
2926 return NULL;
2927 }
2928
2929 static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
2930 const char *name)
2931 {
2932 const struct flash_info *info = NULL;
2933
2934 if (name)
2935 info = spi_nor_match_name(nor, name);
2936 /* Try to auto-detect if the chip name wasn't specified or wasn't found */
2937 if (!info)
2938 return spi_nor_detect(nor);
2939
2940 /*
2941 * If caller has specified name of flash model that can normally be
2942 * detected using JEDEC, let's verify it.
2943 */
2944 if (name && info->id_len) {
2945 const struct flash_info *jinfo;
2946
2947 jinfo = spi_nor_detect(nor);
2948 if (IS_ERR(jinfo)) {
2949 return jinfo;
2950 } else if (jinfo != info) {
2951 /*
2952 * JEDEC knows better, so overwrite platform ID. We
2953 * can't trust partitions any longer, but we'll let
2954 * mtd apply them anyway, since some partitions may be
2955 * marked read-only, and we don't want to lose that
2956 * information, even if it's not 100% accurate.
2957 */
2958 dev_warn(nor->dev, "found %s, expected %s\n",
2959 jinfo->name, info->name);
2960 info = jinfo;
2961 }
2962 }
2963
2964 return info;
2965 }
2966
2967 static void spi_nor_set_mtd_info(struct spi_nor *nor)
2968 {
2969 struct mtd_info *mtd = &nor->mtd;
2970 struct device *dev = nor->dev;
2971
2972 spi_nor_set_mtd_locking_ops(nor);
2973 spi_nor_set_mtd_otp_ops(nor);
2974
2975 mtd->dev.parent = dev;
2976 if (!mtd->name)
2977 mtd->name = dev_name(dev);
2978 mtd->type = MTD_NORFLASH;
2979 mtd->flags = MTD_CAP_NORFLASH;
2980 if (nor->info->flags & SPI_NOR_NO_ERASE)
2981 mtd->flags |= MTD_NO_ERASE;
2982 else
2983 mtd->_erase = spi_nor_erase;
2984 mtd->writesize = nor->params->writesize;
2985 mtd->writebufsize = nor->params->page_size;
2986 mtd->size = nor->params->size;
2987 mtd->_read = spi_nor_read;
2988 /* Might be already set by some SST flashes. */
2989 if (!mtd->_write)
2990 mtd->_write = spi_nor_write;
2991 mtd->_suspend = spi_nor_suspend;
2992 mtd->_resume = spi_nor_resume;
2993 mtd->_get_device = spi_nor_get_device;
2994 mtd->_put_device = spi_nor_put_device;
2995 }
2996
2997 static int spi_nor_hw_reset(struct spi_nor *nor)
2998 {
2999 struct gpio_desc *reset;
3000
3001 reset = devm_gpiod_get_optional(nor->dev, "reset", GPIOD_OUT_LOW);
3002 if (IS_ERR_OR_NULL(reset))
3003 return PTR_ERR_OR_ZERO(reset);
3004
3005 /*
3006 * Experimental delay values, derived from the datasheets of different
3007 * flash device vendors.
3008 */
3009 usleep_range(1, 5);
3010 gpiod_set_value_cansleep(reset, 1);
3011 usleep_range(100, 150);
3012 gpiod_set_value_cansleep(reset, 0);
3013 usleep_range(1000, 1200);
3014
3015 return 0;
3016 }
3017
3018 int spi_nor_scan(struct spi_nor *nor, const char *name,
3019 const struct spi_nor_hwcaps *hwcaps)
3020 {
3021 const struct flash_info *info;
3022 struct device *dev = nor->dev;
3023 struct mtd_info *mtd = &nor->mtd;
3024 int ret;
3025 int i;
3026
3027 ret = spi_nor_check(nor);
3028 if (ret)
3029 return ret;
3030
3031 /* Reset SPI protocol for all commands. */
3032 nor->reg_proto = SNOR_PROTO_1_1_1;
3033 nor->read_proto = SNOR_PROTO_1_1_1;
3034 nor->write_proto = SNOR_PROTO_1_1_1;
3035
3036 /*
3037 * We need the bounce buffer early to read/write registers when going
3038 * through the spi-mem layer (buffers have to be DMA-able).
3039 * For spi-mem drivers, we'll reallocate a new buffer if
3040 * nor->params->page_size turns out to be greater than PAGE_SIZE (which
3041 * shouldn't happen any time soon, since NOR pages are usually smaller
3042 * than 1KB) after spi_nor_scan() returns.
3043 */
3044 nor->bouncebuf_size = PAGE_SIZE;
3045 nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
3046 GFP_KERNEL);
3047 if (!nor->bouncebuf)
3048 return -ENOMEM;
3049
3050 ret = spi_nor_hw_reset(nor);
3051 if (ret)
3052 return ret;
3053
3054 info = spi_nor_get_flash_info(nor, name);
3055 if (IS_ERR(info))
3056 return PTR_ERR(info);
3057
3058 nor->info = info;
3059
3060 mutex_init(&nor->lock);
3061
3062 /* Init flash parameters based on flash_info struct and SFDP */
3063 ret = spi_nor_init_params(nor);
3064 if (ret)
3065 return ret;
3066
3067 /*
3068 * Configure the SPI memory:
3069 * - select op codes for (Fast) Read, Page Program and Sector Erase.
3070 * - set the number of dummy cycles (mode cycles + wait states).
3071 * - set the SPI protocols for register and memory accesses.
3072 * - set the number of address bytes.
3073 */
3074 ret = spi_nor_setup(nor, hwcaps);
3075 if (ret)
3076 return ret;
3077
3078 /* Send all the required SPI flash commands to initialize device */
3079 ret = spi_nor_init(nor);
3080 if (ret)
3081 return ret;
3082
3083 /* No mtd_info fields should be used up to this point. */
3084 spi_nor_set_mtd_info(nor);
3085
3086 dev_info(dev, "%s (%lld Kbytes)\n", info->name,
3087 (long long)mtd->size >> 10);
3088
3089 dev_dbg(dev,
3090 "mtd .name = %s, .size = 0x%llx (%lldMiB), "
3091 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
3092 mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
3093 mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
3094
3095 if (mtd->numeraseregions)
3096 for (i = 0; i < mtd->numeraseregions; i++)
3097 dev_dbg(dev,
3098 "mtd.eraseregions[%d] = { .offset = 0x%llx, "
3099 ".erasesize = 0x%.8x (%uKiB), "
3100 ".numblocks = %d }\n",
3101 i, (long long)mtd->eraseregions[i].offset,
3102 mtd->eraseregions[i].erasesize,
3103 mtd->eraseregions[i].erasesize / 1024,
3104 mtd->eraseregions[i].numblocks);
3105 return 0;
3106 }
3107 EXPORT_SYMBOL_GPL(spi_nor_scan);
3108
3109 static int spi_nor_create_read_dirmap(struct spi_nor *nor)
3110 {
3111 struct spi_mem_dirmap_info info = {
3112 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
3113 SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
3114 SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
3115 SPI_MEM_OP_DATA_IN(0, NULL, 0)),
3116 .offset = 0,
3117 .length = nor->params->size,
3118 };
3119 struct spi_mem_op *op = &info.op_tmpl;
3120
3121 spi_nor_spimem_setup_op(nor, op, nor->read_proto);
3122
3123 /* convert the dummy cycles to the number of bytes */
3124 op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
3125 if (spi_nor_protocol_is_dtr(nor->read_proto))
3126 op->dummy.nbytes *= 2;
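/*
 * For illustration: an 8D-8D-8D read with 20 dummy cycles converts to
 * 20 * 8 / 8 = 20 bytes above, doubled to 40 bytes because DTR transfers
 * two bytes per clock cycle on an octal bus.
 */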
3127
3128 /*
3129 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
3130 * of data bytes is non-zero, the data buswidth won't be set here. So,
3131 * do it explicitly.
3132 */
3133 op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
3134
3135 nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3136 &info);
3137 return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
3138 }
3139
3140 static int spi_nor_create_write_dirmap(struct spi_nor *nor)
3141 {
3142 struct spi_mem_dirmap_info info = {
3143 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
3144 SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
3145 SPI_MEM_OP_NO_DUMMY,
3146 SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
3147 .offset = 0,
3148 .length = nor->params->size,
3149 };
3150 struct spi_mem_op *op = &info.op_tmpl;
3151
3152 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
3153 op->addr.nbytes = 0;
3154
3155 spi_nor_spimem_setup_op(nor, op, nor->write_proto);
3156
3157 /*
3158 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
3159 * of data bytes is non-zero, the data buswidth won't be set here. So,
3160 * do it explicitly.
3161 */
3162 op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
3163
3164 nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3165 &info);
3166 return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
3167 }
3168
3169 static int spi_nor_probe(struct spi_mem *spimem)
3170 {
3171 struct spi_device *spi = spimem->spi;
3172 struct flash_platform_data *data = dev_get_platdata(&spi->dev);
3173 struct spi_nor *nor;
3174 /*
3175 * Enable all caps by default. The core will mask them after
3176 * checking what's really supported using spi_mem_supports_op().
3177 */
3178 const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
3179 char *flash_name;
3180 int ret;
3181
3182 nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
3183 if (!nor)
3184 return -ENOMEM;
3185
3186 nor->spimem = spimem;
3187 nor->dev = &spi->dev;
3188 spi_nor_set_flash_node(nor, spi->dev.of_node);
3189
3190 spi_mem_set_drvdata(spimem, nor);
3191
3192 if (data && data->name)
3193 nor->mtd.name = data->name;
3194
3195 if (!nor->mtd.name)
3196 nor->mtd.name = spi_mem_get_name(spimem);
3197
3198 /*
3199 * For some (historical?) reason many platforms provide two different
3200 * names in flash_platform_data: "name" and "type". Quite often name is
3201 * set to "m25p80" and then "type" provides a real chip name.
3202 * If that's the case, respect "type" and ignore a "name".
3203 */
3204 if (data && data->type)
3205 flash_name = data->type;
3206 else if (!strcmp(spi->modalias, "spi-nor"))
3207 flash_name = NULL; /* auto-detect */
3208 else
3209 flash_name = spi->modalias;
3210
3211 ret = spi_nor_scan(nor, flash_name, &hwcaps);
3212 if (ret)
3213 return ret;
3214
3215 spi_nor_debugfs_register(nor);
3216
3217 /*
3218 * None of the existing parts have > 512B pages, but let's play safe
3219 * and add this logic so that if anyone ever adds support for such
3220 * a NOR we don't end up with buffer overflows.
3221 */
3222 if (nor->params->page_size > PAGE_SIZE) {
3223 nor->bouncebuf_size = nor->params->page_size;
3224 devm_kfree(nor->dev, nor->bouncebuf);
3225 nor->bouncebuf = devm_kmalloc(nor->dev,
3226 nor->bouncebuf_size,
3227 GFP_KERNEL);
3228 if (!nor->bouncebuf)
3229 return -ENOMEM;
3230 }
3231
3232 ret = spi_nor_create_read_dirmap(nor);
3233 if (ret)
3234 return ret;
3235
3236 ret = spi_nor_create_write_dirmap(nor);
3237 if (ret)
3238 return ret;
3239
3240 return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
3241 data ? data->nr_parts : 0);
3242 }
3243
3244 static int spi_nor_remove(struct spi_mem *spimem)
3245 {
3246 struct spi_nor *nor = spi_mem_get_drvdata(spimem);
3247
3248 spi_nor_restore(nor);
3249
3250 /* Clean up MTD stuff. */
3251 return mtd_device_unregister(&nor->mtd);
3252 }
3253
3254 static void spi_nor_shutdown(struct spi_mem *spimem)
3255 {
3256 struct spi_nor *nor = spi_mem_get_drvdata(spimem);
3257
3258 spi_nor_restore(nor);
3259 }
3260
3261 /*
3262 * Do NOT add to this array without reading the following:
3263 *
3264 * Historically, many flash devices are bound to this driver by their name. But
3265 * since most of these flashes are compatible to some extent, and their
3266 * differences can often be distinguished by the JEDEC read-ID command, we
3267 * encourage new users to add support to the spi-nor library, and simply bind
3268 * against a generic string here (e.g., "jedec,spi-nor").
3269 *
3270 * Many flash names are kept here in this list to keep them available
3271 * as module aliases for existing platforms.
3272 */
3273 static const struct spi_device_id spi_nor_dev_ids[] = {
3274 /*
3275 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
3276 * hack around the fact that the SPI core does not provide uevent
3277 * matching for .of_match_table
3278 */
3279 {"spi-nor"},
3280
3281 /*
3282 * Entries not used in DTs that should be safe to drop after replacing
3283 * them with "spi-nor" in platform data.
3284 */
3285 {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
3286
3287 /*
3288 * Entries that were used in DTs without "jedec,spi-nor" fallback and
3289 * should be kept for backward compatibility.
3290 */
3291 {"at25df321a"}, {"at25df641"}, {"at26df081a"},
3292 {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
3293 {"mx25l25635e"},{"mx66l51235l"},
3294 {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
3295 {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"},
3296 {"s25fl064k"},
3297 {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
3298 {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"},
3299 {"m25p64"}, {"m25p128"},
3300 {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
3301 {"w25q80bl"}, {"w25q128"}, {"w25q256"},
3302
3303 /* Flashes that can't be detected using JEDEC */
3304 {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
3305 {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
3306 {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
3307
3308 /* Everspin MRAMs (non-JEDEC) */
3309 { "mr25h128" }, /* 128 Kib, 40 MHz */
3310 { "mr25h256" }, /* 256 Kib, 40 MHz */
3311 { "mr25h10" }, /* 1 Mib, 40 MHz */
3312 { "mr25h40" }, /* 4 Mib, 40 MHz */
3313
3314 { },
3315 };
3316 MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
3317
3318 static const struct of_device_id spi_nor_of_table[] = {
3319 /*
3320 * Generic compatibility for SPI NOR that can be identified by the
3321 * JEDEC READ ID opcode (0x9F). Use this, if possible.
3322 */
3323 { .compatible = "jedec,spi-nor" },
3324 { /* sentinel */ },
3325 };
3326 MODULE_DEVICE_TABLE(of, spi_nor_of_table);
3327
3328 /*
3329 * REVISIT: many of these chips have deep power-down modes, which
3330 * should clearly be entered on suspend() to minimize power use.
3331 * And also when they're otherwise idle...
3332 */
3333 static struct spi_mem_driver spi_nor_driver = {
3334 .spidrv = {
3335 .driver = {
3336 .name = "spi-nor",
3337 .of_match_table = spi_nor_of_table,
3338 .dev_groups = spi_nor_sysfs_groups,
3339 },
3340 .id_table = spi_nor_dev_ids,
3341 },
3342 .probe = spi_nor_probe,
3343 .remove = spi_nor_remove,
3344 .shutdown = spi_nor_shutdown,
3345 };
3346 module_spi_mem_driver(spi_nor_driver);
3347
3348 MODULE_LICENSE("GPL v2");
3349 MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
3350 MODULE_AUTHOR("Mike Lavender");
3351 MODULE_DESCRIPTION("framework for SPI NOR");
3352