1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
5 *
6 * Copyright (C) 2005, Intec Automation Inc.
7 * Copyright (C) 2014, Freescale Semiconductor, Inc.
8 *
9 * Synced from Linux v4.19
10 */
11
12 #include <common.h>
13 #include <log.h>
14 #include <watchdog.h>
15 #include <dm.h>
16 #include <dm/device_compat.h>
17 #include <dm/devres.h>
18 #include <linux/bitops.h>
19 #include <linux/err.h>
20 #include <linux/errno.h>
21 #include <linux/log2.h>
22 #include <linux/math64.h>
23 #include <linux/sizes.h>
24
25 #include <linux/mtd/mtd.h>
26 #include <linux/mtd/spi-nor.h>
27 #include <spi-mem.h>
28 #include <spi.h>
29
30 #include "sf_internal.h"
31
32 /* Define max times to check status register before we give up. */
33
34 /*
35 * For everything but full-chip erase; probably could be much smaller, but kept
36 * around for safety for now
37 */
38
39 #define HZ CONFIG_SYS_HZ
40
41 #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
42
static int spi_nor_read_write_reg(struct spi_nor *nor, struct spi_mem_op
				  *op, void *buf)
45 {
46 if (op->data.dir == SPI_MEM_DATA_IN)
47 op->data.buf.in = buf;
48 else
49 op->data.buf.out = buf;
50 return spi_mem_exec_op(nor->spi, op);
51 }
52
static int spi_nor_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
54 {
55 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
56 SPI_MEM_OP_NO_ADDR,
57 SPI_MEM_OP_NO_DUMMY,
58 SPI_MEM_OP_DATA_IN(len, NULL, 1));
59 int ret;
60
61 ret = spi_nor_read_write_reg(nor, &op, val);
62 if (ret < 0)
63 dev_dbg(nor->dev, "error %d reading %x\n", ret, code);
64
65 return ret;
66 }
67
static int spi_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
69 {
70 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
71 SPI_MEM_OP_NO_ADDR,
72 SPI_MEM_OP_NO_DUMMY,
73 SPI_MEM_OP_DATA_OUT(len, NULL, 1));
74
75 return spi_nor_read_write_reg(nor, &op, buf);
76 }
77
static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
				 u_char *buf)
80 {
81 struct spi_mem_op op =
82 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
83 SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
84 SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
85 SPI_MEM_OP_DATA_IN(len, buf, 1));
86 size_t remaining = len;
87 int ret;
88
89 /* get transfer protocols. */
90 op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
91 op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
92 op.dummy.buswidth = op.addr.buswidth;
93 op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
94
95 /* convert the dummy cycles to the number of bytes */
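	/* e.g. nor->read_dummy = 8 cycles on a quad (4-bit) bus gives (8 * 4) / 8 = 4 dummy bytes */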
96 op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
97
98 while (remaining) {
99 op.data.nbytes = remaining < UINT_MAX ? remaining : UINT_MAX;
100 ret = spi_mem_adjust_op_size(nor->spi, &op);
101 if (ret)
102 return ret;
103
104 ret = spi_mem_exec_op(nor->spi, &op);
105 if (ret)
106 return ret;
107
108 op.addr.val += op.data.nbytes;
109 remaining -= op.data.nbytes;
110 op.data.buf.in += op.data.nbytes;
111 }
112
113 return len;
114 }
115
static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
				  const u_char *buf)
118 {
119 struct spi_mem_op op =
120 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
121 SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
122 SPI_MEM_OP_NO_DUMMY,
123 SPI_MEM_OP_DATA_OUT(len, buf, 1));
124 int ret;
125
126 /* get transfer protocols. */
127 op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
128 op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
129 op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
130
131 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
132 op.addr.nbytes = 0;
133
134 ret = spi_mem_adjust_op_size(nor->spi, &op);
135 if (ret)
136 return ret;
137 op.data.nbytes = len < op.data.nbytes ? len : op.data.nbytes;
138
139 ret = spi_mem_exec_op(nor->spi, &op);
140 if (ret)
141 return ret;
142
143 return op.data.nbytes;
144 }
145
/*
 * Read the status register.
 * Returns the status register value on success,
 * or a negative value if an error occurred.
 */
static int read_sr(struct spi_nor *nor)
152 {
153 int ret;
154 u8 val;
155
156 ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
157 if (ret < 0) {
158 pr_debug("error %d reading SR\n", (int)ret);
159 return ret;
160 }
161
162 return val;
163 }
164
/*
 * Read the flag status register.
 * Returns the flag status register value on success,
 * or a negative value if an error occurred.
 */
static int read_fsr(struct spi_nor *nor)
171 {
172 int ret;
173 u8 val;
174
175 ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
176 if (ret < 0) {
177 pr_debug("error %d reading FSR\n", ret);
178 return ret;
179 }
180
181 return val;
182 }
183
/*
 * Read the configuration register.
 * Returns the configuration register value on success,
 * or a negative value if an error occurred.
 */
189 #if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
static int read_cr(struct spi_nor *nor)
191 {
192 int ret;
193 u8 val;
194
195 ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
196 if (ret < 0) {
197 dev_dbg(nor->dev, "error %d reading CR\n", ret);
198 return ret;
199 }
200
201 return val;
202 }
203 #endif
204
205 /*
 * Write 1 byte to the status register.
 * Returns negative if an error occurred.
208 */
static int write_sr(struct spi_nor *nor, u8 val)
210 {
211 nor->cmd_buf[0] = val;
212 return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
213 }
214
215 /*
216 * Set write enable latch with Write Enable command.
217 * Returns negative if error occurred.
218 */
static int write_enable(struct spi_nor *nor)
220 {
221 return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
222 }
223
224 /*
225 * Send write disable instruction to the chip.
226 */
static int write_disable(struct spi_nor *nor)
228 {
229 return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
230 }
231
static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
233 {
234 return mtd->priv;
235 }
236
237 #ifndef CONFIG_SPI_FLASH_BAR
static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
239 {
240 size_t i;
241
242 for (i = 0; i < size; i++)
243 if (table[i][0] == opcode)
244 return table[i][1];
245
246 /* No conversion found, keep input op code. */
247 return opcode;
248 }
249
static u8 spi_nor_convert_3to4_read(u8 opcode)
251 {
252 static const u8 spi_nor_3to4_read[][2] = {
253 { SPINOR_OP_READ, SPINOR_OP_READ_4B },
254 { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
255 { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
256 { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
257 { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
258 { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
259 { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
260 { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
261
262 { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
263 { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
264 { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
265 };
266
267 return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
268 ARRAY_SIZE(spi_nor_3to4_read));
269 }
270
static u8 spi_nor_convert_3to4_program(u8 opcode)
272 {
273 static const u8 spi_nor_3to4_program[][2] = {
274 { SPINOR_OP_PP, SPINOR_OP_PP_4B },
275 { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
276 { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
277 { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
278 { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
279 };
280
281 return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
282 ARRAY_SIZE(spi_nor_3to4_program));
283 }
284
static u8 spi_nor_convert_3to4_erase(u8 opcode)
286 {
287 static const u8 spi_nor_3to4_erase[][2] = {
288 { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
289 { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
290 { SPINOR_OP_SE, SPINOR_OP_SE_4B },
291 };
292
293 return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
294 ARRAY_SIZE(spi_nor_3to4_erase));
295 }
296
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
				      const struct flash_info *info)
299 {
300 /* Do some manufacturer fixups first */
301 switch (JEDEC_MFR(info)) {
302 case SNOR_MFR_SPANSION:
303 /* No small sector erase for 4-byte command set */
304 nor->erase_opcode = SPINOR_OP_SE;
305 nor->mtd.erasesize = info->sector_size;
306 break;
307
308 default:
309 break;
310 }
311
312 nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
313 nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
314 nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
315 }
316 #endif /* !CONFIG_SPI_FLASH_BAR */
317
318 /* Enable/disable 4-byte addressing mode. */
static int set_4byte(struct spi_nor *nor, const struct flash_info *info,
		     int enable)
321 {
322 int status;
323 bool need_wren = false;
324 u8 cmd;
325
326 switch (JEDEC_MFR(info)) {
327 case SNOR_MFR_ST:
328 case SNOR_MFR_MICRON:
329 /* Some Micron need WREN command; all will accept it */
330 need_wren = true;
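		/* fall through */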
331 case SNOR_MFR_ISSI:
332 case SNOR_MFR_MACRONIX:
333 case SNOR_MFR_WINBOND:
334 if (need_wren)
335 write_enable(nor);
336
337 cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
338 status = nor->write_reg(nor, cmd, NULL, 0);
339 if (need_wren)
340 write_disable(nor);
341
342 if (!status && !enable &&
343 JEDEC_MFR(info) == SNOR_MFR_WINBOND) {
344 /*
345 * On Winbond W25Q256FV, leaving 4byte mode causes
346 * the Extended Address Register to be set to 1, so all
347 * 3-byte-address reads come from the second 16M.
348 * We must clear the register to enable normal behavior.
349 */
350 write_enable(nor);
351 nor->cmd_buf[0] = 0;
352 nor->write_reg(nor, SPINOR_OP_WREAR, nor->cmd_buf, 1);
353 write_disable(nor);
354 }
355
356 return status;
357 default:
358 /* Spansion style */
359 nor->cmd_buf[0] = enable << 7;
360 return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
361 }
362 }
363
static int spi_nor_sr_ready(struct spi_nor *nor)
365 {
366 int sr = read_sr(nor);
367
368 if (sr < 0)
369 return sr;
370
371 if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
372 if (sr & SR_E_ERR)
373 dev_dbg(nor->dev, "Erase Error occurred\n");
374 else
375 dev_dbg(nor->dev, "Programming Error occurred\n");
376
377 nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
378 return -EIO;
379 }
380
381 return !(sr & SR_WIP);
382 }
383
static int spi_nor_fsr_ready(struct spi_nor *nor)
385 {
386 int fsr = read_fsr(nor);
387
388 if (fsr < 0)
389 return fsr;
390
391 if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
392 if (fsr & FSR_E_ERR)
393 dev_err(nor->dev, "Erase operation failed.\n");
394 else
395 dev_err(nor->dev, "Program operation failed.\n");
396
397 if (fsr & FSR_PT_ERR)
398 dev_err(nor->dev,
399 "Attempted to modify a protected sector.\n");
400
401 nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
402 return -EIO;
403 }
404
405 return fsr & FSR_READY;
406 }
407
static int spi_nor_ready(struct spi_nor *nor)
409 {
410 int sr, fsr;
411
412 sr = spi_nor_sr_ready(nor);
413 if (sr < 0)
414 return sr;
415 fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
416 if (fsr < 0)
417 return fsr;
418 return sr && fsr;
419 }
420
421 /*
422 * Service routine to read status register until ready, or timeout occurs.
423 * Returns non-zero if error.
424 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout)
427 {
428 unsigned long timebase;
429 int ret;
430
431 timebase = get_timer(0);
432
433 while (get_timer(timebase) < timeout) {
434 ret = spi_nor_ready(nor);
435 if (ret < 0)
436 return ret;
437 if (ret)
438 return 0;
439 }
440
441 dev_err(nor->dev, "flash operation timed out\n");
442
443 return -ETIMEDOUT;
444 }
445
static int spi_nor_wait_till_ready(struct spi_nor *nor)
447 {
448 return spi_nor_wait_till_ready_with_timeout(nor,
449 DEFAULT_READY_WAIT_JIFFIES);
450 }
451
452 #ifdef CONFIG_SPI_FLASH_BAR
/*
 * This "clean_bar" is necessary in a situation when the SPI flash memory
 * above 16 MiB was accessed by using the Bank Address Register's BA24 bit.
 *
 * Afterwards the BA24 bit shall be cleared so that the correct memory
 * region is accessed after a SW reset (triggered by the "reset" command).
 *
 * Otherwise, the BA24 bit may be left set and then, after reset, the
 * ROM would read/write/erase SPL from the 16 MiB * bank_sel address.
 */
static int clean_bar(struct spi_nor *nor)
464 {
465 u8 cmd, bank_sel = 0;
466
467 if (nor->bank_curr == 0)
468 return 0;
469 cmd = nor->bank_write_cmd;
470 nor->bank_curr = 0;
471 write_enable(nor);
472
473 return nor->write_reg(nor, cmd, &bank_sel, 1);
474 }
475
static int write_bar(struct spi_nor *nor, u32 offset)
477 {
478 u8 cmd, bank_sel;
479 int ret;
480
481 bank_sel = offset / SZ_16M;
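	/* e.g. an offset of 18 MiB (0x1200000) selects bank 1, the second 16 MiB bank */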
482 if (bank_sel == nor->bank_curr)
483 goto bar_end;
484
485 cmd = nor->bank_write_cmd;
486 write_enable(nor);
487 ret = nor->write_reg(nor, cmd, &bank_sel, 1);
488 if (ret < 0) {
489 debug("SF: fail to write bank register\n");
490 return ret;
491 }
492
493 bar_end:
494 nor->bank_curr = bank_sel;
495 return nor->bank_curr;
496 }
497
static int read_bar(struct spi_nor *nor, const struct flash_info *info)
499 {
500 u8 curr_bank = 0;
501 int ret;
502
503 switch (JEDEC_MFR(info)) {
504 case SNOR_MFR_SPANSION:
505 nor->bank_read_cmd = SPINOR_OP_BRRD;
506 nor->bank_write_cmd = SPINOR_OP_BRWR;
507 break;
508 default:
509 nor->bank_read_cmd = SPINOR_OP_RDEAR;
510 nor->bank_write_cmd = SPINOR_OP_WREAR;
511 }
512
513 ret = nor->read_reg(nor, nor->bank_read_cmd,
514 &curr_bank, 1);
515 if (ret) {
516 debug("SF: fail to read bank addr register\n");
517 return ret;
518 }
519 nor->bank_curr = curr_bank;
520
521 return 0;
522 }
523 #endif
524
525 /*
526 * Initiate the erasure of a single sector
527 */
static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
529 {
530 struct spi_mem_op op =
531 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
532 SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
533 SPI_MEM_OP_NO_DUMMY,
534 SPI_MEM_OP_NO_DATA);
535
536 if (nor->erase)
537 return nor->erase(nor, addr);
538
539 /*
540 * Default implementation, if driver doesn't have a specialized HW
541 * control
542 */
543 return spi_mem_exec_op(nor->spi, &op);
544 }
545
/*
 * Erase an address range on the nor chip. The address range may span
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
551 {
552 struct spi_nor *nor = mtd_to_spi_nor(mtd);
553 u32 addr, len, rem;
554 int ret;
555
556 dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
557 (long long)instr->len);
558
559 if (!instr->len)
560 return 0;
561
562 div_u64_rem(instr->len, mtd->erasesize, &rem);
563 if (rem)
564 return -EINVAL;
565
566 addr = instr->addr;
567 len = instr->len;
568
569 while (len) {
570 WATCHDOG_RESET();
571 #ifdef CONFIG_SPI_FLASH_BAR
572 ret = write_bar(nor, addr);
573 if (ret < 0)
574 return ret;
575 #endif
576 write_enable(nor);
577
578 ret = spi_nor_erase_sector(nor, addr);
579 if (ret)
580 goto erase_err;
581
582 addr += mtd->erasesize;
583 len -= mtd->erasesize;
584
585 ret = spi_nor_wait_till_ready(nor);
586 if (ret)
587 goto erase_err;
588 }
589
590 erase_err:
591 #ifdef CONFIG_SPI_FLASH_BAR
592 ret = clean_bar(nor);
593 #endif
594 write_disable(nor);
595
596 return ret;
597 }
598
599 #if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST)
600 /* Write status register and ensure bits in mask match written values */
static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
602 {
603 int ret;
604
605 write_enable(nor);
606 ret = write_sr(nor, status_new);
607 if (ret)
608 return ret;
609
610 ret = spi_nor_wait_till_ready(nor);
611 if (ret)
612 return ret;
613
614 ret = read_sr(nor);
615 if (ret < 0)
616 return ret;
617
618 return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
619 }
620
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
				 uint64_t *len)
623 {
624 struct mtd_info *mtd = &nor->mtd;
625 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
626 int shift = ffs(mask) - 1;
627 int pow;
628
629 if (!(sr & mask)) {
630 /* No protection */
631 *ofs = 0;
632 *len = 0;
633 } else {
634 pow = ((sr & mask) ^ mask) >> shift;
635 *len = mtd->size >> pow;
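		/*
		 * e.g. on an 8 MiB part with BP2..BP0 = 101b:
		 * pow = (101b ^ 111b) = 010b = 2, so len = 8 MiB >> 2 = 2 MiB
		 * (the "Upper 1/4" row of the table in the stm_lock() comment).
		 */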
636 if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
637 *ofs = 0;
638 else
639 *ofs = mtd->size - *len;
640 }
641 }
642
643 /*
644 * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
645 * @locked is false); 0 otherwise
646 */
static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, u64 len,
				    u8 sr, bool locked)
649 {
650 loff_t lock_offs;
651 uint64_t lock_len;
652
653 if (!len)
654 return 1;
655
656 stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
657
658 if (locked)
659 /* Requested range is a sub-range of locked range */
660 return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
661 else
662 /* Requested range does not overlap with locked range */
663 return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
664 }
665
static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
			    u8 sr)
668 {
669 return stm_check_lock_status_sr(nor, ofs, len, sr, true);
670 }
671
static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
			      u8 sr)
674 {
675 return stm_check_lock_status_sr(nor, ofs, len, sr, false);
676 }
677
678 /*
679 * Lock a region of the flash. Compatible with ST Micro and similar flash.
680 * Supports the block protection bits BP{0,1,2} in the status register
681 * (SR). Does not support these features found in newer SR bitfields:
682 * - SEC: sector/block protect - only handle SEC=0 (block protect)
683 * - CMP: complement protect - only support CMP=0 (range is not complemented)
684 *
685 * Support for the following is provided conditionally for some flash:
686 * - TB: top/bottom protect
687 *
688 * Sample table portion for 8MB flash (Winbond w25q64fw):
689 *
690 * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
691 * --------------------------------------------------------------------------
692 * X | X | 0 | 0 | 0 | NONE | NONE
693 * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
694 * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
695 * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
696 * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
697 * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
698 * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
699 * X | X | 1 | 1 | 1 | 8 MB | ALL
700 * ------|-------|-------|-------|-------|---------------|-------------------
701 * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
702 * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
703 * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
704 * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
705 * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
706 * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
707 *
708 * Returns negative on errors, 0 on success.
709 */
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
711 {
712 struct mtd_info *mtd = &nor->mtd;
713 int status_old, status_new;
714 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
715 u8 shift = ffs(mask) - 1, pow, val;
716 loff_t lock_len;
717 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
718 bool use_top;
719
720 status_old = read_sr(nor);
721 if (status_old < 0)
722 return status_old;
723
724 /* If nothing in our range is unlocked, we don't need to do anything */
725 if (stm_is_locked_sr(nor, ofs, len, status_old))
726 return 0;
727
728 /* If anything below us is unlocked, we can't use 'bottom' protection */
729 if (!stm_is_locked_sr(nor, 0, ofs, status_old))
730 can_be_bottom = false;
731
732 /* If anything above us is unlocked, we can't use 'top' protection */
733 if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
734 status_old))
735 can_be_top = false;
736
737 if (!can_be_bottom && !can_be_top)
738 return -EINVAL;
739
740 /* Prefer top, if both are valid */
741 use_top = can_be_top;
742
743 /* lock_len: length of region that should end up locked */
744 if (use_top)
745 lock_len = mtd->size - ofs;
746 else
747 lock_len = ofs + len;
748
749 /*
750 * Need smallest pow such that:
751 *
752 * 1 / (2^pow) <= (len / size)
753 *
754 * so (assuming power-of-2 size) we do:
755 *
756 * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
757 */
758 pow = ilog2(mtd->size) - ilog2(lock_len);
759 val = mask - (pow << shift);
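	/*
	 * e.g. locking the upper 2 MiB of an 8 MiB part:
	 * pow = ilog2(8 MiB) - ilog2(2 MiB) = 2, so val = mask - (2 << shift),
	 * i.e. BP2 | BP0, the "Upper 1/4" row of the table above.
	 */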
760 if (val & ~mask)
761 return -EINVAL;
762 /* Don't "lock" with no region! */
763 if (!(val & mask))
764 return -EINVAL;
765
766 status_new = (status_old & ~mask & ~SR_TB) | val;
767
768 /* Disallow further writes if WP pin is asserted */
769 status_new |= SR_SRWD;
770
771 if (!use_top)
772 status_new |= SR_TB;
773
774 /* Don't bother if they're the same */
775 if (status_new == status_old)
776 return 0;
777
778 /* Only modify protection if it will not unlock other areas */
779 if ((status_new & mask) < (status_old & mask))
780 return -EINVAL;
781
782 return write_sr_and_check(nor, status_new, mask);
783 }
784
785 /*
786 * Unlock a region of the flash. See stm_lock() for more info
787 *
788 * Returns negative on errors, 0 on success.
789 */
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
791 {
792 struct mtd_info *mtd = &nor->mtd;
793 int status_old, status_new;
794 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
795 u8 shift = ffs(mask) - 1, pow, val;
796 loff_t lock_len;
797 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
798 bool use_top;
799
800 status_old = read_sr(nor);
801 if (status_old < 0)
802 return status_old;
803
804 /* If nothing in our range is locked, we don't need to do anything */
805 if (stm_is_unlocked_sr(nor, ofs, len, status_old))
806 return 0;
807
808 /* If anything below us is locked, we can't use 'top' protection */
809 if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
810 can_be_top = false;
811
812 /* If anything above us is locked, we can't use 'bottom' protection */
813 if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
814 status_old))
815 can_be_bottom = false;
816
817 if (!can_be_bottom && !can_be_top)
818 return -EINVAL;
819
820 /* Prefer top, if both are valid */
821 use_top = can_be_top;
822
823 /* lock_len: length of region that should remain locked */
824 if (use_top)
825 lock_len = mtd->size - (ofs + len);
826 else
827 lock_len = ofs;
828
829 /*
830 * Need largest pow such that:
831 *
832 * 1 / (2^pow) >= (len / size)
833 *
834 * so (assuming power-of-2 size) we do:
835 *
836 * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
837 */
838 pow = ilog2(mtd->size) - order_base_2(lock_len);
839 if (lock_len == 0) {
840 val = 0; /* fully unlocked */
841 } else {
842 val = mask - (pow << shift);
843 /* Some power-of-two sizes are not supported */
844 if (val & ~mask)
845 return -EINVAL;
846 }
847
848 status_new = (status_old & ~mask & ~SR_TB) | val;
849
850 /* Don't protect status register if we're fully unlocked */
851 if (lock_len == 0)
852 status_new &= ~SR_SRWD;
853
854 if (!use_top)
855 status_new |= SR_TB;
856
857 /* Don't bother if they're the same */
858 if (status_new == status_old)
859 return 0;
860
861 /* Only modify protection if it will not lock other areas */
862 if ((status_new & mask) > (status_old & mask))
863 return -EINVAL;
864
865 return write_sr_and_check(nor, status_new, mask);
866 }
867
868 /*
869 * Check if a region of the flash is (completely) locked. See stm_lock() for
870 * more info.
871 *
872 * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
873 * negative on errors.
874 */
static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
876 {
877 int status;
878
879 status = read_sr(nor);
880 if (status < 0)
881 return status;
882
883 return stm_is_locked_sr(nor, ofs, len, status);
884 }
885 #endif /* CONFIG_SPI_FLASH_STMICRO */
886
static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
888 {
889 int tmp;
890 u8 id[SPI_NOR_MAX_ID_LEN];
891 const struct flash_info *info;
892
893 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
894 if (tmp < 0) {
895 dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
896 return ERR_PTR(tmp);
897 }
898
899 info = spi_nor_ids;
900 for (; info->name; info++) {
901 if (info->id_len) {
902 if (!memcmp(info->id, id, info->id_len))
903 return info;
904 }
905 }
906
907 dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
908 id[0], id[1], id[2]);
909 return ERR_PTR(-ENODEV);
910 }
911
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
914 {
915 struct spi_nor *nor = mtd_to_spi_nor(mtd);
916 int ret;
917
918 dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
919
920 while (len) {
921 loff_t addr = from;
922 size_t read_len = len;
923
924 #ifdef CONFIG_SPI_FLASH_BAR
925 u32 remain_len;
926
927 ret = write_bar(nor, addr);
928 if (ret < 0)
929 return log_ret(ret);
930 remain_len = (SZ_16M * (nor->bank_curr + 1)) - addr;
931
932 if (len < remain_len)
933 read_len = len;
934 else
935 read_len = remain_len;
936 #endif
937
938 ret = nor->read(nor, addr, read_len, buf);
939 if (ret == 0) {
940 /* We shouldn't see 0-length reads */
941 ret = -EIO;
942 goto read_err;
943 }
944 if (ret < 0)
945 goto read_err;
946
947 *retlen += ret;
948 buf += ret;
949 from += ret;
950 len -= ret;
951 }
952 ret = 0;
953
954 read_err:
955 #ifdef CONFIG_SPI_FLASH_BAR
956 ret = clean_bar(nor);
957 #endif
958 return ret;
959 }
960
961 #ifdef CONFIG_SPI_FLASH_SST
962 /*
963 * sst26 flash series has its own block protection implementation:
964 * 4x - 8 KByte blocks - read & write protection bits - upper addresses
965 * 1x - 32 KByte blocks - write protection bits
966 * rest - 64 KByte blocks - write protection bits
967 * 1x - 32 KByte blocks - write protection bits
968 * 4x - 8 KByte blocks - read & write protection bits - lower addresses
969 *
 * We'll support only per-64k lock/unlock, so the lower and upper 64 KByte
 * regions will each be treated as a single block.
972 */
973 #define SST26_BPR_8K_NUM 4
974 #define SST26_MAX_BPR_REG_LEN (18 + 1)
975 #define SST26_BOUND_REG_SIZE ((32 + SST26_BPR_8K_NUM * 8) * SZ_1K)
976
977 enum lock_ctl {
978 SST26_CTL_LOCK,
979 SST26_CTL_UNLOCK,
980 SST26_CTL_CHECK
981 };
982
static bool sst26_process_bpr(u32 bpr_size, u8 *cmd, u32 bit, enum lock_ctl ctl)
984 {
985 switch (ctl) {
986 case SST26_CTL_LOCK:
987 cmd[bpr_size - (bit / 8) - 1] |= BIT(bit % 8);
988 break;
989 case SST26_CTL_UNLOCK:
990 cmd[bpr_size - (bit / 8) - 1] &= ~BIT(bit % 8);
991 break;
992 case SST26_CTL_CHECK:
993 return !!(cmd[bpr_size - (bit / 8) - 1] & BIT(bit % 8));
994 }
995
996 return false;
997 }
998
/*
 * Lock, unlock or check the lock status of a flash region (depending on the
 * lock_ctl value).
 */
static int sst26_lock_ctl(struct spi_nor *nor, loff_t ofs, uint64_t len, enum lock_ctl ctl)
1004 {
1005 struct mtd_info *mtd = &nor->mtd;
1006 u32 i, bpr_ptr, rptr_64k, lptr_64k, bpr_size;
1007 bool lower_64k = false, upper_64k = false;
1008 u8 bpr_buff[SST26_MAX_BPR_REG_LEN] = {};
1009 int ret;
1010
1011 /* Check length and offset for 64k alignment */
1012 if ((ofs & (SZ_64K - 1)) || (len & (SZ_64K - 1))) {
		dev_err(nor->dev, "length or offset is not 64KiB aligned\n");
1014 return -EINVAL;
1015 }
1016
1017 if (ofs + len > mtd->size) {
1018 dev_err(nor->dev, "range is more than device size: %#llx + %#llx > %#llx\n",
1019 ofs, len, mtd->size);
1020 return -EINVAL;
1021 }
1022
1023 /* SST26 family has only 16 Mbit, 32 Mbit and 64 Mbit IC */
1024 if (mtd->size != SZ_2M &&
1025 mtd->size != SZ_4M &&
1026 mtd->size != SZ_8M)
1027 return -EINVAL;
1028
1029 bpr_size = 2 + (mtd->size / SZ_64K / 8);
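	/*
	 * e.g. a 64 Mbit (8 MiB) part has 8 MiB / 64 KiB / 8 = 16 bytes of 64K
	 * write-protection bits, plus 2 bytes for the 32K/8K boundary regions,
	 * giving an 18-byte BPR (see SST26_MAX_BPR_REG_LEN above).
	 */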
1030
1031 ret = nor->read_reg(nor, SPINOR_OP_READ_BPR, bpr_buff, bpr_size);
1032 if (ret < 0) {
1033 dev_err(nor->dev, "fail to read block-protection register\n");
1034 return ret;
1035 }
1036
1037 rptr_64k = min_t(u32, ofs + len, mtd->size - SST26_BOUND_REG_SIZE);
1038 lptr_64k = max_t(u32, ofs, SST26_BOUND_REG_SIZE);
1039
1040 upper_64k = ((ofs + len) > (mtd->size - SST26_BOUND_REG_SIZE));
1041 lower_64k = (ofs < SST26_BOUND_REG_SIZE);
1042
	/* Lower bits in the block-protection register correspond to the 64k regions */
1044 bpr_ptr = lptr_64k / SZ_64K - 1;
1045
1046 /* Process 64K blocks region */
1047 while (lptr_64k < rptr_64k) {
1048 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1049 return EACCES;
1050
1051 bpr_ptr++;
1052 lptr_64k += SZ_64K;
1053 }
1054
1055 /* 32K and 8K region bits in BPR are after 64k region bits */
1056 bpr_ptr = (mtd->size - 2 * SST26_BOUND_REG_SIZE) / SZ_64K;
1057
1058 /* Process lower 32K block region */
1059 if (lower_64k)
1060 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1061 return EACCES;
1062
1063 bpr_ptr++;
1064
1065 /* Process upper 32K block region */
1066 if (upper_64k)
1067 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1068 return EACCES;
1069
1070 bpr_ptr++;
1071
1072 /* Process lower 8K block regions */
1073 for (i = 0; i < SST26_BPR_8K_NUM; i++) {
1074 if (lower_64k)
1075 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1076 return EACCES;
1077
1078 /* In 8K area BPR has both read and write protection bits */
1079 bpr_ptr += 2;
1080 }
1081
1082 /* Process upper 8K block regions */
1083 for (i = 0; i < SST26_BPR_8K_NUM; i++) {
1084 if (upper_64k)
1085 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1086 return EACCES;
1087
1088 /* In 8K area BPR has both read and write protection bits */
1089 bpr_ptr += 2;
1090 }
1091
1092 /* If we check region status we don't need to write BPR back */
1093 if (ctl == SST26_CTL_CHECK)
1094 return 0;
1095
1096 ret = nor->write_reg(nor, SPINOR_OP_WRITE_BPR, bpr_buff, bpr_size);
1097 if (ret < 0) {
1098 dev_err(nor->dev, "fail to write block-protection register\n");
1099 return ret;
1100 }
1101
1102 return 0;
1103 }
1104
static int sst26_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1106 {
1107 return sst26_lock_ctl(nor, ofs, len, SST26_CTL_UNLOCK);
1108 }
1109
static int sst26_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1111 {
1112 return sst26_lock_ctl(nor, ofs, len, SST26_CTL_LOCK);
1113 }
1114
1115 /*
1116 * Returns EACCES (positive value) if region is locked, 0 if region is unlocked,
1117 * and negative on errors.
1118 */
static int sst26_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1120 {
	/*
	 * The is_locked function is used to check a flash region before reading
	 * or erasing it, so the offset and length might not be 64k aligned;
	 * adjust them here, as sst26_lock_ctl() works only with 64k aligned
	 * regions.
	 */
1127 ofs -= ofs & (SZ_64K - 1);
1128 len = len & (SZ_64K - 1) ? (len & ~(SZ_64K - 1)) + SZ_64K : len;
1129
1130 return sst26_lock_ctl(nor, ofs, len, SST26_CTL_CHECK);
1131 }
1132
static int sst_write_byteprogram(struct spi_nor *nor, loff_t to, size_t len,
				 size_t *retlen, const u_char *buf)
1135 {
1136 size_t actual;
1137 int ret = 0;
1138
1139 for (actual = 0; actual < len; actual++) {
1140 nor->program_opcode = SPINOR_OP_BP;
1141
1142 write_enable(nor);
1143 /* write one byte. */
1144 ret = nor->write(nor, to, 1, buf + actual);
1145 if (ret < 0)
1146 goto sst_write_err;
1147 ret = spi_nor_wait_till_ready(nor);
1148 if (ret)
1149 goto sst_write_err;
1150 to++;
1151 }
1152
1153 sst_write_err:
1154 write_disable(nor);
1155 return ret;
1156 }
1157
static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
		     size_t *retlen, const u_char *buf)
1160 {
1161 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1162 struct spi_slave *spi = nor->spi;
1163 size_t actual;
1164 int ret;
1165
1166 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
1167 if (spi->mode & SPI_TX_BYTE)
1168 return sst_write_byteprogram(nor, to, len, retlen, buf);
1169
1170 write_enable(nor);
1171
1172 nor->sst_write_second = false;
1173
1174 actual = to % 2;
1175 /* Start write from odd address. */
1176 if (actual) {
1177 nor->program_opcode = SPINOR_OP_BP;
1178
1179 /* write one byte. */
1180 ret = nor->write(nor, to, 1, buf);
1181 if (ret < 0)
1182 goto sst_write_err;
1183 ret = spi_nor_wait_till_ready(nor);
1184 if (ret)
1185 goto sst_write_err;
1186 }
1187 to += actual;
1188
1189 /* Write out most of the data here. */
1190 for (; actual < len - 1; actual += 2) {
1191 nor->program_opcode = SPINOR_OP_AAI_WP;
1192
1193 /* write two bytes. */
1194 ret = nor->write(nor, to, 2, buf + actual);
1195 if (ret < 0)
1196 goto sst_write_err;
1197 ret = spi_nor_wait_till_ready(nor);
1198 if (ret)
1199 goto sst_write_err;
1200 to += 2;
1201 nor->sst_write_second = true;
1202 }
1203 nor->sst_write_second = false;
1204
1205 write_disable(nor);
1206 ret = spi_nor_wait_till_ready(nor);
1207 if (ret)
1208 goto sst_write_err;
1209
1210 /* Write out trailing byte if it exists. */
1211 if (actual != len) {
1212 write_enable(nor);
1213
1214 nor->program_opcode = SPINOR_OP_BP;
1215 ret = nor->write(nor, to, 1, buf + actual);
1216 if (ret < 0)
1217 goto sst_write_err;
1218 ret = spi_nor_wait_till_ready(nor);
1219 if (ret)
1220 goto sst_write_err;
1221 write_disable(nor);
1222 actual += 1;
1223 }
1224 sst_write_err:
1225 *retlen += actual;
1226 return ret;
1227 }
1228 #endif
1229 /*
1230 * Write an address range to the nor chip. Data must be written in
1231 * FLASH_PAGESIZE chunks. The address range may be any size provided
1232 * it is within the physical boundaries.
1233 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
1236 {
1237 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1238 size_t page_offset, page_remain, i;
1239 ssize_t ret;
1240
1241 #ifdef CONFIG_SPI_FLASH_SST
1242 /* sst nor chips use AAI word program */
1243 if (nor->info->flags & SST_WRITE)
1244 return sst_write(mtd, to, len, retlen, buf);
1245 #endif
1246
1247 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
1248
1249 if (!len)
1250 return 0;
1251
1252 for (i = 0; i < len; ) {
1253 ssize_t written;
1254 loff_t addr = to + i;
1255 WATCHDOG_RESET();
1256
1257 /*
1258 * If page_size is a power of two, the offset can be quickly
1259 * calculated with an AND operation. On the other cases we
1260 * need to do a modulus operation (more expensive).
1261 */
1262 if (is_power_of_2(nor->page_size)) {
1263 page_offset = addr & (nor->page_size - 1);
1264 } else {
1265 u64 aux = addr;
1266
1267 page_offset = do_div(aux, nor->page_size);
1268 }
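		/* e.g. addr = 0x1050 with a 256-byte page gives page_offset = 0x50 */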
1269 /* the size of data remaining on the first page */
1270 page_remain = min_t(size_t,
1271 nor->page_size - page_offset, len - i);
1272
1273 #ifdef CONFIG_SPI_FLASH_BAR
1274 ret = write_bar(nor, addr);
1275 if (ret < 0)
1276 return ret;
1277 #endif
1278 write_enable(nor);
1279 ret = nor->write(nor, addr, page_remain, buf + i);
1280 if (ret < 0)
1281 goto write_err;
1282 written = ret;
1283
1284 ret = spi_nor_wait_till_ready(nor);
1285 if (ret)
1286 goto write_err;
1287 *retlen += written;
1288 i += written;
1289 }
1290
1291 write_err:
1292 #ifdef CONFIG_SPI_FLASH_BAR
1293 ret = clean_bar(nor);
1294 #endif
1295 return ret;
1296 }
1297
1298 #if defined(CONFIG_SPI_FLASH_MACRONIX) || defined(CONFIG_SPI_FLASH_ISSI)
1299 /**
1300 * macronix_quad_enable() - set QE bit in Status Register.
1301 * @nor: pointer to a 'struct spi_nor'
1302 *
1303 * Set the Quad Enable (QE) bit in the Status Register.
1304 *
1305 * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories.
1306 *
1307 * Return: 0 on success, -errno otherwise.
1308 */
static int macronix_quad_enable(struct spi_nor *nor)
1310 {
1311 int ret, val;
1312
1313 val = read_sr(nor);
1314 if (val < 0)
1315 return val;
1316 if (val & SR_QUAD_EN_MX)
1317 return 0;
1318
1319 write_enable(nor);
1320
1321 write_sr(nor, val | SR_QUAD_EN_MX);
1322
1323 ret = spi_nor_wait_till_ready(nor);
1324 if (ret)
1325 return ret;
1326
1327 ret = read_sr(nor);
1328 if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
1329 dev_err(nor->dev, "Macronix Quad bit not set\n");
1330 return -EINVAL;
1331 }
1332
1333 return 0;
1334 }
1335 #endif
1336
1337 #if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
1338 /*
 * Write the Status Register and the Configuration Register with 2 bytes.
1340 * The first byte will be written to the status register, while the
1341 * second byte will be written to the configuration register.
1342 * Return negative if error occurred.
1343 */
static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
1345 {
1346 int ret;
1347
1348 write_enable(nor);
1349
1350 ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
1351 if (ret < 0) {
1352 dev_dbg(nor->dev,
1353 "error while writing configuration register\n");
1354 return -EINVAL;
1355 }
1356
1357 ret = spi_nor_wait_till_ready(nor);
1358 if (ret) {
1359 dev_dbg(nor->dev,
1360 "timeout while writing configuration register\n");
1361 return ret;
1362 }
1363
1364 return 0;
1365 }
1366
1367 /**
1368 * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
1369 * @nor: pointer to a 'struct spi_nor'
1370 *
1371 * Set the Quad Enable (QE) bit in the Configuration Register.
1372 * This function should be used with QSPI memories supporting the Read
1373 * Configuration Register (35h) instruction.
1374 *
1375 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
1376 * memories.
1377 *
1378 * Return: 0 on success, -errno otherwise.
1379 */
static int spansion_read_cr_quad_enable(struct spi_nor *nor)
1381 {
1382 u8 sr_cr[2];
1383 int ret;
1384
1385 /* Check current Quad Enable bit value. */
1386 ret = read_cr(nor);
1387 if (ret < 0) {
1388 dev_dbg(nor->dev,
1389 "error while reading configuration register\n");
1390 return -EINVAL;
1391 }
1392
1393 if (ret & CR_QUAD_EN_SPAN)
1394 return 0;
1395
1396 sr_cr[1] = ret | CR_QUAD_EN_SPAN;
1397
1398 /* Keep the current value of the Status Register. */
1399 ret = read_sr(nor);
1400 if (ret < 0) {
1401 dev_dbg(nor->dev, "error while reading status register\n");
1402 return -EINVAL;
1403 }
1404 sr_cr[0] = ret;
1405
1406 ret = write_sr_cr(nor, sr_cr);
1407 if (ret)
1408 return ret;
1409
1410 /* Read back and check it. */
1411 ret = read_cr(nor);
1412 if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
1413 dev_dbg(nor->dev, "Spansion Quad bit not set\n");
1414 return -EINVAL;
1415 }
1416
1417 return 0;
1418 }
1419
1420 #if CONFIG_IS_ENABLED(SPI_FLASH_SFDP_SUPPORT)
1421 /**
1422 * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
1423 * @nor: pointer to a 'struct spi_nor'
1424 *
1425 * Set the Quad Enable (QE) bit in the Configuration Register.
1426 * This function should be used with QSPI memories not supporting the Read
1427 * Configuration Register (35h) instruction.
1428 *
1429 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
1430 * memories.
1431 *
1432 * Return: 0 on success, -errno otherwise.
1433 */
static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
1435 {
1436 u8 sr_cr[2];
1437 int ret;
1438
1439 /* Keep the current value of the Status Register. */
1440 ret = read_sr(nor);
1441 if (ret < 0) {
1442 dev_dbg(nor->dev, "error while reading status register\n");
1443 return -EINVAL;
1444 }
1445 sr_cr[0] = ret;
1446 sr_cr[1] = CR_QUAD_EN_SPAN;
1447
1448 return write_sr_cr(nor, sr_cr);
1449 }
1450
1451 #endif /* CONFIG_SPI_FLASH_SFDP_SUPPORT */
1452 #endif /* CONFIG_SPI_FLASH_SPANSION */
1453
1454 struct spi_nor_read_command {
1455 u8 num_mode_clocks;
1456 u8 num_wait_states;
1457 u8 opcode;
1458 enum spi_nor_protocol proto;
1459 };
1460
1461 struct spi_nor_pp_command {
1462 u8 opcode;
1463 enum spi_nor_protocol proto;
1464 };
1465
1466 enum spi_nor_read_command_index {
1467 SNOR_CMD_READ,
1468 SNOR_CMD_READ_FAST,
1469 SNOR_CMD_READ_1_1_1_DTR,
1470
1471 /* Dual SPI */
1472 SNOR_CMD_READ_1_1_2,
1473 SNOR_CMD_READ_1_2_2,
1474 SNOR_CMD_READ_2_2_2,
1475 SNOR_CMD_READ_1_2_2_DTR,
1476
1477 /* Quad SPI */
1478 SNOR_CMD_READ_1_1_4,
1479 SNOR_CMD_READ_1_4_4,
1480 SNOR_CMD_READ_4_4_4,
1481 SNOR_CMD_READ_1_4_4_DTR,
1482
1483 /* Octo SPI */
1484 SNOR_CMD_READ_1_1_8,
1485 SNOR_CMD_READ_1_8_8,
1486 SNOR_CMD_READ_8_8_8,
1487 SNOR_CMD_READ_1_8_8_DTR,
1488
1489 SNOR_CMD_READ_MAX
1490 };
1491
1492 enum spi_nor_pp_command_index {
1493 SNOR_CMD_PP,
1494
1495 /* Quad SPI */
1496 SNOR_CMD_PP_1_1_4,
1497 SNOR_CMD_PP_1_4_4,
1498 SNOR_CMD_PP_4_4_4,
1499
1500 /* Octo SPI */
1501 SNOR_CMD_PP_1_1_8,
1502 SNOR_CMD_PP_1_8_8,
1503 SNOR_CMD_PP_8_8_8,
1504
1505 SNOR_CMD_PP_MAX
1506 };
1507
1508 struct spi_nor_flash_parameter {
1509 u64 size;
1510 u32 page_size;
1511
1512 struct spi_nor_hwcaps hwcaps;
1513 struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
1514 struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
1515
1516 int (*quad_enable)(struct spi_nor *nor);
1517 };
1518
1519 static void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
			  u8 num_mode_clocks,
			  u8 num_wait_states,
			  u8 opcode,
			  enum spi_nor_protocol proto)
1525 {
1526 read->num_mode_clocks = num_mode_clocks;
1527 read->num_wait_states = num_wait_states;
1528 read->opcode = opcode;
1529 read->proto = proto;
1530 }
1531
1532 static void
spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
			u8 opcode,
			enum spi_nor_protocol proto)
1536 {
1537 pp->opcode = opcode;
1538 pp->proto = proto;
1539 }
1540
1541 #if CONFIG_IS_ENABLED(SPI_FLASH_SFDP_SUPPORT)
1542 /*
1543 * Serial Flash Discoverable Parameters (SFDP) parsing.
1544 */
1545
1546 /**
1547 * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
1548 * @nor: pointer to a 'struct spi_nor'
1549 * @addr: offset in the SFDP area to start reading data from
1550 * @len: number of bytes to read
1551 * @buf: buffer where the SFDP data are copied into (dma-safe memory)
1552 *
1553 * Whatever the actual numbers of bytes for address and dummy cycles are
1554 * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
1555 * followed by a 3-byte address and 8 dummy clock cycles.
1556 *
1557 * Return: 0 on success, -errno otherwise.
1558 */
static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
			     size_t len, void *buf)
1561 {
1562 u8 addr_width, read_opcode, read_dummy;
1563 int ret;
1564
1565 read_opcode = nor->read_opcode;
1566 addr_width = nor->addr_width;
1567 read_dummy = nor->read_dummy;
1568
1569 nor->read_opcode = SPINOR_OP_RDSFDP;
1570 nor->addr_width = 3;
1571 nor->read_dummy = 8;
1572
1573 while (len) {
1574 ret = nor->read(nor, addr, len, (u8 *)buf);
1575 if (!ret || ret > len) {
1576 ret = -EIO;
1577 goto read_err;
1578 }
1579 if (ret < 0)
1580 goto read_err;
1581
1582 buf += ret;
1583 addr += ret;
1584 len -= ret;
1585 }
1586 ret = 0;
1587
1588 read_err:
1589 nor->read_opcode = read_opcode;
1590 nor->addr_width = addr_width;
1591 nor->read_dummy = read_dummy;
1592
1593 return ret;
1594 }
1595
1596 struct sfdp_parameter_header {
1597 u8 id_lsb;
1598 u8 minor;
1599 u8 major;
1600 u8 length; /* in double words */
1601 u8 parameter_table_pointer[3]; /* byte address */
1602 u8 id_msb;
1603 };
1604
1605 #define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
1606 #define SFDP_PARAM_HEADER_PTP(p) \
1607 (((p)->parameter_table_pointer[2] << 16) | \
1608 ((p)->parameter_table_pointer[1] << 8) | \
1609 ((p)->parameter_table_pointer[0] << 0))
1610
1611 #define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
1612 #define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
1613 #define SFDP_SST_ID 0x01bf /* Manufacturer specific Table */
1614
1615 #define SFDP_SIGNATURE 0x50444653U
1616 #define SFDP_JESD216_MAJOR 1
1617 #define SFDP_JESD216_MINOR 0
1618 #define SFDP_JESD216A_MINOR 5
1619 #define SFDP_JESD216B_MINOR 6
1620
1621 struct sfdp_header {
	u32 signature; /* 0x50444653U <=> "SFDP" */
1623 u8 minor;
1624 u8 major;
	u8 nph; /* 0-based number of parameter headers */
1626 u8 unused;
1627
1628 /* Basic Flash Parameter Table. */
1629 struct sfdp_parameter_header bfpt_header;
1630 };
1631
1632 /* Basic Flash Parameter Table */
1633
1634 /*
1635 * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
1636 * They are indexed from 1 but C arrays are indexed from 0.
1637 */
1638 #define BFPT_DWORD(i) ((i) - 1)
1639 #define BFPT_DWORD_MAX 16
1640
1641 /* The first version of JESB216 defined only 9 DWORDs. */
1642 #define BFPT_DWORD_MAX_JESD216 9
1643
1644 /* 1st DWORD. */
1645 #define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
1646 #define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
1647 #define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
1648 #define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
1649 #define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
1650 #define BFPT_DWORD1_DTR BIT(19)
1651 #define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
1652 #define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
1653 #define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
1654
1655 /* 5th DWORD. */
1656 #define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
1657 #define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
1658
1659 /* 11th DWORD. */
1660 #define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
1661 #define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
1662
1663 /* 15th DWORD. */
1664
1665 /*
1666 * (from JESD216 rev B)
1667 * Quad Enable Requirements (QER):
1668 * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
1669 * reads based on instruction. DQ3/HOLD# functions are hold during
1670 * instruction phase.
1671 * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
1672 * two data bytes where bit 1 of the second byte is one.
1673 * [...]
1674 * Writing only one byte to the status register has the side-effect of
1675 * clearing status register 2, including the QE bit. The 100b code is
1676 * used if writing one byte to the status register does not modify
1677 * status register 2.
1678 * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
1679 * one data byte where bit 6 is one.
1680 * [...]
1681 * - 011b: QE is bit 7 of status register 2. It is set via Write status
1682 * register 2 instruction 3Eh with one data byte where bit 7 is one.
1683 * [...]
1684 * The status register 2 is read using instruction 3Fh.
1685 * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
1686 * two data bytes where bit 1 of the second byte is one.
1687 * [...]
1688 * In contrast to the 001b code, writing one byte to the status
1689 * register does not modify status register 2.
1690 * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
 *         Read Status instruction 05h. Status register 2 is read using
 *         instruction 35h. QE is set via Write Status instruction 01h with
1693 * two data bytes where bit 1 of the second byte is one.
1694 * [...]
1695 */
1696 #define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
1697 #define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
1698 #define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
1699 #define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
1700 #define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
1701 #define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
1702 #define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
1703
1704 struct sfdp_bfpt {
1705 u32 dwords[BFPT_DWORD_MAX];
1706 };
1707
1708 /* Fast Read settings. */
1709
1710 static void
spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
				    u16 half,
				    enum spi_nor_protocol proto)
1714 {
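	/*
	 * The BFPT half-word layout is: opcode in bits [15:8], mode clocks in
	 * bits [7:5] and wait states in bits [4:0], so e.g. 0xeb44 decodes to
	 * opcode 0xeb, 2 mode clocks and 4 wait states.
	 */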
1715 read->num_mode_clocks = (half >> 5) & 0x07;
1716 read->num_wait_states = (half >> 0) & 0x1f;
1717 read->opcode = (half >> 8) & 0xff;
1718 read->proto = proto;
1719 }
1720
1721 struct sfdp_bfpt_read {
1722 /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
1723 u32 hwcaps;
1724
1725 /*
1726 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
1727 * whether the Fast Read x-y-z command is supported.
1728 */
1729 u32 supported_dword;
1730 u32 supported_bit;
1731
1732 /*
1733 * The half-word at offset <setting_shift> in <setting_dword> BFPT DWORD
1734 * encodes the op code, the number of mode clocks and the number of wait
1735 * states to be used by Fast Read x-y-z command.
1736 */
1737 u32 settings_dword;
1738 u32 settings_shift;
1739
1740 /* The SPI protocol for this Fast Read x-y-z command. */
1741 enum spi_nor_protocol proto;
1742 };
1743
1744 static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
1745 /* Fast Read 1-1-2 */
1746 {
1747 SNOR_HWCAPS_READ_1_1_2,
1748 BFPT_DWORD(1), BIT(16), /* Supported bit */
1749 BFPT_DWORD(4), 0, /* Settings */
1750 SNOR_PROTO_1_1_2,
1751 },
1752
1753 /* Fast Read 1-2-2 */
1754 {
1755 SNOR_HWCAPS_READ_1_2_2,
1756 BFPT_DWORD(1), BIT(20), /* Supported bit */
1757 BFPT_DWORD(4), 16, /* Settings */
1758 SNOR_PROTO_1_2_2,
1759 },
1760
1761 /* Fast Read 2-2-2 */
1762 {
1763 SNOR_HWCAPS_READ_2_2_2,
1764 BFPT_DWORD(5), BIT(0), /* Supported bit */
1765 BFPT_DWORD(6), 16, /* Settings */
1766 SNOR_PROTO_2_2_2,
1767 },
1768
1769 /* Fast Read 1-1-4 */
1770 {
1771 SNOR_HWCAPS_READ_1_1_4,
1772 BFPT_DWORD(1), BIT(22), /* Supported bit */
1773 BFPT_DWORD(3), 16, /* Settings */
1774 SNOR_PROTO_1_1_4,
1775 },
1776
1777 /* Fast Read 1-4-4 */
1778 {
1779 SNOR_HWCAPS_READ_1_4_4,
1780 BFPT_DWORD(1), BIT(21), /* Supported bit */
1781 BFPT_DWORD(3), 0, /* Settings */
1782 SNOR_PROTO_1_4_4,
1783 },
1784
1785 /* Fast Read 4-4-4 */
1786 {
1787 SNOR_HWCAPS_READ_4_4_4,
1788 BFPT_DWORD(5), BIT(4), /* Supported bit */
1789 BFPT_DWORD(7), 16, /* Settings */
1790 SNOR_PROTO_4_4_4,
1791 },
1792 };
1793
1794 struct sfdp_bfpt_erase {
1795 /*
 * The half-word at offset <shift> in DWORD <dword> encodes the
1797 * op code and erase sector size to be used by Sector Erase commands.
1798 */
1799 u32 dword;
1800 u32 shift;
1801 };
1802
1803 static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
1804 /* Erase Type 1 in DWORD8 bits[15:0] */
1805 {BFPT_DWORD(8), 0},
1806
1807 /* Erase Type 2 in DWORD8 bits[31:16] */
1808 {BFPT_DWORD(8), 16},
1809
1810 /* Erase Type 3 in DWORD9 bits[15:0] */
1811 {BFPT_DWORD(9), 0},
1812
1813 /* Erase Type 4 in DWORD9 bits[31:16] */
1814 {BFPT_DWORD(9), 16},
1815 };
1816
1817 static int spi_nor_hwcaps_read2cmd(u32 hwcaps);
1818
1819 /**
1820 * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
1821 * @nor: pointer to a 'struct spi_nor'
1822 * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
1823 * the Basic Flash Parameter Table length and version
1824 * @params: pointer to the 'struct spi_nor_flash_parameter' to be
1825 * filled
1826 *
1827 * The Basic Flash Parameter Table is the main and only mandatory table as
1828 * defined by the SFDP (JESD216) specification.
1829 * It provides us with the total size (memory density) of the data array and
1830 * the number of address bytes for Fast Read, Page Program and Sector Erase
1831 * commands.
1832 * For Fast READ commands, it also gives the number of mode clock cycles and
1833 * wait states (regrouped in the number of dummy clock cycles) for each
1834 * supported instruction op code.
1835 * For Page Program, the page size is now available since JESD216 rev A, however
1836 * the supported instruction op codes are still not provided.
1837 * For Sector Erase commands, this table stores the supported instruction op
1838 * codes and the associated sector sizes.
1839 * Finally, the Quad Enable Requirements (QER) are also available since JESD216
1840 * rev A. The QER bits encode the manufacturer dependent procedure to be
1841 * executed to set the Quad Enable (QE) bit in some internal register of the
1842 * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
1843 * sending any Quad SPI command to the memory. Actually, setting the QE bit
1844 * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
1845 * and IO3 hence enabling 4 (Quad) I/O lines.
1846 *
1847 * Return: 0 on success, -errno otherwise.
1848 */
static int spi_nor_parse_bfpt(struct spi_nor *nor,
			      const struct sfdp_parameter_header *bfpt_header,
			      struct spi_nor_flash_parameter *params)
1852 {
1853 struct mtd_info *mtd = &nor->mtd;
1854 struct sfdp_bfpt bfpt;
1855 size_t len;
1856 int i, cmd, err;
1857 u32 addr;
1858 u16 half;
1859
1860 /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
1861 if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
1862 return -EINVAL;
1863
1864 /* Read the Basic Flash Parameter Table. */
1865 len = min_t(size_t, sizeof(bfpt),
1866 bfpt_header->length * sizeof(u32));
1867 addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
1868 memset(&bfpt, 0, sizeof(bfpt));
1869 err = spi_nor_read_sfdp(nor, addr, len, &bfpt);
1870 if (err < 0)
1871 return err;
1872
1873 /* Fix endianness of the BFPT DWORDs. */
1874 for (i = 0; i < BFPT_DWORD_MAX; i++)
1875 bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
1876
1877 /* Number of address bytes. */
1878 switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
1879 case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
1880 nor->addr_width = 3;
1881 break;
1882
1883 case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
1884 nor->addr_width = 4;
1885 break;
1886
1887 default:
1888 break;
1889 }
1890
1891 /* Flash Memory Density (in bits). */
1892 params->size = bfpt.dwords[BFPT_DWORD(2)];
1893 if (params->size & BIT(31)) {
1894 params->size &= ~BIT(31);
1895
1896 /*
1897 * Prevent overflows on params->size. Anyway, a NOR of 2^64
1898 * bits is unlikely to exist so this error probably means
1899 * the BFPT we are reading is corrupted/wrong.
1900 */
1901 if (params->size > 63)
1902 return -EINVAL;
1903
1904 params->size = 1ULL << params->size;
1905 } else {
1906 params->size++;
1907 }
1908 params->size >>= 3; /* Convert to bytes. */
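	/*
	 * Illustrative example of the density decoding above: a raw DWORD2
	 * value of 0x00ffffff (bit 31 clear) encodes 0x00ffffff + 1 = 2^24
	 * bits, i.e. 2 MiB, whereas 0x8000001b (bit 31 set) encodes 2^27
	 * bits, i.e. 16 MiB.
	 */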

	/* Fast Read settings. */
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
		const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
		struct spi_nor_read_command *read;

		if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
			params->hwcaps.mask &= ~rd->hwcaps;
			continue;
		}

		params->hwcaps.mask |= rd->hwcaps;
		cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
		read = &params->reads[cmd];
		half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
		spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
	}

	/* Sector Erase settings. */
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
		const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
		u32 erasesize;
		u8 opcode;

		half = bfpt.dwords[er->dword] >> er->shift;
		erasesize = half & 0xff;

		/* erasesize == 0 means this Erase Type is not supported. */
		if (!erasesize)
			continue;

		erasesize = 1U << erasesize;
		opcode = (half >> 8) & 0xff;
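		/*
		 * For example, a half-word of 0x200c decodes to a size
		 * exponent of 0x0c (4 KiB sectors) with erase opcode 0x20,
		 * the usual 4 KiB sector-erase command (SPINOR_OP_BE_4K).
		 */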
#ifdef CONFIG_SPI_FLASH_USE_4K_SECTORS
		if (erasesize == SZ_4K) {
			nor->erase_opcode = opcode;
			mtd->erasesize = erasesize;
			break;
		}
#endif
		if (!mtd->erasesize || mtd->erasesize < erasesize) {
			nor->erase_opcode = opcode;
			mtd->erasesize = erasesize;
		}
	}

	/* Stop here if not JESD216 rev A or later. */
	if (bfpt_header->length < BFPT_DWORD_MAX)
		return 0;

	/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
	params->page_size = bfpt.dwords[BFPT_DWORD(11)];
	params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
	params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
	params->page_size = 1U << params->page_size;
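	/* For instance, N = 8 yields the common 256-byte page size. */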

	/* Quad Enable Requirements. */
	switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
	case BFPT_DWORD15_QER_NONE:
		params->quad_enable = NULL;
		break;
#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
	case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
	case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
		params->quad_enable = spansion_no_read_cr_quad_enable;
		break;
#endif
#if defined(CONFIG_SPI_FLASH_MACRONIX) || defined(CONFIG_SPI_FLASH_ISSI)
	case BFPT_DWORD15_QER_SR1_BIT6:
		params->quad_enable = macronix_quad_enable;
		break;
#endif
#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
	case BFPT_DWORD15_QER_SR2_BIT1:
		params->quad_enable = spansion_read_cr_quad_enable;
		break;
#endif
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * spi_nor_parse_microchip_sfdp() - parse the Microchip manufacturer-specific
 * SFDP table.
 * @nor:		pointer to a 'struct spi_nor'.
 * @param_header:	pointer to the SFDP parameter header.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int
spi_nor_parse_microchip_sfdp(struct spi_nor *nor,
			     const struct sfdp_parameter_header *param_header)
{
	size_t size;
	u32 addr;
	int ret;

	size = param_header->length * sizeof(u32);
	addr = SFDP_PARAM_HEADER_PTP(param_header);

	nor->manufacturer_sfdp = devm_kmalloc(nor->dev, size, GFP_KERNEL);
	if (!nor->manufacturer_sfdp)
		return -ENOMEM;

	ret = spi_nor_read_sfdp(nor, addr, size, nor->manufacturer_sfdp);

	return ret;
}

/**
 * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
 * @nor:		pointer to a 'struct spi_nor'
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled
 *
 * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
 * specification. This is a standard which tends to be supported by almost all
 * (Q)SPI memory manufacturers. These tables, read from the flash itself, allow
 * us to learn at runtime the main parameters needed to perform basic SPI flash
 * operations such as Fast Read, Page Program or Sector Erase commands.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_sfdp(struct spi_nor *nor,
			      struct spi_nor_flash_parameter *params)
{
	const struct sfdp_parameter_header *param_header, *bfpt_header;
	struct sfdp_parameter_header *param_headers = NULL;
	struct sfdp_header header;
	size_t psize;
	int i, err;

	/* Get the SFDP header. */
	err = spi_nor_read_sfdp(nor, 0, sizeof(header), &header);
	if (err < 0)
		return err;

	/* Check the SFDP header version. */
	if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
	    header.major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Verify that the first and only mandatory parameter header is a
	 * Basic Flash Parameter Table header as specified in JESD216.
	 */
	bfpt_header = &header.bfpt_header;
	if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
	    bfpt_header->major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Allocate memory then read all parameter headers with a single
	 * Read SFDP command. These parameter headers will actually be parsed
	 * twice: a first time to get the latest revision of the basic flash
	 * parameter table, then a second time to handle the supported optional
	 * tables.
	 * Hence we read the parameter headers once and for all to reduce the
	 * processing time. Also we use kmalloc() instead of devm_kmalloc()
	 * because we don't need to keep these parameter headers: the allocated
	 * memory is always released with kfree() before exiting this function.
	 */
	if (header.nph) {
		psize = header.nph * sizeof(*param_headers);

		param_headers = kmalloc(psize, GFP_KERNEL);
		if (!param_headers)
			return -ENOMEM;

		err = spi_nor_read_sfdp(nor, sizeof(header),
					psize, param_headers);
		if (err < 0) {
			dev_err(nor->dev,
				"failed to read SFDP parameter headers\n");
			goto exit;
		}
	}

	/*
	 * Check other parameter headers to get the latest revision of
	 * the basic flash parameter table.
	 */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
		    param_header->major == SFDP_JESD216_MAJOR &&
		    (param_header->minor > bfpt_header->minor ||
		     (param_header->minor == bfpt_header->minor &&
		      param_header->length > bfpt_header->length)))
			bfpt_header = param_header;
	}

	err = spi_nor_parse_bfpt(nor, bfpt_header, params);
	if (err)
		goto exit;

	/* Parse other parameter headers. */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		switch (SFDP_PARAM_HEADER_ID(param_header)) {
		case SFDP_SECTOR_MAP_ID:
			dev_info(nor->dev,
				 "non-uniform erase sector maps are not supported yet.\n");
			break;

		case SFDP_SST_ID:
			err = spi_nor_parse_microchip_sfdp(nor, param_header);
			break;

		default:
			break;
		}

		if (err) {
			dev_warn(nor->dev,
				 "Failed to parse optional parameter table: %04x\n",
				 SFDP_PARAM_HEADER_ID(param_header));
			/*
			 * Let's not drop all information we extracted so far
			 * if an optional table parser fails. In case of
			 * failure, each optional parser is responsible for
			 * rolling back to the previously known spi_nor data.
			 */
			err = 0;
		}
	}

exit:
	kfree(param_headers);
	return err;
}
#else
static int spi_nor_parse_sfdp(struct spi_nor *nor,
			      struct spi_nor_flash_parameter *params)
{
	return -EINVAL;
}
#endif /* SPI_FLASH_SFDP_SUPPORT */

static int spi_nor_init_params(struct spi_nor *nor,
			       const struct flash_info *info,
			       struct spi_nor_flash_parameter *params)
{
	/* Set legacy flash parameters as default. */
	memset(params, 0, sizeof(*params));

	/* Set SPI NOR sizes. */
	params->size = info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (!(info->flags & SPI_NOR_NO_FR)) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);
	}

	if (info->flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (info->flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
					SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
	}

	/* Select the procedure to set the Quad Enable bit. */
	if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD |
				   SNOR_HWCAPS_PP_QUAD)) {
		switch (JEDEC_MFR(info)) {
#if defined(CONFIG_SPI_FLASH_MACRONIX) || defined(CONFIG_SPI_FLASH_ISSI)
		case SNOR_MFR_MACRONIX:
		case SNOR_MFR_ISSI:
			params->quad_enable = macronix_quad_enable;
			break;
#endif
		case SNOR_MFR_ST:
		case SNOR_MFR_MICRON:
			break;

		default:
#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
			/* Kept only for backward compatibility purpose. */
			params->quad_enable = spansion_read_cr_quad_enable;
#endif
			break;
		}
	}

	/* Override the parameters with data read from SFDP tables. */
	nor->addr_width = 0;
	nor->mtd.erasesize = 0;
	if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
	    !(info->flags & SPI_NOR_SKIP_SFDP)) {
		struct spi_nor_flash_parameter sfdp_params;

		memcpy(&sfdp_params, params, sizeof(sfdp_params));
		if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
			nor->addr_width = 0;
			nor->mtd.erasesize = 0;
		} else {
			memcpy(params, &sfdp_params, sizeof(*params));
		}
	}

	return 0;
}

static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == (int)hwcaps)
			return table[i][1];

	return -EINVAL;
}

static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}

static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}

static int spi_nor_select_read(struct spi_nor *nor,
			       const struct spi_nor_flash_parameter *params,
			       u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
	const struct spi_nor_read_command *read;

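	/*
	 * The SNOR_HWCAPS_READ_* bits are arranged so that a higher bit
	 * position denotes a faster protocol, hence fls() above selects the
	 * fastest read capability shared by the controller and the flash.
	 */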
	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	read = &params->reads[cmd];
	nor->read_opcode = read->opcode;
	nor->read_proto = read->proto;

	/*
	 * In the spi-nor framework, we don't need to differentiate between
	 * mode clock cycles and wait state clock cycles.
	 * Indeed, the value of the mode clock cycles is used by a QSPI
	 * flash memory to know whether it should enter or leave its 0-4-4
	 * (Continuous Read / XIP) mode.
	 * eXecution In Place is out of the scope of the mtd sub-system.
	 * Hence we choose to merge both mode and wait state clock cycles
	 * into the so-called dummy clock cycles.
	 */
	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
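	/*
	 * For example, the 1-1-4 fast read set up in spi_nor_init_params()
	 * above uses 0 mode clocks and 8 wait states, so read_dummy ends up
	 * as 8 dummy cycles.
	 */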
	return 0;
}

static int spi_nor_select_pp(struct spi_nor *nor,
			     const struct spi_nor_flash_parameter *params,
			     u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
	const struct spi_nor_pp_command *pp;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	pp = &params->page_programs[cmd];
	nor->program_opcode = pp->opcode;
	nor->write_proto = pp->proto;
	return 0;
}

static int spi_nor_select_erase(struct spi_nor *nor,
				const struct flash_info *info)
{
	struct mtd_info *mtd = &nor->mtd;

	/* Do nothing if already configured from SFDP. */
	if (mtd->erasesize)
		return 0;

#ifdef CONFIG_SPI_FLASH_USE_4K_SECTORS
	/* prefer "small sector" erase if possible */
	if (info->flags & SECT_4K) {
		nor->erase_opcode = SPINOR_OP_BE_4K;
		mtd->erasesize = 4096;
	} else if (info->flags & SECT_4K_PMC) {
		nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
		mtd->erasesize = 4096;
	} else
#endif
	{
		nor->erase_opcode = SPINOR_OP_SE;
		mtd->erasesize = info->sector_size;
	}
	return 0;
}

static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
			 const struct spi_nor_flash_parameter *params,
			 const struct spi_nor_hwcaps *hwcaps)
{
	u32 ignored_mask, shared_mask;
	bool enable_quad_io;
	int err;

	/*
	 * Keep only the hardware capabilities supported by both the SPI
	 * controller and the SPI flash memory.
	 */
	shared_mask = hwcaps->mask & params->hwcaps.mask;
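	/*
	 * For instance, a controller that only advertises single-bit I/O
	 * masks out any dual, quad or octal read and page-program
	 * capabilities the flash may report.
	 */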

	/* SPI n-n-n protocols are not supported yet. */
	ignored_mask = (SNOR_HWCAPS_READ_2_2_2 |
			SNOR_HWCAPS_READ_4_4_4 |
			SNOR_HWCAPS_READ_8_8_8 |
			SNOR_HWCAPS_PP_4_4_4 |
			SNOR_HWCAPS_PP_8_8_8);
	if (shared_mask & ignored_mask) {
		dev_dbg(nor->dev,
			"SPI n-n-n protocols are not supported yet.\n");
		shared_mask &= ~ignored_mask;
	}

	/* Select the (Fast) Read command. */
	err = spi_nor_select_read(nor, params, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select read settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, params, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select write settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor, info);
	if (err) {
		dev_dbg(nor->dev,
			"can't select erase settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Enable Quad I/O if needed. */
	enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
			  spi_nor_get_protocol_width(nor->write_proto) == 4);
	if (enable_quad_io && params->quad_enable)
		nor->quad_enable = params->quad_enable;
	else
		nor->quad_enable = NULL;

	return 0;
}

static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	/*
	 * Atmel, SST, Intel/Numonyx, and other serial NOR flashes tend to
	 * power up with the software protection bits set.
	 */
	if (IS_ENABLED(CONFIG_SPI_FLASH_UNLOCK_ALL) &&
	    (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
	     JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
	     JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
	     nor->info->flags & SPI_NOR_HAS_LOCK)) {
		write_enable(nor);
		write_sr(nor, 0);
		spi_nor_wait_till_ready(nor);
	}

	if (nor->quad_enable) {
		err = nor->quad_enable(nor);
		if (err) {
			dev_dbg(nor->dev, "quad mode not supported\n");
			return err;
		}
	}

	if (nor->addr_width == 4 &&
	    (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
	    !(nor->info->flags & SPI_NOR_4B_OPCODES)) {
		/*
		 * If the RESET# pin isn't hooked up properly, or the system
		 * otherwise doesn't perform a reset command in the boot
		 * sequence, it's impossible to 100% protect against unexpected
		 * reboots (e.g., crashes). Warn the user (or hopefully, system
		 * designer) that this is bad.
		 */
		if (nor->flags & SNOR_F_BROKEN_RESET)
			debug("enabling reset hack; may not recover from unexpected reboots\n");
		set_4byte(nor, nor->info, 1);
	}

	return 0;
}

int spi_nor_scan(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter params;
	const struct flash_info *info = NULL;
	struct mtd_info *mtd = &nor->mtd;
	struct spi_nor_hwcaps hwcaps = {
		.mask = SNOR_HWCAPS_READ |
			SNOR_HWCAPS_READ_FAST |
			SNOR_HWCAPS_PP,
	};
	struct spi_slave *spi = nor->spi;
	int ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;
	nor->read = spi_nor_read_data;
	nor->write = spi_nor_write_data;
	nor->read_reg = spi_nor_read_reg;
	nor->write_reg = spi_nor_write_reg;

	if (spi->mode & SPI_RX_OCTAL) {
		hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;

		if (spi->mode & SPI_TX_OCTAL)
			hwcaps.mask |= (SNOR_HWCAPS_READ_1_8_8 |
					SNOR_HWCAPS_PP_1_1_8 |
					SNOR_HWCAPS_PP_1_8_8);
	} else if (spi->mode & SPI_RX_QUAD) {
		hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;

		if (spi->mode & SPI_TX_QUAD)
			hwcaps.mask |= (SNOR_HWCAPS_READ_1_4_4 |
					SNOR_HWCAPS_PP_1_1_4 |
					SNOR_HWCAPS_PP_1_4_4);
	} else if (spi->mode & SPI_RX_DUAL) {
		hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;

		if (spi->mode & SPI_TX_DUAL)
			hwcaps.mask |= SNOR_HWCAPS_READ_1_2_2;
	}

	info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return -ENOENT;
	/* Parse the Serial Flash Discoverable Parameters table. */
	ret = spi_nor_init_params(nor, info, &params);
	if (ret)
		return ret;

	if (!mtd->name)
		mtd->name = info->name;
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = 1;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = params.size;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;
	mtd->_write = spi_nor_write;

#if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST)
	/* NOR protection support for STmicro/Micron chips and similar */
	if (JEDEC_MFR(info) == SNOR_MFR_ST ||
	    JEDEC_MFR(info) == SNOR_MFR_MICRON ||
	    JEDEC_MFR(info) == SNOR_MFR_SST ||
	    info->flags & SPI_NOR_HAS_LOCK) {
		nor->flash_lock = stm_lock;
		nor->flash_unlock = stm_unlock;
		nor->flash_is_locked = stm_is_locked;
	}
#endif

#ifdef CONFIG_SPI_FLASH_SST
	/*
	 * The sst26 series implements block protection differently from the
	 * other SST series.
	 */
	if (info->flags & SPI_NOR_HAS_SST26LOCK) {
		nor->flash_lock = sst26_lock;
		nor->flash_unlock = sst26_unlock;
		nor->flash_is_locked = sst26_is_locked;
	}
#endif

	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB)
		nor->flags |= SNOR_F_HAS_SR_TB;
	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
	if (info->flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	nor->page_size = params.page_size;
	mtd->writebufsize = nor->page_size;

	/* Some devices cannot do fast-read, no matter what DT tells us */
	if ((info->flags & SPI_NOR_NO_FR) || (spi->mode & SPI_RX_SLOW))
		params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 * - set the Quad Enable bit if needed (required by SPI x-y-4 protos).
	 */
	ret = spi_nor_setup(nor, info, &params, &hwcaps);
	if (ret)
		return ret;

	if (nor->addr_width) {
		/* already configured from SFDP */
	} else if (info->addr_width) {
		nor->addr_width = info->addr_width;
	} else if (mtd->size > SZ_16M) {
#ifndef CONFIG_SPI_FLASH_BAR
		/* enable 4-byte addressing if the device exceeds 16MiB */
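		/* (a 3-byte address can reach at most 2^24 bytes = 16 MiB) */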
		nor->addr_width = 4;
		if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
		    info->flags & SPI_NOR_4B_OPCODES)
			spi_nor_set_4byte_opcodes(nor, info);
#else
		/* Configure the BAR - discover bank cmds and read current bank */
		nor->addr_width = 3;
		ret = read_bar(nor, info);
		if (ret < 0)
			return ret;
#endif
	} else {
		nor->addr_width = 3;
	}

	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
		dev_dbg(nor->dev, "address width is too large: %u\n",
			nor->addr_width);
		return -EINVAL;
	}

	/* Send all the required SPI flash commands to initialize device */
	nor->info = info;
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	nor->name = mtd->name;
	nor->size = mtd->size;
	nor->erase_size = mtd->erasesize;
	nor->sector_size = mtd->erasesize;

#ifndef CONFIG_SPL_BUILD
	printf("SF: Detected %s with page size ", nor->name);
	print_size(nor->page_size, ", erase size ");
	print_size(nor->erase_size, ", total ");
	print_size(nor->size, "");
	puts("\n");
#endif

	return 0;
}
