/*
 * Copyright (c) 2019-2022, STMicroelectronics - All Rights Reserved
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/raw_nand.h>
#include <lib/utils.h>

#include <platform_def.h>

#define ONFI_SIGNATURE_ADDR	0x20U

/* CRC calculation */
#define CRC_POLYNOM		0x8005U
#define CRC_INIT_VALUE		0x4F4EU

/* Status register */
#define NAND_STATUS_READY	BIT(6)

static struct rawnand_device rawnand_dev;

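/* Weak default: platforms may override to provide or adjust the NAND description. */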
#pragma weak plat_get_raw_nand_data
int plat_get_raw_nand_data(struct rawnand_device *device)
{
	return 0;
}

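/* Send a single command cycle to the device through the controller exec hook. */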
static int nand_send_cmd(uint8_t cmd, unsigned int tim)
{
	struct nand_req req;

	zeromem(&req, sizeof(struct nand_req));
	req.nand = rawnand_dev.nand_dev;
	req.type = NAND_REQ_CMD | cmd;
	req.inst_delay = tim;

	return rawnand_dev.ops->exec(&req);
}

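/* Send a single address cycle to the device. */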
static int nand_send_addr(uint8_t addr, unsigned int tim)
{
	struct nand_req req;

	zeromem(&req, sizeof(struct nand_req));
	req.nand = rawnand_dev.nand_dev;
	req.type = NAND_REQ_ADDR;
	req.addr = &addr;
	req.inst_delay = tim;

	return rawnand_dev.ops->exec(&req);
}

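/* Request the controller to wait for the device ready state, up to delay ms. */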
static int nand_send_wait(unsigned int delay, unsigned int tim)
{
	struct nand_req req;

	zeromem(&req, sizeof(struct nand_req));
	req.nand = rawnand_dev.nand_dev;
	req.type = NAND_REQ_WAIT;
	req.inst_delay = tim;
	req.delay_ms = delay;

	return rawnand_dev.ops->exec(&req);
}

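/* Read data cycles from the device, optionally forcing 8-bit bus accesses. */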
static int nand_read_data(uint8_t *data, unsigned int length, bool use_8bit)
{
	struct nand_req req;

	zeromem(&req, sizeof(struct nand_req));
	req.nand = rawnand_dev.nand_dev;
	req.type = NAND_REQ_DATAIN | (use_8bit ? NAND_REQ_BUS_WIDTH_8 : 0U);
	req.addr = data;
	req.length = length;

	return rawnand_dev.ops->exec(&req);
}

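/* Change the read column within the currently loaded page and read len bytes. */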
int nand_change_read_column_cmd(unsigned int offset, uintptr_t buffer,
				unsigned int len)
{
	int ret;
	uint8_t addr[2];
	unsigned int i;

	ret = nand_send_cmd(NAND_CMD_CHANGE_1ST, 0U);
	if (ret != 0) {
		return ret;
	}

	if (rawnand_dev.nand_dev->buswidth == NAND_BUS_WIDTH_16) {
		offset /= 2U;
	}

	addr[0] = offset;
	addr[1] = offset >> 8;

	for (i = 0; i < 2U; i++) {
		ret = nand_send_addr(addr[i], 0U);
		if (ret != 0) {
			return ret;
		}
	}

	ret = nand_send_cmd(NAND_CMD_CHANGE_2ND, NAND_TCCS_MIN);
	if (ret != 0) {
		return ret;
	}

	return nand_read_data((uint8_t *)buffer, len, false);
}

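/*
 * Read len bytes from the given page at column offset. When buffer is 0,
 * the page is only loaded and the data transfer is skipped.
 */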
int nand_read_page_cmd(unsigned int page, unsigned int offset,
		       uintptr_t buffer, unsigned int len)
{
	uint8_t addr[5];
	uint8_t i = 0U;
	uint8_t j;
	int ret;

	VERBOSE(">%s page %u offset %u buffer 0x%lx\n", __func__, page, offset,
		buffer);

	if (rawnand_dev.nand_dev->buswidth == NAND_BUS_WIDTH_16) {
		offset /= 2U;
	}

	addr[i++] = offset;
	addr[i++] = offset >> 8;

	addr[i++] = page;
	addr[i++] = page >> 8;
	if (rawnand_dev.nand_dev->size > SZ_128M) {
		addr[i++] = page >> 16;
	}

	ret = nand_send_cmd(NAND_CMD_READ_1ST, 0U);
	if (ret != 0) {
		return ret;
	}

	for (j = 0U; j < i; j++) {
		ret = nand_send_addr(addr[j], 0U);
		if (ret != 0) {
			return ret;
		}
	}

	ret = nand_send_cmd(NAND_CMD_READ_2ND, NAND_TWB_MAX);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_wait(PSEC_TO_MSEC(NAND_TR_MAX), NAND_TRR_MIN);
	if (ret != 0) {
		return ret;
	}

	if (buffer != 0U) {
		ret = nand_read_data((uint8_t *)buffer, len, false);
	}

	return ret;
}

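/* Issue READ STATUS and optionally read back one status byte. */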
static int nand_status(uint8_t *status)
{
	int ret;

	ret = nand_send_cmd(NAND_CMD_STATUS, NAND_TWHR_MIN);
	if (ret != 0) {
		return ret;
	}

	if (status != NULL) {
		ret = nand_read_data(status, 1U, true);
	}

	return ret;
}

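/*
 * Poll the status register until the device reports ready or delay_ms
 * expires, then switch the device back to read mode.
 */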
int nand_wait_ready(unsigned int delay_ms)
{
	uint8_t status;
	int ret;
	uint64_t timeout;

	/* Wait before reading status */
	udelay(1);

	ret = nand_status(NULL);
	if (ret != 0) {
		return ret;
	}

	timeout = timeout_init_us(delay_ms * 1000U);
	while (!timeout_elapsed(timeout)) {
		ret = nand_read_data(&status, 1U, true);
		if (ret != 0) {
			return ret;
		}

		if ((status & NAND_STATUS_READY) != 0U) {
			return nand_send_cmd(NAND_CMD_READ_1ST, 0U);
		}

		udelay(10);
	}

	return -ETIMEDOUT;
}

#if NAND_ONFI_DETECT
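/* CRC-16 over the ONFI parameter page (polynomial 0x8005, caller-provided seed). */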
static uint16_t nand_check_crc(uint16_t crc, uint8_t *data_in,
			       unsigned int data_len)
{
	uint32_t i;
	uint32_t j;
	uint32_t bit;

	for (i = 0U; i < data_len; i++) {
		uint8_t cur_param = *data_in++;

		for (j = BIT(7); j != 0U; j >>= 1) {
			bit = crc & BIT(15);
			crc <<= 1;

			if ((cur_param & j) != 0U) {
				bit ^= BIT(15);
			}

			if (bit != 0U) {
				crc ^= CRC_POLYNOM;
			}
		}

		crc &= GENMASK(15, 0);
	}

	return crc;
}

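/* Issue READ ID at the given address and read back size identification bytes. */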
static int nand_read_id(uint8_t addr, uint8_t *id, unsigned int size)
{
	int ret;

	ret = nand_send_cmd(NAND_CMD_READID, 0U);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_addr(addr, NAND_TWHR_MIN);
	if (ret != 0) {
		return ret;
	}

	return nand_read_data(id, size, true);
}

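/* Reset the device and wait for it to recover. */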
static int nand_reset(void)
{
	int ret;

	ret = nand_send_cmd(NAND_CMD_RESET, NAND_TWB_MAX);
	if (ret != 0) {
		return ret;
	}

	return nand_send_wait(PSEC_TO_MSEC(NAND_TRST_MAX), 0U);
}

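/*
 * Read and check the ONFI parameter page, then fill in the device geometry:
 * bus width, page, block and total sizes, and ECC requirement.
 */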
static int nand_read_param_page(void)
{
	struct nand_param_page page;
	uint8_t addr = 0U;
	int ret;

	ret = nand_send_cmd(NAND_CMD_READ_PARAM_PAGE, 0U);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_addr(addr, NAND_TWB_MAX);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_wait(PSEC_TO_MSEC(NAND_TR_MAX), NAND_TRR_MIN);
	if (ret != 0) {
		return ret;
	}

	ret = nand_read_data((uint8_t *)&page, sizeof(page), true);
	if (ret != 0) {
		return ret;
	}

	if (strncmp((char *)&page.page_sig, "ONFI", 4) != 0) {
		WARN("ONFI signature check failed\n");
		return -EINVAL;
	}

	if (nand_check_crc(CRC_INIT_VALUE, (uint8_t *)&page, 254U) !=
	    page.crc16) {
		WARN("ONFI parameter page CRC error\n");
		return -EINVAL;
	}

	if ((page.features & ONFI_FEAT_BUS_WIDTH_16) != 0U) {
		rawnand_dev.nand_dev->buswidth = NAND_BUS_WIDTH_16;
	} else {
		rawnand_dev.nand_dev->buswidth = NAND_BUS_WIDTH_8;
	}

	rawnand_dev.nand_dev->block_size = page.num_pages_per_blk *
					   page.bytes_per_page;
	rawnand_dev.nand_dev->page_size = page.bytes_per_page;
	rawnand_dev.nand_dev->size = page.num_pages_per_blk *
				     page.bytes_per_page *
				     page.num_blk_in_lun * page.num_lun;

	if (page.nb_ecc_bits != GENMASK_32(7, 0)) {
		rawnand_dev.nand_dev->ecc.max_bit_corr = page.nb_ecc_bits;
		rawnand_dev.nand_dev->ecc.size = SZ_512;
	}

	VERBOSE("Page size %u, block_size %u, Size %llu, ecc %u, buswidth %u\n",
		rawnand_dev.nand_dev->page_size,
		rawnand_dev.nand_dev->block_size, rawnand_dev.nand_dev->size,
		rawnand_dev.nand_dev->ecc.max_bit_corr,
		rawnand_dev.nand_dev->buswidth);

	return 0;
}

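/* Reset the device, look for the ONFI signature and parse the parameter page. */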
static int detect_onfi(void)
{
	int ret;
	char id[4];

	ret = nand_reset();
	if (ret != 0) {
		return ret;
	}

	ret = nand_read_id(ONFI_SIGNATURE_ADDR, (uint8_t *)id, sizeof(id));
	if (ret != 0) {
		return ret;
	}

	if (strncmp(id, "ONFI", sizeof(id)) != 0) {
		WARN("Non-ONFI NAND detected\n");
		return -ENODEV;
	}

	return nand_read_param_page();
}
#endif

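/*
 * A block is considered bad when the marker byte in the spare area of one
 * of its first two pages differs from 0xFF.
 */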
static int nand_mtd_block_is_bad(unsigned int block)
{
	unsigned int nbpages_per_block = rawnand_dev.nand_dev->block_size /
					 rawnand_dev.nand_dev->page_size;
	uint8_t bbm_marker[2];
	uint8_t page;
	int ret;

	for (page = 0U; page < 2U; page++) {
		ret = nand_read_page_cmd((block * nbpages_per_block) + page,
					 rawnand_dev.nand_dev->page_size,
					 (uintptr_t)bbm_marker,
					 sizeof(bbm_marker));
		if (ret != 0) {
			return ret;
		}

		if ((bbm_marker[0] != GENMASK_32(7, 0)) ||
		    (bbm_marker[1] != GENMASK_32(7, 0))) {
			WARN("Block %u is bad\n", block);
			return 1;
		}
	}

	return 0;
}

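/* Raw page read hook used when ECC is not handled by this layer. */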
static int nand_mtd_read_page_raw(struct nand_device *nand, unsigned int page,
				  uintptr_t buffer)
{
	return nand_read_page_cmd(page, 0U, buffer,
				  rawnand_dev.nand_dev->page_size);
}

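/* Register the controller operations used by this layer. */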
void nand_raw_ctrl_init(const struct nand_ctrl_ops *ops)
{
	rawnand_dev.ops = ops;
}

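/* Probe the raw NAND device and report its total size and erase (block) size. */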
int nand_raw_init(unsigned long long *size, unsigned int *erase_size)
{
	rawnand_dev.nand_dev = get_nand_device();
	if (rawnand_dev.nand_dev == NULL) {
		return -EINVAL;
	}

	rawnand_dev.nand_dev->mtd_block_is_bad = nand_mtd_block_is_bad;
	rawnand_dev.nand_dev->mtd_read_page = nand_mtd_read_page_raw;
	rawnand_dev.nand_dev->ecc.mode = NAND_ECC_NONE;

	if ((rawnand_dev.ops->setup == NULL) ||
	    (rawnand_dev.ops->exec == NULL)) {
		return -ENODEV;
	}

#if NAND_ONFI_DETECT
	if (detect_onfi() != 0) {
		WARN("ONFI detection failed\n");
	}
#endif

	if (plat_get_raw_nand_data(&rawnand_dev) != 0) {
		return -EINVAL;
	}

	assert((rawnand_dev.nand_dev->page_size != 0U) &&
	       (rawnand_dev.nand_dev->block_size != 0U) &&
	       (rawnand_dev.nand_dev->size != 0U));

	*size = rawnand_dev.nand_dev->size;
	*erase_size = rawnand_dev.nand_dev->block_size;

	rawnand_dev.ops->setup(rawnand_dev.nand_dev);

	return 0;
}