// SPDX-License-Identifier: GPL-2.0+
/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 */

#ifndef __UBOOT__
#include <log.h>
#include <dm/devres.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>
#include <asm/div64.h>
#else
#include <div64.h>
#include <linux/bug.h>
#include <linux/compat.h>
#endif

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <ubi_uboot.h>
/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure.
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))

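/*
 * For example, SIZEOF_STRUCT_MTD_CONCAT(2) covers one struct mtd_concat
 * plus room for two struct mtd_info pointers; mtd_concat_create() carves
 * the pointer array out of this same allocation by pointing subdev just
 * past the structure (concat + 1).
 */
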
/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x)  ((struct mtd_concat *)(x))

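/*
 * This works because mtd is the first member of struct mtd_concat, so a
 * pointer to the embedded MTD object is also a pointer to its container
 * (the open-coded equivalent of container_of()).
 */
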
/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */

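/*
 * Worked example (illustrative sizes): with two 1 MiB subdevices, a read
 * of 0x40000 bytes at 0x180000 skips subdev[0], reducing the offset by
 * 0x100000, and is issued to subdev[1] at offset 0x80000.  A request that
 * straddles a boundary is split: the first chunk ends at the end of one
 * subdevice and the remainder continues at offset 0 of the next.
 */
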
static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t *retlen, u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

#ifdef __UBOOT__
	*retlen = 0;
#endif

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t *retlen, const u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

#ifdef __UBOOT__
	*retlen = 0;
#endif

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

#ifndef __UBOOT__
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

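	/*
	 * Walk the subdevices, carving the window [entry_low, entry_high]
	 * out of the copied vector array for each sub-write.  A vector that
	 * straddles a subdevice boundary is temporarily truncated for this
	 * sub-write; its base and length are then advanced so the leftover
	 * bytes are replayed at offset 0 of the next subdevice.
	 */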
	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size; /* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		err = mtd_writev(subdev, &vecs_copy[entry_low],
				 entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}
#endif

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			/* Advance by this pass's OOB count, not the running total */
			devops.oobbuf += devops.oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->priv = (unsigned long) &waitq;

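	/*
	 * erase->priv carries the wait queue so that an asynchronous
	 * sub-device driver can wake us once the erase finishes.  With a
	 * synchronous mtd_erase() the state is already MTD_ERASE_DONE or
	 * MTD_ERASE_FAILED when it is checked below, so schedule() is
	 * never reached.
	 */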
	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd_erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
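	/*
	 * Example (illustrative): a 1 MiB chip with 64 KiB blocks followed
	 * by a chip with 128 KiB blocks yields two erase regions; an erase
	 * starting at 0x110000 (an odd multiple of 64 KiB inside the second
	 * region) is rejected, since the start must be aligned to the erase
	 * size of the region in which it falls.
	 */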
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
					(erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_lock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_sync(subdev);
	}
}

#ifndef __UBOOT__
static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = mtd_suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_resume(subdev);
	}
}
#endif

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
					      unsigned long len,
					      unsigned long offset,
					      unsigned long flags)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (offset >= subdev->size) {
			offset -= subdev->size;
			continue;
		}

		return mtd_get_unmapped_area(subdev, len, offset, flags);
	}

	return (unsigned long) -ENOSYS;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices and returns a pointer to the new device object
 * on success (NULL on failure). It does _not_ register any devices:
 * that is the caller's responsibility.
 */
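/*
 * Usage sketch (illustrative; device names and lookup are hypothetical
 * and error handling is omitted):
 *
 *	struct mtd_info *parts[2];
 *	struct mtd_info *combined;
 *
 *	parts[0] = get_mtd_device_nm("nor0");
 *	parts[1] = get_mtd_device_nm("nor1");
 *	combined = mtd_concat_create(parts, 2, "nor0+nor1");
 *	if (combined)
 *		add_mtd_device(combined);
 */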
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices      */
#ifndef __UBOOT__
				   const char *name)
#else
				   char *name)
#endif
{				/* name for the new device   */
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	debug("Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	debug("into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk("memory allocation error while creating concatenated device \"%s\"\n",
		       name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
#ifndef __UBOOT__
	if (subdev[0]->_writev)
		concat->mtd._writev = concat_writev;
#endif
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

#ifndef __UBOOT__
	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
#endif

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->flags) &
			    ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

#ifndef __UBOOT__
		/* only permit direct mapping if the BDIs are all the same
		 * - copy-mapping is still permitted
		 */
		if (concat->mtd.backing_dev_info !=
		    subdev[i]->backing_dev_info)
			concat->mtd.backing_dev_info =
				&default_backing_dev_info;
#endif

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
			subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;
#ifndef __UBOOT__
	concat->mtd._suspend = concat_suspend;
	concat->mtd._resume = concat_resume;
#endif
	concat->mtd._get_unmapped_area = concat_get_unmapped_area;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
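	/*
	 * Example (illustrative): concatenating a uniform 64 KiB-block
	 * device with a uniform 128 KiB-block device gives one change of
	 * erase size, hence num_erase_region == 2; one region list entry
	 * is later filled in for each span of equal erase size.
	 */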
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
						subdev[i]->eraseregions[j].erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof(struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk("memory allocation error while creating erase region list"
			       " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].erasesize !=
					    curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
							subdev[i]->eraseregions[j].erasesize;
						++erase_region_p;
					}
					position +=
						subdev[i]->eraseregions[j].numblocks *
						(uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");