/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/magic.h>
#include <linux/err.h>
#include <linux/of.h>

#include "mtdcore.h"
#include "mtdsplit/mtdsplit.h"

#define MTD_ERASE_PARTIAL	0x8000 /* partition only covers parts of an erase block */

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);

/**
 * struct mtd_part - our partition node structure
 *
 * @mtd: struct holding partition details
 * @parent: parent mtd - flash device or another partition
 * @offset: partition offset relative to the *flash device*
 * @list: node in the global list of MTD partitions
 */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *parent;
	uint64_t offset;
	struct list_head list;
};

static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part);
static int parse_mtd_partitions_by_type(struct mtd_info *master,
					enum mtd_parser_type type,
					const struct mtd_partition **pparts,
					struct mtd_part_parser_data *data);

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure.
 */
static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
{
	return container_of(mtd, struct mtd_part, mtd);
}

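/*
 * Walk up the partition tree, accumulating offsets until we reach the flash
 * device itself. For example (hypothetical layout): a subpartition at offset
 * 0x1000 inside a partition that starts at 0x20000 on the flash device has
 * an absolute offset of 0x21000.
 */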
static u64 part_absolute_offset(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);

	if (!mtd_is_partition(mtd))
		return 0;

	return part_absolute_offset(part->parent) + part->offset;
}

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

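/*
 * Reads update the partition's ECC statistics by the delta observed on the
 * parent device across the call, so per-partition counters stay accurate
 * even though the parent accumulates stats for the whole flash.
 */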
static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->parent->ecc_stats;
	res = part->parent->_read(part->parent, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->parent->ecc_stats.corrected - stats.corrected;
	return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_point(part->parent, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_unpoint(part->parent, from + part->offset, len);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->parent->ecc_stats;
	res = part->parent->_read_oob(part->parent, from + part->offset, ops);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->parent->ecc_stats.corrected - stats.corrected;
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_read_user_prot_reg(part->parent, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
		size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_user_prot_info(part->parent, len, retlen,
						 buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_read_fact_prot_reg(part->parent, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
		size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_fact_prot_info(part->parent, len, retlen,
						 buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_write(part->parent, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_panic_write(part->parent, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_write_oob(part->parent, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_write_user_prot_reg(part->parent, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_lock_user_prot_reg(part->parent, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_writev(part->parent, vecs, count,
				     to + part->offset, retlen);
}

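/*
 * When a partition boundary falls inside an erase block (MTD_ERASE_PARTIAL),
 * erasing must preserve the neighbour's data with a read-modify-write cycle.
 * Worked example (hypothetical geometry): with a 64KiB erase block and a
 * partition starting 16KiB into a block, erasing from the partition start
 * first reads the whole 64KiB block, erases it, then writes the leading
 * 16KiB that belongs to the neighbouring partition back in place.
 */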
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int ret;
	size_t wrlen = 0;
	u8 *erase_buf = NULL;
	u32 erase_buf_ofs = 0;
	bool partial_start = false;

	if (mtd->flags & MTD_ERASE_PARTIAL) {
		size_t readlen = 0;
		u64 mtd_ofs;

		erase_buf = kmalloc(part->parent->erasesize, GFP_ATOMIC);
		if (!erase_buf)
			return -ENOMEM;

		mtd_ofs = part->offset + instr->addr;
		erase_buf_ofs = do_div(mtd_ofs, part->parent->erasesize);

		if (erase_buf_ofs > 0) {
			/* Partial block at the start: widen the erase to the
			 * block boundary and save the block's contents. */
			instr->addr -= erase_buf_ofs;
			ret = mtd_read(part->parent,
				instr->addr + part->offset,
				part->parent->erasesize,
				&readlen, erase_buf);

			instr->len += erase_buf_ofs;
			partial_start = true;
		} else {
			/* Partial block at the end of the partition. */
			mtd_ofs = part->offset + part->mtd.size;
			erase_buf_ofs = part->parent->erasesize -
				do_div(mtd_ofs, part->parent->erasesize);

			if (erase_buf_ofs > 0) {
				instr->len += erase_buf_ofs;
				ret = mtd_read(part->parent,
					part->offset + instr->addr +
					instr->len - part->parent->erasesize,
					part->parent->erasesize, &readlen,
					erase_buf);
			} else {
				ret = 0;
			}
		}
		if (ret < 0) {
			kfree(erase_buf);
			return ret;
		}

	}

	instr->addr += part->offset;
	ret = part->parent->_erase(part->parent, instr);
	if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
		instr->fail_addr -= part->offset;

	if (mtd->flags & MTD_ERASE_PARTIAL) {
		/* instr->addr is still parent-absolute here; it must stay so
		 * until the preserved data has been written back. */
		if (partial_start) {
			part->parent->_write(part->parent,
				instr->addr, erase_buf_ofs,
				&wrlen, erase_buf);
			instr->addr += erase_buf_ofs;
		} else {
			instr->len -= erase_buf_ofs;
			part->parent->_write(part->parent,
				instr->addr + instr->len,
				erase_buf_ofs, &wrlen,
				erase_buf +
				part->parent->erasesize -
				erase_buf_ofs);
		}
		kfree(erase_buf);
	}
	instr->addr -= part->offset;

	return ret;
}

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_lock(part->parent, ofs + part->offset, len);
}

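/*
 * Unlocking operates on whole erase blocks of the parent device, so for
 * MTD_ERASE_PARTIAL partitions the range is widened: the offset is rounded
 * down to the previous block boundary (note this assumes a power-of-two
 * erasesize) and the length rounded up to cover the next block.
 */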
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	ofs += part->offset;

	if (mtd->flags & MTD_ERASE_PARTIAL) {
		/* round up len to next erasesize and round down offset to prev block */
		len = (mtd_div_by_eb(len, part->parent) + 1) * part->parent->erasesize;
		ofs &= ~(part->parent->erasesize - 1);
	}

	return part->parent->_unlock(part->parent, ofs, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_is_locked(part->parent, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_sync(part->parent);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_suspend(part->parent);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_resume(part->parent);
}

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->parent->_block_isreserved(part->parent, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->parent->_block_isbad(part->parent, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int res;

	ofs += part->offset;
	res = part->parent->_block_markbad(part->parent, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static int part_get_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_device(part->parent);
}

static void part_put_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_put_device(part->parent);
}

static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
			      struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_ecc(part->parent, section, oobregion);
}

static int part_ooblayout_free(struct mtd_info *mtd, int section,
			       struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_free(part->parent, section, oobregion);
}

static const struct mtd_ooblayout_ops part_ooblayout_ops = {
	.ecc = part_ooblayout_ecc,
	.free = part_ooblayout_free,
};

static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_max_bad_blocks(part->parent,
					     ofs + part->offset, len);
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

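/*
 * Build the slave mtd_info for one partition: clone the parent's geometry,
 * hook up the part_* wrappers for whichever operations the parent provides,
 * resolve the MTDPART_OFS_* placeholder offsets (APPEND continues after the
 * previous partition, NXTBLK additionally aligns up to the next erase
 * block), and sanity-check the result against the parent's size.
 */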
static struct mtd_part *allocate_partition(struct mtd_info *parent,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
							    parent->erasesize;
	struct mtd_part *slave;
	u32 remainder;
	char *name;
	u64 tmp;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR "memory allocation error while creating partitions for \"%s\"\n",
		       parent->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = parent->type;
	slave->mtd.flags = parent->orig_flags & ~part->mask_flags;
	slave->mtd.orig_flags = slave->mtd.flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = parent->writesize;
	slave->mtd.writebufsize = parent->writebufsize;
	slave->mtd.oobsize = parent->oobsize;
	slave->mtd.oobavail = parent->oobavail;
	slave->mtd.subpage_sft = parent->subpage_sft;
	slave->mtd.pairing = parent->pairing;

	slave->mtd.name = name;
	slave->mtd.owner = parent->owner;

	/* NOTE: Historically, we didn't arrange MTDs as a tree out of
	 * concern for showing the same data in multiple partitions.
	 * However, it is very useful to have the master node present,
	 * so the MTD_PARTITIONED_MASTER option allows that. The master
	 * will have device nodes etc only if this is set, so make the
	 * parent conditional on that option. Note, this is a way to
	 * distinguish between the master and the partition in sysfs.
	 */
	slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
				&parent->dev :
				parent->dev.parent;
	slave->mtd.dev.of_node = part->of_node;

	if (parent->_read)
		slave->mtd._read = part_read;
	if (parent->_write)
		slave->mtd._write = part_write;

	if (parent->_panic_write)
		slave->mtd._panic_write = part_panic_write;

	if (parent->_point && parent->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}

	if (parent->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (parent->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (parent->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (parent->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (parent->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (parent->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (parent->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (parent->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (parent->_sync)
		slave->mtd._sync = part_sync;
	if (!partno && !parent->dev.class && parent->_suspend &&
	    parent->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (parent->_writev)
		slave->mtd._writev = part_writev;
	if (parent->_lock)
		slave->mtd._lock = part_lock;
	if (parent->_unlock)
		slave->mtd._unlock = part_unlock;
	if (parent->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (parent->_block_isreserved)
		slave->mtd._block_isreserved = part_block_isreserved;
	if (parent->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (parent->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	if (parent->_max_bad_blocks)
		slave->mtd._max_bad_blocks = part_max_bad_blocks;

	if (parent->_get_device)
		slave->mtd._get_device = part_get_device;
	if (parent->_put_device)
		slave->mtd._put_device = part_put_device;

	slave->mtd._erase = part_erase;
	slave->parent = parent;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		tmp = cur_offset;
		slave->offset = cur_offset;
		remainder = do_div(tmp, wr_alignment);
		if (remainder) {
			slave->offset += wr_alignment - remainder;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (parent->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = parent->size - slave->offset
							- slave->mtd.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, parent->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = parent->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= parent->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;

		/* Initialize ->erasesize to make add_mtd_device() happy. */
		slave->mtd.erasesize = parent->erasesize;

		printk(KERN_ERR "mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > parent->size) {
		slave->mtd.size = parent->size - slave->offset;
		printk(KERN_WARNING "mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, parent->name, (unsigned long long)slave->mtd.size);
	}
	if (parent->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = parent->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = parent->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize)
				slave->mtd.erasesize = regions[i].erasesize;
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = parent->erasesize;
	}

	/*
	 * Slave erasesize might differ from the master one if the master
	 * exposes several regions with different erasesize. Adjust
	 * wr_alignment accordingly.
	 */
	if (!(slave->mtd.flags & MTD_NO_ERASE))
		wr_alignment = slave->mtd.erasesize;

	tmp = part_absolute_offset(parent) + slave->offset;
	remainder = do_div(tmp, wr_alignment);
	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
		/* Doesn't start on a boundary of major erase size */
		slave->mtd.flags |= MTD_ERASE_PARTIAL;
		if (((u32)slave->mtd.size) > parent->erasesize)
			slave->mtd.flags &= ~MTD_WRITEABLE;
		else
			slave->mtd.erasesize = slave->mtd.size;
	}

	tmp = part_absolute_offset(parent) + slave->offset + slave->mtd.size;
	remainder = do_div(tmp, wr_alignment);
	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
		slave->mtd.flags |= MTD_ERASE_PARTIAL;

		if ((u32)slave->mtd.size > parent->erasesize)
			slave->mtd.flags &= ~MTD_WRITEABLE;
		else
			slave->mtd.erasesize = slave->mtd.size;
	}

	mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
	slave->mtd.ecc_step_size = parent->ecc_step_size;
	slave->mtd.ecc_strength = parent->ecc_strength;
	slave->mtd.bitflip_threshold = parent->bitflip_threshold;

	if (parent->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isreserved(parent, offs + slave->offset))
				slave->mtd.ecc_stats.bbtblocks++;
			else if (mtd_block_isbad(parent, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}

static ssize_t mtd_partition_offset_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_part *part = mtd_to_part(mtd);
	return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
}

static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);

static const struct attribute *mtd_partition_attrs[] = {
	&dev_attr_offset.attr,
	NULL
};

static int mtd_add_partition_attrs(struct mtd_part *new)
{
	int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
	if (ret)
		printk(KERN_WARNING
		       "mtd: failed to create partition attrs, err=%d\n", ret);
	return ret;
}

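/*
 * Minimal usage sketch (hypothetical offsets/sizes): carve a 128KiB "env"
 * partition starting at 256KiB out of an existing device:
 *
 *	mtd_add_partition(master, "env", 0x40000, 0x20000);
 *
 * MTDPART_SIZ_FULL may be passed as the length to extend the partition to
 * the end of the parent device.
 */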
int mtd_add_partition(struct mtd_info *parent, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *new;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = parent->size - offset;

	if (length <= 0)
		return -EINVAL;

	memset(&part, 0, sizeof(part));
	part.name = name;
	part.size = length;
	part.offset = offset;

	new = allocate_partition(parent, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	mutex_lock(&mtd_partitions_mutex);
	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	ret = add_mtd_device(&new->mtd);
	if (ret)
		goto err_remove_part;

	mtd_partition_split(parent, new);
	mtd_add_partition_attrs(new);

	return 0;

err_remove_part:
	mutex_lock(&mtd_partitions_mutex);
	list_del(&new->list);
	mutex_unlock(&mtd_partitions_mutex);

	free_partition(new);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

/**
 * __mtd_del_partition - delete MTD partition
 *
 * @priv: internal MTD struct for partition to be deleted
 *
 * This function must be called with the partitions mutex locked.
 */
static int __mtd_del_partition(struct mtd_part *priv)
{
	struct mtd_part *child, *next;
	int err;

	list_for_each_entry_safe(child, next, &mtd_partitions, list) {
		if (child->parent == &priv->mtd) {
			err = __mtd_del_partition(child);
			if (err)
				return err;
		}
	}

	sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs);

	err = del_mtd_device(&priv->mtd);
	if (err)
		return err;

	list_del(&priv->list);
	free_partition(priv);

	return 0;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given MTD object.
 */
int del_mtd_partitions(struct mtd_info *mtd)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->parent == mtd) {
			ret = __mtd_del_partition(slave);
			if (ret < 0)
				err = ret;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

int mtd_del_partition(struct mtd_info *mtd, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->parent == mtd) &&
		    (slave->mtd.index == partno)) {
			ret = __mtd_del_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);

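/*
 * Run all registered parsers of @type against @slave and register whatever
 * subpartitions they report. Parsers return offsets relative to the slave,
 * so each one is shifted by the slave's own offset before being added to
 * the common parent.
 */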
static int
run_parsers_by_type(struct mtd_part *slave, enum mtd_parser_type type)
{
	struct mtd_partition *parts;
	int nr_parts;
	int i;

	nr_parts = parse_mtd_partitions_by_type(&slave->mtd, type, (const struct mtd_partition **)&parts,
						NULL);
	if (nr_parts <= 0)
		return nr_parts;

	if (WARN_ON(!parts))
		return 0;

	for (i = 0; i < nr_parts; i++) {
		/* adjust partition offsets */
		parts[i].offset += slave->offset;

		mtd_add_partition(slave->parent,
				  parts[i].name,
				  parts[i].offset,
				  parts[i].size);
	}

	kfree(parts);

	return nr_parts;
}

#ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
#define SPLIT_FIRMWARE_NAME	CONFIG_MTD_SPLIT_FIRMWARE_NAME
#else
#define SPLIT_FIRMWARE_NAME	"unused"
#endif

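/*
 * mtdsplit auto-splitting: the first partition named "rootfs" is fed to the
 * rootfs parsers (e.g. to carve out a writable data subpartition), and a
 * partition matching SPLIT_FIRMWARE_NAME is fed to the firmware parsers,
 * unless the device tree already describes it with a "compatible" property.
 */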
static void split_firmware(struct mtd_info *master, struct mtd_part *part)
{
	run_parsers_by_type(part, MTD_PARSER_TYPE_FIRMWARE);
}

static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part)
{
	static int rootfs_found = 0;

	if (rootfs_found)
		return;

	if (!strcmp(part->mtd.name, "rootfs")) {
		run_parsers_by_type(part, MTD_PARSER_TYPE_ROOTFS);

		rootfs_found = 1;
	}

	if (IS_ENABLED(CONFIG_MTD_SPLIT_FIRMWARE) &&
	    !strcmp(part->mtd.name, SPLIT_FIRMWARE_NAME) &&
	    !of_find_property(mtd_get_of_node(&part->mtd), "compatible", NULL))
		split_firmware(master, part);
}

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * For historical reasons, this function's caller only registers the master
 * if the MTD_PARTITIONED_MASTER config option is set.
 */

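/*
 * Example (hypothetical) board partition table a caller might pass in:
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{ .name = "u-boot",   .offset = 0,                  .size = 0x40000 },
 *		{ .name = "env",      .offset = MTDPART_OFS_APPEND, .size = 0x20000 },
 *		{ .name = "firmware", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	add_mtd_partitions(master, example_parts, ARRAY_SIZE(example_parts));
 */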
int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i, ret;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave)) {
			ret = PTR_ERR(slave);
			goto err_del_partitions;
		}

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		ret = add_mtd_device(&slave->mtd);
		if (ret) {
			mutex_lock(&mtd_partitions_mutex);
			list_del(&slave->list);
			mutex_unlock(&mtd_partitions_mutex);

			free_partition(slave);
			goto err_del_partitions;
		}

		mtd_partition_split(master, slave);
		mtd_add_partition_attrs(slave);
		/* Look for subpartitions */
		parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;

err_del_partitions:
	del_mtd_partitions(master);

	return ret;
}

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *mtd_part_parser_get(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
{
	module_put(p->owner);
}

/*
 * Many partition parsers just expected the core to kfree() all their data in
 * one chunk. Do that by default.
 */
static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
					    int nr_parts)
{
	kfree(pparts);
}

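/*
 * Parsers normally register through the register_mtd_parser() wrapper from
 * <linux/mtd/partitions.h>, which supplies THIS_MODULE as @owner so the
 * module refcounting in mtd_part_parser_get()/_put() works.
 */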
int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
{
	p->owner = owner;

	if (!p->cleanup)
		p->cleanup = &mtd_part_parser_cleanup_default;

	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/* Check DT only when looking for subpartitions. */
static const char * const default_subpartition_types[] = {
	"ofpart",
	NULL
};

static int mtd_part_do_parse(struct mtd_part_parser *parser,
			     struct mtd_info *master,
			     struct mtd_partitions *pparts,
			     struct mtd_part_parser_data *data)
{
	int ret;

	ret = (*parser->parse_fn)(master, &pparts->parts, data);
	pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret);
	if (ret <= 0)
		return ret;

	pr_notice("%d %s partitions found on MTD device %s\n", ret,
		  parser->name, master->name);

	pparts->nr_parts = ret;
	pparts->parser = parser;

	return ret;
}

/**
 * mtd_part_get_compatible_parser - find MTD parser by a compatible string
 *
 * @compat: compatible string describing partitions in a device tree
 *
 * MTD parsers can specify supported partitions by providing a table of
 * compatibility strings. This function finds a parser that advertises support
 * for a passed value of "compatible".
 */
static struct mtd_part_parser *mtd_part_get_compatible_parser(const char *compat)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list) {
		const struct of_device_id *matches;

		matches = p->of_match_table;
		if (!matches)
			continue;

		for (; matches->compatible[0]; matches++) {
			if (!strcmp(matches->compatible, compat) &&
			    try_module_get(p->owner)) {
				ret = p;
				break;
			}
		}

		if (ret)
			break;
	}

	spin_unlock(&part_parser_lock);

	return ret;
}

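/*
 * Example (hypothetical) device tree layout handled by this path; the
 * "partitions" subnode's compatible string selects the parser:
 *
 *	flash@0 {
 *		partitions {
 *			compatible = "fixed-partitions";
 *			#address-cells = <1>;
 *			#size-cells = <1>;
 *
 *			partition@0 {
 *				label = "u-boot";
 *				reg = <0x0 0x40000>;
 *			};
 *		};
 *	};
 */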
static int mtd_part_of_parse(struct mtd_info *master,
			     struct mtd_partitions *pparts)
{
	struct mtd_part_parser *parser;
	struct device_node *np;
	struct property *prop;
	const char *compat;
	const char *fixed = "fixed-partitions";
	int ret, err = 0;

	np = mtd_get_of_node(master);
	if (mtd_is_partition(master))
		of_node_get(np);
	else
		np = of_get_child_by_name(np, "partitions");

	of_property_for_each_string(np, "compatible", prop, compat) {
		parser = mtd_part_get_compatible_parser(compat);
		if (!parser)
			continue;
		ret = mtd_part_do_parse(parser, master, pparts, NULL);
		if (ret > 0) {
			of_node_put(np);
			return ret;
		}
		mtd_part_parser_put(parser);
		if (ret < 0 && !err)
			err = ret;
	}
	of_node_put(np);

	/*
	 * For backward compatibility we have to try the "fixed-partitions"
	 * parser. It supports the old DT format, where partitions were
	 * specified as direct subnodes of a flash device DT node, without
	 * any compatible string we could match.
	 */
	parser = mtd_part_parser_get(fixed);
	if (!parser && !request_module("%s", fixed))
		parser = mtd_part_parser_get(fixed);
	if (parser) {
		ret = mtd_part_do_parse(parser, master, pparts, NULL);
		if (ret > 0)
			return ret;
		mtd_part_parser_put(parser);
		if (ret < 0 && !err)
			err = ret;
	}

	return err;
}

/**
 * parse_mtd_partitions - parse and register MTD partitions
 *
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @data: MTD partition parser-specific data
 *
 * This function tries to find & register partitions on MTD device @master. It
 * uses MTD partition parsers, specified in @types. However, if @types is %NULL,
 * then the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser that succeeds.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o number of found partitions otherwise
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_part_parser_data *data)
{
	struct mtd_partitions pparts = { };
	struct mtd_part_parser *parser;
	int ret, err = 0;

	if (!types)
		types = mtd_is_partition(master) ? default_subpartition_types :
			default_mtd_part_types;

	for ( ; *types; types++) {
		/*
		 * ofpart is a special type that means OF partitioning info
		 * should be used. It requires slightly different logic so it
		 * is handled in a separate function.
		 */
		if (!strcmp(*types, "ofpart")) {
			ret = mtd_part_of_parse(master, &pparts);
		} else {
			pr_debug("%s: parsing partitions %s\n", master->name,
				 *types);
			parser = mtd_part_parser_get(*types);
			if (!parser && !request_module("%s", *types))
				parser = mtd_part_parser_get(*types);
			pr_debug("%s: got parser %s\n", master->name,
				 parser ? parser->name : NULL);
			if (!parser)
				continue;
			ret = mtd_part_do_parse(parser, master, &pparts, data);
			if (ret <= 0)
				mtd_part_parser_put(parser);
		}
		/* Found partitions! */
		if (ret > 0) {
			err = add_mtd_partitions(master, pparts.parts,
						 pparts.nr_parts);
			mtd_part_parser_cleanup(&pparts);
			return err ? err : pparts.nr_parts;
		}
		/*
		 * Stash the first error we see; only report it if no parser
		 * succeeds
		 */
		if (ret < 0 && !err)
			err = ret;
	}
	return err;
}

void mtd_part_parser_cleanup(struct mtd_partitions *parts)
{
	const struct mtd_part_parser *parser;

	if (!parts)
		return;

	parser = parts->parser;
	if (parser) {
		if (parser->cleanup)
			parser->cleanup(parts->parts, parts->nr_parts);

		mtd_part_parser_put(parser);
	}
}

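/*
 * Iterator over parsers of a given type: each call drops the reference held
 * on @start (the previous result) and returns the next matching parser with
 * a fresh module reference, or NULL when the list is exhausted.
 */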
static struct mtd_part_parser *
get_partition_parser_by_type(enum mtd_parser_type type,
			     struct mtd_part_parser *start)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	p = list_prepare_entry(start, &part_parsers, list);
	if (start)
		mtd_part_parser_put(start);

	list_for_each_entry_continue(p, &part_parsers, list) {
		if (p->type == type && try_module_get(p->owner)) {
			ret = p;
			break;
		}
	}

	spin_unlock(&part_parser_lock);

	return ret;
}

static int parse_mtd_partitions_by_type(struct mtd_info *master,
					enum mtd_parser_type type,
					const struct mtd_partition **pparts,
					struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *prev = NULL;
	int ret = 0;

	while (1) {
		struct mtd_part_parser *parser;

		parser = get_partition_parser_by_type(type, prev);
		if (!parser)
			break;

		ret = (*parser->parse_fn)(master, pparts, data);

		if (ret > 0) {
			mtd_part_parser_put(parser);
			printk(KERN_NOTICE
			       "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			break;
		}

		prev = parser;
	}

	return ret;
}

int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);

struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return (struct mtd_info *)mtd;

	return mtd_to_part(mtd)->parent;
}
EXPORT_SYMBOL_GPL(mtdpart_get_master);

uint64_t mtdpart_get_offset(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return 0;

	return mtd_to_part(mtd)->offset;
}
EXPORT_SYMBOL_GPL(mtdpart_get_offset);

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

	return mtd_get_device_size(mtd_to_part(mtd)->parent);
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);