// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

#include <linux/crc32.h>
#include <linux/bitmap.h>
#include "ubi.h"

/**
 * init_seen - allocate memory used for debugging.
 * @ubi: UBI device description object
 */
static inline unsigned long *init_seen(struct ubi_device *ubi)
{
	unsigned long *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
		      GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}

/**
 * free_seen - free the seen logic integer array.
 * @seen: integer array of @ubi->peb_count size
 */
static inline void free_seen(unsigned long *seen)
{
	kfree(seen);
}

/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: the PEB to be marked as seen
 * @seen: integer array of @ubi->peb_count size
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	set_bit(pnum, seen);
}

/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: integer array of @ubi->peb_count size
 */
static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

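	/*
	 * The fastmap layout: super block, header, two pool descriptors,
	 * one EC entry per PEB, a volume header plus an EBA descriptor for
	 * every possible volume, and at most one __be32 EBA entry per PEB.
	 * The total is rounded up to full LEBs.
	 */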
	size = sizeof(struct ubi_fm_sb) +
	       sizeof(struct ubi_fm_hdr) +
	       sizeof(struct ubi_fm_scan_pool) +
	       sizeof(struct ubi_fm_scan_pool) +
	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
	       ((sizeof(struct ubi_fm_eba) +
		 sizeof(struct ubi_fm_volhdr)) *
		(UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) +
	       (ubi->peb_count * sizeof(__be32));
	return roundup(size, ubi->leb_size);
}

/**
 * new_fm_vbuf - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_io_buf on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_io_buf *new;
	struct ubi_vid_hdr *vh;

	new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	vh = ubi_get_vid_hdr(new);
	vh->vol_type = UBI_VID_DYNAMIC;
	vh->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	vh->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new LEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = ubi_alloc_aeb(ai, pnum, ec);
	if (!aeb)
		return -ENOMEM;

	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * An ERR_PTR value indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;

	av = ubi_add_av(ai, vol_id);
	if (IS_ERR(av))
		return av;

	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);
	return av;
}

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the to be assigned SEB
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			ubi_free_aeb(ai, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
			if (!victim)
				return -ENOMEM;

			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			aeb->sqnum = new_aeb->sqnum;
			ubi_free_aeb(ai, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}

/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	int vol_id = be32_to_cpu(new_vh->vol_id);
	struct ubi_ainf_volume *av;

	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
		ubi_free_aeb(ai, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	av = ubi_find_av(ai, vol_id);
	if (!av) {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		ubi_free_aeb(ai, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(vol_id == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				ubi_free_aeb(ai, aeb);
				return;
			}
		}
	}
}

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		kfree(ech);
		return -ENOMEM;
	}

	vh = ubi_get_vid_hdr(vb);

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

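		/*
		 * An empty VID header means the PEB was erased or put back
		 * to the free list after the fastmap was written; a valid
		 * one means it carries LEB data newer than the fastmap,
		 * which must be merged into the attach info.
		 */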
		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);

			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);

			if (err == UBI_IO_FF_BITFLIPS)
				scrub = 1;

			ret = add_aeb(ai, free, pnum, ec, scrub);
			if (ret)
				goto out;
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_buf(vb);
	kfree(ech);
	return ret;
}

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl->size);
	wl_pool_size = be16_to_cpu(fmpl_wl->size);
	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (IS_ERR(av)) {
			if (PTR_ERR(av) == -EEXIST)
				ubi_err(ubi, "volume (ID %i) already exists",
					be32_to_cpu(fmvhdr->vol_id));

			goto fail_bad;
		}

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if (pnum < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

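	/*
	 * The pools are scanned last: a PEB sitting in a pool may carry
	 * newer LEB data than the EBA tables parsed above and thus
	 * overrides them (see update_vol()).
	 */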
	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}

	return ret;
}

/**
 * find_fm_anchor - find the most recent Fastmap superblock (anchor)
 * @ai: UBI attach info to be filled
 */
static int find_fm_anchor(struct ubi_attach_info *ai)
{
	int ret = -1;
	struct ubi_ainf_peb *aeb;
	unsigned long long max_sqnum = 0;

	list_for_each_entry(aeb, &ai->fastmap, u.list) {
		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
			max_sqnum = aeb->sqnum;
			ret = aeb->pnum;
		}
	}

	return ret;
}

static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
				      struct ubi_ainf_peb *old)
{
	struct ubi_ainf_peb *new;

	new = ubi_alloc_aeb(ai, old->pnum, old->ec);
	if (!new)
		return NULL;

	new->vol_id = old->vol_id;
	new->sqnum = old->sqnum;
	new->lnum = old->lnum;
	new->scrub = old->scrub;
	new->copy_flag = old->copy_flag;

	return new;
}

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @scan_ai: UBI attach info from the first 64 PEBs,
 *           used to find the most recent Fastmap data structure
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     struct ubi_attach_info *scan_ai)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	struct ubi_ainf_peb *aeb;
	int i, used_blocks, pnum, fm_anchor, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	fm_anchor = find_fm_anchor(scan_ai);
	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	/* Copy all (possible) fastmap blocks into our new attach structure. */
	list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
		struct ubi_ainf_peb *new;

		new = clone_aeb(ai, aeb);
		if (!new)
			return -ENOMEM;

		list_add(&new->u.list, &ai->fastmap);
	}

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	vh = ubi_get_vid_hdr(vb);

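	/*
	 * Read all fastmap blocks and sanity-check them: block 0 must be
	 * the anchor found by the scan, every block must carry the
	 * expected volume ID and a matching image sequence number.
	 */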
	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		if (i == 0 && pnum != fm_anchor) {
			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
				pnum, fm_anchor);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
				       pnum, 0, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;
	ubi->fast_attach = 1;

	ubi_free_vid_buf(vb);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_buf(vb);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}

int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
{
	struct ubi_device *ubi = vol->ubi;

	if (!ubi->fast_attach)
		return 0;

	vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long),
				GFP_KERNEL);
	if (!vol->checkmap)
		return -ENOMEM;

	return 0;
}

void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
{
	kfree(vol->checkmap);
}

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the to be written fastmap
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_io_buf *avbuf, *dvbuf;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	unsigned long *seen_pebs;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avbuf) {
		ret = -ENOMEM;
		goto out;
	}

	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvbuf) {
		ret = -ENOMEM;
		goto out_free_avbuf;
	}

	avhdr = ubi_get_vid_hdr(avbuf);
	dvhdr = ubi_get_vid_hdr(dvbuf);

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_free_dvbuf;
	}

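	/*
	 * Hold both locks while assembling the fastmap so that pools,
	 * WL trees and volume EBA tables are captured as one consistent
	 * snapshot.
	 */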
	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++) {
			struct ubi_eba_leb_desc ldesc;

			ubi_eba_get_ldesc(vol, j, &ldesc);
			feba->pnum[j] = cpu_to_be32(ldesc.pnum);
		}

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_free_seen;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_free_seen;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
					new_fm->e[i]->pnum, 0, ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_free_seen;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_free_seen:
	free_seen(seen_pebs);
out_free_dvbuf:
	ubi_free_vid_buf(dvbuf);
out_free_avbuf:
	ubi_free_vid_buf(avbuf);

out:
	return ret;
}

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_io_buf *vb = NULL;
	struct ubi_vid_hdr *vh;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm)
		goto out;

	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vb)
		goto out_free_fm;

	vh = ubi_get_vid_hdr(vb);

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_buf(vb);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}

/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	down_write(&ubi->fm_protect);
	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return 0;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = erase_block(ubi, old_fm->e[i]->pnum);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

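	/* Take the reserved anchor PEB (if one was prepared) for fastmap block 0. */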
	spin_lock(&ubi->wl_lock);
	tmp_e = ubi->fm_anchor;
	ubi->fm_anchor = NULL;
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
					new_fm->e[i] = NULL;
				}
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++) {
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
				new_fm->e[i] = NULL;
			}

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}

	ret = ubi_write_fastmap(ubi, new_fm);
	if (ret)
		goto err;

out_unlock:
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);
	up_write(&ubi->fm_protect);
	kfree(old_fm);

	ubi_ensure_anchor_pebs(ubi);

	return ret;

err:
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = invalidate_fastmap(ubi);
	if (ret < 0) {
		ubi_err(ubi, "Unable to invalidate current fastmap!");
		ubi_ro_mode(ubi);
	} else {
		return_fm_pebs(ubi, old_fm);
		return_fm_pebs(ubi, new_fm);
		ret = 0;
	}

	kfree(new_fm);
	goto out_unlock;
}