/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include "nodelist.h"
#include "summary.h"
#include "debug.h"

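/*
 * Only this many bytes at the start of an eraseblock are checked for 0xFF
 * before the block is provisionally treated as empty; see the
 * EMPTY_SCAN_SIZE() logic in jffs2_scan_eraseblock() below.
 */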
#define DEFAULT_EMPTY_SCAN_SIZE 256

#define noisy_printk(noise, fmt, ...)					\
do {									\
	if (*(noise)) {							\
		pr_notice(fmt, ##__VA_ARGS__);				\
		(*(noise))--;						\
		if (!(*(noise)))					\
			pr_notice("Further such events for this erase block will not be printed\n"); \
	}								\
} while (0)

static uint32_t pseudo_random;

static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s);

/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
 * Returning an error will abort the mount - bad checksums etc. should just mark the space
 * as dirty.
 */
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s);
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s);

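/*
 * Minimum free space a partially-used block must still have for the scan to
 * keep it as 'nextblock': room for a couple of raw inode nodes, or a full
 * write-buffer page when nodes can't be marked obsolete in place.
 */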
static inline int min_free(struct jffs2_sb_info *c)
{
	uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
		return c->wbuf_pagesize;
#endif
	return min;
}

static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) {
	if (sector_size < DEFAULT_EMPTY_SCAN_SIZE)
		return sector_size;
	else
		return DEFAULT_EMPTY_SCAN_SIZE;
}

static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	int ret;

	if ((ret = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
		return ret;
	if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
		return ret;
	/* Turned wasted size into dirty, since we apparently
	   think it's recoverable now. */
	jeb->dirty_size += jeb->wasted_size;
	c->dirty_size += jeb->wasted_size;
	c->wasted_size -= jeb->wasted_size;
	jeb->wasted_size = 0;
	if (VERYDIRTY(c, jeb->dirty_size)) {
		list_add(&jeb->list, &c->very_dirty_list);
	} else {
		list_add(&jeb->list, &c->dirty_list);
	}
	return 0;
}

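/*
 * Scan the whole medium at mount time: try to mtd_point() the flash for
 * XIP-style access, otherwise fall back to a read buffer, then walk every
 * eraseblock, classify it (clean, dirty, empty, bad, ...) and put it on the
 * appropriate list. The partially-used block with the most free space ends
 * up as 'nextblock'.
 */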
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
	int i, ret;
	uint32_t empty_blocks = 0, bad_blocks = 0;
	unsigned char *flashbuf = NULL;
	uint32_t buf_size = 0;
	struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
#ifndef __ECOS
	size_t pointlen, try_size;

	ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
			(void **)&flashbuf, NULL);
	if (!ret && pointlen < c->mtd->size) {
		/* Don't muck about if it won't let us point to the whole flash */
		jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
			  pointlen);
		mtd_unpoint(c->mtd, 0, pointlen);
		flashbuf = NULL;
	}
	if (ret && ret != -EOPNOTSUPP)
		jffs2_dbg(1, "MTD point failed %d\n", ret);
#endif
	if (!flashbuf) {
		/* For NAND it's quicker to read a whole eraseblock at a time,
		   apparently */
		if (c->mtd->type == MTD_NANDFLASH)
			try_size = c->sector_size;
		else
			try_size = PAGE_SIZE;

		jffs2_dbg(1, "Trying to allocate readbuf of %zu "
			  "bytes\n", try_size);

		flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
		if (!flashbuf)
			return -ENOMEM;

		jffs2_dbg(1, "Allocated readbuf of %zu bytes\n",
			  try_size);

		buf_size = (uint32_t)try_size;
	}

	if (jffs2_sum_active()) {
		s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
		if (!s) {
			JFFS2_WARNING("Can't allocate memory for summary\n");
			ret = -ENOMEM;
			goto out;
		}
	}

	for (i=0; i<c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		cond_resched();

		/* reset summary info for next eraseblock scan */
		jffs2_sum_reset_collected(s);

		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
					    buf_size, s);

		if (ret < 0)
			goto out;

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Now decide which list to put it on */
		switch(ret) {
		case BLK_STATE_ALLFF:
			/*
			 * Empty block.  Since we can't be sure it
			 * was entirely erased, we just queue it for erase
			 * again.  It will be marked as such when the erase
			 * is complete.  Meanwhile we still count it as empty
			 * for later checks.
			 */
			empty_blocks++;
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_CLEANMARKER:
			/* Only a CLEANMARKER node is valid */
			if (!jeb->dirty_size) {
				/* It's actually free */
				list_add(&jeb->list, &c->free_list);
				c->nr_free_blocks++;
			} else {
				/* Dirt */
				jffs2_dbg(1, "Adding all-dirty block at 0x%08x to erase_pending_list\n",
					  jeb->offset);
				list_add(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
			}
			break;

		case BLK_STATE_CLEAN:
			/* Full (or almost full) of clean data. Clean list */
			list_add(&jeb->list, &c->clean_list);
			break;

		case BLK_STATE_PARTDIRTY:
			/* Some data, but not full. Dirty list. */
			/* We want to remember the block with most free space
			   and stick it in the 'nextblock' position to start writing to it. */
			if (jeb->free_size > min_free(c) &&
			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
				/* Better candidate for the next writes to go to */
				if (c->nextblock) {
					ret = file_dirty(c, c->nextblock);
					if (ret)
						goto out;
					/* deleting summary information of the old nextblock */
					jffs2_sum_reset_collected(c->summary);
				}
				/* update collected summary information for the current nextblock */
				jffs2_sum_move_collected(c, s);
				jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
					  __func__, jeb->offset);
				c->nextblock = jeb;
			} else {
				ret = file_dirty(c, jeb);
				if (ret)
					goto out;
			}
			break;

		case BLK_STATE_ALLDIRTY:
			/* Nothing valid - not even a clean marker. Needs erasing. */
			/* For now we just put it on the erasing list. We'll start the erases later */
			jffs2_dbg(1, "Erase block at 0x%08x is not formatted. It will be erased\n",
				  jeb->offset);
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_BADBLOCK:
			jffs2_dbg(1, "Block at 0x%08x is bad\n", jeb->offset);
			list_add(&jeb->list, &c->bad_list);
			c->bad_size += c->sector_size;
			c->free_size -= c->sector_size;
			bad_blocks++;
			break;
		default:
			pr_warn("%s(): unknown block state\n", __func__);
			BUG();
		}
	}

	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;
	}
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
		/* If we're going to start writing into a block which already
		   contains data, and the end of the data isn't page-aligned,
		   skip a little and align it. */

		uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;

		jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n",
			  __func__, skip);
		jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
		jffs2_scan_dirty_space(c, c->nextblock, skip);
	}
#endif
	if (c->nr_erasing_blocks) {
		if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
			pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
			pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",
				  empty_blocks, bad_blocks, c->nr_blocks);
			ret = -EIO;
			goto out;
		}
		spin_lock(&c->erase_completion_lock);
		jffs2_garbage_collect_trigger(c);
		spin_unlock(&c->erase_completion_lock);
	}
	ret = 0;
 out:
	if (buf_size)
		kfree(flashbuf);
#ifndef __ECOS
	else
		mtd_unpoint(c->mtd, 0, c->mtd->size);
#endif
	kfree(s);
	return ret;
}

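/*
 * Read 'len' bytes at flash offset 'ofs' into 'buf'; a short read is treated
 * as an I/O error so callers can rely on the whole range being present.
 */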
static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
			       uint32_t ofs, uint32_t len)
{
	int ret;
	size_t retlen;

	ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
	if (ret) {
		jffs2_dbg(1, "mtd->read(0x%x bytes from 0x%x) returned %d\n",
			  len, ofs, ret);
		return ret;
	}
	if (retlen < len) {
		jffs2_dbg(1, "Read at 0x%x gave only 0x%zx bytes\n",
			  ofs, retlen);
		return -EIO;
	}
	return 0;
}

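/*
 * Decide which BLK_STATE_* a freshly scanned eraseblock ends up in, based on
 * its used/unchecked/dirty accounting. Blocks with only a trivial amount of
 * dirty space are promoted to the clean list, with the dirt recounted as
 * wasted space.
 */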
int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
	    && (!jeb->first_node || !ref_next(jeb->first_node)) )
		return BLK_STATE_CLEANMARKER;

	/* move blocks with max 4 byte dirty space to cleanlist */
	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
		c->dirty_size -= jeb->dirty_size;
		c->wasted_size += jeb->dirty_size;
		jeb->wasted_size += jeb->dirty_size;
		jeb->dirty_size = 0;
		return BLK_STATE_CLEAN;
	} else if (jeb->used_size || jeb->unchecked_size)
		return BLK_STATE_PARTDIRTY;
	else
		return BLK_STATE_ALLDIRTY;
}

#ifdef CONFIG_JFFS2_FS_XATTR
static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_xattr *rx, uint32_t ofs,
				 struct jffs2_summary *s)
{
	struct jffs2_xattr_datum *xd;
	uint32_t xid, version, totlen, crc;
	int err;

	crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
	if (crc != je32_to_cpu(rx->node_crc)) {
		JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
			      ofs, je32_to_cpu(rx->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
			return err;
		return 0;
	}

	xid = je32_to_cpu(rx->xid);
	version = je32_to_cpu(rx->version);

	totlen = PAD(sizeof(struct jffs2_raw_xattr)
			+ rx->name_len + 1 + je16_to_cpu(rx->value_len));
	if (totlen != je32_to_cpu(rx->totlen)) {
		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
			      ofs, je32_to_cpu(rx->totlen), totlen);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
			return err;
		return 0;
	}

	xd = jffs2_setup_xattr_datum(c, xid, version);
	if (IS_ERR(xd))
		return PTR_ERR(xd);

	if (xd->version > version) {
		struct jffs2_raw_node_ref *raw
			= jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
		raw->next_in_ino = xd->node->next_in_ino;
		xd->node->next_in_ino = raw;
	} else {
		xd->version = version;
		xd->xprefix = rx->xprefix;
		xd->name_len = rx->name_len;
		xd->value_len = je16_to_cpu(rx->value_len);
		xd->data_crc = je32_to_cpu(rx->data_crc);

		jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd);
	}

	if (jffs2_sum_active())
		jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
	dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n",
		  ofs, xd->xid, xd->version);
	return 0;
}

static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				struct jffs2_raw_xref *rr, uint32_t ofs,
				struct jffs2_summary *s)
{
	struct jffs2_xattr_ref *ref;
	uint32_t crc;
	int err;

	crc = crc32(0, rr, sizeof(*rr) - 4);
	if (crc != je32_to_cpu(rr->node_crc)) {
		JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
			      ofs, je32_to_cpu(rr->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
			return err;
		return 0;
	}

	if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) {
		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%zd\n",
			      ofs, je32_to_cpu(rr->totlen),
			      PAD(sizeof(struct jffs2_raw_xref)));
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen))))
			return err;
		return 0;
	}

	ref = jffs2_alloc_xattr_ref();
	if (!ref)
		return -ENOMEM;

	/* BEFORE jffs2_build_xattr_subsystem() is called,
	 * and AFTER xattr_ref is marked as a dead xref,
	 * ref->xid is used to store the 32bit xid, xd is not used;
	 * ref->ino is used to store the 32bit inode-number, ic is not used.
	 * Those variables are declared as a union, so their uses are
	 * mutually exclusive. In a similar way, ref->next is temporarily
	 * used to chain all xattr_ref objects. It's re-chained to
	 * jffs2_inode_cache in jffs2_build_xattr_subsystem() correctly.
	 */
	ref->ino = je32_to_cpu(rr->ino);
	ref->xid = je32_to_cpu(rr->xid);
	ref->xseqno = je32_to_cpu(rr->xseqno);
	if (ref->xseqno > c->highest_xseqno)
		c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
	ref->next = c->xref_temp;
	c->xref_temp = ref;

	jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref);

	if (jffs2_sum_active())
		jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
	dbg_xattr("scan xref at %#08x (xid=%u, ino=%u)\n",
		  ofs, ref->xid, ref->ino);
	return 0;
}
#endif

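/*
 * Scan one eraseblock: check for a bad block and a NAND cleanmarker first,
 * then try the summary node at the end of the block; if there is no usable
 * summary, fall back to walking every node header from the start.
 */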
/* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into
   the flash, XIP-style */
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
	struct jffs2_unknown_node *node;
	struct jffs2_unknown_node crcnode;
	uint32_t ofs, prevofs, max_ofs;
	uint32_t hdr_crc, buf_ofs, buf_len;
	int err;
	int noise = 0;

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	int cleanmarkerfound = 0;
#endif

	ofs = jeb->offset;
	prevofs = jeb->offset - 1;

	jffs2_dbg(1, "%s(): Scanning block at 0x%x\n", __func__, ofs);

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (c->mtd->type == MTD_NANDFLASH) {
		int ret;

		if (mtd_block_isbad(c->mtd, jeb->offset))
			return BLK_STATE_BADBLOCK;

		ret = jffs2_check_nand_cleanmarker(c, jeb);
		jffs2_dbg(2, "jffs2_check_nand_cleanmarker returned %d\n", ret);

		/* Even if it's not found, we still scan to see
		   if the block is empty. We use this information
		   to decide whether to erase it or not. */
		switch (ret) {
		case 0:		cleanmarkerfound = 1; break;
		case 1:		break;
		default:	return ret;
		}
	}
#endif

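	/* An eraseblock that was closed cleanly carries a summary node at its
	   very end; if the summary marker and contents check out,
	   jffs2_sum_scan_sumnode() can rebuild the node refs for the whole
	   block without reading every node header below. */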
	if (jffs2_sum_active()) {
		struct jffs2_sum_marker *sm;
		void *sumptr = NULL;
		uint32_t sumlen;

		if (!buf_size) {
			/* XIP case. Just look, point at the summary if it's there */
			sm = (void *)buf + c->sector_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumptr = buf + je32_to_cpu(sm->offset);
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
			}
		} else {
			/* If NAND flash, read a whole page of it. Else just the end */
			if (c->wbuf_pagesize)
				buf_len = c->wbuf_pagesize;
			else
				buf_len = sizeof(*sm);

			/* Read as much as we want into the _end_ of the preallocated buffer */
			err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
						  jeb->offset + c->sector_size - buf_len,
						  buf_len);
			if (err)
				return err;

			sm = (void *)buf + buf_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
				sumptr = buf + buf_size - sumlen;

				/* sm->offset may be wrong even though the MAGIC was right */
				if (sumlen > c->sector_size)
					goto full_scan;

				/* Now, make sure the summary itself is available */
				if (sumlen > buf_size) {
					/* Need to kmalloc for this. */
					sumptr = kmalloc(sumlen, GFP_KERNEL);
					if (!sumptr)
						return -ENOMEM;
					memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);
				}
				if (buf_len < sumlen) {
					/* Need to read more so that the entire summary node is present */
					err = jffs2_fill_scan_buf(c, sumptr,
								  jeb->offset + c->sector_size - sumlen,
								  sumlen - buf_len);
					if (err) {
						if (sumlen > buf_size)
							kfree(sumptr);
						return err;
					}
				}
			}

		}

		if (sumptr) {
			err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random);

			if (buf_size && sumlen > buf_size)
				kfree(sumptr);
			/* If it returns with a real error, bail.
			   If it returns positive, that's a block classification
			   (i.e. BLK_STATE_xxx) so return that too.
			   If it returns zero, fall through to full scan. */
			if (err)
				return err;
		}
	}

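	/* No usable summary: scan the block the slow way, node by node. */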
full_scan:
	buf_ofs = jeb->offset;

	if (!buf_size) {
		/* This is the XIP case -- we're reading _directly_ from the flash chip */
		buf_len = c->sector_size;
	} else {
		buf_len = EMPTY_SCAN_SIZE(c->sector_size);
		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
		if (err)
			return err;
	}

	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
	ofs = 0;
	max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
	/* Scan only EMPTY_SCAN_SIZE of 0xFF before declaring it's empty */
	while(ofs < max_ofs && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
		ofs += 4;

	if (ofs == max_ofs) {
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
		if (c->mtd->type == MTD_NANDFLASH) {
			/* scan oob, take care of cleanmarker */
			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
			jffs2_dbg(2, "jffs2_check_oob_empty returned %d\n",
				  ret);
			switch (ret) {
			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
			case 1:		return BLK_STATE_ALLDIRTY;
			default:	return ret;
			}
		}
#endif
		jffs2_dbg(1, "Block at 0x%08x is empty (erased)\n",
			  jeb->offset);
		if (c->cleanmarker_size == 0)
			return BLK_STATE_CLEANMARKER;	/* don't bother with re-erase */
		else
			return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
	}
	if (ofs) {
		jffs2_dbg(1, "Free space at %08x ends at %08x\n", jeb->offset,
			  jeb->offset + ofs);
		if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
			return err;
		if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
			return err;
	}

	/* Now ofs is a complete physical flash offset as it always was... */
	ofs += jeb->offset;

	noise = 10;

	dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset);

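	/* Walk the rest of the block one node header at a time. Each pass
	   either links a valid node ref, marks space dirty, or skips empty
	   flash; 'ofs' only ever moves forward. */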
scan_more:
	while(ofs < jeb->offset + c->sector_size) {

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Make sure there are node refs available for use */
		err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
		if (err)
			return err;

		cond_resched();

		if (ofs & 3) {
			pr_warn("Eep. ofs 0x%08x not word-aligned!\n", ofs);
			ofs = PAD(ofs);
			continue;
		}
		if (ofs == prevofs) {
			pr_warn("ofs 0x%08x has already been seen. Skipping\n",
				ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		prevofs = ofs;

		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			jffs2_dbg(1, "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n",
				  sizeof(struct jffs2_unknown_node),
				  jeb->offset, c->sector_size, ofs,
				  sizeof(*node));
			if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
				return err;
			break;
		}

		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			jffs2_dbg(1, "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
				  sizeof(struct jffs2_unknown_node),
				  buf_len, ofs);
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
		}

		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];

		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
			uint32_t inbuf_ofs;
			uint32_t empty_start, scan_end;

			empty_start = ofs;
			ofs += 4;
			scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len);

			jffs2_dbg(1, "Found empty flash at 0x%08x\n", ofs);
		more_empty:
			inbuf_ofs = ofs - buf_ofs;
			while (inbuf_ofs < scan_end) {
				if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) {
					pr_warn("Empty flash at 0x%08x ends at 0x%08x\n",
						empty_start, ofs);
					if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
						return err;
					goto scan_more;
				}

				inbuf_ofs+=4;
				ofs += 4;
			}
			/* Ran off end. */
			jffs2_dbg(1, "Empty flash to end of buffer at 0x%08x\n",
				  ofs);

			/* If we're only checking the beginning of a block with a cleanmarker,
			   bail now */
			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
			    c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
				jffs2_dbg(1, "%d bytes at start of block seems clean... assuming all clean\n",
					  EMPTY_SCAN_SIZE(c->sector_size));
				return BLK_STATE_CLEANMARKER;
			}
			if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */
				scan_end = buf_len;
				goto more_empty;
			}

			/* See how much more there is to read in this eraseblock... */
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			if (!buf_len) {
				/* No more to read. Break out of main loop without marking
				   this range of empty space as dirty (because it's not) */
				jffs2_dbg(1, "Empty flash at %08x runs to end of block. Treating as free_space\n",
					  empty_start);
				break;
			}
			/* point never reaches here */
			scan_end = buf_len;
			jffs2_dbg(1, "Reading another 0x%x at 0x%08x\n",
				  buf_len, ofs);
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
			goto more_empty;
		}

		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
			pr_warn("Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n",
				ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
			jffs2_dbg(1, "Dirty bitmask at 0x%08x\n", ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
			pr_warn("Old JFFS2 bitmask found at 0x%08x\n", ofs);
			pr_warn("You cannot use older JFFS2 filesystems with newer kernels\n");
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			/* OK. We're out of possibilities. Whinge and move on */
			noisy_printk(&noise, "%s(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
				     __func__,
				     JFFS2_MAGIC_BITMASK, ofs,
				     je16_to_cpu(node->magic));
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
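		/* The header CRC covers only the first 8 bytes of the common
		   node header (magic, nodetype, totlen). It is computed with
		   the ACCURATE bit forced on, since that bit may have been
		   cleared in place after the CRC was originally written. */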
		/* We seem to have a node of sorts. Check the CRC */
		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			noisy_printk(&noise, "%s(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
				     __func__,
				     ofs, je16_to_cpu(node->magic),
				     je16_to_cpu(node->nodetype),
				     je32_to_cpu(node->totlen),
				     je32_to_cpu(node->hdr_crc),
				     hdr_crc);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}

		if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) {
			/* Eep. Node goes over the end of the erase block. */
			pr_warn("Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
				ofs, je32_to_cpu(node->totlen));
			pr_warn("Perhaps the file system was created with the wrong erase size?\n");
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}

		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
			/* Wheee. This is an obsoleted node */
			jffs2_dbg(2, "Node at 0x%08x is obsolete. Skipping\n",
				  ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			continue;
		}

		switch(je16_to_cpu(node->nodetype)) {
		case JFFS2_NODETYPE_INODE:
			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				jffs2_dbg(1, "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  sizeof(struct jffs2_raw_inode),
					  buf_len, ofs);
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_DIRENT:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				jffs2_dbg(1, "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len,
					  ofs);
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

#ifdef CONFIG_JFFS2_FS_XATTR
		case JFFS2_NODETYPE_XATTR:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				jffs2_dbg(1, "Fewer than %d bytes (xattr node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len,
					  ofs);
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;
		case JFFS2_NODETYPE_XREF:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				jffs2_dbg(1, "Fewer than %d bytes (xref node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len,
					  ofs);
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;
#endif	/* CONFIG_JFFS2_FS_XATTR */

		case JFFS2_NODETYPE_CLEANMARKER:
			jffs2_dbg(1, "CLEANMARKER node found at 0x%08x\n", ofs);
			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
				pr_notice("CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
					  ofs, je32_to_cpu(node->totlen),
					  c->cleanmarker_size);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
					return err;
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else if (jeb->first_node) {
				pr_notice("CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n",
					  ofs, jeb->offset);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
					return err;
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else {
				jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL);

				ofs += PAD(c->cleanmarker_size);
			}
			break;

		case JFFS2_NODETYPE_PADDING:
			if (jffs2_sum_active())
				jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		default:
			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_ROCOMPAT:
				pr_notice("Read-only compatible feature node (0x%04x) found at offset 0x%08x\n",
					  je16_to_cpu(node->nodetype), ofs);
				c->flags |= JFFS2_SB_FLAG_RO;
				if (!(jffs2_is_readonly(c)))
					return -EROFS;
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
					return err;
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_INCOMPAT:
				pr_notice("Incompatible feature node (0x%04x) found at offset 0x%08x\n",
					  je16_to_cpu(node->nodetype), ofs);
				return -EINVAL;

			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
					  je16_to_cpu(node->nodetype), ofs);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
					return err;
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_RWCOMPAT_COPY: {
				jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
					  je16_to_cpu(node->nodetype), ofs);

				jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);

				/* We can't summarise nodes we don't grok */
				jffs2_sum_disable_collecting(s);
				ofs += PAD(je32_to_cpu(node->totlen));
				break;
			}
			}
		}
	}

	if (jffs2_sum_active()) {
		if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) {
			dbg_summary("There is not enough space for "
				    "summary information, disabling for this jeb!\n");
			jffs2_sum_disable_collecting(s);
		}
	}

	jffs2_dbg(1, "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
		  jeb->offset, jeb->free_size, jeb->dirty_size,
		  jeb->unchecked_size, jeb->used_size, jeb->wasted_size);

	/* mark_node_obsolete can add to wasted !! */
	if (jeb->wasted_size) {
		jeb->dirty_size += jeb->wasted_size;
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
	}

	return jffs2_scan_classify_jeb(c, jeb);
}

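/*
 * Find the inode cache for 'ino', allocating and registering an empty one if
 * this is the first node we've seen for that inode. Also tracks the highest
 * inode number seen so far.
 */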
struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inode_cache *ic;

	ic = jffs2_get_ino_cache(c, ino);
	if (ic)
		return ic;

	if (ino > c->highest_ino)
		c->highest_ino = ino;

	ic = jffs2_alloc_inode_cache();
	if (!ic) {
		pr_notice("%s(): allocation of inode cache failed\n", __func__);
		return NULL;
	}
	memset(ic, 0, sizeof(*ic));

	ic->ino = ino;
	ic->nodes = (void *)ic;
	jffs2_add_ino_cache(c, ic);
	if (ino == 1)
		ic->pino_nlink = 1;
	return ic;
}

static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s)
{
	struct jffs2_inode_cache *ic;
	uint32_t crc, ino = je32_to_cpu(ri->ino);

	jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);

	/* We do very little here now. Just check the ino# to which we should attribute
	   this node; we can do all the CRC checking etc. later. There's a tradeoff here --
	   we used to scan the flash once only, reading everything we want from it into
	   memory, then building all our in-core data structures and freeing the extra
	   information. Now we allow the first part of the mount to complete a lot quicker,
	   but we have to go _back_ to the flash in order to finish the CRC checking, etc.
	   Which means that the _full_ amount of time to get to proper write mode with GC
	   operational may actually be _longer_ than before. Sucks to be me. */

	/* Check the node CRC in any case. */
	crc = crc32(0, ri, sizeof(*ri)-8);
	if (crc != je32_to_cpu(ri->node_crc)) {
		pr_notice("%s(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			  __func__, ofs, je32_to_cpu(ri->node_crc), crc);
		/*
		 * We believe totlen because the CRC on the node
		 * _header_ was OK, just the node itself failed.
		 */
		return jffs2_scan_dirty_space(c, jeb,
					      PAD(je32_to_cpu(ri->totlen)));
	}

	ic = jffs2_get_ino_cache(c, ino);
	if (!ic) {
		ic = jffs2_scan_make_ino_cache(c, ino);
		if (!ic)
			return -ENOMEM;
	}

	/* Wheee. It worked */
	jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);

	jffs2_dbg(1, "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
		  je32_to_cpu(ri->offset),
		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize));

	pseudo_random += je32_to_cpu(ri->version);

	if (jffs2_sum_active()) {
		jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset);
	}

	return 0;
}

static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s)
{
	struct jffs2_full_dirent *fd;
	struct jffs2_inode_cache *ic;
	uint32_t checkedlen;
	uint32_t crc;
	int err;

	jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);

	/* We don't get here unless the node is still valid, so we don't have to
	   mask in the ACCURATE bit any more. */
	crc = crc32(0, rd, sizeof(*rd)-8);

	if (crc != je32_to_cpu(rd->node_crc)) {
		pr_notice("%s(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			  __func__, ofs, je32_to_cpu(rd->node_crc), crc);
		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
			return err;
		return 0;
	}

	pseudo_random += je32_to_cpu(rd->version);

	/* Should never happen. Did. (OLPC trac #4184) */
	checkedlen = strnlen(rd->name, rd->nsize);
	if (checkedlen < rd->nsize) {
		pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n",
		       ofs, checkedlen);
	}
	fd = jffs2_alloc_full_dirent(checkedlen+1);
	if (!fd) {
		return -ENOMEM;
	}
	memcpy(&fd->name, rd->name, checkedlen);
	fd->name[checkedlen] = 0;

	crc = crc32(0, fd->name, rd->nsize);
	if (crc != je32_to_cpu(rd->name_crc)) {
		pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			  __func__, ofs, je32_to_cpu(rd->name_crc), crc);
		jffs2_dbg(1, "Name for which CRC failed is (now) '%s', ino #%d\n",
			  fd->name, je32_to_cpu(rd->ino));
		jffs2_free_full_dirent(fd);
		/* FIXME: Why do we believe totlen? */
		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
			return err;
		return 0;
	}
	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
	if (!ic) {
		jffs2_free_full_dirent(fd);
		return -ENOMEM;
	}

	fd->raw = jffs2_link_node_ref(c, jeb, ofs | dirent_node_state(rd),
				      PAD(je32_to_cpu(rd->totlen)), ic);

	fd->next = NULL;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->nhash = full_name_hash(fd->name, checkedlen);
	fd->type = rd->type;
	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

	if (jffs2_sum_active()) {
		jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset);
	}

	return 0;
}

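/*
 * Small helpers for jffs2_rotate_lists(): count the entries on a block list
 * and rotate the list head forward by a given number of entries.
 */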
static int count_list(struct list_head *l)
{
	uint32_t count = 0;
	struct list_head *tmp;

	list_for_each(tmp, l) {
		count++;
	}
	return count;
}

/* Note: This breaks if list_empty(head). I don't care. You
   might, if you copy this code and use it elsewhere :) */
static void rotate_list(struct list_head *head, uint32_t count)
{
	struct list_head *n = head->next;

	list_del(head);
	while(count--) {
		n = n->next;
	}
	list_add(head, n);
}

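/*
 * Rotate each of the block lists by a pseudo-random amount accumulated during
 * the scan, so that garbage collection and free-block selection don't always
 * start from the same physical blocks after every mount, which helps spread
 * wear across the flash.
 */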
void jffs2_rotate_lists(struct jffs2_sb_info *c)
{
	uint32_t x;
	uint32_t rotateby;

	x = count_list(&c->clean_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->clean_list), rotateby);
	}

	x = count_list(&c->very_dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->very_dirty_list), rotateby);
	}

	x = count_list(&c->dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->dirty_list), rotateby);
	}

	x = count_list(&c->erasable_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->erasable_list), rotateby);
	}

	if (c->nr_erasing_blocks) {
		rotateby = pseudo_random % c->nr_erasing_blocks;
		rotate_list((&c->erase_pending_list), rotateby);
	}

	if (c->nr_free_blocks) {
		rotateby = pseudo_random % c->nr_free_blocks;
		rotate_list((&c->free_list), rotateby);
	}
}