blob: 0fc68e0e192db09434eb434065bbe9288871dcf2 [file] [log] [blame]
lh9ed821d2023-04-07 01:36:19 -07001/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright © 2001-2007 Red Hat, Inc.
5 *
6 * Created by David Woodhouse <dwmw2@infradead.org>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/kernel.h>
15#include <linux/mtd/mtd.h>
16#include <linux/compiler.h>
17#include <linux/sched.h> /* For cond_resched() */
18#include "nodelist.h"
19#include "debug.h"
20
21extern void jffs2_quick_gc_done(struct jffs2_sb_info *c);
22
23/**
24 * jffs2_reserve_space - request physical space to write nodes to flash
25 * @c: superblock info
26 * @minsize: Minimum acceptable size of allocation
27 * @len: Returned value of allocation length
28 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
29 *
30 * Requests a block of physical space on the flash. Returns zero for success
31 * and puts 'len' into the appropriate place, or returns -ENOSPC or other
 * error if appropriate. Doesn't return the allocated length separately,
 * since it is already reported through @len.
34 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
35 * allocation semaphore, to prevent more than one allocation from being
36 * active at any time. The semaphore is later released by jffs2_commit_allocation()
37 *
38 * jffs2_reserve_space() may trigger garbage collection in order to make room
39 * for the requested allocation.
40 */
41
42static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
43 uint32_t *len, uint32_t sumsize);
44
45int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
46 uint32_t *len, int prio, uint32_t sumsize)
47{
48 int ret = -EAGAIN;
49 int blocksneeded = c->resv_blocks_write;
50 /* align it */
51 minsize = PAD(minsize);
52
53 jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
54 mutex_lock(&c->alloc_sem);
55
56 jffs2_dbg(1, "%s(): alloc sem got\n", __func__);
57
58 spin_lock(&c->erase_completion_lock);
59
60 /* this needs a little more thought (true <tglx> :)) */
61 while(ret == -EAGAIN) {
62 while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
63 uint32_t dirty, avail;
64
65 /* calculate real dirty size
66 * dirty_size contains blocks on erase_pending_list
67 * those blocks are counted in c->nr_erasing_blocks.
68 * If one block is actually erased, it is not longer counted as dirty_space
69 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
70 * with c->nr_erasing_blocks * c->sector_size again.
71 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
72 * This helps us to force gc and pick eventually a clean block to spread the load.
73 * We add unchecked_size here, as we hopefully will find some space to use.
74 * This will affect the sum only once, as gc first finishes checking
75 * of nodes.
76 */
77 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
78 if (dirty < c->nospc_dirty_size) {
79 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
80 jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
81 __func__);
82 break;
83 }
84 jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
85 dirty, c->unchecked_size,
86 c->sector_size);
87
88 spin_unlock(&c->erase_completion_lock);
89 mutex_unlock(&c->alloc_sem);
90 return -ENOSPC;
91 }
92
93 /* Calc possibly available space. Possibly available means that we
94 * don't know, if unchecked size contains obsoleted nodes, which could give us some
95 * more usable space. This will affect the sum only once, as gc first finishes checking
96 * of nodes.
97 + Return -ENOSPC, if the maximum possibly available space is less or equal than
98 * blocksneeded * sector_size.
99 * This blocks endless gc looping on a filesystem, which is nearly full, even if
100 * the check above passes.
101 */
102 avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
103 if ( (avail / c->sector_size) <= blocksneeded) {
104 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
105 jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
106 __func__);
107 break;
108 }
109
110 jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
111 avail, blocksneeded * c->sector_size);
112 spin_unlock(&c->erase_completion_lock);
113 mutex_unlock(&c->alloc_sem);
114 return -ENOSPC;
115 }
116
117 mutex_unlock(&c->alloc_sem);
118
119 jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
120 c->nr_free_blocks, c->nr_erasing_blocks,
121 c->free_size, c->dirty_size, c->wasted_size,
122 c->used_size, c->erasing_size, c->bad_size,
123 c->free_size + c->dirty_size +
124 c->wasted_size + c->used_size +
125 c->erasing_size + c->bad_size,
126 c->flash_size);
127 spin_unlock(&c->erase_completion_lock);
128
129 ret = jffs2_garbage_collect_pass(c);
130
131 if (ret == -EAGAIN) {
132 spin_lock(&c->erase_completion_lock);
133 if (c->nr_erasing_blocks &&
134 list_empty(&c->erase_pending_list) &&
135 list_empty(&c->erase_complete_list)) {
136 DECLARE_WAITQUEUE(wait, current);
137 set_current_state(TASK_UNINTERRUPTIBLE);
138 add_wait_queue(&c->erase_wait, &wait);
139 jffs2_dbg(1, "%s waiting for erase to complete\n",
140 __func__);
141 spin_unlock(&c->erase_completion_lock);
142
143 schedule();
144 remove_wait_queue(&c->erase_wait, &wait);
145 } else
146 spin_unlock(&c->erase_completion_lock);
147 } else if (ret)
148 return ret;
149
150 cond_resched();
151
152 if (signal_pending(current))
153 return -EINTR;
154
155 mutex_lock(&c->alloc_sem);
156 spin_lock(&c->erase_completion_lock);
157 }
158
159 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
160 if (ret) {
161 jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
162 }
163 }
164 spin_unlock(&c->erase_completion_lock);
165 if (!ret)
166 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
167 if (ret)
168 mutex_unlock(&c->alloc_sem);
169 return ret;
170}
171
172int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
173 uint32_t *len, uint32_t sumsize)
174{
175 int ret;
176 minsize = PAD(minsize);
177
178 jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
179
180 while (true) {
181 spin_lock(&c->erase_completion_lock);
182 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
183 if (ret) {
184 jffs2_dbg(1, "%s(): looping, ret is %d\n",
185 __func__, ret);
186 }
187 spin_unlock(&c->erase_completion_lock);
188
189 if (ret == -EAGAIN)
190 cond_resched();
191 else
192 break;
193 }
194 if (!ret)
195 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
196
197 return ret;
198}
199
200
201/* Classify nextblock (clean, dirty of verydirty) and force to select an other one */
202
203static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
204{
205
206 if (c->nextblock == NULL) {
207 jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
208 __func__, jeb->offset);
209 return;
210 }
211 /* Check, if we have a dirty block now, or if it was dirty already */
212 if (ISDIRTY (c->flags, jeb->wasted_size + jeb->dirty_size)) {
213 c->dirty_size += jeb->wasted_size;
214 c->wasted_size -= jeb->wasted_size;
215 jeb->dirty_size += jeb->wasted_size;
216 jeb->wasted_size = 0;
217 if (VERYDIRTY(c, jeb->dirty_size)) {
218 jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
219 jeb->offset, jeb->free_size, jeb->dirty_size,
220 jeb->used_size);
221 list_add_tail(&jeb->list, &c->very_dirty_list);
222 } else {
223 jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
224 jeb->offset, jeb->free_size, jeb->dirty_size,
225 jeb->used_size);
226 list_add_tail(&jeb->list, &c->dirty_list);
227 }
228 } else {
229 jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
230 jeb->offset, jeb->free_size, jeb->dirty_size,
231 jeb->used_size);
232 list_add_tail(&jeb->list, &c->clean_list);
233 }
234 c->nextblock = NULL;
235
236}
237
238/* Select a new jeb for nextblock */
239
/*
 * Pick a fresh eraseblock off c->free_list and install it as c->nextblock.
 *
 * Called with erase_completion_lock held; may drop and retake it (around
 * wbuf flushing and jffs2_erase_pending_blocks()).
 *
 * Returns 0 on success, -EAGAIN if the caller must re-evaluate and retry
 * (an erase was triggered or may have failed), or -ENOSPC if there is no
 * block left anywhere.
 */
static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		/* No free block: try to get an erasable block erased */
		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		/* Still nothing erasing: flush the write buffer so blocks
		   waiting on it can move to the erasable_list */
		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				c->nr_erasing_blocks, c->nr_free_blocks,
				list_empty(&c->erasable_list) ? "yes" : "no",
				list_empty(&c->erasing_list) ? "yes" : "no",
				list_empty(&c->erase_pending_list) ? "yes" : "no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	/* Detach the head of the free list and make it the new nextblock */
	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}
312
/* Called with alloc sem _and_ erase_completion_lock.
 *
 * Low-level reservation: make sure c->nextblock has at least 'minsize'
 * bytes free (after accounting for any summary data that must fit at the
 * end of the block), closing the current nextblock and selecting a new one
 * if necessary.  On success *len receives the usable free space in the
 * chosen block.  May temporarily drop erase_completion_lock.
 * Returns 0, or a negative error (-EAGAIN/-ENOSPC propagated from
 * jffs2_find_nextblock(), or an error from summary/prealloc helpers). */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;	/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
				    "summary->size=%d , sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or we have to
		   write out summary information now, close this jeb and select new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_write_sumnode() couldn't write out the summary information
				   diabling summary for this jeb and free the collected information
				 */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* keep always valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				/* The flush may have refiled nextblock; re-read it */
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);

			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			if (ret)
				return ret;

			/* File the remainder of the block as an obsolete node */
			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		/* A freshly-taken block should be empty apart from the
		   cleanmarker; anything else means corrupted accounting */
		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
				jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but We've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}
447
/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @ofs: flash offset of the new node, ORed with its REF_* state bits
 * @len: length of this physical node
 * @ic: inode cache to associate the new reference with (passed through to
 *      jffs2_link_node_ref)
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */
459
struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	/* The low two bits of 'ofs' carry the node's REF_* state flags */
	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
			ofs & ~3, ofs & 3);
		if (c->nextblock)
			pr_warn("nextblock 0x%08x", c->nextblock->offset);
		else
			pr_warn("No nextblock");
		pr_cont(", expected at %08x\n",
			jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	/* Block now completely full and not dirty enough to GC: refile it
	   onto the clean list and drop it as nextblock */
	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(c->flags, jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}
514
515
516void jffs2_complete_reservation(struct jffs2_sb_info *c)
517{
518 jffs2_dbg(1, "jffs2_complete_reservation()\n");
519 spin_lock(&c->erase_completion_lock);
520 jffs2_garbage_collect_trigger(c);
521 spin_unlock(&c->erase_completion_lock);
522 mutex_unlock(&c->alloc_sem);
523}
524
525static inline int on_list(struct list_head *obj, struct list_head *head)
526{
527 struct list_head *this;
528
529 list_for_each(this, head) {
530 if (this == obj) {
531 jffs2_dbg(1, "%p is on list at %p\n", obj, head);
532 return 1;
533
534 }
535 }
536 return 0;
537}
538
/*
 * Mark a node on flash as obsolete: update per-block and filesystem-wide
 * space accounting, refile the containing eraseblock onto the appropriate
 * list (possibly scheduling it for erase), and — when the medium allows
 * in-place marking and we are not scanning/building — clear the ACCURATE
 * bit in the node header on flash and unlink the ref from its inode list.
 */
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if(unlikely(!ref)) {
		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		pr_notice("raw node at 0x%08x is off the end of device!\n",
			  ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		/* NOTE(review): the message below prints jeb->used_size even
		   though jeb->unchecked_size is what was checked — looks like
		   a copy/paste slip in the debug text; confirm upstream. */
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
			  ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
			  ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	// Take care, that wasted size is taken into concern
	if ((jeb->dirty_size || ISDIRTY(c->flags, jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		jffs2_dbg(1, "Dirtying\n");
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset);
				addedsize = 0; /* To fool the refiling code later */
			} else {
				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset);
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		/* Not enough to be worth GCing yet: account as wasted */
		jffs2_dbg(1, "Wasting\n");
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* Refile the eraseblock according to its new state */
	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		/* Block is now entirely obsolete: queue it for erase */
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(c->flags, jeb->dirty_size) && !ISDIRTY(c->flags, jeb->dirty_size - addedsize)) {
		/* This obsoletion pushed the block over the dirty threshold */
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		/* ...or over the very-dirty threshold */
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
			je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	/* Clear the ACCURATE bit in place on flash to mark the node dead */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		/* Walk the owner's node chain to find and unlink 'ref' */
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
			case RAWNODE_CLASS_XATTR_DATUM:
				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
				break;
			case RAWNODE_CLASS_XATTR_REF:
				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
				break;
#endif
			default:
				/* Drop the inode cache once no nodes remain
				   and the inode itself is unlinked */
				if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
					jffs2_del_ino_cache(c, ic);
				break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}
796
797int jffs2_sb_is_dirty(struct jffs2_sb_info *c)
798{
799 if(c->quick_wait == 1) {
800 c->quick_wait = 2;
801 c->quick_gcblock_count = 0;
802 return 1;
803 }
804
805 if(c->quick_gcblock_count > 40000)
806 return 0;
807
808 if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
809 return 1;
810 } else if (!list_empty(&c->erasable_list)) {
811 return 1;
812 } else if (!list_empty(&c->very_dirty_list)) {
813 return 1;
814 } else if (!list_empty(&c->dirty_list)) {
815 if (c->quick_wait == 2)
816 c->quick_gcblock_count += 1;
817 return 1;
818 } else if (!list_empty(&c->erasable_pending_wbuf_list)) {
819 return 1;
820 }
821
822 if(c->gcblock) {
823 if(c->gcblock->used_size > 0)
824 return 1;
825 } else
826 c->quick_gcblock_count += 1;
827
828 /* Eep. All were empty */
829 return 0;
830}
831
/*
 * Decide whether the background garbage-collector thread has work to do.
 * Returns 1 if it should wake up, 0 if it may keep sleeping.
 */
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	/* Vendor quick-GC mode: wake whenever jffs2_sb_is_dirty() reports
	   outstanding work */
	if(c->flags & JFFS2_SB_FLAG_QUICK_GC) {
		if(jffs2_sb_is_dirty(c))
			return 1;
	}

	/* Pending or completed erases always need servicing */
	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	/* Unchecked nodes left over from mount must be CRC-checked by GC */
	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino);
		return 1;
	}


	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is not longer counted as dirty_space
	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
	 * with c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
	 * This helps us to force gc and pick eventually a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	/* Wake when free space is below the GC trigger AND there is enough
	   dirty space for GC to actually reclaim something */
	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	/* Also wake once enough very-dirty blocks have accumulated */
	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");

	return ret;
}
884}