blob: 175da421d0a91662fc6b814324f63ff80c75d0a2 [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 * This file is part of UBIFS.
3 *
4 * Copyright (C) 2006-2008 Nokia Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 51
17 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 *
19 * Authors: Adrian Hunter
20 * Artem Bityutskiy (Битюцкий Артём)
21 */
22
23/* This file implements TNC functions for committing */
24
25#include <linux/random.h>
26#include "ubifs.h"
27
/**
 * make_idx_node - make an index node for fill-the-gaps method of TNC commit.
 * @c: UBIFS file-system description object
 * @idx: buffer in which to place new index node
 * @znode: znode from which to make new index node
 * @lnum: LEB number where new index node will be written
 * @offs: offset where new index node will be written
 * @len: length of new index node
 *
 * Returns %0 on success (or the error code from 'insert_old_idx_znode()'),
 * and %-EINVAL if the znode contains a branch with a zero LEB number or a
 * zero length.
 */
static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
			 struct ubifs_znode *znode, int lnum, int offs, int len)
{
	struct ubifs_znode *zp;
	int i, err;

	/* Make index node */
	idx->ch.node_type = UBIFS_IDX_NODE;
	idx->child_cnt = cpu_to_le16(znode->child_cnt);
	idx->level = cpu_to_le16(znode->level);
	for (i = 0; i < znode->child_cnt; i++) {
		struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
		struct ubifs_zbranch *zbr = &znode->zbranch[i];

		key_write_idx(c, &zbr->key, &br->key);
		br->lnum = cpu_to_le32(zbr->lnum);
		br->offs = cpu_to_le32(zbr->offs);
		br->len = cpu_to_le32(zbr->len);
		/* A committed branch must reference a real on-flash node */
		if (!zbr->lnum || !zbr->len) {
			ubifs_err(c, "bad ref in znode");
			ubifs_dump_znode(c, znode);
			if (zbr->znode)
				ubifs_dump_znode(c, zbr->znode);

			return -EINVAL;
		}
	}
	ubifs_prepare_node(c, idx, len, 0);

	/* Record the new on-flash position of this znode */
	znode->lnum = lnum;
	znode->offs = offs;
	znode->len = len;

	err = insert_old_idx_znode(c, znode);

	/* Update the parent (or the root zbranch if there is no parent) */
	zp = znode->parent;
	if (zp) {
		struct ubifs_zbranch *zbr;

		zbr = &zp->zbranch[znode->iip];
		zbr->lnum = lnum;
		zbr->offs = offs;
		zbr->len = len;
	} else {
		c->zroot.lnum = lnum;
		c->zroot.offs = offs;
		c->zroot.len = len;
	}
	/* Index size accounting is done in 8-byte aligned units */
	c->calc_idx_sz += ALIGN(len, 8);

	atomic_long_dec(&c->dirty_zn_cnt);

	ubifs_assert(ubifs_zn_dirty(znode));
	ubifs_assert(ubifs_zn_cow(znode));

	/*
	 * Note, unlike 'write_index()' we do not add memory barriers here
	 * because this function is called with @c->tnc_mutex locked.
	 */
	__clear_bit(DIRTY_ZNODE, &znode->flags);
	__clear_bit(COW_ZNODE, &znode->flags);

	return err;
}
102
/**
 * fill_gap - make index nodes in gaps in dirty index LEBs.
 * @c: UBIFS file-system description object
 * @lnum: LEB number that gap appears in
 * @gap_start: offset of start of gap
 * @gap_end: offset of end of gap
 * @dirt: adds dirty space to this
 *
 * This function returns the number of index nodes written into the gap, or a
 * negative error code on failure.
 */
static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
		    int *dirt)
{
	int len, gap_remains, gap_pos, written, pad_len;

	/* Gaps are created and consumed in 8-byte aligned units */
	ubifs_assert((gap_start & 7) == 0);
	ubifs_assert((gap_end & 7) == 0);
	ubifs_assert(gap_end >= gap_start);

	gap_remains = gap_end - gap_start;
	if (!gap_remains)
		return 0;
	gap_pos = gap_start;
	written = 0;
	/* @c->enext is the next dirty znode awaiting layout */
	while (c->enext) {
		len = ubifs_idx_node_sz(c, c->enext->child_cnt);
		if (len < gap_remains) {
			struct ubifs_znode *znode = c->enext;
			const int alen = ALIGN(len, 8);
			int err;

			ubifs_assert(alen <= gap_remains);
			err = make_idx_node(c, c->ileb_buf + gap_pos, znode,
					    lnum, gap_pos, len);
			if (err)
				return err;
			gap_remains -= alen;
			gap_pos += alen;
			/*
			 * Advance to the next znode on the commit list; the
			 * list is circular, so reaching @c->cnext means all
			 * znodes have been laid out.
			 */
			c->enext = znode->cnext;
			if (c->enext == c->cnext)
				c->enext = NULL;
			written += 1;
		} else
			break;
	}
	if (gap_end == c->leb_size) {
		/* Gap runs to the LEB end: pad only up to min. I/O size */
		c->ileb_len = ALIGN(gap_pos, c->min_io_size);
		/* Pad to end of min_io_size */
		pad_len = c->ileb_len - gap_pos;
	} else
		/* Pad to end of gap */
		pad_len = gap_remains;
	dbg_gc("LEB %d:%d to %d len %d nodes written %d wasted bytes %d",
	       lnum, gap_start, gap_end, gap_end - gap_start, written, pad_len);
	ubifs_pad(c, c->ileb_buf + gap_pos, pad_len);
	*dirt += pad_len;
	return written;
}
161
162/**
163 * find_old_idx - find an index node obsoleted since the last commit start.
164 * @c: UBIFS file-system description object
165 * @lnum: LEB number of obsoleted index node
166 * @offs: offset of obsoleted index node
167 *
168 * Returns %1 if found and %0 otherwise.
169 */
170static int find_old_idx(struct ubifs_info *c, int lnum, int offs)
171{
172 struct ubifs_old_idx *o;
173 struct rb_node *p;
174
175 p = c->old_idx.rb_node;
176 while (p) {
177 o = rb_entry(p, struct ubifs_old_idx, rb);
178 if (lnum < o->lnum)
179 p = p->rb_left;
180 else if (lnum > o->lnum)
181 p = p->rb_right;
182 else if (offs < o->offs)
183 p = p->rb_left;
184 else if (offs > o->offs)
185 p = p->rb_right;
186 else
187 return 1;
188 }
189 return 0;
190}
191
/**
 * is_idx_node_in_use - determine if an index node can be overwritten.
 * @c: UBIFS file-system description object
 * @key: key of index node
 * @level: index node level
 * @lnum: LEB number of index node
 * @offs: offset of index node
 *
 * If @key / @lnum / @offs identify an index node that was not part of the old
 * index, then this function returns %0 (obsolete). Else if the index node was
 * part of the old index but is now dirty %1 is returned, else if it is clean %2
 * is returned. A negative error code is returned on failure.
 */
static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
			      int level, int lnum, int offs)
{
	int in_tnc = is_idx_node_in_tnc(c, key, level, lnum, offs);

	if (in_tnc < 0)
		return in_tnc; /* Error code */
	/* Not in the TNC, but it may still back the pre-commit index */
	if (in_tnc == 0 && find_old_idx(c, lnum, offs))
		return 1;
	return in_tnc;
}
218
/**
 * layout_leb_in_gaps - layout index nodes using in-the-gaps method.
 * @c: UBIFS file-system description object
 * @p: return LEB number here
 *
 * This function lays out new index nodes for dirty znodes using in-the-gaps
 * method of TNC commit.
 * This function merely puts the next znode into the next gap, making no attempt
 * to try to maximise the number of znodes that fit.
 * This function returns the number of index nodes written into the gaps, or a
 * negative error code on failure.
 */
static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	int lnum, dirt = 0, gap_start, gap_end, err, written, tot_written;

	tot_written = 0;
	/* Get an index LEB with lots of obsolete index nodes */
	lnum = ubifs_find_dirty_idx_leb(c);
	if (lnum < 0)
		/*
		 * There also may be dirt in the index head that could be
		 * filled, however we do not check there at present.
		 */
		return lnum; /* Error code */
	*p = lnum;
	dbg_gc("LEB %d", lnum);
	/*
	 * Scan the index LEB. We use the generic scan for this even though
	 * it is more comprehensive and less efficient than is needed for this
	 * purpose.
	 */
#ifdef CONFIG_UBIFS_SHARE_BUFFER
	/*
	 * @c->ileb_buf is shared between file systems in this configuration;
	 * serialize access and record contention for debugging.
	 */
	if (mutex_trylock(&ubifs_sbuf_mutex) == 0) {
		atomic_long_inc(&ubifs_sbuf_lock_count);
		ubifs_err(c, "trylock fail count %ld\n", READ_LOCK_COUNT);
		mutex_lock(&ubifs_sbuf_mutex);
		ubifs_err(c, "locked count %ld\n", READ_LOCK_COUNT);
	}
#endif
	sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
	c->ileb_len = 0;
	if (IS_ERR(sleb)) {
#ifdef CONFIG_UBIFS_SHARE_BUFFER
		mutex_unlock(&ubifs_sbuf_mutex);
#endif
		return PTR_ERR(sleb);
	}
	gap_start = 0;
	list_for_each_entry(snod, &sleb->nodes, list) {
		struct ubifs_idx_node *idx;
		int in_use, level;

		ubifs_assert(snod->type == UBIFS_IDX_NODE);
		idx = snod->node;
		key_read(c, ubifs_idx_key(c, idx), &snod->key);
		level = le16_to_cpu(idx->level);
		/* Determine if the index node is in use (not obsolete) */
		in_use = is_idx_node_in_use(c, &snod->key, level, lnum,
					    snod->offs);
		if (in_use < 0) {
			ubifs_scan_destroy(sleb);
#ifdef CONFIG_UBIFS_SHARE_BUFFER
			mutex_unlock(&ubifs_sbuf_mutex);
#endif
			return in_use; /* Error code */
		}
		if (in_use) {
			/* Dirty (but referenced) nodes add dirty space */
			if (in_use == 1)
				dirt += ALIGN(snod->len, 8);
			/*
			 * The obsolete index nodes form gaps that can be
			 * overwritten. This gap has ended because we have
			 * found an index node that is still in use
			 * i.e. not obsolete
			 */
			gap_end = snod->offs;
			/* Try to fill gap */
			written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
			if (written < 0) {
				ubifs_scan_destroy(sleb);
#ifdef CONFIG_UBIFS_SHARE_BUFFER
				mutex_unlock(&ubifs_sbuf_mutex);
#endif
				return written; /* Error code */
			}
			tot_written += written;
			/* The next gap starts after this in-use node */
			gap_start = ALIGN(snod->offs + snod->len, 8);
		}
	}
	ubifs_scan_destroy(sleb);
	c->ileb_len = c->leb_size;
	/* The final gap runs to the end of the LEB */
	gap_end = c->leb_size;
	/* Try to fill gap */
	written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
	if (written < 0) {
#ifdef CONFIG_UBIFS_SHARE_BUFFER
		mutex_unlock(&ubifs_sbuf_mutex);
#endif
		return written; /* Error code */
	}
	tot_written += written;
	if (tot_written == 0) {
		struct ubifs_lprops lp;

#ifdef CONFIG_UBIFS_SHARE_BUFFER
		mutex_unlock(&ubifs_sbuf_mutex);
#endif
		dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
		err = ubifs_read_one_lp(c, lnum, &lp);
		if (err)
			return err;
		if (lp.free == c->leb_size) {
			/*
			 * We must have snatched this LEB from the idx_gc list
			 * so we need to correct the free and dirty space.
			 */
			err = ubifs_change_one_lp(c, lnum,
						  c->leb_size - c->ileb_len,
						  dirt, 0, 0, 0);
			if (err)
				return err;
		}
		return 0;
	}
	err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt,
				  0, 0, 0);
	if (err) {
#ifdef CONFIG_UBIFS_SHARE_BUFFER
		mutex_unlock(&ubifs_sbuf_mutex);
#endif
		return err;
	}
	/* Atomically replace the LEB contents with the filled buffer */
	err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len);
#ifdef CONFIG_UBIFS_SHARE_BUFFER
	mutex_unlock(&ubifs_sbuf_mutex);
#endif
	if (err)
		return err;
	dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
	return tot_written;
}
363
364/**
365 * get_leb_cnt - calculate the number of empty LEBs needed to commit.
366 * @c: UBIFS file-system description object
367 * @cnt: number of znodes to commit
368 *
369 * This function returns the number of empty LEBs needed to commit @cnt znodes
370 * to the current index head. The number is not exact and may be more than
371 * needed.
372 */
373static int get_leb_cnt(struct ubifs_info *c, int cnt)
374{
375 int d;
376
377 /* Assume maximum index node size (i.e. overestimate space needed) */
378 cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz;
379 if (cnt < 0)
380 cnt = 0;
381 d = c->leb_size / c->max_idx_node_sz;
382 return DIV_ROUND_UP(cnt, d);
383}
384
385/**
386 * layout_in_gaps - in-the-gaps method of committing TNC.
387 * @c: UBIFS file-system description object
388 * @cnt: number of dirty znodes to commit.
389 *
390 * This function lays out new index nodes for dirty znodes using in-the-gaps
391 * method of TNC commit.
392 *
393 * This function returns %0 on success and a negative error code on failure.
394 */
395static int layout_in_gaps(struct ubifs_info *c, int cnt)
396{
397 int err, leb_needed_cnt, written, *p;
398
399 dbg_gc("%d znodes to write", cnt);
400
401 c->gap_lebs = kmalloc(sizeof(int) * (c->lst.idx_lebs + 1), GFP_NOFS);
402 if (!c->gap_lebs)
403 return -ENOMEM;
404
405 p = c->gap_lebs;
406 do {
407 ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
408 written = layout_leb_in_gaps(c, p);
409 if (written < 0) {
410 err = written;
411 if (err != -ENOSPC) {
412 kfree(c->gap_lebs);
413 c->gap_lebs = NULL;
414 return err;
415 }
416 if (!dbg_is_chk_index(c)) {
417 /*
418 * Do not print scary warnings if the debugging
419 * option which forces in-the-gaps is enabled.
420 */
421 ubifs_warn(c, "out of space");
422 ubifs_dump_budg(c, &c->bi);
423 ubifs_dump_lprops(c);
424 }
425 /* Try to commit anyway */
426 break;
427 }
428 p++;
429 cnt -= written;
430 leb_needed_cnt = get_leb_cnt(c, cnt);
431 dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
432 leb_needed_cnt, c->ileb_cnt);
433 } while (leb_needed_cnt > c->ileb_cnt);
434
435 *p = -1;
436 return 0;
437}
438
/**
 * layout_in_empty_space - layout index nodes in empty space.
 * @c: UBIFS file-system description object
 *
 * This function lays out new index nodes for dirty znodes using empty LEBs.
 * It records the destination LEB/offset/length in each znode and in the
 * parent zbranches; 'write_index()' later performs the actual writes and
 * checks them against these positions.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_in_empty_space(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext, *zp;
	int lnum, offs, len, next_len, buf_len, buf_offs, used, avail;
	int wlen, blen, err;

	cnext = c->enext;
	if (!cnext)
		return 0;

	/* Start laying out at the current index head */
	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	/* Model the commit buffer that 'write_index()' will use */
	buf_len = ubifs_idx_node_sz(c, c->fanout);
	buf_len = ALIGN(buf_len, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size)
		lnum = -1;	/* Force a fresh index LEB below */

	while (1) {
		znode = cnext;

		len = ubifs_idx_node_sz(c, znode->child_cnt);

		/* Determine the index node position */
		if (lnum == -1) {
			if (c->ileb_nxt >= c->ileb_cnt) {
				ubifs_err(c, "out of space");
				return -ENOSPC;
			}
			/* Take the next pre-allocated empty index LEB */
			lnum = c->ilebs[c->ileb_nxt++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}

		offs = buf_offs + used;

		/* Record where this znode will be written */
		znode->lnum = lnum;
		znode->offs = offs;
		znode->len = len;

		/* Update the parent */
		zp = znode->parent;
		if (zp) {
			struct ubifs_zbranch *zbr;
			int i;

			i = znode->iip;
			zbr = &zp->zbranch[i];
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
		} else {
			c->zroot.lnum = lnum;
			c->zroot.offs = offs;
			c->zroot.len = len;
		}
		c->calc_idx_sz += ALIGN(len, 8);

		/*
		 * Once lprops is updated, we can decrease the dirty znode count
		 * but it is easier to just do it here.
		 */
		atomic_long_dec(&c->dirty_zn_cnt);

		/*
		 * Calculate the next index node length to see if there is
		 * enough room for it
		 */
		cnext = znode->cnext;
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		/* Next node fits in the current buffer - keep filling it */
		if (next_len != 0 &&
		    buf_offs + used + next_len <= c->leb_size &&
		    avail > 0)
			continue;

		if (avail <= 0 && next_len &&
		    buf_offs + used + next_len <= c->leb_size)
			blen = buf_len;
		else
			blen = ALIGN(wlen, c->min_io_size);

		/* The buffer is full or there are no more znodes to do */
		buf_offs += blen;
		if (next_len) {
			if (buf_offs + next_len > c->leb_size) {
				/* LEB full - account its free/dirty space */
				err = ubifs_update_one_lp(c, lnum,
					c->leb_size - buf_offs, blen - used,
					0, 0);
				if (err)
					return err;
				lnum = -1;
			}
			/* Carry over the unflushed tail of the buffer */
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			continue;
		}
		err = ubifs_update_one_lp(c, lnum, c->leb_size - buf_offs,
					  blen - used, 0, 0);
		if (err)
			return err;
		break;
	}

	/* Remember where the index head should end up after the commit */
	c->dbg->new_ihead_lnum = lnum;
	c->dbg->new_ihead_offs = buf_offs;

	return 0;
}
572
/**
 * layout_commit - determine positions of index nodes to commit.
 * @c: UBIFS file-system description object
 * @no_space: indicates that insufficient empty LEBs were allocated
 * @cnt: number of znodes to commit
 *
 * Calculate and update the positions of index nodes to commit. If there were
 * an insufficient number of empty LEBs allocated, then index nodes are placed
 * into the gaps created by obsolete index nodes in non-empty index LEBs. For
 * this purpose, an obsolete index node is one that was not in the index as at
 * the end of the last commit. To write "in-the-gaps" requires that those index
 * LEBs are updated atomically in-place.
 */
static int layout_commit(struct ubifs_info *c, int no_space, int cnt)
{
	if (no_space) {
		int err = layout_in_gaps(c, cnt);

		if (err)
			return err;
	}
	return layout_in_empty_space(c);
}
598
599/**
600 * find_first_dirty - find first dirty znode.
601 * @znode: znode to begin searching from
602 */
603static struct ubifs_znode *find_first_dirty(struct ubifs_znode *znode)
604{
605 int i, cont;
606
607 if (!znode)
608 return NULL;
609
610 while (1) {
611 if (znode->level == 0) {
612 if (ubifs_zn_dirty(znode))
613 return znode;
614 return NULL;
615 }
616 cont = 0;
617 for (i = 0; i < znode->child_cnt; i++) {
618 struct ubifs_zbranch *zbr = &znode->zbranch[i];
619
620 if (zbr->znode && ubifs_zn_dirty(zbr->znode)) {
621 znode = zbr->znode;
622 cont = 1;
623 break;
624 }
625 }
626 if (!cont) {
627 if (ubifs_zn_dirty(znode))
628 return znode;
629 return NULL;
630 }
631 }
632}
633
634/**
635 * find_next_dirty - find next dirty znode.
636 * @znode: znode to begin searching from
637 */
638static struct ubifs_znode *find_next_dirty(struct ubifs_znode *znode)
639{
640 int n = znode->iip + 1;
641
642 znode = znode->parent;
643 if (!znode)
644 return NULL;
645 for (; n < znode->child_cnt; n++) {
646 struct ubifs_zbranch *zbr = &znode->zbranch[n];
647
648 if (zbr->znode && ubifs_zn_dirty(zbr->znode))
649 return find_first_dirty(zbr->znode);
650 }
651 return znode;
652}
653
/**
 * get_znodes_to_commit - create list of dirty znodes to commit.
 * @c: UBIFS file-system description object
 *
 * This function links the dirty znodes into a circular commit list (via
 * @znode->cnext, headed by @c->cnext, with the last entry pointing back at
 * the head), marks each of them copy-on-write, and returns the number of
 * znodes to commit.
 */
static int get_znodes_to_commit(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext;
	int cnt = 0;

	c->cnext = find_first_dirty(c->zroot.znode);
	/* @c->enext tracks layout progress; it starts at the list head */
	znode = c->enext = c->cnext;
	if (!znode) {
		dbg_cmt("no znodes to commit");
		return 0;
	}
	cnt += 1;
	while (1) {
		ubifs_assert(!ubifs_zn_cow(znode));
		/* From now on this znode must not be modified in place */
		__set_bit(COW_ZNODE, &znode->flags);
		znode->alt = 0;
		cnext = find_next_dirty(znode);
		if (!cnext) {
			/* Close the circular list: last entry -> head */
			znode->cnext = c->cnext;
			break;
		}
		znode->cnext = cnext;
		znode = cnext;
		cnt += 1;
	}
	dbg_cmt("committing %d znodes", cnt);
	ubifs_assert(cnt == atomic_long_read(&c->dirty_zn_cnt));
	return cnt;
}
689
690/**
691 * alloc_idx_lebs - allocate empty LEBs to be used to commit.
692 * @c: UBIFS file-system description object
693 * @cnt: number of znodes to commit
694 *
695 * This function returns %-ENOSPC if it cannot allocate a sufficient number of
696 * empty LEBs. %0 is returned on success, otherwise a negative error code
697 * is returned.
698 */
699static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
700{
701 int i, leb_cnt, lnum;
702
703 c->ileb_cnt = 0;
704 c->ileb_nxt = 0;
705 leb_cnt = get_leb_cnt(c, cnt);
706 dbg_cmt("need about %d empty LEBS for TNC commit", leb_cnt);
707 if (!leb_cnt)
708 return 0;
709 c->ilebs = kmalloc(leb_cnt * sizeof(int), GFP_NOFS);
710 if (!c->ilebs)
711 return -ENOMEM;
712 for (i = 0; i < leb_cnt; i++) {
713 lnum = ubifs_find_free_leb_for_idx(c);
714 if (lnum < 0)
715 return lnum;
716 c->ilebs[c->ileb_cnt++] = lnum;
717 dbg_cmt("LEB %d", lnum);
718 }
719 if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
720 return -ENOSPC;
721 return 0;
722}
723
724/**
725 * free_unused_idx_lebs - free unused LEBs that were allocated for the commit.
726 * @c: UBIFS file-system description object
727 *
728 * It is possible that we allocate more empty LEBs for the commit than we need.
729 * This functions frees the surplus.
730 *
731 * This function returns %0 on success and a negative error code on failure.
732 */
733static int free_unused_idx_lebs(struct ubifs_info *c)
734{
735 int i, err = 0, lnum, er;
736
737 for (i = c->ileb_nxt; i < c->ileb_cnt; i++) {
738 lnum = c->ilebs[i];
739 dbg_cmt("LEB %d", lnum);
740 er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
741 LPROPS_INDEX | LPROPS_TAKEN, 0);
742 if (!err)
743 err = er;
744 }
745 return err;
746}
747
748/**
749 * free_idx_lebs - free unused LEBs after commit end.
750 * @c: UBIFS file-system description object
751 *
752 * This function returns %0 on success and a negative error code on failure.
753 */
754static int free_idx_lebs(struct ubifs_info *c)
755{
756 int err;
757
758 err = free_unused_idx_lebs(c);
759 kfree(c->ilebs);
760 c->ilebs = NULL;
761 return err;
762}
763
/**
 * ubifs_tnc_start_commit - start TNC commit.
 * @c: UBIFS file-system description object
 * @zroot: new index root position is returned here
 *
 * This function prepares the list of indexing nodes to commit and lays out
 * their positions on flash. If there is not enough free space it uses the
 * in-gap commit method. Returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int err = 0, cnt;

	mutex_lock(&c->tnc_mutex);
	err = dbg_check_tnc(c, 1);
	if (err)
		goto out;
	cnt = get_znodes_to_commit(c);
	if (cnt != 0) {
		int no_space = 0;

		/* Try to get enough empty LEBs for an in-empty-space commit */
		err = alloc_idx_lebs(c, cnt);
		if (err == -ENOSPC)
			no_space = 1;	/* Fall back to in-the-gaps method */
		else if (err)
			goto out_free;
		err = layout_commit(c, no_space, cnt);
		if (err)
			goto out_free;
		/* Layout must have consumed every dirty znode */
		ubifs_assert(atomic_long_read(&c->dirty_zn_cnt) == 0);
		err = free_unused_idx_lebs(c);
		if (err)
			goto out;
	}
	destroy_old_idx(c);
	/* Report the new index root position to the caller */
	memcpy(zroot, &c->zroot, sizeof(struct ubifs_zbranch));

	err = ubifs_save_dirty_idx_lnums(c);
	if (err)
		goto out;

	spin_lock(&c->space_lock);
	/*
	 * Although we have not finished committing yet, update size of the
	 * committed index ('c->bi.old_idx_sz') and zero out the index growth
	 * budget. It is OK to do this now, because we've reserved all the
	 * space which is needed to commit the index, and it is safe for the
	 * budgeting subsystem to assume the index is already committed,
	 * even though it is not.
	 */
	ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
	c->bi.old_idx_sz = c->calc_idx_sz;
	c->bi.uncommitted_idx = 0;
	c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
	spin_unlock(&c->space_lock);
	mutex_unlock(&c->tnc_mutex);

	dbg_cmt("number of index LEBs %d", c->lst.idx_lebs);
	dbg_cmt("size of index %llu", c->calc_idx_sz);
	return err;

out_free:
	free_idx_lebs(c);
out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
832
/**
 * write_index - write index nodes.
 * @c: UBIFS file-system description object
 *
 * This function writes the index nodes whose positions were laid out in the
 * layout_in_empty_space function. It must reproduce exactly the same
 * LEB/offset/length for every znode as the layout pass did, otherwise the
 * commit is inconsistent and %-EINVAL is returned. Returns %0 on success and
 * a negative error code on failure.
 */
static int write_index(struct ubifs_info *c)
{
	struct ubifs_idx_node *idx;
	struct ubifs_znode *znode, *cnext;
	int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
	int avail, wlen, err, lnum_pos = 0, blen, nxt_offs;

	cnext = c->enext;
	if (!cnext)
		return 0;

	/*
	 * Always write index nodes to the index head so that index nodes and
	 * other types of nodes are never mixed in the same erase block.
	 */
	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	/* Allocate commit buffer */
	buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size) {
		/* Release the old index head and move to a fresh LEB */
		err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0,
					  LPROPS_TAKEN);
		if (err)
			return err;
		lnum = -1;
	}

	while (1) {
		cond_resched();

		znode = cnext;
		idx = c->cbuf + used;

		/* Make index node */
		idx->ch.node_type = UBIFS_IDX_NODE;
		idx->child_cnt = cpu_to_le16(znode->child_cnt);
		idx->level = cpu_to_le16(znode->level);
		for (i = 0; i < znode->child_cnt; i++) {
			struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
			struct ubifs_zbranch *zbr = &znode->zbranch[i];

			key_write_idx(c, &zbr->key, &br->key);
			br->lnum = cpu_to_le32(zbr->lnum);
			br->offs = cpu_to_le32(zbr->offs);
			br->len = cpu_to_le32(zbr->len);
			if (!zbr->lnum || !zbr->len) {
				ubifs_err(c, "bad ref in znode");
				ubifs_dump_znode(c, znode);
				if (zbr->znode)
					ubifs_dump_znode(c, zbr->znode);

				return -EINVAL;
			}
		}
		len = ubifs_idx_node_sz(c, znode->child_cnt);
		ubifs_prepare_node(c, idx, len, 0);

		/* Determine the index node position */
		if (lnum == -1) {
			lnum = c->ilebs[lnum_pos++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}
		offs = buf_offs + used;

		/* Must match the position chosen by 'layout_in_empty_space()' */
		if (lnum != znode->lnum || offs != znode->offs ||
		    len != znode->len) {
			ubifs_err(c, "inconsistent znode posn");
			return -EINVAL;
		}

		/* Grab some stuff from znode while we still can */
		cnext = znode->cnext;

		ubifs_assert(ubifs_zn_dirty(znode));
		ubifs_assert(ubifs_zn_cow(znode));

		/*
		 * It is important that other threads should see %DIRTY_ZNODE
		 * flag cleared before %COW_ZNODE. Specifically, it matters in
		 * the 'dirty_cow_znode()' function. This is the reason for the
		 * first barrier. Also, we want the bit changes to be seen to
		 * other threads ASAP, to avoid unnecessary copying, which is
		 * the reason for the second barrier.
		 */
		clear_bit(DIRTY_ZNODE, &znode->flags);
		smp_mb__before_atomic();
		clear_bit(COW_ZNODE, &znode->flags);
		smp_mb__after_atomic();

		/*
		 * We have marked the znode as clean but have not updated the
		 * @c->clean_zn_cnt counter. If this znode becomes dirty again
		 * before 'free_obsolete_znodes()' is called, then
		 * @c->clean_zn_cnt will be decremented before it gets
		 * incremented (resulting in 2 decrements for the same znode).
		 * This means that @c->clean_zn_cnt may become negative for a
		 * while.
		 *
		 * Q: why we cannot increment @c->clean_zn_cnt?
		 * A: because we do not have the @c->tnc_mutex locked, and the
		 *    following code would be racy and buggy:
		 *
		 *    if (!ubifs_zn_obsolete(znode)) {
		 *            atomic_long_inc(&c->clean_zn_cnt);
		 *            atomic_long_inc(&ubifs_clean_zn_cnt);
		 *    }
		 *
		 * Thus, we just delay the @c->clean_zn_cnt update until we
		 * have the mutex locked.
		 */

		/* Do not access znode from this point on */

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		/*
		 * Calculate the next index node length to see if there is
		 * enough room for it
		 */
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		nxt_offs = buf_offs + used + next_len;
		if (next_len && nxt_offs <= c->leb_size) {
			if (avail > 0)
				continue;	/* Keep filling the buffer */
			else
				blen = buf_len;
		} else {
			/* Last write to this LEB: pad out to min. I/O size */
			wlen = ALIGN(wlen, 8);
			blen = ALIGN(wlen, c->min_io_size);
			ubifs_pad(c, c->cbuf + wlen, blen - wlen);
		}

		/* The buffer is full or there are no more znodes to do */
		err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
		if (err)
			return err;
		buf_offs += blen;
		if (next_len) {
			if (nxt_offs > c->leb_size) {
				/* Release this LEB and move to a fresh one */
				err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0,
							  0, LPROPS_TAKEN);
				if (err)
					return err;
				lnum = -1;
			}
			/* Carry over the unflushed tail of the buffer */
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			memmove(c->cbuf, c->cbuf + blen, used);
			continue;
		}
		break;
	}

	/* The final position must match what the layout pass predicted */
	if (lnum != c->dbg->new_ihead_lnum ||
	    buf_offs != c->dbg->new_ihead_offs) {
		ubifs_err(c, "inconsistent ihead");
		return -EINVAL;
	}

	c->ihead_lnum = lnum;
	c->ihead_offs = buf_offs;

	return 0;
}
1021
1022/**
1023 * free_obsolete_znodes - free obsolete znodes.
1024 * @c: UBIFS file-system description object
1025 *
1026 * At the end of commit end, obsolete znodes are freed.
1027 */
1028static void free_obsolete_znodes(struct ubifs_info *c)
1029{
1030 struct ubifs_znode *znode, *cnext;
1031
1032 cnext = c->cnext;
1033 do {
1034 znode = cnext;
1035 cnext = znode->cnext;
1036 if (ubifs_zn_obsolete(znode))
1037 kfree(znode);
1038 else {
1039 znode->cnext = NULL;
1040 atomic_long_inc(&c->clean_zn_cnt);
1041 atomic_long_inc(&ubifs_clean_zn_cnt);
1042 }
1043 } while (cnext != c->cnext);
1044}
1045
1046/**
1047 * return_gap_lebs - return LEBs used by the in-gap commit method.
1048 * @c: UBIFS file-system description object
1049 *
1050 * This function clears the "taken" flag for the LEBs which were used by the
1051 * "commit in-the-gaps" method.
1052 */
1053static int return_gap_lebs(struct ubifs_info *c)
1054{
1055 int *p, err;
1056
1057 if (!c->gap_lebs)
1058 return 0;
1059
1060 dbg_cmt("");
1061 for (p = c->gap_lebs; *p != -1; p++) {
1062 err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0,
1063 LPROPS_TAKEN, 0);
1064 if (err)
1065 return err;
1066 }
1067
1068 kfree(c->gap_lebs);
1069 c->gap_lebs = NULL;
1070 return 0;
1071}
1072
1073/**
1074 * ubifs_tnc_end_commit - update the TNC for commit end.
1075 * @c: UBIFS file-system description object
1076 *
1077 * Write the dirty znodes.
1078 */
1079int ubifs_tnc_end_commit(struct ubifs_info *c)
1080{
1081 int err;
1082
1083 if (!c->cnext)
1084 return 0;
1085
1086 err = return_gap_lebs(c);
1087 if (err)
1088 return err;
1089
1090 err = write_index(c);
1091 if (err)
1092 return err;
1093
1094 mutex_lock(&c->tnc_mutex);
1095
1096 dbg_cmt("TNC height is %d", c->zroot.znode->level + 1);
1097
1098 free_obsolete_znodes(c);
1099
1100 c->cnext = NULL;
1101 kfree(c->ilebs);
1102 c->ilebs = NULL;
1103
1104 mutex_unlock(&c->tnc_mutex);
1105
1106 return 0;
1107}