// SPDX-License-Identifier: GPL-2.0+
/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16	/* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64	/*
				 * Upper limit of the number of segments
				 * appended in collection retry loop
				 */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/*
			 * Flush data blocks of a given file and make
			 * a logical segment without a super root.
			 */
	SC_FLUSH_FILE,	/*
			 * Flush data files, leads to segment writes without
			 * creating a checkpoint.
			 */
	SC_FLUSH_DAT,	/*
			 * Flush DAT file.  This also creates segments
			 * without a checkpoint.
			 */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,	/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,	/* Super root */
	NILFS_ST_DSYNC,	/* Data sync blocks */
	NILFS_ST_DONE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/nilfs2.h>

/*
 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are
 * wrapper functions for the stage count (nilfs_sc_info->sc_stage.scnt).
 * Users of the variable must use them because transitions of the stage
 * count must involve trace events
 * (trace_nilfs2_collection_stage_transition).
 *
 * nilfs_sc_cstage_get() isn't required for the above purpose because it
 * doesn't produce tracepoint events.  It is provided just to make the
 * intention clear.
 */
static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
{
	sci->sc_stage.scnt++;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
{
	sci->sc_stage.scnt = next_scnt;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
{
	return sci->sc_stage.scnt;
}

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

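/*
 * nilfs_cnt32_ge() is a 32-bit sequence-count comparison that is safe
 * against wraparound: computing the difference in signed arithmetic
 * yields "a >= b" in the circular sense, in the same spirit as
 * time_after_eq().
 */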
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&	\
	 ((__s32)((a) - (b)) >= 0))

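/*
 * nilfs_prepare_segment_lock() sets up a transaction context on
 * current->journal_info.  It returns the incremented nest count if a
 * nilfs transaction is already running on this task, 0 when a new
 * outermost context has been installed, or -ENOMEM on allocation
 * failure.
 */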
static int nilfs_prepare_segment_lock(struct super_block *sb,
				      struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;

		/*
		 * If the journal_info field is occupied by another FS,
		 * it is saved and will be restored on
		 * nilfs_transaction_commit().
		 */
		nilfs_warn(sb, "journal info from a different FS");
		save = current->journal_info;
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make a segment construction and write tasks
 * exclusive.  The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function will check the amount
 * of free space, and will wait for the GC to reclaim disk space if capacity
 * is low.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(sb, ti);
	struct nilfs_transaction_info *trace_ti;

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0) {
		trace_ti = current->journal_info;

		trace_nilfs2_transaction_transition(sb, trace_ti,
				    trace_ti->ti_count, trace_ti->ti_flags,
				    TRACE_NILFS2_TRANSACTION_BEGIN);
		return 0;
	}

	sb_start_intwrite(sb);

	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}

	trace_ti = current->journal_info;
	trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
					    trace_ti->ti_flags,
					    TRACE_NILFS2_TRANSACTION_BEGIN);
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return ret;
}

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin().  This is only performed
 * in the outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
		return 0;
	}
	if (nilfs->ns_writer) {
		struct nilfs_sc_info *sci = nilfs->ns_writer;

		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&nilfs->ns_segctor_sem);
	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);

	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
		return;
	}
	up_read(&nilfs->ns_segctor_sem);

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
		    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
}

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct super_block *sb,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;

	for (;;) {
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

		down_write(&nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(sci);

		up_write(&nilfs->ns_segctor_sem);
		cond_resched();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
}

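/*
 * nilfs_segctor_map_segsum_entry() reserves @bytes of space in the
 * segment summary of the current segment buffer and returns a pointer
 * to the reserved area.  If the entry does not fit in the current
 * summary block, it advances to the next block, which must already
 * exist.
 */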
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned int bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned int blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned int sumbytes;
	unsigned int flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;
	sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;
	sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

/**
 * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
 * @sci: segment constructor object
 *
 * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
 * the current segment summary block.
 */
static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
{
	struct nilfs_segsum_pointer *ssp;

	ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
	if (ssp->offset < ssp->bh->b_size)
		memset(ssp->bh->b_data + ssp->offset, 0,
		       ssp->bh->b_size - ssp->offset);
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /*
				* The current segment is filled up
				* (internal code)
				*/
	nilfs_segctor_zeropad_segsum(sci);
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
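/*
 * nilfs_segctor_segsum_block_required() checks whether appending a
 * binfo entry (preceded by a finfo header when it opens a new per-file
 * section) would overflow the current segment summary block.
 */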
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned int binfo_size)
{
	unsigned int blocksize = sci->sc_super->s_blocksize;
	/* Sizes of finfo and binfo are small enough compared with blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

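/*
 * nilfs_segctor_add_file_block() registers one dirty buffer with the
 * current segment: it closes the finfo and feeds a new segment buffer
 * when the block would not fit, extends the summary area if the binfo
 * entry needs a new summary block, opens a finfo for the first block
 * of a file, and finally queues the buffer on the payload list.
 */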
static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned int binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		nilfs_segctor_zeropad_segsum(sci);
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static const struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};

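/*
 * nilfs_lookup_dirty_data_buffers() scans the page cache of @inode for
 * dirty pages in the byte range [@start, @end], attaches up to @nlimit
 * dirty buffers to @listp, and returns the number of buffers gathered.
 */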
static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages.  The
		 * range is rounded to per-page; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec);
 repeat:
	if (unlikely(index > last) ||
	    !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
				      PAGECACHE_TAG_DIRTY))
		return ndirties;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			/* Exclude pages removed from the address space */
			unlock_page(page);
			continue;
		}
		if (!page_has_buffers(page))
			create_empty_buffers(page, i_blocksize(inode), 0);
		unlock_page(page);

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh) || buffer_async_write(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	if (!btnc_inode)
		return;

	pagevec_init(&pvec);

	while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index,
				  PAGECACHE_TAG_DIRTY)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh) &&
				    !buffer_async_write(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

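/*
 * nilfs_dispose_list() detaches inodes from @head and drops the
 * references held on them, in batches of up to SC_N_INODEVEC.  Inodes
 * that are still marked dirty are requeued to ns_dirty_files instead,
 * unless @force is set, in which case the reference to the inode
 * block is released as well.
 */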
static void nilfs_dispose_list(struct the_nilfs *nilfs,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned int nv = 0;

	while (!list_empty(head)) {
		spin_lock(&nilfs->ns_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &nilfs->ns_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&nilfs->ns_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static void nilfs_iput_work_func(struct work_struct *work)
{
	struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
						 sc_iput_work);
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
}

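/*
 * nilfs_test_metadata_dirty() returns the number of metadata files
 * (ifile, cpfile, sufile, and conditionally the DAT) that currently
 * hold dirty buffers.
 */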
static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
		ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int ret = 0;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&nilfs->ns_inode_lock);
	if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&nilfs->ns_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/*
		 * The following code is duplicated with cpfile.  But, it is
		 * needed to collect the checkpoint even if it was not newly
		 * created.
		 */
		mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else if (err == -EINVAL || err == -ENOENT) {
		nilfs_error(sci->sc_super,
			    "checkpoint creation failed due to metadata corruption.");
		err = -EIO;
	}
	return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		if (err == -EINVAL || err == -ENOENT) {
			nilfs_error(sci->sc_super,
				    "checkpoint finalization failed due to metadata corruption.");
			err = -EIO;
		}
		goto failed_ibh;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sci->sc_root->ifile,
				 &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;

 failed_ibh:
	return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned int isz, srsz;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;

	lock_buffer(bh_sr);
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	isz = nilfs->ns_inode_size;
	srsz = NILFS_SR_BYTES(isz);

	raw_sr->sr_sum = 0;  /* Ensure initialization within this update */
	raw_sr->sr_bytes = cpu_to_le16(srsz);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
	set_buffer_uptodate(bh_sr);
	unlock_buffer(bh_sr);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

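/*
 * nilfs_segctor_apply_buffers() applies @collect to each buffer on
 * @listp, dropping the list references as it goes.  On a collection
 * error, or when @collect is NULL, the remaining buffers are simply
 * released.
 */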
static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_first_entry(listp, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   const struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}

static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (nilfs_sc_cstage_get(sci)) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
			goto dat_stage;
		}
		nilfs_sc_cstage_inc(sci);	/* Fall through */
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		nilfs_sc_cstage_inc(sci);	/* Fall through */
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		/* Fall through */
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		/* Fall through */
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);	/* Fall through */
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);	/* Fall through */
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);	/* Fall through */
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty.  The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}

static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/*
			 * Case 1a:  Partial segment appended into an existing
			 * segment
			 */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b:  New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}

static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeed because the segusage is dirty */
	}
}

static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeed because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeed */
	}
}

static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}

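/*
 * nilfs_segctor_collect() runs the dirty block collection stages over
 * the prepared segment buffers.  When collection overflows the buffers
 * (-E2BIG) during a full construction, it rolls back to the saved
 * stage, extends the segment buffer list (doubling the number of
 * segments added per retry, up to SC_MAX_SEGDELTA), and retries.
 */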
static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR ||
		    nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* do not happen */
			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
		}

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_zeropad_segsum(sci);
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}

static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}

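/*
 * nilfs_segctor_update_payload_blocknr() walks the payload buffers of
 * @segbuf, assigns an on-disk block number to each of them through the
 * owner's bmap, and writes the matching binfo entry into the segment
 * summary.
 */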
static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	const struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo = nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			inode = bh->b_page->mapping->host;

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	return err;
}

static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}

static void nilfs_begin_page_io(struct page *page)
{
	if (!page || PageWriteback(page))
		/*
		 * For split b-tree node pages, this function may be called
		 * twice.  We ignore the 2nd or later calls by this check.
		 */
		return;

	lock_page(page);
	clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
}

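/*
 * nilfs_segctor_prepare_write() moves every page involved in the write
 * into writeback state: segment summary (and super root) pages are
 * handled directly, while payload buffers are marked async-write and
 * their pages are passed to nilfs_begin_page_io().
 */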
static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page) {
					lock_page(bd_page);
					wait_on_page_writeback(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
				}
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					lock_page(bd_page);
					wait_on_page_writeback(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			set_buffer_async_write(bh);
			if (bh->b_page != fs_page) {
				nilfs_begin_page_io(fs_page);
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page) {
		lock_page(bd_page);
		wait_on_page_writeback(bd_page);
		clear_page_dirty_for_io(bd_page);
		set_page_writeback(bd_page);
		unlock_page(bd_page);
	}
	nilfs_begin_page_io(fs_page);
}

static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}

static void nilfs_end_page_io(struct page *page, int err)
{
	if (!page)
		return;

	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (PageDirty(page)) {
			/*
			 * For pages holding split b-tree node buffers, dirty
			 * flag on the buffers may be cleared discretely.
			 * In that case, the page is once redirtied for
			 * remaining buffers, and it must be cancelled if
			 * all the buffers get cleaned later.
			 */
			lock_page(page);
			if (nilfs_page_buffers_clean(page))
				__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		return;
	}

	if (!err) {
		if (!nilfs_page_buffers_clean(page))
			__set_page_dirty_nobuffers(page);
		ClearPageError(page);
	} else {
		__set_page_dirty_nobuffers(page);
		SetPageError(page);
	}

	end_page_writeback(page);
}

static void nilfs_abort_logs(struct list_head *logs, int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			clear_buffer_uptodate(bh);
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				clear_buffer_uptodate(bh);
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			clear_buffer_async_write(bh);
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, err);
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, err);
}

static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	if (list_empty(&logs))
		return; /* if the first segment buffer preparation failed */

	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* do not happen */
	}

	nilfs_destroy_logs(&logs);
}

static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}

static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}
		/*
		 * We assume that the buffers which belong to the same page
		 * continue over the buffer list.
		 * Under this assumption, the last BH of each page is
		 * identifiable by the discontinuity of bh->b_page
		 * (page != fs_page).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed.  The cleanup code of B-tree node pages needs
		 * special care.
		 */
1890 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1891 b_assoc_buffers) {
1892 const unsigned long set_bits = BIT(BH_Uptodate);
1893 const unsigned long clear_bits =
1894 (BIT(BH_Dirty) | BIT(BH_Async_Write) |
1895 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
1896 BIT(BH_NILFS_Redirected));
1897
1898 if (bh == segbuf->sb_super_root) {
1899 set_buffer_uptodate(bh);
1900 clear_buffer_dirty(bh);
1901 if (bh->b_page != bd_page) {
1902 end_page_writeback(bd_page);
1903 bd_page = bh->b_page;
1904 }
1905 update_sr = true;
1906 break;
1907 }
1908 set_mask_bits(&bh->b_state, clear_bits, set_bits);
1909 if (bh->b_page != fs_page) {
1910 nilfs_end_page_io(fs_page, 0);
1911 fs_page = bh->b_page;
1912 }
1913 }
1914
1915 if (!nilfs_segbuf_simplex(segbuf)) {
1916 if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
1917 set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1918 sci->sc_lseg_stime = jiffies;
1919 }
1920 if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
1921 clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1922 }
1923 }
1924 /*
1925 * Since pages may continue over multiple segment buffers,
1926 * end of the last page must be checked outside of the loop.
1927 */
1928 if (bd_page)
1929 end_page_writeback(bd_page);
1930
1931 nilfs_end_page_io(fs_page, 0);
1932
1933 nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1934
1935 if (nilfs_doing_gc())
1936 nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
1937 else
1938 nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
1939
1940 sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1941
1942 segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1943 nilfs_set_next_segment(nilfs, segbuf);
1944
1945 if (update_sr) {
1946 nilfs->ns_flushed_device = 0;
1947 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
1948 segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
1949
1950 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
1951 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1952 set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1953 nilfs_segctor_clear_metadata_dirty(sci);
1954 } else
1955 clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1956}
1957
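/**
 * nilfs_segctor_wait - wait for the I/O of written logs to complete
 * @sci: segment constructor object
 *
 * This waits on the logs queued on sc_write_logs; on success, it
 * finalizes the written logs and releases them.
 *
 * Return Value: On success, 0 is returned.  On errors, a negative error
 * code is returned.
 */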
static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
	if (!ret) {
		nilfs_segctor_complete_write(sci);
		nilfs_destroy_logs(&sci->sc_write_logs);
	}
	return ret;
}

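/**
 * nilfs_segctor_collect_dirty_files - gather dirty inodes for construction
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * This moves the inodes on the ns_dirty_files list to the constructor's
 * sc_dirty_files list, reading in the on-disk inode block of each entry
 * that does not have one attached yet.
 */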
static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	struct inode *ifile = sci->sc_root->ifile;

	spin_lock(&nilfs->ns_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&nilfs->ns_inode_lock);
			err = nilfs_ifile_get_inode_block(
				ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_warn(sci->sc_super,
					   "log writer: error %d getting inode block (ino=%lu)",
					   err, ii->vfs_inode.i_ino);
				return err;
			}
			spin_lock(&nilfs->ns_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}

		/* Always redirty the buffer to avoid race condition */
		mark_buffer_dirty(ii->i_bh);
		nilfs_mdt_mark_dirty(ifile);

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&nilfs->ns_inode_lock);

	return 0;
}

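/**
 * nilfs_segctor_drop_written_files - release inodes whose blocks were written
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * This detaches updated, no-longer-dirty inodes from the sc_dirty_files
 * list and drops their references.  iput() calls that could deadlock
 * (unlinked inodes, or calls before the mount has finished) are deferred
 * to the iput work queue.
 */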
static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
	int defer_iput = false;

	spin_lock(&nilfs->ns_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state))
			continue;

		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_del_init(&ii->i_dirty);
		if (!ii->vfs_inode.i_nlink || during_mount) {
			/*
			 * Defer calling iput() to avoid deadlocks if
			 * i_nlink == 0 or mount is not yet finished.
			 */
			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
			defer_iput = true;
		} else {
			spin_unlock(&nilfs->ns_inode_lock);
			iput(&ii->vfs_inode);
			spin_lock(&nilfs->ns_inode_lock);
		}
	}
	spin_unlock(&nilfs->ns_inode_lock);

	if (defer_iput)
		schedule_work(&sci->sc_iput_work);
}

/**
 * nilfs_segctor_do_construct - main procedure of the segment constructor
 * @sci: segment constructor object
 * @mode: construction mode (%SC_LSEG_SR, %SC_LSEG_DSYNC, %SC_FLUSH_FILE,
 *	or %SC_FLUSH_DAT)
 *
 * This collects dirty blocks, assigns them to on-disk locations, and
 * writes out the resulting logs, repeating until the collection stage
 * reaches %NILFS_ST_DONE.
 *
 * Return Value: On success, 0 is returned.  On errors, one of the
 * negative error codes is returned.
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int err;

	if (sb_rdonly(sci->sc_super))
		return -EROFS;

	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
	sci->sc_cno = nilfs->ns_cno;

	err = nilfs_segctor_collect_dirty_files(sci, nilfs);
	if (unlikely(err))
		goto out;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))
		goto out;

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);
		if (unlikely(err))
			goto failed;

		/* Update time stamp */
		sci->sc_seg_ctime = ktime_get_real_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);
		if (unlikely(err))
			goto failed;

		/* Avoid empty segment */
		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
		    nilfs_segbuf_empty(sci->sc_curseg)) {
			nilfs_segctor_abort_construction(sci, nilfs, 1);
			goto out;
		}

		err = nilfs_segctor_assign(sci, mode);
		if (unlikely(err))
			goto failed;

		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci);

		if (mode == SC_LSEG_SR &&
		    nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
			err = nilfs_segctor_fill_in_checkpoint(sci);
			if (unlikely(err))
				goto failed_to_write;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		nilfs_segctor_prepare_write(sci);

		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
					    nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs);
		if (unlikely(err))
			goto failed_to_write;

		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
		    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
			/*
			 * At this point, we avoid double buffering
			 * for blocksize < pagesize because page dirty
			 * flag is turned off during write and dirty
			 * buffers are not properly collected for
			 * pages crossing over segments.
			 */
			err = nilfs_segctor_wait(sci);
			if (err)
				goto failed_to_write;
		}
	} while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);

 out:
	nilfs_segctor_drop_written_files(sci, nilfs);
	return err;

 failed_to_write:
 failed:
	if (mode == SC_LSEG_SR && nilfs_sc_cstage_get(sci) >= NILFS_ST_IFILE)
		nilfs_redirty_inodes(&sci->sc_dirty_files);
	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_abort_construction(sci, nilfs, err);
	goto out;
}

/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: segment constructor object
 *
 * If the timer has already been set, this function ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		if (sci->sc_task) {
			sci->sc_timer.expires = jiffies + sci->sc_interval;
			add_timer(&sci->sc_timer);
		}
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}

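/**
 * nilfs_segctor_do_flush - set a flush request bit and kick the log writer
 * @sci: segment constructor object
 * @bn: flush request bit number (0 for data files, or a metadata file
 *	inode number)
 *
 * This records the flush request in sc_flush_request and wakes up the log
 * writer thread if no other flush request was already pending.
 */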
static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & BIT(bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= BIT(bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || nilfs_doing_construction())
		return;
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
					/* assign bit 0 to data files */
}

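/*
 * nilfs_segctor_wait_request - wait queue entry for one log write request.
 * Each waiter in nilfs_segctor_sync() records the sequence number of its
 * request in @seq; nilfs_segctor_wakeup() marks entries whose sequence
 * number has been reached as @done and stores the result in @err.
 */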
struct nilfs_segctor_wait_request {
	wait_queue_entry_t wq;
	__u32 seq;
	int err;
	atomic_t done;
};

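/**
 * nilfs_segctor_sync - wait for the completion of a segment construction
 * @sci: segment constructor object
 *
 * This issues a log write request with a new sequence number and waits
 * until the log writer thread reports its completion.
 *
 * Return Value: 0 on success, %-ERESTARTSYS if interrupted by a signal,
 * or the error code reported by the log writer thread.
 */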
static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;
	int err = 0;

	init_wait(&wait_req.wq);
	wait_req.err = 0;
	atomic_set(&wait_req.done, 0);
	init_waitqueue_entry(&wait_req.wq, current);

	/*
	 * To prevent a race issue where completion notifications from the
	 * log writer thread are missed, increment the request sequence count
	 * "sc_seq_request" and insert a wait queue entry using the current
	 * sequence number into the "sc_wait_request" queue at the same time
	 * within the lock section of "sc_state_lock".
	 */
	spin_lock(&sci->sc_state_lock);
	wait_req.seq = ++sci->sc_seq_request;
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	spin_unlock(&sci->sc_state_lock);

	wake_up(&sci->sc_wait_daemon);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * Synchronize only while the log writer thread is alive.
		 * Leave flushing out after the log writer thread exits to
		 * the cleanup work in nilfs_segctor_destroy().
		 */
		if (!sci->sc_task)
			break;

		if (atomic_read(&wait_req.done)) {
			err = wait_req.err;
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
	return err;
}

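/**
 * nilfs_segctor_wakeup - wake up tasks waiting for log write completion
 * @sci: segment constructor object
 * @err: error code to report to the woken tasks
 * @force: complete all waiters regardless of their sequence numbers
 *
 * This marks as done, and wakes up, every waiter whose request sequence
 * number has been reached by sc_seq_done (or every waiter if @force is
 * true).
 */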
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
		if (!atomic_read(&wrq->done) &&
		    (force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}

/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info *ti;
	int err;

	if (sb_rdonly(sb) || unlikely(!sci))
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	err = nilfs_segctor_sync(sci);
	return err;
}

/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	if (sb_rdonly(sb) || unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
	    nilfs_test_opt(nilfs, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(nilfs)) {
		nilfs_transaction_unlock(sb);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&nilfs->ns_inode_lock);
		nilfs_transaction_unlock(sb);
		return 0;
	}
	spin_unlock(&nilfs->ns_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
	if (!err)
		nilfs->ns_flushed_device = 0;

	nilfs_transaction_unlock(sb);
	return err;
}

#define FLUSH_FILE_BIT	(0x1) /* data file only */
#define FLUSH_DAT_BIT	BIT(NILFS_DAT_INO) /* DAT only */

/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	bool thread_is_alive;

	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	thread_is_alive = (bool)sci->sc_task;
	spin_unlock(&sci->sc_state_lock);

	/*
	 * This function does not race with the log writer thread's
	 * termination.  Therefore, deleting sc_timer, which should not be
	 * done after the log writer thread exits, can be done safely outside
	 * the area protected by sc_state_lock.
	 */
	if (thread_is_alive)
		del_timer_sync(&sci->sc_timer);
}

/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err, false);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && sci->sc_task &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct nilfs_super_block **sbp;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))
		mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (likely(!err)) {
		if (mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			err = -EIO;
			sbp = nilfs_prepare_super(sci->sc_super,
						  nilfs_sb_will_flip(nilfs));
			if (likely(sbp)) {
				nilfs_set_log_cursor(sbp[0], nilfs);
				err = nilfs_commit_super(sci->sc_super,
							 NILFS_SB_COMMIT);
			}
			up_write(&nilfs->ns_sem);
		}
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}

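/**
 * nilfs_construction_timeout - timer callback for background construction
 * @t: timer embedded in the segment constructor object
 *
 * This wakes up the log writer thread so that it closes the current
 * logical segment when the construction interval has elapsed.
 */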
static void nilfs_construction_timeout(struct timer_list *t)
{
	struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);

	wake_up_process(sci->sc_timer_task);
}

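/**
 * nilfs_remove_written_gcinodes - release GC inodes whose blocks were written
 * @nilfs: nilfs object
 * @head: list of GC inodes to scan
 *
 * This removes every updated GC inode from @head, truncates its page and
 * B-tree node caches, and drops its reference.
 */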
static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		list_del_init(&ii->i_dirty);
		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
		nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
		iput(&ii->vfs_inode);
	}
}

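/**
 * nilfs_clean_segments - write out logs for GC and free selected segments
 * @sb: super block
 * @argv: argument vectors passed in from the segment-cleaning ioctl
 * @kbufs: array of kernel buffers holding the GC data; kbufs[4] lists the
 *	segment numbers to be freed
 *
 * This runs a full segment construction that relocates the live blocks
 * passed in @kbufs and frees the specified segments, retrying after a
 * delay on failure.  The DAT is snapshotted to its shadow map first so
 * that a failed preparation can be rolled back.
 *
 * Return Value: On success, 0 is returned.  On errors, a negative error
 * code is returned.
 */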
int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
			 void **kbufs)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info ti;
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 1);

	err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
	if (unlikely(err))
		goto out_unlock;

	err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
	if (unlikely(err)) {
		nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
		goto out_unlock;
	}

	sci->sc_freesegs = kbufs[4];
	sci->sc_nfreesegs = argv[4].v_nmembs;
	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

	for (;;) {
		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

		if (likely(!err))
			break;

		nilfs_warn(sb, "error %d cleaning segments", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}
	if (nilfs_test_opt(nilfs, DISCARD)) {
		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
						 sci->sc_nfreesegs);
		if (ret) {
			nilfs_warn(sb,
				   "error %d on discard request, turning discards off for the device",
				   ret);
			nilfs_clear_opt(nilfs, DISCARD);
		}
	}

 out_unlock:
	sci->sc_freesegs = NULL;
	sci->sc_nfreesegs = 0;
	nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
	nilfs_transaction_unlock(sb);
	return err;
}

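/**
 * nilfs_segctor_thread_construct - construct logs from the log writer thread
 * @sci: segment constructor object
 * @mode: mode of log forming
 *
 * This wraps nilfs_segctor_construct() in a transaction lock and restarts
 * the construction timer if the resulting logical segment is left open.
 */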
static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_transaction_info ti;

	nilfs_transaction_lock(sci->sc_super, &ti, 0);
	nilfs_segctor_construct(sci, mode);

	/*
	 * Unclosed segment should be retried.  We do this using sc_timer.
	 * Timeout of sc_timer will invoke complete construction which leads
	 * to close the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sci->sc_super);
}

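/**
 * nilfs_segctor_do_immediate_flush - execute an urgent flush request
 * @sci: segment constructor object
 *
 * This performs the pending flush construction (a DAT flush takes
 * precedence over a data file flush), clears the corresponding flush
 * request bit, and resets the prior-flush flag.
 */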
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode = 0;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}

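/**
 * nilfs_segctor_flush_mode - decide the construction mode for a flush
 * @sci: segment constructor object
 *
 * Return Value: %SC_FLUSH_FILE or %SC_FLUSH_DAT if only the corresponding
 * flush requests are pending and the checkpoint creation frequency has
 * not been exceeded; otherwise %SC_LSEG_SR to make a full logical
 * segment with a super root.
 */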
static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}

/**
 * nilfs_segctor_thread - main loop of the segment constructor thread.
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() initializes a timer and serves as a daemon
 * to execute segment constructions.
 */
static int nilfs_segctor_thread(void *arg)
{
	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int timeout = 0;

	sci->sc_timer_task = current;
	timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);

	/* start sync. */
	sci->sc_task = current;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
	nilfs_info(sci->sc_super,
		   "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
		   sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

	spin_lock(&sci->sc_state_lock);
 loop:
	for (;;) {
		int mode;

		if (sci->sc_state & NILFS_SEGCTOR_QUIT)
			goto end_thread;

		if (timeout || sci->sc_seq_request != sci->sc_seq_done)
			mode = SC_LSEG_SR;
		else if (sci->sc_flush_request)
			mode = nilfs_segctor_flush_mode(sci);
		else
			break;

		spin_unlock(&sci->sc_state_lock);
		nilfs_segctor_thread_construct(sci, mode);
		spin_lock(&sci->sc_state_lock);
		timeout = 0;
	}

	if (freezing(current)) {
		spin_unlock(&sci->sc_state_lock);
		try_to_freeze();
		spin_lock(&sci->sc_state_lock);
	} else {
		DEFINE_WAIT(wait);
		int should_sleep = 1;

		prepare_to_wait(&sci->sc_wait_daemon, &wait,
				TASK_INTERRUPTIBLE);

		if (sci->sc_seq_request != sci->sc_seq_done)
			should_sleep = 0;
		else if (sci->sc_flush_request)
			should_sleep = 0;
		else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
			should_sleep = time_before(jiffies,
						   sci->sc_timer.expires);

		if (should_sleep) {
			spin_unlock(&sci->sc_state_lock);
			schedule();
			spin_lock(&sci->sc_state_lock);
		}
		finish_wait(&sci->sc_wait_daemon, &wait);
		timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
			   time_after_eq(jiffies, sci->sc_timer.expires));

		if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
			set_nilfs_discontinued(nilfs);
	}
	goto loop;

 end_thread:
	/* end sync. */
	sci->sc_task = NULL;
	del_timer_sync(&sci->sc_timer);
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
	spin_unlock(&sci->sc_state_lock);
	return 0;
}

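/**
 * nilfs_segctor_start_thread - create and start the log writer thread
 * @sci: segment constructor object
 *
 * Return Value: On success, 0 is returned.  On errors, the negative
 * error code from kthread_run() is returned.
 */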
static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
{
	struct task_struct *t;

	t = kthread_run(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		int err = PTR_ERR(t);

		nilfs_err(sci->sc_super, "error %d creating segctord thread",
			  err);
		return err;
	}
	wait_event(sci->sc_wait_task, sci->sc_task != NULL);
	return 0;
}

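/**
 * nilfs_segctor_kill_thread - request the log writer thread to terminate
 * @sci: segment constructor object
 *
 * This sets the %NILFS_SEGCTOR_QUIT flag and waits, temporarily dropping
 * sc_state_lock, until the thread has exited.
 */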
static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
	__acquires(&sci->sc_state_lock)
	__releases(&sci->sc_state_lock)
{
	sci->sc_state |= NILFS_SEGCTOR_QUIT;

	while (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		spin_unlock(&sci->sc_state_lock);
		wait_event(sci->sc_wait_task, sci->sc_task == NULL);
		spin_lock(&sci->sc_state_lock);
	}
}

/*
 * Setup & clean-up functions
 */
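/**
 * nilfs_segctor_new - allocate and initialize a segment constructor object
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * Return Value: On success, a pointer to the new object is returned.
 * On memory allocation failure, NULL is returned.
 */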
static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
					       struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_super = sb;

	nilfs_get_root(root);
	sci->sc_root = root;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	init_waitqueue_head(&sci->sc_wait_task);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_write_logs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_iput_queue);
	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (nilfs->ns_interval)
		sci->sc_interval = HZ * nilfs->ns_interval;
	if (nilfs->ns_watermark)
		sci->sc_watermark = nilfs->ns_watermark;
	return sci;
}

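/**
 * nilfs_segctor_write_out - flush remaining data before destruction
 * @sci: segment constructor object
 *
 * This repeatedly runs a full segment construction, retrying a limited
 * number of times on failure, to write out the work left after the
 * segctord thread was stopped.
 */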
static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/*
	 * The segctord thread was stopped and its timer was removed.
	 * But some tasks remain.
	 */
	do {
		struct nilfs_transaction_info ti;

		nilfs_transaction_lock(sci->sc_super, &ti, 0);
		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_transaction_unlock(sci->sc_super);

		flush_work(&sci->sc_iput_work);

	} while (ret && ret != -EROFS && retrycount-- > 0);
}

/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: segment constructor object
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int flag;

	up_write(&nilfs->ns_segctor_sem);

	spin_lock(&sci->sc_state_lock);
	nilfs_segctor_kill_thread(sci);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	/*
	 * Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
	 * be called from delayed iput() via nilfs_evict_inode() and can race
	 * with the above log writer thread termination.
	 */
	nilfs_segctor_wakeup(sci, 0, true);

	if (flush_work(&sci->sc_iput_work))
		flag = true;

	if (flag || !nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_warn(sci->sc_super,
			   "disposed unprocessed dirty file(s) when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
	}

	if (!list_empty(&sci->sc_iput_queue)) {
		nilfs_warn(sci->sc_super,
			   "disposed unprocessed inode(s) in iput queue when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
	}

	WARN_ON(!list_empty(&sci->sc_segbufs));
	WARN_ON(!list_empty(&sci->sc_write_logs));

	nilfs_put_root(sci->sc_root);

	down_write(&nilfs->ns_segctor_sem);

	kfree(sci);
}

/**
 * nilfs_attach_log_writer - attach log writer
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * This allocates a log writer object, initializes it, and starts the
 * log writer.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err;

	if (nilfs->ns_writer) {
		/*
		 * This happens if the filesystem is made read-only by
		 * __nilfs_error or nilfs_remount and then remounted
		 * read/write.  In these cases, reuse the existing writer.
		 */
		return 0;
	}

	nilfs->ns_writer = nilfs_segctor_new(sb, root);
	if (!nilfs->ns_writer)
		return -ENOMEM;

	inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);

	err = nilfs_segctor_start_thread(nilfs->ns_writer);
	if (unlikely(err))
		nilfs_detach_log_writer(sb);

	return err;
}

/**
 * nilfs_detach_log_writer - destroy log writer
 * @sb: super block instance
 *
 * This kills the log writer daemon, frees the log writer object, and
 * destroys the list of dirty files.
 */
void nilfs_detach_log_writer(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (nilfs->ns_writer) {
		nilfs_segctor_destroy(nilfs->ns_writer);
		nilfs->ns_writer = NULL;
	}
	set_nilfs_purging(nilfs);

	/* Force to free the list of dirty files */
	spin_lock(&nilfs->ns_inode_lock);
	if (!list_empty(&nilfs->ns_dirty_files)) {
		list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
		nilfs_warn(sb,
			   "disposed unprocessed dirty file(s) when detaching log writer");
	}
	spin_unlock(&nilfs->ns_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(nilfs, &garbage_list, 1);
	clear_nilfs_purging(nilfs);
}