/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

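/*
 * Background GC worker thread. It sleeps for an adaptive interval
 * between passes: the interval shrinks toward min_sleep_time while
 * invalid blocks keep accumulating, grows toward max_sleep_time when
 * the device is busy or recently cleaned, and falls back to
 * no_gc_sleep_time when no victim could be selected. Setting gc_wake
 * (e.g. via the sysfs knobs) wakes the thread up immediately.
 */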
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}
#endif

		if (!sb_start_write_trylock(sbi->sb))
			continue;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments can be invalidated soon afterwards
		 * by user updates or deletions. So we wait a while to let
		 * more dirty segments accumulate.
		 */
		if (gc_th->gc_urgent) {
			wait_ms = gc_th->urgent_sleep_time;
			mutex_lock(&sbi->gc_mutex);
			goto do_gc;
		}

		if (!mutex_trylock(&sbi->gc_mutex))
			goto next;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;
	gc_th->gc_urgent = 0;
	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

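/*
 * Choose the victim selection algorithm: background GC defaults to
 * cost-benefit (GC_CB) and foreground GC to greedy (GC_GREEDY). The
 * gc_idle knob overrides the default (1 forces GC_CB, 2 forces
 * GC_GREEDY), and urgent mode always picks greedy selection.
 */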
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (!gc_th)
		return gc_mode;

	if (gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	if (gc_th->gc_urgent)
		gc_mode = GC_GREEDY;
	return gc_mode;
}

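/*
 * Build the victim selection policy. SSR reclaims space inside a
 * single dirty segment of the requested type, so it scans the
 * per-type dirty bitmap with a one-segment unit; LFS-style GC
 * migrates whole sections, so it scans the global dirty bitmap with
 * ofs_unit set to segs_per_sec.
 */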
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC &&
			(sbi->gc_thread && !sbi->gc_thread->gc_urgent) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

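/*
 * Upper bound used to initialize p->min_cost. For GC_GREEDY the cost
 * is a valid-block count, so twice the blocks in one search unit is a
 * safe "worse than anything" start value; for GC_CB the cost is scaled
 * against UINT_MAX, so UINT_MAX itself is the natural bound.
 */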
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select the victim sections
	 * that were chosen by background GC before.
	 * Those sections are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;

		if (no_fggc_candidate(sbi, secno))
			continue;

		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

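/*
 * Cost-benefit victim selection, in the spirit of the classic LFS
 * cleaning policy: a section is attractive when it is old (its data
 * has stopped changing) and mostly invalid (cheap to migrate). With
 * utilization u and age both normalized to [0, 100], the code below
 * computes
 *
 *	cost = UINT_MAX - (100 * (100 - u) * age) / (100 + u)
 *
 * so a smaller cost means a better victim. Illustration (assumed
 * numbers): u = 20, age = 80 subtracts 100*80*80/120 from UINT_MAX
 * and therefore wins over a young, full section with u = 80, age = 10.
 */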
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has the minimum number of valid blocks and removes it from
 * the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

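	/*
	 * Scan the dirty bitmap starting at p.offset, which usually
	 * resumes just past the previous victim. If the scan reaches
	 * the end of the main area, it wraps around once, rescanning
	 * from segment 0 up to the old last_victim position.
	 */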
	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;
		if (gc_type == FG_GC && p.alloc_mode == LFS &&
					no_fggc_candidate(sbi, secno))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

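/*
 * gc_node_segment() walks the victim segment in three phases: phase 0
 * readaheads the NAT blocks covering the live nids, phase 1 readaheads
 * the node pages themselves, and phase 2 re-validates each block and
 * migrates it via move_node_page(). Batching the metadata readahead
 * ahead of the actual moves keeps the I/O mostly sequential.
 */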
/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If they match, the node is still valid and is
 * migrated with cold status; otherwise the stale node block is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}

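/*
 * Illustration for start_bidx_of_node() below (assuming the usual
 * NIDS_PER_BLOCK = 1018 for 4KB blocks): node_ofs 1 and 2 are the two
 * direct node blocks, giving bidx 0 and 1; node_ofs 4, the first
 * direct child of the first indirect node (node_ofs 3), gives dec = 0
 * and bidx = 2. Each indirect or double indirect node passed in the
 * walk adds one to dec, since such nodes hold nids, not data addresses.
 */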
/*
 * Calculate the start block index of the data area covered by the given
 * node offset. Be careful: the caller must pass a node offset that
 * refers to a direct node block only. Passing an offset that points to
 * another node type, such as an indirect or double indirect node block,
 * is a caller bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

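/*
 * For inodes that need post-read processing (e.g. encrypted files),
 * GC cannot rewrite pages through the normal data path without
 * transforming their contents. move_data_block() below instead reads
 * the raw block into a page of the meta inode's address space, writes
 * it unchanged to a newly allocated block address, and then fixes up
 * the dnode, rolling the allocation back on failure.
 */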
/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static void move_data_block(struct inode *inode, block_t bidx,
					unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
					newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	err = f2fs_submit_page_write(&fio);
	if (err) {
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

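/*
 * Plain (non-post-read) data pages take the regular write path:
 * background GC only marks the page dirty and cold and lets normal
 * writeback migrate it later, while foreground GC issues a
 * synchronous write itself, retrying on -ENOMEM.
 */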
static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
							unsigned int segno, int off)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
}

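/*
 * Like gc_node_segment(), gc_data_segment() makes several passes over
 * the victim: phases 0-2 readahead the NAT blocks, the owning node
 * pages, and the owning inodes' node pages; phase 3 looks up each
 * inode and readaheads its data page; phase 4 takes the inode from
 * gc_list and actually moves the block. The expensive moves thus run
 * only after the needed metadata has been read in batches.
 */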
/*
 * This function tries to get the parent node of a victim data block, and
 * checks whether the data block is still valid. If the block is valid,
 * it is copied with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* get an inode by ino, checking its validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if inode uses a special I/O path, defer it to phase 4 */
			if (f2fs_post_read_required(inode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			if (!down_write_trylock(
				&F2FS_I(inode)->dio_rwsem[WRITE])) {
				iput(inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all in-flight AIO data */
				inode_dio_wait(inode);
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				move_data_block(inode, start_bidx, segno, off);
			else
				move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}

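/*
 * GC works on one section at a time: the loop below visits the
 * segs_per_sec segments of the victim section, keeping references on
 * their SSA summary pages so each block's owner can be looked up
 * cheaply, and returns the number of segments that ended up with zero
 * valid blocks (counted for FG_GC only).
 */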
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks, which have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
				"type [%d, %d] in SSA and SIT",
				segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			goto next;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

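/*
 * Common entry point for background and foreground GC. A sync caller
 * always runs FG_GC and gets -EAGAIN if no section was freed; other
 * callers start with BG_GC and, when free sections run short, loop
 * back to gc_more with FG_GC, possibly after a checkpoint that turns
 * prefree segments into free ones. Called with sbi->gc_mutex held;
 * the lock is dropped here before returning.
 */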
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below a
		 * given threshold, we can make them free by checkpoint.
		 * Then, we secure free segments which don't need FG_GC
		 * any more.
		 */
		if (prefree_segments(sbi)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in the critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

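/*
 * fggc_threshold below caps how many valid blocks a section may hold
 * while still being an FG_GC candidate (see no_fggc_candidate()):
 *
 *	threshold = BLKS_PER_SEC * (main - ovp) / (main - resv)
 *
 * i.e. the valid-block count of an average section once the
 * overprovisioned space is excluded, with all quantities in blocks.
 */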
void build_gc_manager(struct f2fs_sb_info *sbi)
{
	u64 main_count, resv_count, ovp_count;

	DIRTY_I(sbi)->v_ops = &default_v_ops;

	/* threshold of # of valid blocks in a section for victims of FG_GC */
	main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
	resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;

	sbi->fggc_threshold = div64_u64((main_count - ovp_count) *
				BLKS_PER_SEC(sbi), (main_count - resv_count));
	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give the warm/cold data area from the slower device */
	if (f2fs_is_multi_device(sbi) && sbi->segs_per_sec == 1)
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}
1136}