/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

#include <trace/events/f2fs.h>

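/*
 * Mark an inode dirty for VFS writeback, unless it is newly created
 * (FI_NEW_INODE) or has already been queued on the dirty list by
 * f2fs_inode_dirtied().
 */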
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

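/*
 * Propagate the on-disk F2FS inode flags (FS_*_FL) into the generic
 * VFS inode flags (S_*) so that core VFS code honours them.
 */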
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (f2fs_encrypted_inode(inode))
		new_fl |= S_ENCRYPTED;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED);
}

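/*
 * For special inodes (char/block/fifo/socket), decode the device
 * number stored in the first data block address slots into i_rdev:
 * the old (16-bit) encoding lives in slot 0, the new one in slot 1.
 */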
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

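/*
 * Return 0 if the inode's first data block address is valid (i.e. has
 * been written), 1 if there is no valid first block, or -EFSCORRUPTED
 * if the address falls outside the valid range.
 */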
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
		return -EFSCORRUPTED;
	return 0;
}

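/*
 * Counterpart of __get_inode_rdev(): encode i_rdev back into the raw
 * inode's block address slots when writing a device inode to disk.
 */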
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

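/*
 * Scan the inline data area; if any non-zero word is found, the inode
 * actually carries inline data, so restore FI_DATA_EXIST and make the
 * raw inode flags match before the page goes back to disk.
 */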
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

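/*
 * Inode checksums apply only when the superblock feature is enabled
 * and the inode's extra attribute area is large enough to hold the
 * i_inode_checksum field.
 */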
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;
	int extra_isize = le32_to_cpu(ri->i_extra_isize);

	if (!f2fs_sb_has_inode_chksum(sbi->sb))
		return false;

	if (!RAW_IS_INODE(F2FS_NODE(page)) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, extra_isize, i_inode_checksum))
		return false;

	return true;
}

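/*
 * Compute the inode checksum: seed it from the inode number and
 * generation, then checksum the raw inode with the on-disk
 * i_inode_checksum field treated as zero.
 */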
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

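/*
 * Verify the on-disk inode checksum. Dirty or writeback pages pass
 * unconditionally since their checksum will be recomputed at write
 * time.
 */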
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_msg(sbi->sb, KERN_WARNING,
			"checksum invalid, ino = %x, %x vs. %x",
			ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

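/*
 * Validate an inode freshly read from disk; on any inconsistency,
 * flag the filesystem for fsck and fail the read.
 */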
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
			"run fsck to fix.",
			__func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode footer i_ino=%lx, ino,nid: "
			"[%u, %u] run fsck to fix.",
			__func__, inode->i_ino,
			ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi->sb)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx) is with extra_attr, "
			"but extra_attr feature is off",
			__func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx) has corrupted i_extra_isize: %d, "
			"max: %zu",
			__func__, inode->i_ino, fi->i_extra_isize,
			F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (F2FS_I(inode)->extent_tree) {
		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
							DATA_GENERIC))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: inode (ino=%lx) extent info [%u, %u, %u] "
				"is incorrect, run fsck to fix",
				__func__, inode->i_ino,
				ei->blk, ei->fofs, ei->len);
			return false;
		}
	}
	return true;
}

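/*
 * Fill the in-core inode from its on-disk node page. Returns 0 on
 * success, -EINVAL for an out-of-range ino, or -EFSCORRUPTED if the
 * raw inode fails the sanity checks above.
 */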
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);

	fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
		f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode));
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EFSCORRUPTED;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	err = __written_first_block(sbi, ri);
	if (err < 0) {
		f2fs_put_page(node_page, 1);
		return err;
	}
	if (!err)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	if (!need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi->sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi->sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}

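/*
 * Get the in-core inode for @ino, reading it from disk on a cache
 * miss, and wire up the operations appropriate to its type. The
 * internal node/meta inodes skip do_read_inode() and only have their
 * address space operations set up.
 */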
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		inode_nohighmem(inode);
	} else if (S_ISLNK(inode->i_mode)) {
		if (f2fs_encrypted_inode(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

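/*
 * Same as f2fs_iget(), but retry after a congestion wait instead of
 * failing when memory is temporarily unavailable (-ENOMEM).
 */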
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
	return inode;
}

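/*
 * Copy the in-core inode state back into its raw node page and mark
 * the page dirty so it goes out with the next node writeback. The
 * caller passes in a locked node page.
 */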
void update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)->sb))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_inline_node(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
}

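/*
 * Fetch the inode's node page and sync the in-core inode into it,
 * retrying on -ENOMEM; any other failure (except a missing page)
 * stops checkpointing, since the inode can no longer be persisted.
 */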
void update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);
		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return;
	}
	update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

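/*
 * ->write_inode() hook: sync a dirty inode to its node page, skipping
 * the internal node/meta inodes and inodes that are not marked dirty.
 */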
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	/*
	 * We need to balance fs here to prevent producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	dquot_initialize(inode);

	remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(FAULT_EVICT_INODE);
		err = -EIO;
	}
#endif
	if (!err) {
		f2fs_lock_op(sbi);
		err = remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give it more chances in the ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err)
		update_inode_page(inode);
	dquot_free_inode(inode);
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* ino == 0, if f2fs_new_inode() failed */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach this error
		 * condition with err && !exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	clear_inode(inode);
}


/* caller should call f2fs_lock_op() */
void handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;

	/*
	 * clear nlink of the inode in order to release its resources
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid the inode remaining dirty, which would
	 * result in a panic when flushing dirty inodes in gdirty_list.
	 */
	update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make a bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so we can prevent losing this orphan when a
	 * checkpoint is followed by a sudden power-off.
	 */
	get_node_info(sbi, inode->i_ino, &ni);

	if (ni.blk_addr != NULL_ADDR) {
		int err = acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"Too many orphan inodes, run fsck to fix.");
		} else {
			add_orphan_inode(inode);
		}
		alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}