/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, SEEK_END, SEEK_DATA
 *          or SEEK_HOLE)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
        struct gfs2_holder i_gh;
        loff_t error;

        switch (whence) {
        case SEEK_END: /* These reference inode->i_size */
        case SEEK_DATA:
        case SEEK_HOLE:
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (!error) {
                        error = generic_file_llseek(file, offset, whence);
                        gfs2_glock_dq_uninit(&i_gh);
                }
                break;
        case SEEK_CUR:
        case SEEK_SET:
                error = generic_file_llseek(file, offset, whence);
                break;
        default:
                error = -EINVAL;
        }

        return error;
}

/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
        struct inode *dir = file->f_mapping->host;
        struct gfs2_inode *dip = GFS2_I(dir);
        struct gfs2_holder d_gh;
        int error;

        error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
        if (error)
                return error;

        error = gfs2_dir_read(dir, ctx, &file->f_ra);

        gfs2_glock_dq_uninit(&d_gh);

        return error;
}

/**
 * fsflags_cvt - convert flag values using a translation table
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
        u32 res = 0;
        while (val) {
                if (val & 1)
                        res |= *table;
                table++;
                val >>= 1;
        }
        return res;
}
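
/*
 * Example: converting FS_SYNC_FL (bit 3) with the fsflags_to_gfs2 table
 * below takes four passes through the loop; the first three shift past
 * clear bits, and the fourth ORs in table[3] == GFS2_DIF_SYNC.
 */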

static const u32 fsflags_to_gfs2[32] = {
        [3] = GFS2_DIF_SYNC,
        [4] = GFS2_DIF_IMMUTABLE,
        [5] = GFS2_DIF_APPENDONLY,
        [7] = GFS2_DIF_NOATIME,
        [12] = GFS2_DIF_EXHASH,
        [14] = GFS2_DIF_INHERIT_JDATA,
        [17] = GFS2_DIF_TOPDIR,
};

static const u32 gfs2_to_fsflags[32] = {
        [gfs2fl_Sync] = FS_SYNC_FL,
        [gfs2fl_Immutable] = FS_IMMUTABLE_FL,
        [gfs2fl_AppendOnly] = FS_APPEND_FL,
        [gfs2fl_NoAtime] = FS_NOATIME_FL,
        [gfs2fl_ExHash] = FS_INDEX_FL,
        [gfs2fl_TopLevel] = FS_TOPDIR_FL,
        [gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
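
/*
 * The two tables above are near-inverses of each other: the first is
 * indexed by FS_*_FL bit position, the second by GFS2_DIF_* bit position
 * (the gfs2fl_* enum values). The jdata/inherit-jdata pair is the one
 * case that is not a straight one-to-one mapping; see gfs2_set_flags().
 */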

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
        struct inode *inode = file_inode(filp);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int error;
        u32 fsflags;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        error = gfs2_glock_nq(&gh);
        if (error)
                goto out_uninit;

        fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
        if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
                fsflags |= FS_JOURNAL_DATA_FL;
        if (put_user(fsflags, ptr))
                error = -EFAULT;

        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        unsigned int flags = inode->i_flags;

        flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
        if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
                flags |= S_NOSEC;
        if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
                flags |= S_IMMUTABLE;
        if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
                flags |= S_APPEND;
        if (ip->i_diskflags & GFS2_DIF_NOATIME)
                flags |= S_NOATIME;
        if (ip->i_diskflags & GFS2_DIF_SYNC)
                flags |= S_SYNC;
        inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|                   \
                             GFS2_DIF_IMMUTABLE|               \
                             GFS2_DIF_APPENDONLY|              \
                             GFS2_DIF_NOATIME|                 \
                             GFS2_DIF_SYNC|                    \
                             GFS2_DIF_SYSTEM|                  \
                             GFS2_DIF_TOPDIR|                  \
                             GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 * Returns: errno
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
        struct inode *inode = file_inode(filp);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct buffer_head *bh;
        struct gfs2_holder gh;
        int error;
        u32 new_flags, flags;

        error = mnt_want_write_file(filp);
        if (error)
                return error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (error)
                goto out_drop_write;

        error = -EACCES;
        if (!inode_owner_or_capable(inode))
                goto out;

        error = 0;
        flags = ip->i_diskflags;
        new_flags = (flags & ~mask) | (reqflags & mask);
        if ((new_flags ^ flags) == 0)
                goto out;

        error = -EINVAL;
        if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
                goto out;

        error = -EPERM;
        if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
                goto out;
        if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
                goto out;
        if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
            !capable(CAP_LINUX_IMMUTABLE))
                goto out;
        if (!IS_IMMUTABLE(inode)) {
                error = gfs2_permission(inode, MAY_WRITE);
                if (error)
                        goto out;
        }
        if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
                if (new_flags & GFS2_DIF_JDATA)
                        gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
                error = filemap_fdatawrite(inode->i_mapping);
                if (error)
                        goto out;
                error = filemap_fdatawait(inode->i_mapping);
                if (error)
                        goto out;
                if (new_flags & GFS2_DIF_JDATA)
                        gfs2_ordered_del_inode(ip);
        }
        error = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (error)
                goto out;
        error = gfs2_meta_inode_buffer(ip, &bh);
        if (error)
                goto out_trans_end;
        gfs2_trans_add_meta(ip->i_gl, bh);
        ip->i_diskflags = new_flags;
        gfs2_dinode_out(ip, bh->b_data);
        brelse(bh);
        gfs2_set_inode_flags(inode);
        gfs2_set_aops(inode);
out_trans_end:
        gfs2_trans_end(sdp);
out:
        gfs2_glock_dq_uninit(&gh);
out_drop_write:
        mnt_drop_write_file(filp);
        return error;
}

static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
        struct inode *inode = file_inode(filp);
        u32 fsflags, gfsflags;

        if (get_user(fsflags, ptr))
                return -EFAULT;

        gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
        if (!S_ISDIR(inode->i_mode)) {
                gfsflags &= ~GFS2_DIF_TOPDIR;
                /* On a regular file, an inherit-jdata request means
                   jdata on the file itself */
                if (gfsflags & GFS2_DIF_INHERIT_JDATA)
                        gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
                return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_SYSTEM);
        }
        return do_gfs2_set_flags(filp, gfsflags, ~(GFS2_DIF_SYSTEM | GFS2_DIF_JDATA));
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case FS_IOC_GETFLAGS:
                return gfs2_get_flags(filp, (u32 __user *)arg);
        case FS_IOC_SETFLAGS:
                return gfs2_set_flags(filp, (u32 __user *)arg);
        case FITRIM:
                return gfs2_fitrim(filp, (void __user *)arg);
        }
        return -ENOTTY;
}
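
/*
 * For reference, userspace typically reaches these handlers through the
 * generic attribute ioctls (as used by chattr/lsattr), along the lines
 * of the following sketch (error handling omitted); the flag word is
 * handled as a 32-bit value here:
 *
 *      unsigned int flags;
 *
 *      ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *      flags |= FS_NOATIME_FL;
 *      ioctl(fd, FS_IOC_SETFLAGS, &flags);
 */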

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
        struct inode *inode = file_inode(filep);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *ip = GFS2_I(inode);
        size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
        int hint = min_t(size_t, INT_MAX, blks);

        /* The hint only ever grows for a given inode */
        if (hint > atomic_read(&ip->i_res.rs_sizehint))
                atomic_set(&ip->i_res.rs_sizehint, hint);
}

/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct buffer_head bh;
        unsigned long size = PAGE_SIZE;
        u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);

        do {
                bh.b_state = 0;
                bh.b_size = size;
                gfs2_block_map(inode, lblock, &bh, 1);
                if (!buffer_mapped(&bh))
                        return -EIO;
                size -= bh.b_size;
                lblock += (bh.b_size >> inode->i_blkbits);
        } while (size > 0);
        return 0;
}
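
/*
 * Note that gfs2_block_map() above may map fewer blocks than asked for,
 * in which case it trims bh.b_size to what was actually mapped; the loop
 * then advances lblock and retries for the remainder of the page.
 */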

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_alloc_parms ap = { .aflags = 0, };
        unsigned long last_index;
        u64 pos = page->index << PAGE_SHIFT;
        unsigned int data_blocks, ind_blocks, rblocks;
        struct gfs2_holder gh;
        loff_t size;
        int ret;

        sb_start_pagefault(inode->i_sb);

        ret = gfs2_rsqa_alloc(ip);
        if (ret)
                goto out;

        gfs2_size_hint(vmf->vma->vm_file, pos, PAGE_SIZE);

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret)
                goto out_uninit;

        /* Update file times before taking page lock */
        file_update_time(vmf->vma->vm_file);

        set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
        set_bit(GIF_SW_PAGED, &ip->i_flags);

        if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
                lock_page(page);
                if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
                        ret = -EAGAIN;
                        unlock_page(page);
                }
                goto out_unlock;
        }

        ret = gfs2_rindex_update(sdp);
        if (ret)
                goto out_unlock;

        gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
        ap.target = data_blocks + ind_blocks;
        ret = gfs2_quota_lock_check(ip, &ap);
        if (ret)
                goto out_unlock;
        ret = gfs2_inplace_reserve(ip, &ap);
        if (ret)
                goto out_quota_unlock;

        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks) {
                rblocks += RES_STATFS + RES_QUOTA;
                rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
        }
        ret = gfs2_trans_begin(sdp, rblocks, 0);
        if (ret)
                goto out_trans_fail;

        lock_page(page);
        ret = -EINVAL;
        size = i_size_read(inode);
        last_index = (size - 1) >> PAGE_SHIFT;
        /* Check page index against inode size */
        if (size == 0 || (page->index > last_index))
                goto out_trans_end;

        ret = -EAGAIN;
        /* If truncated, we must retry the operation, as we may have
         * raced with the glock demotion code.
         */
        if (!PageUptodate(page) || page->mapping != inode->i_mapping)
                goto out_trans_end;

        /* Unstuff, if required, and allocate backing blocks for page */
        ret = 0;
        if (gfs2_is_stuffed(ip))
                ret = gfs2_unstuff_dinode(ip, page);
        if (ret == 0)
                ret = gfs2_allocate_page_backing(page);

out_trans_end:
        if (ret)
                unlock_page(page);
        gfs2_trans_end(sdp);
out_trans_fail:
        gfs2_inplace_release(ip);
out_quota_unlock:
        gfs2_quota_unlock(ip);
out_unlock:
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        if (ret == 0) {
                set_page_dirty(page);
                wait_for_stable_page(page);
        }
out:
        sb_end_pagefault(inode->i_sb);
        return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct gfs2_vm_ops = {
        .fault = filemap_fault,
        .map_pages = filemap_map_pages,
        .page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap - set up a memory mapping of a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

        if (!(file->f_flags & O_NOATIME) &&
            !IS_NOATIME(&ip->i_inode)) {
                struct gfs2_holder i_gh;
                int error;

                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (error)
                        return error;
                /* grab lock to update inode */
                gfs2_glock_dq_uninit(&i_gh);
                file_accessed(file);
        }
        vma->vm_ops = &gfs2_vm_ops;

        return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called with or without a glock held, depending upon the
 * caller. We must always be called under a glock for regular files,
 * however. For other file types, it does not matter whether we hold
 * the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
        struct gfs2_file *fp;
        int ret;

        if (S_ISREG(inode->i_mode)) {
                ret = generic_file_open(inode, file);
                if (ret)
                        return ret;
        }

        fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
        if (!fp)
                return -ENOMEM;

        mutex_init(&fp->f_fl_mutex);

        gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
        file->private_data = fp;
        return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size up to date for the large
 * file check in the common code. That is only an issue for regular
 * files.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder i_gh;
        int error;
        bool need_unlock = false;

        if (S_ISREG(ip->i_inode.i_mode)) {
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (error)
                        return error;
                need_unlock = true;
        }

        error = gfs2_open_common(inode, file);

        if (need_unlock)
                gfs2_glock_dq_uninit(&i_gh);

        return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        kfree(file->private_data);
        file->private_data = NULL;

        if (!(file->f_mode & FMODE_WRITE))
                return 0;

        gfs2_rsqa_delete(ip, &inode->i_writecount);
        return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails for any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
                      int datasync)
{
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        int sync_state = inode->i_state & I_DIRTY_ALL;
        struct gfs2_inode *ip = GFS2_I(inode);
        int ret = 0, ret1 = 0;

        if (mapping->nrpages) {
                ret1 = filemap_fdatawrite_range(mapping, start, end);
                if (ret1 == -EIO)
                        return ret1;
        }

        if (!gfs2_is_jdata(ip))
                sync_state &= ~I_DIRTY_PAGES;
        if (datasync)
                sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);

        if (sync_state) {
                ret = sync_inode_metadata(inode, 1);
                if (ret)
                        return ret;
                if (gfs2_is_jdata(ip))
                        ret = file_write_and_wait(file);
                if (ret)
                        return ret;
                gfs2_ail_flush(ip->i_gl, 1);
        }

        if (mapping->nrpages)
                ret = file_fdatawait_range(file, start, end);

        return ret ? ret : ret1;
}

/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can end up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct gfs2_inode *ip = GFS2_I(file_inode(file));
        int ret;

        ret = gfs2_rsqa_alloc(ip);
        if (ret)
                return ret;

        gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

        if (iocb->ki_flags & IOCB_APPEND) {
                struct gfs2_holder gh;

                ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
                if (ret)
                        return ret;
                gfs2_glock_dq_uninit(&gh);
        }

        return generic_file_write_iter(iocb, from);
}

static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
                           int mode)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct buffer_head *dibh;
        int error;
        unsigned int nr_blks;
        sector_t lblock = offset >> inode->i_blkbits;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (unlikely(error))
                return error;

        gfs2_trans_add_meta(ip->i_gl, dibh);

        if (gfs2_is_stuffed(ip)) {
                error = gfs2_unstuff_dinode(ip, NULL);
                if (unlikely(error))
                        goto out;
        }

        while (len) {
                struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
                bh_map.b_size = len;
                set_buffer_zeronew(&bh_map);

                error = gfs2_block_map(inode, lblock, &bh_map, 1);
                if (unlikely(error))
                        goto out;
                len -= bh_map.b_size;
                nr_blks = bh_map.b_size >> inode->i_blkbits;
                lblock += nr_blks;
                if (!buffer_new(&bh_map))
                        continue;
                if (unlikely(!buffer_zeronew(&bh_map))) {
                        error = -EIO;
                        goto out;
                }
        }
out:
        brelse(dibh);
        return error;
}
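
/*
 * The zeronew flag set on bh_map above asks the block mapper to zero any
 * newly allocated extents; the sanity check in the loop treats a new
 * mapping that has lost this flag as an error (-EIO), since its contents
 * can no longer be trusted to be zeroed.
 */
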
/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip: The inode in question.
 * @len: Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks: Compute and return the number of indirect blocks needed
 * @max_blocks: The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
                            unsigned int *data_blocks, unsigned int *ind_blocks,
                            unsigned int max_blocks)
{
        loff_t max = *len;
        const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

        for (tmp = max_data; tmp > sdp->sd_diptrs;) {
                tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
                max_data -= tmp;
        }

        *data_blocks = max_data;
        *ind_blocks = max_blocks - max_data;
        *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
        if (*len > max) {
                *len = max;
                gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
        }
}
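
/*
 * The loop in calc_max_reserv() estimates the indirect-block overhead
 * level by level: each pass converts a block count into the number of
 * pointer blocks needed to address it one level up (sd_inptrs pointers
 * per indirect block), subtracting that from max_data until the
 * remainder fits within the dinode's direct pointers (sd_diptrs).
 */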

static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
        struct inode *inode = file_inode(file);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_alloc_parms ap = { .aflags = 0, };
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        loff_t bytes, max_bytes, max_blks;
        int error;
        const loff_t pos = offset;
        const loff_t count = len;
        loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
        loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
        loff_t max_chunk_size = UINT_MAX & bsize_mask;

        next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

        offset &= bsize_mask;

        len = next - offset;
        bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
        if (!bytes)
                bytes = UINT_MAX;
        bytes &= bsize_mask;
        if (bytes == 0)
                bytes = sdp->sd_sb.sb_bsize;

        gfs2_size_hint(file, offset, len);

        gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
        ap.min_target = data_blocks + ind_blocks;

        while (len > 0) {
                if (len < bytes)
                        bytes = len;
                if (!gfs2_write_alloc_required(ip, offset, bytes)) {
                        len -= bytes;
                        offset += bytes;
                        continue;
                }

                /* We need to determine how many bytes we can actually
                 * fallocate without exceeding quota or going over the
                 * end of the fs. We start off optimistically by assuming
                 * we can write max_bytes */
                max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

                /* Since max_bytes is most likely a theoretical max, we
                 * calculate a more realistic 'bytes' to serve as a good
                 * starting point for the number of bytes we may be able
                 * to write */
                gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
                ap.target = data_blocks + ind_blocks;

                error = gfs2_quota_lock_check(ip, &ap);
                if (error)
                        return error;
                /* ap.allowed tells us how many blocks quota will allow
                 * us to write. Check if this reduces max_blks */
                max_blks = UINT_MAX;
                if (ap.allowed)
                        max_blks = ap.allowed;

                error = gfs2_inplace_reserve(ip, &ap);
                if (error)
                        goto out_qunlock;

                /* check if the selected rgrp limits our max_blks further */
                if (ap.allowed && ap.allowed < max_blks)
                        max_blks = ap.allowed;

                /* Almost done. Calculate bytes that can be written using
                 * max_blks. We also recompute max_bytes, data_blocks and
                 * ind_blocks */
                calc_max_reserv(ip, &max_bytes, &data_blocks,
                                &ind_blocks, max_blks);

                rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
                          RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
                if (gfs2_is_jdata(ip))
                        rblocks += data_blocks ? data_blocks : 1;

                error = gfs2_trans_begin(sdp, rblocks,
                                         PAGE_SIZE/sdp->sd_sb.sb_bsize);
                if (error)
                        goto out_trans_fail;

                error = fallocate_chunk(inode, offset, max_bytes, mode);
                gfs2_trans_end(sdp);

                if (error)
                        goto out_trans_fail;

                len -= max_bytes;
                offset += max_bytes;
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
        }

        if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
                i_size_write(inode, pos + count);
                file_update_time(file);
                mark_inode_dirty(inode);
        }

        if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
                return vfs_fsync_range(file, pos, pos + count - 1,
                                       (file->f_flags & __O_SYNC) ? 0 : 1);
        return 0;

out_trans_fail:
        gfs2_inplace_release(ip);
out_qunlock:
        gfs2_quota_unlock(ip);
        return error;
}

static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
        struct inode *inode = file_inode(file);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int ret;

        if (mode & ~FALLOC_FL_KEEP_SIZE)
                return -EOPNOTSUPP;
        /* fallocate is needed by gfs2_grow to reserve space in the rindex */
        if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
                return -EOPNOTSUPP;

        inode_lock(inode);

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret)
                goto out_uninit;

        if (!(mode & FALLOC_FL_KEEP_SIZE) &&
            (offset + len) > inode->i_size) {
                ret = inode_newsize_ok(inode, offset + len);
                if (ret)
                        goto out_unlock;
        }

        ret = get_write_access(inode);
        if (ret)
                goto out_unlock;

        ret = gfs2_rsqa_alloc(ip);
        if (ret)
                goto out_putw;

        ret = __gfs2_fallocate(file, mode, offset, len);
        if (ret)
                gfs2_rs_deltree(&ip->i_res);

out_putw:
        put_write_access(inode);
out_unlock:
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        inode_unlock(inode);
        return ret;
}

static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
                                      struct file *out, loff_t *ppos,
                                      size_t len, unsigned int flags)
{
        int error;
        struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);

        error = gfs2_rsqa_alloc(ip);
        if (error)
                return (ssize_t)error;

        gfs2_size_hint(out, *ppos, len);

        return iter_file_splice_write(pipe, out, ppos, len, flags);
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
        if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
                return -ENOLCK;

        if (cmd == F_CANCELLK) {
                /* Hack: */
                cmd = F_SETLK;
                fl->fl_type = F_UNLCK;
        }
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                if (fl->fl_type == F_UNLCK)
                        locks_lock_file_wait(file, fl);
                return -EIO;
        }
        if (IS_GETLK(cmd))
                return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
        else if (fl->fl_type == F_UNLCK)
                return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
        else
                return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct gfs2_file *fp = file->private_data;
        struct gfs2_holder *fl_gh = &fp->f_fl_gh;
        struct gfs2_inode *ip = GFS2_I(file_inode(file));
        struct gfs2_glock *gl;
        unsigned int state;
        u16 flags;
        int error = 0;
        int sleeptime;

        state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
        flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

        mutex_lock(&fp->f_fl_mutex);

        if (gfs2_holder_initialized(fl_gh)) {
                if (fl_gh->gh_state == state)
                        goto out;
                locks_lock_file_wait(file,
                                     &(struct file_lock) {
                                             .fl_type = F_UNLCK,
                                             .fl_flags = FL_FLOCK
                                     });
                gfs2_glock_dq(fl_gh);
                gfs2_holder_reinit(state, flags, fl_gh);
        } else {
                error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
                                       &gfs2_flock_glops, CREATE, &gl);
                if (error)
                        goto out;
                gfs2_holder_init(gl, state, flags, fl_gh);
                gfs2_glock_put(gl);
        }
        for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
                error = gfs2_glock_nq(fl_gh);
                if (error != GLR_TRYFAILED)
                        break;
                fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
                fl_gh->gh_error = 0;
                msleep(sleeptime);
        }
        if (error) {
                gfs2_holder_uninit(fl_gh);
                if (error == GLR_TRYFAILED)
                        error = -EAGAIN;
        } else {
                error = locks_lock_file_wait(file, fl);
                gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
        }

out:
        mutex_unlock(&fp->f_fl_mutex);
        return error;
}
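
/*
 * Note on the retry loop in do_flock(): the first attempt uses
 * LM_FLAG_TRY_1CB (unless the caller asked to block), so a conflicting
 * holder gets one demote callback. On GLR_TRYFAILED the loop downgrades
 * to plain LM_FLAG_TRY and backs off for 1, 2 and then 4 ms before
 * giving up with -EAGAIN.
 */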

static void do_unflock(struct file *file, struct file_lock *fl)
{
        struct gfs2_file *fp = file->private_data;
        struct gfs2_holder *fl_gh = &fp->f_fl_gh;

        mutex_lock(&fp->f_fl_mutex);
        locks_lock_file_wait(file, fl);
        if (gfs2_holder_initialized(fl_gh)) {
                gfs2_glock_dq(fl_gh);
                gfs2_holder_uninit(fl_gh);
        }
        mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
        if (fl->fl_type & LOCK_MAND)
                return -EOPNOTSUPP;

        if (fl->fl_type == F_UNLCK) {
                do_unflock(file, fl);
                return 0;
        } else {
                return do_flock(file, cmd, fl);
        }
}

const struct file_operations gfs2_file_fops = {
        .llseek         = gfs2_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = gfs2_file_write_iter,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
        .release        = gfs2_release,
        .fsync          = gfs2_fsync,
        .lock           = gfs2_lock,
        .flock          = gfs2_flock,
        .splice_read    = generic_file_splice_read,
        .splice_write   = gfs2_file_splice_write,
        .setlease       = simple_nosetlease,
        .fallocate      = gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
        .iterate_shared = gfs2_readdir,
        .unlocked_ioctl = gfs2_ioctl,
        .open           = gfs2_open,
        .release        = gfs2_release,
        .fsync          = gfs2_fsync,
        .lock           = gfs2_lock,
        .flock          = gfs2_flock,
        .llseek         = default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
        .llseek         = gfs2_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = gfs2_file_write_iter,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
        .release        = gfs2_release,
        .fsync          = gfs2_fsync,
        .splice_read    = generic_file_splice_read,
        .splice_write   = gfs2_file_splice_write,
        .setlease       = generic_setlease,
        .fallocate      = gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
        .iterate_shared = gfs2_readdir,
        .unlocked_ioctl = gfs2_ioctl,
        .open           = gfs2_open,
        .release        = gfs2_release,
        .fsync          = gfs2_fsync,
        .llseek         = default_llseek,
};
1169