// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */

/*
 * Linux VFS file operations.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"
#include <linux/fs.h>
#include <linux/pagemap.h>

static int flush_racache(struct inode *inode)
{
        struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
        struct orangefs_kernel_op_s *new_op;
        int ret;

        gossip_debug(GOSSIP_UTILS_DEBUG,
                     "%s: %pU: Handle is %pU | fs_id %d\n", __func__,
                     get_khandle_from_ino(inode), &orangefs_inode->refn.khandle,
                     orangefs_inode->refn.fs_id);

        new_op = op_alloc(ORANGEFS_VFS_OP_RA_FLUSH);
        if (!new_op)
                return -ENOMEM;
        new_op->upcall.req.ra_cache_flush.refn = orangefs_inode->refn;

        ret = service_operation(new_op, "orangefs_flush_racache",
                                get_interruptible_flag(inode));

        gossip_debug(GOSSIP_UTILS_DEBUG, "%s: got return value of %d\n",
                     __func__, ret);

        op_release(new_op);
        return ret;
}

/*
 * Copy to client-core's address space from the buffers specified
 * by the iovec, up to total_size bytes.
 * NOTE: the iovec can contain either addresses (which may in turn
 * be kernel-space or user-space addresses) or pointers to
 * struct page.
 */
static int precopy_buffers(int buffer_index,
                           struct iov_iter *iter,
                           size_t total_size)
{
        int ret = 0;
        /*
         * copy data from application/kernel by pulling it out
         * of the iovec.
         */

        if (total_size) {
                ret = orangefs_bufmap_copy_from_iovec(iter,
                                                      buffer_index,
                                                      total_size);
                if (ret < 0)
                        gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
                                   __func__,
                                   (long)ret);
        }

        return ret;
}

/*
 * Copy from client-core's address space to the buffers specified
 * by the iovec, up to total_size bytes.
 * NOTE: the iovec can contain either addresses (which may in turn
 * be kernel-space or user-space addresses) or pointers to
 * struct page.
 */
static int postcopy_buffers(int buffer_index,
                            struct iov_iter *iter,
                            size_t total_size)
{
        int ret = 0;
        /*
         * copy data to application/kernel by pushing it out to
         * the iovec. NOTE: target buffers can be addresses or
         * struct page pointers.
         */
        if (total_size) {
                ret = orangefs_bufmap_copy_to_iovec(iter,
                                                    buffer_index,
                                                    total_size);
                if (ret < 0)
                        gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
                                   __func__,
                                   (long)ret);
        }
        return ret;
}

/*
 * Post and wait for the I/O upcall to finish
 */
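/*
 * In outline (a sketch of the flow implemented below): for a write,
 * stage 1 copies the caller's data into a shared bufmap slot
 * (precopy_buffers); stage 2 posts the upcall and waits for the
 * client-core to service it; for a read, stage 3 copies the result
 * back out of the slot (postcopy_buffers). If the client-core is
 * restarted while the op is queued or in progress, the whole
 * sequence is retried with a freshly acquired slot.
 */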
static ssize_t wait_for_direct_io(enum ORANGEFS_io_type type, struct inode *inode,
        loff_t *offset, struct iov_iter *iter,
        size_t total_size, loff_t readahead_size)
{
        struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
        struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
        struct orangefs_kernel_op_s *new_op = NULL;
        int buffer_index = -1;
        ssize_t ret;

        new_op = op_alloc(ORANGEFS_VFS_OP_FILE_IO);
        if (!new_op)
                return -ENOMEM;

        /* synchronous I/O */
        new_op->upcall.req.io.readahead_size = readahead_size;
        new_op->upcall.req.io.io_type = type;
        new_op->upcall.req.io.refn = orangefs_inode->refn;

populate_shared_memory:
        /* get a shared buffer index */
        buffer_index = orangefs_bufmap_get();
        if (buffer_index < 0) {
                ret = buffer_index;
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s: orangefs_bufmap_get failure (%zd)\n",
                             __func__, ret);
                goto out;
        }
        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU): GET op %p -> buffer_index %d\n",
                     __func__,
                     handle,
                     new_op,
                     buffer_index);

        new_op->uses_shared_memory = 1;
        new_op->upcall.req.io.buf_index = buffer_index;
        new_op->upcall.req.io.count = total_size;
        new_op->upcall.req.io.offset = *offset;

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU): offset: %llu total_size: %zd\n",
                     __func__,
                     handle,
                     llu(*offset),
                     total_size);
        /*
         * Stage 1: copy the buffers into client-core's address space
         * precopy_buffers only pertains to writes.
         */
        if (type == ORANGEFS_IO_WRITE) {
                ret = precopy_buffers(buffer_index,
                                      iter,
                                      total_size);
                if (ret < 0)
                        goto out;
        }

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU): Calling post_io_request with tag (%llu)\n",
                     __func__,
                     handle,
                     llu(new_op->tag));

        /* Stage 2: Service the I/O operation */
        ret = service_operation(new_op,
                                type == ORANGEFS_IO_WRITE ?
                                        "file_write" :
                                        "file_read",
                                get_interruptible_flag(inode));

        /*
         * If service_operation() returns -EAGAIN #and# the operation was
         * purged from orangefs_request_list or htable_ops_in_progress, then
         * we know that the client was restarted, causing the shared memory
         * area to be wiped clean. To restart a write operation in this
         * case, we must re-copy the data from the user's iovec to a NEW
         * shared memory location. To restart a read operation, we must get
         * a new shared memory location.
         */
        if (ret == -EAGAIN && op_state_purged(new_op)) {
                orangefs_bufmap_put(buffer_index);
                buffer_index = -1;
                if (type == ORANGEFS_IO_WRITE)
                        iov_iter_revert(iter, total_size);
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s:going to repopulate_shared_memory.\n",
                             __func__);
                goto populate_shared_memory;
        }

        if (ret < 0) {
                if (ret == -EINTR) {
                        /*
                         * We can't return EINTR if any data was written,
                         * it's not POSIX. It is minimally acceptable
                         * to give a partial write, the way NFS does.
                         *
                         * It would be optimal to return all or nothing,
                         * but if a userspace write is bigger than
                         * an IO buffer, and the interrupt occurs
                         * between buffer writes, that would not be
                         * possible.
                         */
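                        /*
                         * A sketch of how this plays out, assuming a
                         * bufmap buffer size of B: for a 2*B write
                         * issued as two B-sized upcalls, an interrupt
                         * during the second upcall yields 0 here if
                         * that op was still waiting (so the caller
                         * reports the B bytes already written), or
                         * total_size if the client-core had already
                         * picked it up (so the caller reports 2*B).
                         */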
                        switch (new_op->op_state - OP_VFS_STATE_GIVEN_UP) {
                        /*
                         * If the op was waiting when the interrupt
                         * occurred, then the client-core did not
                         * trigger the write.
                         */
                        case OP_VFS_STATE_WAITING:
                                if (*offset == 0)
                                        ret = -EINTR;
                                else
                                        ret = 0;
                                break;
                        /*
                         * If the op was in progress when the interrupt
                         * occurred, then the client-core was able to
                         * trigger the write.
                         */
                        case OP_VFS_STATE_INPROGR:
                                ret = total_size;
                                break;
                        default:
                                gossip_err("%s: unexpected op state :%d:.\n",
                                           __func__,
                                           new_op->op_state);
                                ret = 0;
                                break;
                        }
                        gossip_debug(GOSSIP_FILE_DEBUG,
                                     "%s: got EINTR, state:%d: %p\n",
                                     __func__,
                                     new_op->op_state,
                                     new_op);
                } else {
                        gossip_err("%s: error in %s handle %pU, returning %zd\n",
                                   __func__,
                                   type == ORANGEFS_IO_READ ?
                                        "read from" : "write to",
                                   handle, ret);
                }
                if (orangefs_cancel_op_in_progress(new_op))
                        return ret;

                goto out;
        }

        /*
         * Stage 3: Post copy buffers from client-core's address space
         * postcopy_buffers only pertains to reads.
         */
        if (type == ORANGEFS_IO_READ) {
                ret = postcopy_buffers(buffer_index,
                                       iter,
                                       new_op->downcall.resp.io.amt_complete);
                if (ret < 0)
                        goto out;
        }
        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU): Amount %s, returned by the sys-io call:%d\n",
                     __func__,
                     handle,
                     type == ORANGEFS_IO_READ ? "read" : "written",
                     (int)new_op->downcall.resp.io.amt_complete);

        ret = new_op->downcall.resp.io.amt_complete;

out:
        if (buffer_index >= 0) {
                orangefs_bufmap_put(buffer_index);
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s(%pU): PUT buffer_index %d\n",
                             __func__, handle, buffer_index);
                buffer_index = -1;
        }
        op_release(new_op);
        return ret;
}

/*
 * Common entry point for read/write/readv/writev
 * This function will dispatch it to either the direct I/O
 * or buffered I/O path depending on the mount options and/or
 * augmented/extended metadata attached to the file.
 * Note: File extended attributes override any mount options.
 */
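/*
 * A sketch of the chunking done below, assuming a bufmap buffer size
 * of B: a 2.5*B-byte request is serviced as three upcalls of B, B and
 * 0.5*B bytes; a short transfer on any chunk ends the loop and the
 * bytes completed so far are returned.
 */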
static ssize_t do_readv_writev(enum ORANGEFS_io_type type, struct file *file,
        loff_t *offset, struct iov_iter *iter)
{
        struct inode *inode = file->f_mapping->host;
        struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
        struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
        size_t count = iov_iter_count(iter);
        ssize_t total_count = 0;
        ssize_t ret = -EINVAL;

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
                     __func__,
                     handle,
                     (int)count);

        if (type == ORANGEFS_IO_WRITE) {
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s(%pU): proceeding with offset : %llu, "
                             "size %d\n",
                             __func__,
                             handle,
                             llu(*offset),
                             (int)count);
        }

        if (count == 0) {
                ret = 0;
                goto out;
        }

        while (iov_iter_count(iter)) {
                size_t each_count = iov_iter_count(iter);
                size_t amt_complete;

                /* how much to transfer in this loop iteration */
                if (each_count > orangefs_bufmap_size_query())
                        each_count = orangefs_bufmap_size_query();

                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s(%pU): size of each_count(%d)\n",
                             __func__,
                             handle,
                             (int)each_count);
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s(%pU): BEFORE wait_for_io: offset is %d\n",
                             __func__,
                             handle,
                             (int)*offset);

                ret = wait_for_direct_io(type, inode, offset, iter,
                                         each_count, 0);
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s(%pU): return from wait_for_io:%d\n",
                             __func__,
                             handle,
                             (int)ret);

                if (ret < 0)
                        goto out;

                *offset += ret;
                total_count += ret;
                amt_complete = ret;

                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s(%pU): AFTER wait_for_io: offset is %d\n",
                             __func__,
                             handle,
                             (int)*offset);

                /*
                 * if we got a short I/O operation,
                 * fall out and return what we got so far
                 */
                if (amt_complete < each_count)
                        break;
        } /* end while */

out:
        if (total_count > 0)
                ret = total_count;
        if (ret > 0) {
                if (type == ORANGEFS_IO_READ) {
                        file_accessed(file);
                } else {
                        SetMtimeFlag(orangefs_inode);
                        inode->i_mtime = current_time(inode);
                        mark_inode_dirty_sync(inode);
                }
        }

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU): Value(%d) returned.\n",
                     __func__,
                     handle,
                     (int)ret);

        return ret;
}

/*
 * Read data from a specified offset in a file (referenced by inode).
 * Data may be placed either in a user or kernel buffer.
 */
ssize_t orangefs_inode_read(struct inode *inode,
                            struct iov_iter *iter,
                            loff_t *offset,
                            loff_t readahead_size)
{
        struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
        size_t count = iov_iter_count(iter);
        size_t bufmap_size;
        ssize_t ret = -EINVAL;

        orangefs_stats.reads++;

        bufmap_size = orangefs_bufmap_size_query();
        if (count > bufmap_size) {
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s: count is too large (%zd/%zd)!\n",
                             __func__, count, bufmap_size);
                return -EINVAL;
        }

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU) %zd@%llu\n",
                     __func__,
                     &orangefs_inode->refn.khandle,
                     count,
                     llu(*offset));

        ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, offset, iter,
                                 count, readahead_size);
        if (ret > 0)
                *offset += ret;

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU): Value(%zd) returned.\n",
                     __func__,
                     &orangefs_inode->refn.khandle,
                     ret);

        return ret;
}

static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        loff_t pos = iocb->ki_pos;
        ssize_t rc = 0;

        BUG_ON(iocb->private);

        gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n");

        orangefs_stats.reads++;

        rc = do_readv_writev(ORANGEFS_IO_READ, file, &pos, iter);
        iocb->ki_pos = pos;

        return rc;
}

static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        loff_t pos;
        ssize_t rc;

        BUG_ON(iocb->private);

        gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n");

        inode_lock(file->f_mapping->host);

        /* Make sure generic_write_checks sees an up to date inode size. */
        if (file->f_flags & O_APPEND) {
                rc = orangefs_inode_getattr(file->f_mapping->host, 0, 1,
                                            STATX_SIZE);
                if (rc == -ESTALE)
                        rc = -EIO;
                if (rc) {
                        gossip_err("%s: orangefs_inode_getattr failed, "
                                   "rc:%zd:.\n", __func__, rc);
                        goto out;
                }
        }

        rc = generic_write_checks(iocb, iter);

        if (rc <= 0) {
                gossip_err("%s: generic_write_checks failed, rc:%zd:.\n",
                           __func__, rc);
                goto out;
        }

        /*
         * if we are appending, generic_write_checks would have updated
         * pos to the end of the file, so we will wait till now to set
         * pos...
         */
        pos = iocb->ki_pos;

        rc = do_readv_writev(ORANGEFS_IO_WRITE,
                             file,
                             &pos,
                             iter);
        if (rc < 0) {
                gossip_err("%s: do_readv_writev failed, rc:%zd:.\n",
                           __func__, rc);
                goto out;
        }

        iocb->ki_pos = pos;
        orangefs_stats.writes++;

out:

        inode_unlock(file->f_mapping->host);
        return rc;
}

/*
 * Perform a miscellaneous operation on a file.
 */
static long orangefs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        int ret = -ENOTTY;
        __u64 val = 0;
        unsigned long uval;

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "orangefs_ioctl: called with cmd %d\n",
                     cmd);

        /*
         * we understand some general ioctls on files, such as the immutable
         * and append flags
         */
        if (cmd == FS_IOC_GETFLAGS) {
                val = 0;
                ret = orangefs_inode_getxattr(file_inode(file),
                                              "user.pvfs2.meta_hint",
                                              &val, sizeof(val));
                if (ret < 0 && ret != -ENODATA)
                        return ret;
                else if (ret == -ENODATA)
                        val = 0;
                uval = val;
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "orangefs_ioctl: FS_IOC_GETFLAGS: %llu\n",
                             (unsigned long long)uval);
                return put_user(uval, (int __user *)arg);
        } else if (cmd == FS_IOC_SETFLAGS) {
                ret = 0;
                if (get_user(uval, (int __user *)arg))
                        return -EFAULT;
                /*
                 * ORANGEFS_MIRROR_FL is set internally when the mirroring mode
                 * is turned on for a file. The user is not allowed to turn
                 * on this bit, but the bit is present if the user first gets
                 * the flags and then updates the flags with some new
                 * settings. So, we ignore it in the following edit. bligon.
                 */
                if ((uval & ~ORANGEFS_MIRROR_FL) &
                    (~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL))) {
                        gossip_err("orangefs_ioctl: the FS_IOC_SETFLAGS only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n");
                        return -EINVAL;
                }
                val = uval;
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "orangefs_ioctl: FS_IOC_SETFLAGS: %llu\n",
                             (unsigned long long)val);
                ret = orangefs_inode_setxattr(file_inode(file),
                                              "user.pvfs2.meta_hint",
                                              &val, sizeof(val), 0);
        }

        return ret;
}
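
/*
 * A minimal userspace sketch of driving the handler above (an
 * illustration only, not part of this module; the path is a made-up
 * example on an OrangeFS mount). Note this handler reads and writes
 * the flags word through an int pointer:
 *
 *      #include <fcntl.h>
 *      #include <sys/ioctl.h>
 *      #include <linux/fs.h>
 *
 *      int fd = open("/mnt/orangefs/somefile", O_RDWR);
 *      int flags = 0;
 *      ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *      flags |= FS_NOATIME_FL;
 *      ioctl(fd, FS_IOC_SETFLAGS, &flags);
 */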

/*
 * Memory map a region of a file.
 */
static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        gossip_debug(GOSSIP_FILE_DEBUG,
                     "orangefs_file_mmap: called on %s\n",
                     (file ?
                        (char *)file->f_path.dentry->d_name.name :
                        (char *)"Unknown"));

        /* set the sequential readahead hint */
        vma->vm_flags |= VM_SEQ_READ;
        vma->vm_flags &= ~VM_RAND_READ;

        /* Use readonly mmap since we cannot support writable maps. */
        return generic_file_readonly_mmap(file, vma);
}

#define mapping_nrpages(idata) ((idata)->nrpages)

/*
 * Called to notify the module that there are no more references to
 * this file (i.e. no processes have it open).
 *
 * \note Not called when each file is closed.
 */
static int orangefs_file_release(struct inode *inode, struct file *file)
{
        gossip_debug(GOSSIP_FILE_DEBUG,
                     "orangefs_file_release: called on %pD\n",
                     file);

        orangefs_flush_inode(inode);

        /*
         * remove all associated inode pages from the page cache and
         * readahead cache (if any); this forces an expensive refresh of
         * data for the next caller of mmap (or 'get_block' accesses)
         */
        if (file_inode(file) &&
            file_inode(file)->i_mapping &&
            mapping_nrpages(&file_inode(file)->i_data)) {
                if (orangefs_features & ORANGEFS_FEATURE_READAHEAD) {
                        gossip_debug(GOSSIP_INODE_DEBUG,
                                     "calling flush_racache on %pU\n",
                                     get_khandle_from_ino(inode));
                        flush_racache(inode);
                        gossip_debug(GOSSIP_INODE_DEBUG,
                                     "flush_racache finished\n");
                }
                truncate_inode_pages(file_inode(file)->i_mapping,
                                     0);
        }
        return 0;
}

/*
 * Push all data for a specific file onto permanent storage.
 */
static int orangefs_fsync(struct file *file,
                          loff_t start,
                          loff_t end,
                          int datasync)
{
        int ret;
        struct orangefs_inode_s *orangefs_inode =
                ORANGEFS_I(file_inode(file));
        struct orangefs_kernel_op_s *new_op = NULL;

        new_op = op_alloc(ORANGEFS_VFS_OP_FSYNC);
        if (!new_op)
                return -ENOMEM;
        new_op->upcall.req.fsync.refn = orangefs_inode->refn;

        ret = service_operation(new_op,
                                "orangefs_fsync",
                                get_interruptible_flag(file_inode(file)));

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "orangefs_fsync got return value of %d\n",
                     ret);

        op_release(new_op);

        orangefs_flush_inode(file_inode(file));
        return ret;
}

/*
 * Change the file pointer position for an instance of an open file.
 *
 * \note If .llseek is overridden, we must acquire lock as described in
 * Documentation/filesystems/Locking.
 *
 * A future upgrade could support SEEK_DATA and SEEK_HOLE but would
 * require extensive changes to the FS.
 */
static loff_t orangefs_file_llseek(struct file *file, loff_t offset, int origin)
{
        int ret = -EINVAL;
        struct inode *inode = file_inode(file);

        if (origin == SEEK_END) {
                /*
                 * revalidate the inode's file size.
                 * NOTE: We are only interested in file size here,
                 * so we set mask accordingly.
                 */
                ret = orangefs_inode_getattr(file->f_mapping->host, 0, 1,
                                             STATX_SIZE);
                if (ret == -ESTALE)
                        ret = -EIO;
                if (ret) {
                        gossip_debug(GOSSIP_FILE_DEBUG,
                                     "%s:%s:%d calling make bad inode\n",
                                     __FILE__,
                                     __func__,
                                     __LINE__);
                        return ret;
                }
        }

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "orangefs_file_llseek: offset is %ld | origin is %d"
                     " | inode size is %lu\n",
                     (long)offset,
                     origin,
                     (unsigned long)i_size_read(inode));

        return generic_file_llseek(file, offset, origin);
}

/*
 * Support local locks (locks that only this kernel knows about)
 * if Orangefs was mounted -o local_lock.
 */
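/*
 * For example (a sketch; the device and mount point below are
 * placeholders):
 *
 *      mount -t orangefs -o local_lock <device> /mnt/orangefs
 *
 * Without local_lock, lock requests fall through to the -EINVAL
 * default below.
 */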
static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
        int rc = -EINVAL;

        if (ORANGEFS_SB(file_inode(filp)->i_sb)->flags & ORANGEFS_OPT_LOCAL_LOCK) {
                if (cmd == F_GETLK) {
                        rc = 0;
                        posix_test_lock(filp, fl);
                } else {
                        rc = posix_lock_file(filp, fl, NULL);
                }
        }

        return rc;
}

/** ORANGEFS implementation of VFS file operations */
const struct file_operations orangefs_file_operations = {
        .llseek         = orangefs_file_llseek,
        .read_iter      = orangefs_file_read_iter,
        .write_iter     = orangefs_file_write_iter,
        .lock           = orangefs_lock,
        .unlocked_ioctl = orangefs_ioctl,
        .mmap           = orangefs_file_mmap,
        .open           = generic_file_open,
        .release        = orangefs_file_release,
        .fsync          = orangefs_fsync,
};