/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return READ_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	refcount_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}

static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc_array(npages, sizeof(struct page *),
					      flags);
			page_descs =
				kmalloc_array(npages,
					      sizeof(struct fuse_page_desc),
					      flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

static void fuse_drop_waiting(struct fuse_conn *fc)
{
	/*
	 * Lockless check of fc->connected is okay, because
	 * atomic_dec_and_test() provides a memory barrier matched with
	 * the one in fuse_wait_aborted() to ensure no wake-up is missed.
	 */
	if (atomic_dec_and_test(&fc->num_waiting) &&
	    !READ_ONCE(fc->connected)) {
		/* wake up aborters */
		wake_up_all(&fc->blocked_waitq);
	}
}

static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);

	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	if (unlikely(req->in.h.uid == ((uid_t)-1) ||
		     req->in.h.gid == ((gid_t)-1))) {
		fuse_put_request(fc, req);
		return ERR_PTR(-EOVERFLOW);
	}
	return req;

 out:
	fuse_drop_waiting(fc);
	return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
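
/*
 * Illustrative sketch (not built, hence the #if 0): the normal life
 * cycle of a foreground request as seen by a caller of the helpers
 * above.  The opcode wiring here is hypothetical; real callers live in
 * file.c/dir.c and usually go through fuse_simple_request() below.
 */
#if 0
static int example_send_fsync(struct fuse_conn *fc, u64 nodeid,
			      struct fuse_fsync_in *inarg)
{
	struct fuse_req *req = fuse_get_req(fc, 0);
	int err;

	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_FSYNC;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*inarg);
	req->in.args[0].value = inarg;
	fuse_request_send(fc, req);	/* blocks until the daemon replies */
	err = req->out.h.error;
	fuse_put_request(fc, req);	/* drop the caller's reference */
	return err;
}
#endif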

/*
 * Return the request in fuse_file->reserved_req.  However, that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Get a request for a file operation; this always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);

	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			fuse_drop_waiting(fc);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}

static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->lock);
}
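
/*
 * Caller-side sketch (assumption: this mirrors fuse_evict_inode() in
 * inode.c): the forget link is preallocated at lookup time, so inode
 * eviction can never fail to queue the FORGET.  Not built, hence #if 0.
 */
#if 0
static void example_evict_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	if (inode->i_sb->s_flags & SB_ACTIVE) {
		struct fuse_conn *fc = get_fuse_conn(inode);

		fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
		fi->forget = NULL;	/* ownership passed to the queue */
	}
}
#endif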

static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->lock);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		goto put_request;

	spin_lock(&fiq->lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up(&fc->blocked_waitq);
		} else if (!fc->blocked) {
			/*
			 * Wake up next waiter, if any.  It's okay to use
			 * waitqueue_active(), as we've already synced up
			 * fc->blocked with waiters with the wake_up() call
			 * above.
			 */
			if (waitqueue_active(&fc->blocked_waitq))
				wake_up(&fc->blocked_waitq);
		}

		if (fc->num_background == fc->congestion_threshold && fc->sb) {
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->lock);
	}
	wake_up(&req->waitq);
	if (req->end)
		req->end(fc, req);
put_request:
	fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->lock);
	if (test_bit(FR_FINISHED, &req->flags)) {
		spin_unlock(&fiq->lock);
		return;
	}
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up(&fiq->waitq);
	}
	spin_unlock(&fiq->lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}
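
/*
 * Caller sketch (hypothetical wrapper; compare the real GETATTR path in
 * dir.c).  FUSE_ARGS() is assumed to zero-initialize the argument block
 * as defined in fuse_i.h.  Not built, hence #if 0.
 */
#if 0
static int example_getattr(struct fuse_conn *fc, u64 nodeid,
			   struct fuse_attr_out *outarg)
{
	struct fuse_getattr_in inarg = {};
	FUSE_ARGS(args);

	args.in.h.opcode = FUSE_GETATTR;
	args.in.h.nodeid = nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(*outarg);
	args.out.args[0].value = outarg;
	return fuse_simple_request(fc, &args);
}
#endif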

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold && fc->sb) {
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted, bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pageful of userspace buffer, map it to kernel address
 * space, and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (pipe_buf_steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page; it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		return err;
	}

	unlock_page(oldpage);
	put_page(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}
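
/*
 * Daemon-side sketch (userspace; the helper name is hypothetical): a
 * FUSE_BATCH_FORGET message, as assembled above, is a fuse_in_header
 * followed by a fuse_batch_forget_in and then arg->count fuse_forget_one
 * records, with ih->len covering the whole message.  Not built, hence
 * #if 0.
 */
#if 0
static void example_parse_batch_forget(const void *msg)
{
	const struct fuse_in_header *ih = msg;
	const struct fuse_batch_forget_in *arg = (const void *)(ih + 1);
	const struct fuse_forget_one *one = (const void *)(arg + 1);
	uint32_t i;

	for (i = 0; i < arg->count; i++)
		drop_nlookup(one[i].nodeid, one[i].nlookup); /* hypothetical */
}
#endif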

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	for (;;) {
		spin_lock(&fiq->lock);
		if (!fiq->connected || request_pending(fiq))
			break;
		spin_unlock(&fiq->lock);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;
		err = wait_event_interruptible_exclusive(fiq->waitq,
				!fiq->connected || request_pending(fiq));
		if (err)
			return err;
	}

	if (!fiq->connected) {
		err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
		goto err_unlock;
	}

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->lock);

	in = &req->in;
	reqsize = in->h.len;

	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	list_move_tail(&req->list, &fpq->processing);
	__fuse_get_request(req);
	set_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);
	fuse_put_request(fc, req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

 err_unlock:
	spin_unlock(&fiq->lock);
	return err;
}
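
/*
 * Userspace counterpart, sketched (not part of the kernel; buffer
 * sizing and dispatch are the daemon's business, and real daemons size
 * the buffer to max_write plus header room).  FUSE_MIN_READ_BUFFER and
 * struct fuse_in_header come from <linux/fuse.h>; handle_request() is
 * hypothetical.  Not built, hence #if 0.
 */
#if 0
static void example_daemon_loop(int fuse_fd)
{
	char buf[FUSE_MIN_READ_BUFFER];

	for (;;) {
		ssize_t n = read(fuse_fd, buf, sizeof(buf));

		if (n < 0 && errno == EINTR)
			continue;
		if (n < (ssize_t)sizeof(struct fuse_in_header))
			break;
		/* dispatch on opcode; reply using the header's unique id */
		handle_request((struct fuse_in_header *)buf, n);
	}
}
#endif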

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kvmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
			      GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kvfree(bufs);
	return ret;
}
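
/*
 * The splice path above lets the daemon pull request data into a pipe
 * without copying it through a user buffer.  A userspace sketch
 * (assumptions as noted; not built, hence #if 0):
 */
#if 0
static ssize_t example_splice_read(int fuse_fd, size_t bufsize)
{
	int pfd[2];

	if (pipe(pfd) == -1)
		return -1;
	/* the kernel fills the pipe with the next request */
	return splice(fuse_fd, NULL, pfd[1], NULL, bufsize, SPLICE_F_MOVE);
	/* the daemon then consumes pfd[0], e.g. with read() or splice() */
}
#endif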

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = min(outarg->size, fc->max_write);
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].offset = offset;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err) {
		fuse_retrieve_end(fc, req);
		fuse_put_request(fc, req);
	}

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}
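
/*
 * Daemon-side sketch: an unsolicited notification is written to the
 * device as a reply with unique == 0 and the notification code in the
 * error field (see fuse_dev_do_write() below).  Example for
 * FUSE_NOTIFY_INVAL_INODE; fuse_fd and nodeid are assumed, and a
 * non-positive len is taken to mean "invalidate through EOF".  Not
 * built, hence #if 0.
 */
#if 0
static int example_notify_inval_inode(int fuse_fd, uint64_t nodeid)
{
	struct fuse_notify_inval_inode_out outarg = {
		.ino = nodeid,
		.off = 0,
		.len = 0,	/* non-positive: invalidate through EOF */
	};
	struct fuse_out_header oh = {
		.len = sizeof(oh) + sizeof(outarg),
		.error = FUSE_NOTIFY_INVAL_INODE,
		.unique = 0,
	};
	struct iovec iov[2] = {
		{ .iov_base = &oh,     .iov_len = sizeof(oh) },
		{ .iov_base = &outarg, .iov_len = sizeof(outarg) },
	};

	return writev(fuse_fd, iov, 2) < 0 ? -errno : 0;
}
#endif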

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * A zero oh.unique indicates an unsolicited notification message,
	 * and oh.error contains the notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fpq->lock);
	err = -ENOENT;
	if (!fpq->connected)
		goto err_unlock_pq;

	req = request_find(fpq, oh.unique);
	if (!req)
		goto err_unlock_pq;

	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		__fuse_get_request(req);
		spin_unlock(&fpq->lock);

		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header)) {
			fuse_put_request(fc, req);
			goto err_finish;
		}

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(&fc->iq, req);
		fuse_put_request(fc, req);

		fuse_copy_finish(cs);
		return nbytes;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock_pq:
	spin_unlock(&fpq->lock);
 err_finish:
	fuse_copy_finish(cs);
	return err;
}
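
/*
 * Daemon-side sketch of an ordinary reply: echo the request's unique
 * id, put 0 or a negative errno in oh.error, and append the
 * out-arguments.  Hypothetical example replying to FUSE_GETATTR with a
 * prepared struct fuse_attr_out.  Not built, hence #if 0.
 */
#if 0
static int example_reply_getattr(int fuse_fd, uint64_t unique,
				 const struct fuse_attr_out *attr_out)
{
	struct fuse_out_header oh = {
		.len = sizeof(oh) + sizeof(*attr_out),
		.error = 0,
		.unique = unique,	/* from the matching fuse_in_header */
	};
	struct iovec iov[2] = {
		{ .iov_base = &oh, .iov_len = sizeof(oh) },
		{ .iov_base = (void *)attr_out, .iov_len = sizeof(*attr_out) },
	};

	return writev(fuse_fd, iov, 2) < 0 ? -errno : 0;
}
#endif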

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	pipe_lock(pipe);

	bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer),
			      GFP_KERNEL);
	if (!bufs) {
		pipe_unlock(pipe);
		return -ENOMEM;
	}

	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len)
		goto out_free;

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			if (!pipe_buf_get(pipe, ibuf))
				goto out_free;

			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	pipe_lock(pipe);
out_free:
	for (idx = 0; idx < nbuf; idx++)
		pipe_buf_release(pipe, &bufs[idx]);
	pipe_unlock(pipe);

	kvfree(bufs);
	return ret;
}

static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return EPOLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->lock);
	if (!fiq->connected)
		mask = EPOLLERR;
	else if (request_pending(fiq))
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock(&fiq->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * Called with no locks held; request_end() takes the locks it needs.
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		request_end(fc, req);
	}
}

static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.
 * This is OK, the request will in that case be removed from the list before
 * we touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end);

		fc->connected = 0;
		fc->blocked = 0;
		fc->aborted = is_abort;
		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					__fuse_get_request(req);
					list_move(&req->list, &to_end);
				}
				spin_unlock(&req->waitq.lock);
			}
			list_splice_tail_init(&fpq->processing, &to_end);
			spin_unlock(&fpq->lock);
		}
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);

		spin_lock(&fiq->lock);
		fiq->connected = 0;
		list_for_each_entry(req, &fiq->pending, list)
			clear_bit(FR_PENDING, &req->flags);
		list_splice_tail_init(&fiq->pending, &to_end);
		while (forget_pending(fiq))
			kfree(dequeue_forget(fiq, 1, NULL));
		wake_up_all(&fiq->waitq);
		spin_unlock(&fiq->lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		end_requests(fc, &to_end);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
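
/*
 * Sketch of one trigger (assumption: this mirrors the fusectl "abort"
 * file handler in control.c).  Aborting with is_abort set makes waiters
 * see -ECONNABORTED instead of -ENODEV when fc->abort_err is enabled.
 * Not built, hence #if 0.
 */
#if 0
static void example_abort(struct fuse_conn *fc)
{
	fuse_abort_conn(fc, true);
	fuse_wait_aborted(fc);	/* optionally drain all waiters */
}
#endif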

void fuse_wait_aborted(struct fuse_conn *fc)
{
	/* matches implicit memory barrier in fuse_drop_waiting() */
	smp_mb();
	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}

int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;
		LIST_HEAD(to_end);

		spin_lock(&fpq->lock);
		WARN_ON(!list_empty(&fpq->io));
		list_splice_init(&fpq->processing, &to_end);
		spin_unlock(&fpq->lock);

		end_requests(fc, &to_end);

		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc, false);
		}
		fuse_dev_free(fud);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}

static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = -ENOTTY;

	if (cmd == FUSE_DEV_IOC_CLONE) {
		int oldfd;

		err = -EFAULT;
		if (!get_user(oldfd, (__u32 __user *) arg)) {
			struct file *old = fget(oldfd);

			err = -EINVAL;
			if (old) {
				struct fuse_dev *fud = NULL;

				/*
				 * Check against file->f_op because CUSE
				 * uses the same ioctl handler.
				 */
				if (old->f_op == file->f_op &&
				    old->f_cred->user_ns == file->f_cred->user_ns)
					fud = fuse_get_dev(old);

				if (fud) {
					mutex_lock(&fuse_mutex);
					err = fuse_device_clone(fud->fc, file);
					mutex_unlock(&fuse_mutex);
				}
				fput(old);
			}
		}
	}
	return err;
}
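
/*
 * Userspace sketch of device cloning: FUSE_DEV_IOC_CLONE takes the fd
 * of an already-mounted /dev/fuse instance, and each clone gets its own
 * processing queue, which is how multi-threaded daemons scale.  Not
 * built, hence #if 0.
 */
#if 0
static int example_clone_device(int session_fd)
{
	uint32_t oldfd = session_fd;	/* fd obtained at mount time */
	int newfd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

	if (newfd == -1)
		return -1;
	if (ioctl(newfd, FUSE_DEV_IOC_CLONE, &oldfd) == -1) {
		close(newfd);
		return -1;
	}
	return newfd;	/* another worker thread can read/write this fd */
}
#endif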

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = fuse_dev_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}