/*
*  Copyright (c) 2001 The Regents of the University of Michigan.
*  All rights reserved.
*
*  Kendrick Smith <kmsmith@umich.edu>
*  Andy Adamson <kandros@umich.edu>
*
*  Redistribution and use in source and binary forms, with or without
*  modification, are permitted provided that the following conditions
*  are met:
*
*  1. Redistributions of source code must retain the above copyright
*     notice, this list of conditions and the following disclaimer.
*  2. Redistributions in binary form must reproduce the above copyright
*     notice, this list of conditions and the following disclaimer in the
*     documentation and/or other materials provided with the distribution.
*  3. Neither the name of the University nor the names of its
*     contributors may be used to endorse or promote products derived
*     from this software without specific prior written permission.
*
*  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
*  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
*  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
*  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
*  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
*  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
*  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"

#define NFSDDBG_FACILITY        NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
        .si_generation = ~0,
        .si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
        /* all fields zero */
};
static const stateid_t currentstateid = {
        .si_generation = 1,
};
static const stateid_t close_stateid = {
        .si_generation = 0xffffffffU,
};
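
/*
 * Note (editorial): zero_stateid and one_stateid are the "special"
 * anonymous and READ-bypass stateids defined by the protocol (see RFC
 * 7530 section 9.1.4.3); currentstateid matches the NFSv4.1 "use the
 * current stateid" indicator (seqid 1, all-zero opaque), and
 * close_stateid appears to serve as a server-internal sentinel for
 * state torn down by CLOSE.
 */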

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table. In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static bool is_session_dead(struct nfsd4_session *ses)
{
        return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
        if (atomic_read(&ses->se_ref) > ref_held_by_me)
                return nfserr_jukebox;
        ses->se_flags |= NFS4_SESSION_DEAD;
        return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
        return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        if (is_client_expired(clp))
                return nfserr_expired;
        atomic_inc(&clp->cl_refcount);
        return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        if (is_client_expired(clp)) {
                WARN_ON(1);
                printk("%s: client (clientid %08x/%08x) already expired\n",
                        __func__,
                        clp->cl_clientid.cl_boot,
                        clp->cl_clientid.cl_id);
                return;
        }

        dprintk("renewing client (clientid %08x/%08x)\n",
                        clp->cl_clientid.cl_boot,
                        clp->cl_clientid.cl_id);
        list_move_tail(&clp->cl_lru, &nn->client_lru);
        clp->cl_time = get_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        if (!atomic_dec_and_test(&clp->cl_refcount))
                return;
        if (!is_client_expired(clp))
                renew_client_locked(clp);
}

static void put_client_renew(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
                return;
        if (!is_client_expired(clp))
                renew_client_locked(clp);
        spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
        __be32 status;

        if (is_session_dead(ses))
                return nfserr_badsession;
        status = get_client_locked(ses->se_client);
        if (status)
                return status;
        atomic_inc(&ses->se_ref);
        return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
                free_session(ses);
        put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        spin_lock(&nn->client_lock);
        nfsd4_put_session_locked(ses);
        spin_unlock(&nn->client_lock);
}

static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
                        struct nfsd_net *nn)
{
        struct nfsd4_blocked_lock *cur, *found = NULL;

        spin_lock(&nn->blocked_locks_lock);
        list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
                if (fh_match(fh, &cur->nbl_fh)) {
                        list_del_init(&cur->nbl_list);
                        list_del_init(&cur->nbl_lru);
                        found = cur;
                        break;
                }
        }
        spin_unlock(&nn->blocked_locks_lock);
        if (found)
                posix_unblock_lock(&found->nbl_lock);
        return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
                        struct nfsd_net *nn)
{
        struct nfsd4_blocked_lock *nbl;

        nbl = find_blocked_lock(lo, fh, nn);
        if (!nbl) {
                nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
                if (nbl) {
                        INIT_LIST_HEAD(&nbl->nbl_list);
                        INIT_LIST_HEAD(&nbl->nbl_lru);
                        fh_copy_shallow(&nbl->nbl_fh, fh);
                        locks_init_lock(&nbl->nbl_lock);
                        nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
                                        &nfsd4_cb_notify_lock_ops,
                                        NFSPROC4_CLNT_CB_NOTIFY_LOCK);
                }
        }
        return nbl;
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
        locks_release_private(&nbl->nbl_lock);
        kfree(nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
        struct nfs4_client *clp = lo->lo_owner.so_client;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
        struct nfsd4_blocked_lock *nbl;
        LIST_HEAD(reaplist);

        /* Dequeue all blocked locks */
        spin_lock(&nn->blocked_locks_lock);
        while (!list_empty(&lo->lo_blocked)) {
                nbl = list_first_entry(&lo->lo_blocked,
                                        struct nfsd4_blocked_lock,
                                        nbl_list);
                list_del_init(&nbl->nbl_list);
                list_move(&nbl->nbl_lru, &reaplist);
        }
        spin_unlock(&nn->blocked_locks_lock);

        /* Now free them */
        while (!list_empty(&reaplist)) {
                nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
                                        nbl_lru);
                list_del_init(&nbl->nbl_lru);
                posix_unblock_lock(&nbl->nbl_lock);
                free_blocked_lock(nbl);
        }
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
        /*
         * Since this is just an optimization, we don't try very hard if it
         * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
         * just quit trying on anything else.
         */
        switch (task->tk_status) {
        case -NFS4ERR_DELAY:
                rpc_delay(task, 1 * HZ);
                return 0;
        default:
                return 1;
        }
}
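
/*
 * As the comment in the switch above implies, the generic callback
 * machinery treats a 0 return from ->done as "retry the rpc" (here,
 * after the one-second rpc_delay()) and a nonzero return as "give up",
 * which leads to ->release being called on the nfsd4_callback.
 */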

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
        struct nfsd4_blocked_lock *nbl = container_of(cb,
                                        struct nfsd4_blocked_lock, nbl_cb);

        free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
        .done           = nfsd4_cb_notify_lock_done,
        .release        = nfsd4_cb_notify_lock_release,
};

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
        atomic_inc(&sop->so_count);
        return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
        return (sop->so_owner.len == owner->len) &&
                0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
                        struct nfs4_client *clp)
{
        struct nfs4_stateowner *so;

        lockdep_assert_held(&clp->cl_lock);

        list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
                            so_strhash) {
                if (!so->so_is_open_owner)
                        continue;
                if (same_owner_str(so, &open->op_owner))
                        return openowner(nfs4_get_stateowner(so));
        }
        return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
                        struct nfs4_client *clp)
{
        struct nfs4_openowner *oo;

        spin_lock(&clp->cl_lock);
        oo = find_openstateowner_str_locked(hashval, open, clp);
        spin_unlock(&clp->cl_lock);
        return oo;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
        unsigned char *cptr = (unsigned char *) ptr;

        u32 x = 0;
        while (nbytes--) {
                x *= 37;
                x += *cptr++;
        }
        return x;
}
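
/*
 * Example: opaque_hashval("ab", 2) evaluates to ('a' * 37) + 'b'
 * = 97 * 37 + 98 = 3687; callers then mask the result down to the
 * size of their hash table.
 */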

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
        struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

        kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
        might_lock(&state_lock);

        if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
                hlist_del_rcu(&fi->fi_hash);
                spin_unlock(&state_lock);
                WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
                WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
                call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
        }
}

static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
        if (f->fi_fds[oflag])
                return get_file(f->fi_fds[oflag]);
        return NULL;
}

static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
        struct file *ret;

        lockdep_assert_held(&f->fi_lock);

        ret = __nfs4_get_fd(f, O_WRONLY);
        if (!ret)
                ret = __nfs4_get_fd(f, O_RDWR);
        return ret;
}

static struct file *
find_writeable_file(struct nfs4_file *f)
{
        struct file *ret;

        spin_lock(&f->fi_lock);
        ret = find_writeable_file_locked(f);
        spin_unlock(&f->fi_lock);

        return ret;
}

static struct file *find_readable_file_locked(struct nfs4_file *f)
{
        struct file *ret;

        lockdep_assert_held(&f->fi_lock);

        ret = __nfs4_get_fd(f, O_RDONLY);
        if (!ret)
                ret = __nfs4_get_fd(f, O_RDWR);
        return ret;
}

static struct file *
find_readable_file(struct nfs4_file *f)
{
        struct file *ret;

        spin_lock(&f->fi_lock);
        ret = find_readable_file_locked(f);
        spin_unlock(&f->fi_lock);

        return ret;
}

struct file *
find_any_file(struct nfs4_file *f)
{
        struct file *ret;

        spin_lock(&f->fi_lock);
        ret = __nfs4_get_fd(f, O_RDWR);
        if (!ret) {
                ret = __nfs4_get_fd(f, O_WRONLY);
                if (!ret)
                        ret = __nfs4_get_fd(f, O_RDONLY);
        }
        spin_unlock(&f->fi_lock);
        return ret;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS         8
#define OWNER_HASH_SIZE         (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK         (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
        unsigned int ret;

        ret = opaque_hashval(ownername->data, ownername->len);
        return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS          8
#define FILE_HASH_SIZE          (1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
        return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
        return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
        lockdep_assert_held(&fp->fi_lock);

        if (access & NFS4_SHARE_ACCESS_WRITE)
                atomic_inc(&fp->fi_access[O_WRONLY]);
        if (access & NFS4_SHARE_ACCESS_READ)
                atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
        lockdep_assert_held(&fp->fi_lock);

        /* Does this access mode make sense? */
        if (access & ~NFS4_SHARE_ACCESS_BOTH)
                return nfserr_inval;

        /* Does it conflict with a deny mode already set? */
        if ((access & fp->fi_share_deny) != 0)
                return nfserr_share_denied;

        __nfs4_file_get_access(fp, access);
        return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
        /* Common case is that there is no deny mode. */
        if (deny) {
                /* Does this deny mode make sense? */
                if (deny & ~NFS4_SHARE_DENY_BOTH)
                        return nfserr_inval;

                if ((deny & NFS4_SHARE_DENY_READ) &&
                    atomic_read(&fp->fi_access[O_RDONLY]))
                        return nfserr_share_denied;

                if ((deny & NFS4_SHARE_DENY_WRITE) &&
                    atomic_read(&fp->fi_access[O_WRONLY]))
                        return nfserr_share_denied;
        }
        return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
        might_lock(&fp->fi_lock);

        if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
                struct file *f1 = NULL;
                struct file *f2 = NULL;

                swap(f1, fp->fi_fds[oflag]);
                if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
                        swap(f2, fp->fi_fds[O_RDWR]);
                spin_unlock(&fp->fi_lock);
                if (f1)
                        fput(f1);
                if (f2)
                        fput(f2);
        }
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
        WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

        if (access & NFS4_SHARE_ACCESS_WRITE)
                __nfs4_file_put_access(fp, O_WRONLY);
        if (access & NFS4_SHARE_ACCESS_READ)
                __nfs4_file_put_access(fp, O_RDONLY);
}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
        struct nfs4_clnt_odstate *co;

        co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
        if (co) {
                co->co_client = clp;
                atomic_set(&co->co_odcount, 1);
        }
        return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
        struct nfs4_file *fp = co->co_file;

        lockdep_assert_held(&fp->fi_lock);
        list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
        if (co)
                atomic_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
        struct nfs4_file *fp;

        if (!co)
                return;

        fp = co->co_file;
        if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
                list_del(&co->co_perfile);
                spin_unlock(&fp->fi_lock);

                nfsd4_return_all_file_layouts(co->co_client, fp);
                kmem_cache_free(odstate_slab, co);
        }
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
        struct nfs4_clnt_odstate *co;
        struct nfs4_client *cl;

        if (!new)
                return NULL;

        cl = new->co_client;

        spin_lock(&fp->fi_lock);
        list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
                if (co->co_client == cl) {
                        get_clnt_odstate(co);
                        goto out;
                }
        }
        co = new;
        co->co_file = fp;
        hash_clnt_odstate_locked(new);
out:
        spin_unlock(&fp->fi_lock);
        return co;
}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
                                  void (*sc_free)(struct nfs4_stid *))
{
        struct nfs4_stid *stid;
        int new_id;

        stid = kmem_cache_zalloc(slab, GFP_KERNEL);
        if (!stid)
                return NULL;

        idr_preload(GFP_KERNEL);
        spin_lock(&cl->cl_lock);
        new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
        spin_unlock(&cl->cl_lock);
        idr_preload_end();
        if (new_id < 0)
                goto out_free;

        stid->sc_free = sc_free;
        stid->sc_client = cl;
        stid->sc_stateid.si_opaque.so_id = new_id;
        stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
        /* Will be incremented before return to client: */
        atomic_set(&stid->sc_count, 1);
        spin_lock_init(&stid->sc_lock);

        /*
         * It shouldn't be a problem to reuse an opaque stateid value.
         * I don't think it is for 4.1.  But with 4.0 I worry that, for
         * example, a stray write retransmission could be accepted by
         * the server when it should have been rejected.  Therefore,
         * adopt a trick from the sctp code to attempt to maximize the
         * amount of time until an id is reused, by ensuring they always
         * "increase" (mod INT_MAX):
         */
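        /*
         * (So, for example, if the stateid allocated above got id N,
         * idr_alloc_cyclic() hands out N + 1 next, even if lower ids
         * have since been freed.)
         */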
        return stid;
out_free:
        kmem_cache_free(slab, stid);
        return NULL;
}

static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
        struct nfs4_stid *stid;

        stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
        if (!stid)
                return NULL;

        return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
        kmem_cache_free(deleg_slab, stid);
        atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
        int     entries, old_entries;
        time_t  swap_time;
        int     new; /* index into 'set' */
        DECLARE_BITMAP(set[2], 256);
} blocked_delegations;
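
/*
 * Worked example: a filehandle that jhash()es to 0x00c0ffee has bits
 * 0xee (hash & 255), 0xff ((hash >> 8) & 255) and 0xc0 ((hash >> 16)
 * & 255) set in the "new" filter by block_delegations(), and
 * delegation_blocked() reports a conflict only when all three of those
 * bits are set in the same filter.
 */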

static int delegation_blocked(struct knfsd_fh *fh)
{
        u32 hash;
        struct bloom_pair *bd = &blocked_delegations;

        if (bd->entries == 0)
                return 0;
        if (seconds_since_boot() - bd->swap_time > 30) {
                spin_lock(&blocked_delegations_lock);
                if (seconds_since_boot() - bd->swap_time > 30) {
                        bd->entries -= bd->old_entries;
                        bd->old_entries = bd->entries;
                        memset(bd->set[bd->new], 0,
                               sizeof(bd->set[0]));
                        bd->new = 1-bd->new;
                        bd->swap_time = seconds_since_boot();
                }
                spin_unlock(&blocked_delegations_lock);
        }
        hash = jhash(&fh->fh_base, fh->fh_size, 0);
        if (test_bit(hash&255, bd->set[0]) &&
            test_bit((hash>>8)&255, bd->set[0]) &&
            test_bit((hash>>16)&255, bd->set[0]))
                return 1;

        if (test_bit(hash&255, bd->set[1]) &&
            test_bit((hash>>8)&255, bd->set[1]) &&
            test_bit((hash>>16)&255, bd->set[1]))
                return 1;

        return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
        u32 hash;
        struct bloom_pair *bd = &blocked_delegations;

        hash = jhash(&fh->fh_base, fh->fh_size, 0);

        spin_lock(&blocked_delegations_lock);
        __set_bit(hash&255, bd->set[bd->new]);
        __set_bit((hash>>8)&255, bd->set[bd->new]);
        __set_bit((hash>>16)&255, bd->set[bd->new]);
        if (bd->entries == 0)
                bd->swap_time = seconds_since_boot();
        bd->entries += 1;
        spin_unlock(&blocked_delegations_lock);
}

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
                 struct nfs4_clnt_odstate *odstate)
{
        struct nfs4_delegation *dp;
        long n;

        dprintk("NFSD alloc_init_deleg\n");
        n = atomic_long_inc_return(&num_delegations);
        if (n < 0 || n > max_delegations)
                goto out_dec;
        if (delegation_blocked(&current_fh->fh_handle))
                goto out_dec;
        dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
        if (dp == NULL)
                goto out_dec;

        /*
         * delegation seqid's are never incremented.  The 4.1 special
         * meaning of seqid 0 isn't meaningful, really, but let's avoid
         * 0 anyway just for consistency and use 1:
         */
        dp->dl_stid.sc_stateid.si_generation = 1;
        INIT_LIST_HEAD(&dp->dl_perfile);
        INIT_LIST_HEAD(&dp->dl_perclnt);
        INIT_LIST_HEAD(&dp->dl_recall_lru);
        dp->dl_clnt_odstate = odstate;
        get_clnt_odstate(odstate);
        dp->dl_type = NFS4_OPEN_DELEGATE_READ;
        dp->dl_retries = 1;
        nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
                      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
        return dp;
out_dec:
        atomic_long_dec(&num_delegations);
        return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
        struct nfs4_file *fp = s->sc_file;
        struct nfs4_client *clp = s->sc_client;

        might_lock(&clp->cl_lock);

        if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
                wake_up_all(&close_wq);
                return;
        }
        idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
        spin_unlock(&clp->cl_lock);
        s->sc_free(s);
        if (fp)
                put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
        stateid_t *src = &stid->sc_stateid;

        spin_lock(&stid->sc_lock);
        if (unlikely(++src->si_generation == 0))
                src->si_generation = 1;
        memcpy(dst, src, sizeof(*dst));
        spin_unlock(&stid->sc_lock);
}

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
        struct file *filp = NULL;

        spin_lock(&fp->fi_lock);
        if (fp->fi_deleg_file && --fp->fi_delegees == 0)
                swap(filp, fp->fi_deleg_file);
        spin_unlock(&fp->fi_lock);

        if (filp) {
                vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
                fput(filp);
        }
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
        s->sc_type = 0;
}

/**
 * nfs4_get_existing_delegation - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if an existing delegation was not found.
 *
 *      On error: -EAGAIN if one was previously granted to this nfs4_client
 *                for this nfs4_file.
 *
 */

static int
nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
{
        struct nfs4_delegation *searchdp = NULL;
        struct nfs4_client *searchclp = NULL;

        lockdep_assert_held(&state_lock);
        lockdep_assert_held(&fp->fi_lock);

        list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
                searchclp = searchdp->dl_stid.sc_client;
                if (clp == searchclp) {
                        return -EAGAIN;
                }
        }
        return 0;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */

static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
        int status;
        struct nfs4_client *clp = dp->dl_stid.sc_client;

        lockdep_assert_held(&state_lock);
        lockdep_assert_held(&fp->fi_lock);

        status = nfs4_get_existing_delegation(clp, fp);
        if (status)
                return status;
        ++fp->fi_delegees;
        atomic_inc(&dp->dl_stid.sc_count);
        dp->dl_stid.sc_type = NFS4_DELEG_STID;
        list_add(&dp->dl_perfile, &fp->fi_delegations);
        list_add(&dp->dl_perclnt, &clp->cl_delegations);
        return 0;
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
        struct nfs4_file *fp = dp->dl_stid.sc_file;

        lockdep_assert_held(&state_lock);

        if (list_empty(&dp->dl_perfile))
                return false;

        dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
        /* Ensure that deleg break won't try to requeue it */
        ++dp->dl_time;
        spin_lock(&fp->fi_lock);
        list_del_init(&dp->dl_perclnt);
        list_del_init(&dp->dl_recall_lru);
        list_del_init(&dp->dl_perfile);
        spin_unlock(&fp->fi_lock);
        return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
        bool unhashed;

        spin_lock(&state_lock);
        unhashed = unhash_delegation_locked(dp);
        spin_unlock(&state_lock);
        if (unhashed) {
                put_clnt_odstate(dp->dl_clnt_odstate);
                nfs4_put_deleg_lease(dp->dl_stid.sc_file);
                nfs4_put_stid(&dp->dl_stid);
        }
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
        struct nfs4_client *clp = dp->dl_stid.sc_client;

        WARN_ON(!list_empty(&dp->dl_recall_lru));

        put_clnt_odstate(dp->dl_clnt_odstate);
        nfs4_put_deleg_lease(dp->dl_stid.sc_file);

        if (clp->cl_minorversion == 0)
                nfs4_put_stid(&dp->dl_stid);
        else {
                dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
                spin_lock(&clp->cl_lock);
                list_add(&dp->dl_recall_lru, &clp->cl_revoked);
                spin_unlock(&clp->cl_lock);
        }
}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
        return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
        return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

1027 * We store the NONE, READ, WRITE, and BOTH bits separately in the
1028 * st_{access,deny}_bmap field of the stateid, in order to track not
1029 * only what share bits are currently in force, but also what
1030 * combinations of share bits previous opens have used. This allows us
1031 * to enforce the recommendation of rfc 3530 14.2.19 that the server
1032 * return an error if the client attempt to downgrade to a combination
1033 * of share bits not explicable by closing some of its previous opens.
1034 *
1035 * XXX: This enforcement is actually incomplete, since we don't keep
1036 * track of access/deny bit combinations; so, e.g., we allow:
1037 *
1038 * OPEN allow read, deny write
1039 * OPEN allow both, deny none
1040 * DOWNGRADE allow read, deny none
1041 *
1042 * which we should reject.
1043 */
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
        int i;
        unsigned int access = 0;

        for (i = 1; i < 4; i++) {
                if (test_bit(i, &bmap))
                        access |= i;
        }
        return access;
}
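
/*
 * Example: a stateid whose opens have used READ (bit 1) and BOTH
 * (bit 3) has a bmap of 0b1010; bmap_to_share_mode() ORs the set bit
 * numbers together and returns 1 | 3 = 3 (NFS4_SHARE_ACCESS_BOTH).
 */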

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << access;

        WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
        stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << access;

        WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
        stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << access;

        return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << deny;

        WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
        stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << deny;

        WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
        stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << deny;

        return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
        switch (access & NFS4_SHARE_ACCESS_BOTH) {
        case NFS4_SHARE_ACCESS_READ:
                return O_RDONLY;
        case NFS4_SHARE_ACCESS_WRITE:
                return O_WRONLY;
        case NFS4_SHARE_ACCESS_BOTH:
                return O_RDWR;
        }
        WARN_ON_ONCE(1);
        return O_RDONLY;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
        struct nfs4_ol_stateid *stp;

        spin_lock(&fp->fi_lock);
        fp->fi_share_deny = 0;
        list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
                fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
        spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
        int i;
        bool change = false;

        for (i = 1; i < 4; i++) {
                if ((i & deny) != i) {
                        change = true;
                        clear_deny(i, stp);
                }
        }

        /* Recalculate per-file deny mode if there was a change */
        if (change)
                recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
        int i;
        struct nfs4_file *fp = stp->st_stid.sc_file;

        if (fp && stp->st_deny_bmap != 0)
                recalculate_deny_mode(fp);

        for (i = 1; i < 4; i++) {
                if (test_access(i, stp))
                        nfs4_file_put_access(stp->st_stid.sc_file, i);
                clear_access(i, stp);
        }
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
        kfree(sop->so_owner.data);
        sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
        struct nfs4_client *clp = sop->so_client;

        might_lock(&clp->cl_lock);

        if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
                return;
        sop->so_ops->so_unhash(sop);
        spin_unlock(&clp->cl_lock);
        nfs4_free_stateowner(sop);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
        struct nfs4_file *fp = stp->st_stid.sc_file;

        lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

        if (list_empty(&stp->st_perfile))
                return false;

        spin_lock(&fp->fi_lock);
        list_del_init(&stp->st_perfile);
        spin_unlock(&fp->fi_lock);
        list_del(&stp->st_perstateowner);
        return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
        struct nfs4_ol_stateid *stp = openlockstateid(stid);

        put_clnt_odstate(stp->st_clnt_odstate);
        release_all_access(stp);
        if (stp->st_stateowner)
                nfs4_put_stateowner(stp->st_stateowner);
        kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
        struct nfs4_ol_stateid *stp = openlockstateid(stid);
        struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
        struct file *file;

        file = find_any_file(stp->st_stid.sc_file);
        if (file)
                filp_close(file, (fl_owner_t)lo);
        nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
                                       struct list_head *reaplist)
{
        struct nfs4_stid *s = &stp->st_stid;
        struct nfs4_client *clp = s->sc_client;

        lockdep_assert_held(&clp->cl_lock);

        WARN_ON_ONCE(!list_empty(&stp->st_locks));

        if (!atomic_dec_and_test(&s->sc_count)) {
                wake_up_all(&close_wq);
                return;
        }

        idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
        list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
        lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

        list_del_init(&stp->st_locks);
        nfs4_unhash_stid(&stp->st_stid);
        return unhash_ol_stateid(stp);
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
        struct nfs4_client *clp = stp->st_stid.sc_client;
        bool unhashed;

        spin_lock(&clp->cl_lock);
        unhashed = unhash_lock_stateid(stp);
        spin_unlock(&clp->cl_lock);
        if (unhashed)
                nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
        struct nfs4_client *clp = lo->lo_owner.so_client;

        lockdep_assert_held(&clp->cl_lock);

        list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
        struct nfs4_ol_stateid *stp;
        struct nfs4_file *fp;

        might_sleep();

        while (!list_empty(reaplist)) {
                stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
                                       st_locks);
                list_del(&stp->st_locks);
                fp = stp->st_stid.sc_file;
                stp->st_stid.sc_free(&stp->st_stid);
                if (fp)
                        put_nfs4_file(fp);
        }
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
                                       struct list_head *reaplist)
{
        struct nfs4_ol_stateid *stp;

        lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

        while (!list_empty(&open_stp->st_locks)) {
                stp = list_entry(open_stp->st_locks.next,
                                struct nfs4_ol_stateid, st_locks);
                WARN_ON(!unhash_lock_stateid(stp));
                put_ol_stateid_locked(stp, reaplist);
        }
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
                                struct list_head *reaplist)
{
        bool unhashed;

        lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

        unhashed = unhash_ol_stateid(stp);
        release_open_stateid_locks(stp, reaplist);
        return unhashed;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
        LIST_HEAD(reaplist);

        spin_lock(&stp->st_stid.sc_client->cl_lock);
        if (unhash_open_stateid(stp, &reaplist))
                put_ol_stateid_locked(stp, &reaplist);
        spin_unlock(&stp->st_stid.sc_client->cl_lock);
        free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
        struct nfs4_client *clp = oo->oo_owner.so_client;

        lockdep_assert_held(&clp->cl_lock);

        list_del_init(&oo->oo_owner.so_strhash);
        list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
        struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
                                          nfsd_net_id);
        struct nfs4_ol_stateid *s;

        spin_lock(&nn->client_lock);
        s = oo->oo_last_closed_stid;
        if (s) {
                list_del_init(&oo->oo_close_lru);
                oo->oo_last_closed_stid = NULL;
        }
        spin_unlock(&nn->client_lock);
        if (s)
                nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
        struct nfs4_ol_stateid *stp;
        struct nfs4_client *clp = oo->oo_owner.so_client;
        struct list_head reaplist;

        INIT_LIST_HEAD(&reaplist);

        spin_lock(&clp->cl_lock);
        unhash_openowner_locked(oo);
        while (!list_empty(&oo->oo_owner.so_stateids)) {
                stp = list_first_entry(&oo->oo_owner.so_stateids,
                                struct nfs4_ol_stateid, st_perstateowner);
                if (unhash_open_stateid(stp, &reaplist))
                        put_ol_stateid_locked(stp, &reaplist);
        }
        spin_unlock(&clp->cl_lock);
        free_ol_stateid_reaplist(&reaplist);
        release_last_closed_stateid(oo);
        nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
        struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

        return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
        u32 *ptr = (u32 *)(&sessionid->data[0]);
        dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
        struct nfs4_stateowner *so = cstate->replay_owner;

        if (nfserr == nfserr_replay_me)
                return;

        if (!seqid_mutating_err(ntohl(nfserr))) {
                nfsd4_cstate_clear_replay(cstate);
                return;
        }
        if (!so)
                return;
        if (so->so_is_open_owner)
                release_last_closed_stateid(openowner(so));
        so->so_seqid++;
        return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd4_sessionid *sid;

        sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
        sid->clientid = clp->cl_clientid;
        sid->sequence = current_sessionid++;
        sid->reserved = 0;
}

1459 * The protocol defines ca_maxresponssize_cached to include the size of
1460 * the rpc header, but all we need to cache is the data starting after
1461 * the end of the initial SEQUENCE operation--the rest we regenerate
1462 * each time. Therefore we can advertise a ca_maxresponssize_cached
1463 * value that is the number of bytes in our cache plus a few additional
1464 * bytes. In order to stay on the safe side, and not promise more than
1465 * we can cache, those additional bytes must be the minimum possible: 24
1466 * bytes of rpc header (xid through accept state, with AUTH_NULL
1467 * verifier), 12 for the compound header (with zero-length tag), and 44
1468 * for the SEQUENCE op response:
1469 */
1470#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
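/*
 * (One way to account for those 44 bytes of SEQUENCE response: a
 * 4-byte opcode and 4-byte status, the 16-byte sessionid, and the
 * sequenceid, slotid, highest slotid, target highest slotid and
 * status-flags words at 4 bytes each.)
 */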

static void
free_session_slots(struct nfsd4_session *ses)
{
        int i;

        for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
                free_svc_cred(&ses->se_slots[i]->sl_cred);
                kfree(ses->se_slots[i]);
        }
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
        u32 size;

        if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
                size = 0;
        else
                size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
        return size + sizeof(struct nfsd4_slot);
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
        u32 slotsize = slot_bytes(ca);
        u32 num = ca->maxreqs;
        unsigned long avail, total_avail;

        spin_lock(&nfsd_drc_lock);
        total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
        avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
        /*
         * Never use more than a third of the remaining memory,
         * unless it's the only way to give this client a slot:
         */
        avail = clamp_t(unsigned long, avail, slotsize, total_avail/3);
        num = min_t(int, num, avail / slotsize);
        nfsd_drc_mem_used += num * slotsize;
        spin_unlock(&nfsd_drc_lock);

        return num;
}
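
/*
 * Illustrative numbers, assuming a 2KB slotsize and that
 * NFSD_MAX_MEM_PER_SESSION is not the limiting factor: with 600KB of
 * DRC memory left, the clamp above allows this session at most
 * total_avail/3 = 200KB, i.e. 100 slots, however many the client
 * asked for.
 */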
1523
1524static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1525{
1526 int slotsize = slot_bytes(ca);
1527
1528 spin_lock(&nfsd_drc_lock);
1529 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1530 spin_unlock(&nfsd_drc_lock);
1531}
1532
1533static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1534 struct nfsd4_channel_attrs *battrs)
1535{
1536 int numslots = fattrs->maxreqs;
1537 int slotsize = slot_bytes(fattrs);
1538 struct nfsd4_session *new;
1539 int mem, i;
1540
1541 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1542 + sizeof(struct nfsd4_session) > PAGE_SIZE);
1543 mem = numslots * sizeof(struct nfsd4_slot *);
1544
1545 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1546 if (!new)
1547 return NULL;
1548 /* allocate each struct nfsd4_slot and data cache in one piece */
1549 for (i = 0; i < numslots; i++) {
1550 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1551 if (!new->se_slots[i])
1552 goto out_free;
1553 }
1554
1555 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1556 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1557
1558 return new;
1559out_free:
1560 while (i--)
1561 kfree(new->se_slots[i]);
1562 kfree(new);
1563 return NULL;
1564}
1565
1566static void free_conn(struct nfsd4_conn *c)
1567{
1568 svc_xprt_put(c->cn_xprt);
1569 kfree(c);
1570}
1571
1572static void nfsd4_conn_lost(struct svc_xpt_user *u)
1573{
1574 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1575 struct nfs4_client *clp = c->cn_session->se_client;
1576
1577 spin_lock(&clp->cl_lock);
1578 if (!list_empty(&c->cn_persession)) {
1579 list_del(&c->cn_persession);
1580 free_conn(c);
1581 }
1582 nfsd4_probe_callback(clp);
1583 spin_unlock(&clp->cl_lock);
1584}
1585
1586static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1587{
1588 struct nfsd4_conn *conn;
1589
1590 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1591 if (!conn)
1592 return NULL;
1593 svc_xprt_get(rqstp->rq_xprt);
1594 conn->cn_xprt = rqstp->rq_xprt;
1595 conn->cn_flags = flags;
1596 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1597 return conn;
1598}
1599
1600static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1601{
1602 conn->cn_session = ses;
1603 list_add(&conn->cn_persession, &ses->se_conns);
1604}
1605
1606static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1607{
1608 struct nfs4_client *clp = ses->se_client;
1609
1610 spin_lock(&clp->cl_lock);
1611 __nfsd4_hash_conn(conn, ses);
1612 spin_unlock(&clp->cl_lock);
1613}
1614
1615static int nfsd4_register_conn(struct nfsd4_conn *conn)
1616{
1617 conn->cn_xpt_user.callback = nfsd4_conn_lost;
1618 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1619}
1620
1621static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1622{
1623 int ret;
1624
1625 nfsd4_hash_conn(conn, ses);
1626 ret = nfsd4_register_conn(conn);
1627 if (ret)
1628 /* oops; xprt is already down: */
1629 nfsd4_conn_lost(&conn->cn_xpt_user);
1630 /* We may have gained or lost a callback channel: */
1631 nfsd4_probe_callback_sync(ses->se_client);
1632}
1633
1634static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1635{
1636 u32 dir = NFS4_CDFC4_FORE;
1637
1638 if (cses->flags & SESSION4_BACK_CHAN)
1639 dir |= NFS4_CDFC4_BACK;
1640 return alloc_conn(rqstp, dir);
1641}
1642
1643/* must be called under client_lock */
1644static void nfsd4_del_conns(struct nfsd4_session *s)
1645{
1646 struct nfs4_client *clp = s->se_client;
1647 struct nfsd4_conn *c;
1648
1649 spin_lock(&clp->cl_lock);
1650 while (!list_empty(&s->se_conns)) {
1651 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1652 list_del_init(&c->cn_persession);
1653 spin_unlock(&clp->cl_lock);
1654
1655 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1656 free_conn(c);
1657
1658 spin_lock(&clp->cl_lock);
1659 }
1660 spin_unlock(&clp->cl_lock);
1661}
1662
1663static void __free_session(struct nfsd4_session *ses)
1664{
1665 free_session_slots(ses);
1666 kfree(ses);
1667}
1668
1669static void free_session(struct nfsd4_session *ses)
1670{
1671 nfsd4_del_conns(ses);
1672 nfsd4_put_drc_mem(&ses->se_fchannel);
1673 __free_session(ses);
1674}
1675
1676static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1677{
1678 int idx;
1679 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1680
1681 new->se_client = clp;
1682 gen_sessionid(new);
1683
1684 INIT_LIST_HEAD(&new->se_conns);
1685
1686 new->se_cb_seq_nr = 1;
1687 new->se_flags = cses->flags;
1688 new->se_cb_prog = cses->callback_prog;
1689 new->se_cb_sec = cses->cb_sec;
1690 atomic_set(&new->se_ref, 0);
1691 idx = hash_sessionid(&new->se_sessionid);
1692 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1693 spin_lock(&clp->cl_lock);
1694 list_add(&new->se_perclnt, &clp->cl_sessions);
1695 spin_unlock(&clp->cl_lock);
1696
1697 {
1698 struct sockaddr *sa = svc_addr(rqstp);
1699 /*
1700 * This is a little silly; with sessions there's no real
1701 * use for the callback address. Use the peer address
1702 * as a reasonable default for now, but consider fixing
1703 * the rpc client not to require an address in the
1704 * future:
1705 */
1706 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1707 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1708 }
1709}
1710
1711/* caller must hold client_lock */
1712static struct nfsd4_session *
1713__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1714{
1715 struct nfsd4_session *elem;
1716 int idx;
1717 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1718
1719 lockdep_assert_held(&nn->client_lock);
1720
1721 dump_sessionid(__func__, sessionid);
1722 idx = hash_sessionid(sessionid);
1723 /* Search in the appropriate list */
1724 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1725 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1726 NFS4_MAX_SESSIONID_LEN)) {
1727 return elem;
1728 }
1729 }
1730
1731 dprintk("%s: session not found\n", __func__);
1732 return NULL;
1733}
1734
1735static struct nfsd4_session *
1736find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1737 __be32 *ret)
1738{
1739 struct nfsd4_session *session;
1740 __be32 status = nfserr_badsession;
1741
1742 session = __find_in_sessionid_hashtbl(sessionid, net);
1743 if (!session)
1744 goto out;
1745 status = nfsd4_get_session_locked(session);
1746 if (status)
1747 session = NULL;
1748out:
1749 *ret = status;
1750 return session;
1751}
1752
1753/* caller must hold client_lock */
1754static void
1755unhash_session(struct nfsd4_session *ses)
1756{
1757 struct nfs4_client *clp = ses->se_client;
1758 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1759
1760 lockdep_assert_held(&nn->client_lock);
1761
1762 list_del(&ses->se_hash);
1763 spin_lock(&ses->se_client->cl_lock);
1764 list_del(&ses->se_perclnt);
1765 spin_unlock(&ses->se_client->cl_lock);
1766}
1767
1768/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1769static int
1770STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1771{
1772 /*
1773 * We're assuming the clid was not given out from a boot
1774 * precisely 2^32 (about 136 years) before this one. That seems
1775 * a safe assumption:
1776 */
1777 if (clid->cl_boot == (u32)nn->boot_time)
1778 return 0;
1779 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1780 clid->cl_boot, clid->cl_id, nn->boot_time);
1781 return 1;
1782}
1783
1784/*
1785 * XXX Should we use a slab cache ?
1786 * This type of memory management is somewhat inefficient, but we use it
1787 * anyway since SETCLIENTID is not a common operation.
1788 */
1789static struct nfs4_client *alloc_client(struct xdr_netobj name)
1790{
1791 struct nfs4_client *clp;
1792 int i;
1793
1794 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1795 if (clp == NULL)
1796 return NULL;
1797 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1798 if (clp->cl_name.data == NULL)
1799 goto err_no_name;
1800 clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
1801 OWNER_HASH_SIZE, GFP_KERNEL);
1802 if (!clp->cl_ownerstr_hashtbl)
1803 goto err_no_hashtbl;
1804 for (i = 0; i < OWNER_HASH_SIZE; i++)
1805 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1806 clp->cl_name.len = name.len;
1807 INIT_LIST_HEAD(&clp->cl_sessions);
1808 idr_init(&clp->cl_stateids);
1809 atomic_set(&clp->cl_refcount, 0);
1810 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1811 INIT_LIST_HEAD(&clp->cl_idhash);
1812 INIT_LIST_HEAD(&clp->cl_openowners);
1813 INIT_LIST_HEAD(&clp->cl_delegations);
1814 INIT_LIST_HEAD(&clp->cl_lru);
1815 INIT_LIST_HEAD(&clp->cl_revoked);
1816#ifdef CONFIG_NFSD_PNFS
1817 INIT_LIST_HEAD(&clp->cl_lo_states);
1818#endif
1819 spin_lock_init(&clp->cl_lock);
1820 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1821 return clp;
1822err_no_hashtbl:
1823 kfree(clp->cl_name.data);
1824err_no_name:
1825 kfree(clp);
1826 return NULL;
1827}
1828
1829static void
1830free_client(struct nfs4_client *clp)
1831{
1832 while (!list_empty(&clp->cl_sessions)) {
1833 struct nfsd4_session *ses;
1834 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1835 se_perclnt);
1836 list_del(&ses->se_perclnt);
1837 WARN_ON_ONCE(atomic_read(&ses->se_ref));
1838 free_session(ses);
1839 }
1840 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
1841 free_svc_cred(&clp->cl_cred);
1842 kfree(clp->cl_ownerstr_hashtbl);
1843 kfree(clp->cl_name.data);
1844 idr_destroy(&clp->cl_stateids);
1845 kfree(clp);
1846}
1847
1848/* must be called under the client_lock */
1849static void
1850unhash_client_locked(struct nfs4_client *clp)
1851{
1852 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1853 struct nfsd4_session *ses;
1854
1855 lockdep_assert_held(&nn->client_lock);
1856
1857 /* Mark the client as expired! */
1858 clp->cl_time = 0;
1859 /* Make it invisible */
1860 if (!list_empty(&clp->cl_idhash)) {
1861 list_del_init(&clp->cl_idhash);
1862 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1863 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1864 else
1865 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1866 }
1867 list_del_init(&clp->cl_lru);
1868 spin_lock(&clp->cl_lock);
1869 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1870 list_del_init(&ses->se_hash);
1871 spin_unlock(&clp->cl_lock);
1872}
1873
1874static void
1875unhash_client(struct nfs4_client *clp)
1876{
1877 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1878
1879 spin_lock(&nn->client_lock);
1880 unhash_client_locked(clp);
1881 spin_unlock(&nn->client_lock);
1882}
1883
1884static __be32 mark_client_expired_locked(struct nfs4_client *clp)
1885{
1886 if (atomic_read(&clp->cl_refcount))
1887 return nfserr_jukebox;
1888 unhash_client_locked(clp);
1889 return nfs_ok;
1890}
1891
1892static void
1893__destroy_client(struct nfs4_client *clp)
1894{
1895 int i;
1896 struct nfs4_openowner *oo;
1897 struct nfs4_delegation *dp;
1898 struct list_head reaplist;
1899
1900 INIT_LIST_HEAD(&reaplist);
1901 spin_lock(&state_lock);
1902 while (!list_empty(&clp->cl_delegations)) {
1903 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1904 WARN_ON(!unhash_delegation_locked(dp));
1905 list_add(&dp->dl_recall_lru, &reaplist);
1906 }
1907 spin_unlock(&state_lock);
1908 while (!list_empty(&reaplist)) {
1909 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1910 list_del_init(&dp->dl_recall_lru);
1911 put_clnt_odstate(dp->dl_clnt_odstate);
1912 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
1913 nfs4_put_stid(&dp->dl_stid);
1914 }
1915 while (!list_empty(&clp->cl_revoked)) {
1916 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
1917 list_del_init(&dp->dl_recall_lru);
1918 nfs4_put_stid(&dp->dl_stid);
1919 }
1920 while (!list_empty(&clp->cl_openowners)) {
1921 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
1922 nfs4_get_stateowner(&oo->oo_owner);
1923 release_openowner(oo);
1924 }
1925 for (i = 0; i < OWNER_HASH_SIZE; i++) {
1926 struct nfs4_stateowner *so, *tmp;
1927
1928 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
1929 so_strhash) {
1930 /* Should be no openowners at this point */
1931 WARN_ON_ONCE(so->so_is_open_owner);
1932 remove_blocked_locks(lockowner(so));
1933 }
1934 }
1935 nfsd4_return_all_client_layouts(clp);
1936 nfsd4_shutdown_callback(clp);
1937 if (clp->cl_cb_conn.cb_xprt)
1938 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1939 free_client(clp);
1940}
1941
1942static void
1943destroy_client(struct nfs4_client *clp)
1944{
1945 unhash_client(clp);
1946 __destroy_client(clp);
1947}
1948
1949static void expire_client(struct nfs4_client *clp)
1950{
1951 unhash_client(clp);
1952 nfsd4_client_record_remove(clp);
1953 __destroy_client(clp);
1954}
1955
1956static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1957{
1958 memcpy(target->cl_verifier.data, source->data,
1959 sizeof(target->cl_verifier.data));
1960}
1961
1962static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1963{
1964 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
1965 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1966}
1967
1968static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1969{
1970 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
1971 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
1972 GFP_KERNEL);
1973 if ((source->cr_principal && ! target->cr_principal) ||
1974 (source->cr_raw_principal && ! target->cr_raw_principal))
1975 return -ENOMEM;
1976
1977 target->cr_flavor = source->cr_flavor;
1978 target->cr_uid = source->cr_uid;
1979 target->cr_gid = source->cr_gid;
1980 target->cr_group_info = source->cr_group_info;
1981 get_group_info(target->cr_group_info);
1982 target->cr_gss_mech = source->cr_gss_mech;
1983 if (source->cr_gss_mech)
1984 gss_mech_get(source->cr_gss_mech);
1985 return 0;
1986}
1987
1988static int
1989compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1990{
1991 if (o1->len < o2->len)
1992 return -1;
1993 if (o1->len > o2->len)
1994 return 1;
1995 return memcmp(o1->data, o2->data, o1->len);
1996}
1997
1998static int same_name(const char *n1, const char *n2)
1999{
2000 return 0 == memcmp(n1, n2, HEXDIR_LEN);
2001}
2002
2003static int
2004same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2005{
2006 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2007}
2008
2009static int
2010same_clid(clientid_t *cl1, clientid_t *cl2)
2011{
2012 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2013}
2014
2015static bool groups_equal(struct group_info *g1, struct group_info *g2)
2016{
2017 int i;
2018
2019 if (g1->ngroups != g2->ngroups)
2020 return false;
	for (i = 0; i < g1->ngroups; i++)
2022 if (!gid_eq(g1->gid[i], g2->gid[i]))
2023 return false;
2024 return true;
2025}
2026
/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use the uid, gids, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
2036static bool is_gss_cred(struct svc_cred *cr)
2037{
2038 /* Is cr_flavor one of the gss "pseudoflavors"?: */
2039 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2040}
2041
2042
2043static bool
2044same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2045{
2046 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2047 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2048 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2049 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2050 return false;
2051 if (cr1->cr_principal == cr2->cr_principal)
2052 return true;
2053 if (!cr1->cr_principal || !cr2->cr_principal)
2054 return false;
2055 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2056}
2057
2058static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2059{
2060 struct svc_cred *cr = &rqstp->rq_cred;
2061 u32 service;
2062
2063 if (!cr->cr_gss_mech)
2064 return false;
2065 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2066 return service == RPC_GSS_SVC_INTEGRITY ||
2067 service == RPC_GSS_SVC_PRIVACY;
2068}
2069
2070bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2071{
2072 struct svc_cred *cr = &rqstp->rq_cred;
2073
2074 if (!cl->cl_mach_cred)
2075 return true;
2076 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2077 return false;
2078 if (!svc_rqst_integrity_protected(rqstp))
2079 return false;
2080 if (cl->cl_cred.cr_raw_principal)
2081 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2082 cr->cr_raw_principal);
2083 if (!cr->cr_principal)
2084 return false;
2085 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2086}
2087
2088static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2089{
2090 __be32 verf[2];
2091
	/*
	 * This is opaque to the client, so no need to byte-swap.  Use
	 * __force to keep sparse happy.
	 */
2096 verf[0] = (__force __be32)get_seconds();
2097 verf[1] = (__force __be32)nn->clverifier_counter++;
2098 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2099}
2100
2101static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2102{
2103 clp->cl_clientid.cl_boot = nn->boot_time;
2104 clp->cl_clientid.cl_id = nn->clientid_counter++;
2105 gen_confirm(clp, nn);
2106}
2107
2108static struct nfs4_stid *
2109find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2110{
2111 struct nfs4_stid *ret;
2112
2113 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2114 if (!ret || !ret->sc_type)
2115 return NULL;
2116 return ret;
2117}
2118
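/*
 * Look up a stateid and take a reference on it, but only if its type is
 * one of the bits set in @typemask; sc_type is a bitmask, so a single
 * lookup can accept several stateid types.
 */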
2119static struct nfs4_stid *
2120find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2121{
2122 struct nfs4_stid *s;
2123
2124 spin_lock(&cl->cl_lock);
2125 s = find_stateid_locked(cl, t);
2126 if (s != NULL) {
2127 if (typemask & s->sc_type)
2128 atomic_inc(&s->sc_count);
2129 else
2130 s = NULL;
2131 }
2132 spin_unlock(&cl->cl_lock);
2133 return s;
2134}
2135
2136static struct nfs4_client *create_client(struct xdr_netobj name,
2137 struct svc_rqst *rqstp, nfs4_verifier *verf)
2138{
2139 struct nfs4_client *clp;
2140 struct sockaddr *sa = svc_addr(rqstp);
2141 int ret;
2142 struct net *net = SVC_NET(rqstp);
2143
2144 clp = alloc_client(name);
2145 if (clp == NULL)
2146 return NULL;
2147
2148 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2149 if (ret) {
2150 free_client(clp);
2151 return NULL;
2152 }
2153 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2154 clp->cl_time = get_seconds();
2155 clear_bit(0, &clp->cl_cb_slot_busy);
2156 copy_verf(clp, verf);
2157 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
2158 clp->cl_cb_session = NULL;
2159 clp->net = net;
2160 return clp;
2161}
2162
2163static void
2164add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2165{
2166 struct rb_node **new = &(root->rb_node), *parent = NULL;
2167 struct nfs4_client *clp;
2168
2169 while (*new) {
2170 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2171 parent = *new;
2172
2173 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2174 new = &((*new)->rb_left);
2175 else
2176 new = &((*new)->rb_right);
2177 }
2178
2179 rb_link_node(&new_clp->cl_namenode, parent, new);
2180 rb_insert_color(&new_clp->cl_namenode, root);
2181}
2182
2183static struct nfs4_client *
2184find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2185{
2186 int cmp;
2187 struct rb_node *node = root->rb_node;
2188 struct nfs4_client *clp;
2189
2190 while (node) {
2191 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2192 cmp = compare_blob(&clp->cl_name, name);
2193 if (cmp > 0)
2194 node = node->rb_left;
2195 else if (cmp < 0)
2196 node = node->rb_right;
2197 else
2198 return clp;
2199 }
2200 return NULL;
2201}
2202
2203static void
2204add_to_unconfirmed(struct nfs4_client *clp)
2205{
2206 unsigned int idhashval;
2207 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2208
2209 lockdep_assert_held(&nn->client_lock);
2210
2211 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2212 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2213 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2214 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2215 renew_client_locked(clp);
2216}
2217
2218static void
2219move_to_confirmed(struct nfs4_client *clp)
2220{
2221 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2222 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2223
2224 lockdep_assert_held(&nn->client_lock);
2225
	dprintk("NFSD: move_to_confirmed nfs4_client %p\n", clp);
2227 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2228 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2229 add_clp_to_name_tree(clp, &nn->conf_name_tree);
2230 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2231 renew_client_locked(clp);
2232}
2233
2234static struct nfs4_client *
2235find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2236{
2237 struct nfs4_client *clp;
2238 unsigned int idhashval = clientid_hashval(clid->cl_id);
2239
2240 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2241 if (same_clid(&clp->cl_clientid, clid)) {
2242 if ((bool)clp->cl_minorversion != sessions)
2243 return NULL;
2244 renew_client_locked(clp);
2245 return clp;
2246 }
2247 }
2248 return NULL;
2249}
2250
2251static struct nfs4_client *
2252find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2253{
2254 struct list_head *tbl = nn->conf_id_hashtbl;
2255
2256 lockdep_assert_held(&nn->client_lock);
2257 return find_client_in_id_table(tbl, clid, sessions);
2258}
2259
2260static struct nfs4_client *
2261find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2262{
2263 struct list_head *tbl = nn->unconf_id_hashtbl;
2264
2265 lockdep_assert_held(&nn->client_lock);
2266 return find_client_in_id_table(tbl, clid, sessions);
2267}
2268
2269static bool clp_used_exchangeid(struct nfs4_client *clp)
2270{
2271 return clp->cl_exchange_flags != 0;
2272}
2273
2274static struct nfs4_client *
2275find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2276{
2277 lockdep_assert_held(&nn->client_lock);
2278 return find_clp_in_name_tree(name, &nn->conf_name_tree);
2279}
2280
2281static struct nfs4_client *
2282find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2283{
2284 lockdep_assert_held(&nn->client_lock);
2285 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2286}
2287
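/*
 * Record the callback address from the SETCLIENTID arguments: the netid
 * determines the expected address family, and the universal address
 * string is parsed into a sockaddr.  On any failure the callback
 * address is cleared, which leaves the client unable to receive
 * delegations.
 */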
2288static void
2289gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2290{
2291 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2292 struct sockaddr *sa = svc_addr(rqstp);
2293 u32 scopeid = rpc_get_scope_id(sa);
2294 unsigned short expected_family;
2295
2296 /* Currently, we only support tcp and tcp6 for the callback channel */
2297 if (se->se_callback_netid_len == 3 &&
2298 !memcmp(se->se_callback_netid_val, "tcp", 3))
2299 expected_family = AF_INET;
2300 else if (se->se_callback_netid_len == 4 &&
2301 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2302 expected_family = AF_INET6;
2303 else
2304 goto out_err;
2305
2306 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2307 se->se_callback_addr_len,
2308 (struct sockaddr *)&conn->cb_addr,
2309 sizeof(conn->cb_addr));
2310
2311 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2312 goto out_err;
2313
2314 if (conn->cb_addr.ss_family == AF_INET6)
2315 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2316
2317 conn->cb_prog = se->se_callback_prog;
2318 conn->cb_ident = se->se_callback_ident;
2319 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2320 return;
2321out_err:
2322 conn->cb_addr.ss_family = AF_UNSPEC;
2323 conn->cb_addrlen = 0;
2324 dprintk("NFSD: this client (clientid %08x/%08x) "
2325 "will not receive delegations\n",
2326 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2327
2328 return;
2329}
2330
2331/*
2332 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2333 */
2334static void
2335nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2336{
2337 struct xdr_buf *buf = resp->xdr.buf;
2338 struct nfsd4_slot *slot = resp->cstate.slot;
2339 unsigned int base;
2340
2341 dprintk("--> %s slot %p\n", __func__, slot);
2342
2343 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2344 slot->sl_opcnt = resp->opcnt;
2345 slot->sl_status = resp->cstate.status;
2346 free_svc_cred(&slot->sl_cred);
2347 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2348
2349 if (!nfsd4_cache_this(resp)) {
2350 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2351 return;
2352 }
2353 slot->sl_flags |= NFSD4_SLOT_CACHED;
2354
2355 base = resp->cstate.data_offset;
2356 slot->sl_datalen = buf->len - base;
2357 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2358 WARN(1, "%s: sessions DRC could not cache compound\n",
2359 __func__);
2360 return;
2361}
2362
/*
 * Encode the replayed sequence operation from the slot values.
 * If cachethis is FALSE, encode the uncached-reply error on the next
 * operation, which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 */
2370static __be32
2371nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2372 struct nfsd4_compoundres *resp)
2373{
2374 struct nfsd4_op *op;
2375 struct nfsd4_slot *slot = resp->cstate.slot;
2376
2377 /* Encode the replayed sequence operation */
2378 op = &args->ops[resp->opcnt - 1];
2379 nfsd4_encode_operation(resp, op);
2380
2381 if (slot->sl_flags & NFSD4_SLOT_CACHED)
2382 return op->status;
2383 if (args->opcnt == 1) {
2384 /*
2385 * The original operation wasn't a solo sequence--we
2386 * always cache those--so this retry must not match the
2387 * original:
2388 */
2389 op->status = nfserr_seq_false_retry;
2390 } else {
2391 op = &args->ops[resp->opcnt++];
2392 op->status = nfserr_retry_uncached_rep;
2393 nfsd4_encode_operation(resp, op);
2394 }
2395 return op->status;
2396}
2397
2398/*
2399 * The sequence operation is not cached because we can use the slot and
2400 * session values.
2401 */
2402static __be32
2403nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2404 struct nfsd4_sequence *seq)
2405{
2406 struct nfsd4_slot *slot = resp->cstate.slot;
2407 struct xdr_stream *xdr = &resp->xdr;
2408 __be32 *p;
2409 __be32 status;
2410
2411 dprintk("--> %s slot %p\n", __func__, slot);
2412
2413 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2414 if (status)
2415 return status;
2416
2417 p = xdr_reserve_space(xdr, slot->sl_datalen);
2418 if (!p) {
2419 WARN_ON_ONCE(1);
2420 return nfserr_serverfault;
2421 }
2422 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2423 xdr_commit_encode(xdr);
2424
2425 resp->opcnt = slot->sl_opcnt;
2426 return slot->sl_status;
2427}
2428
2429/*
2430 * Set the exchange_id flags returned by the server.
2431 */
2432static void
2433nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2434{
2435#ifdef CONFIG_NFSD_PNFS
2436 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
2437#else
2438 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
2439#endif
2440
2441 /* Referrals are supported, Migration is not. */
2442 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2443
2444 /* set the wire flags to return to client. */
2445 clid->flags = new->cl_exchange_flags;
2446}
2447
2448static bool client_has_openowners(struct nfs4_client *clp)
2449{
2450 struct nfs4_openowner *oo;
2451
2452 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
2453 if (!list_empty(&oo->oo_owner.so_stateids))
2454 return true;
2455 }
2456 return false;
2457}
2458
2459static bool client_has_state(struct nfs4_client *clp)
2460{
2461 return client_has_openowners(clp)
2462#ifdef CONFIG_NFSD_PNFS
2463 || !list_empty(&clp->cl_lo_states)
2464#endif
2465 || !list_empty(&clp->cl_delegations)
2466 || !list_empty(&clp->cl_sessions);
2467}
2468
2469__be32
2470nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2471 union nfsd4_op_u *u)
2472{
2473 struct nfsd4_exchange_id *exid = &u->exchange_id;
2474 struct nfs4_client *conf, *new;
2475 struct nfs4_client *unconf = NULL;
2476 __be32 status;
2477 char addr_str[INET6_ADDRSTRLEN];
2478 nfs4_verifier verf = exid->verifier;
2479 struct sockaddr *sa = svc_addr(rqstp);
2480 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
2481 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2482
2483 rpc_ntop(sa, addr_str, sizeof(addr_str));
2484 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
2485 "ip_addr=%s flags %x, spa_how %d\n",
2486 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
2487 addr_str, exid->flags, exid->spa_how);
2488
2489 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
2490 return nfserr_inval;
2491
2492 new = create_client(exid->clname, rqstp, &verf);
2493 if (new == NULL)
2494 return nfserr_jukebox;
2495
2496 switch (exid->spa_how) {
2497 case SP4_MACH_CRED:
2498 exid->spo_must_enforce[0] = 0;
2499 exid->spo_must_enforce[1] = (
2500 1 << (OP_BIND_CONN_TO_SESSION - 32) |
2501 1 << (OP_EXCHANGE_ID - 32) |
2502 1 << (OP_CREATE_SESSION - 32) |
2503 1 << (OP_DESTROY_SESSION - 32) |
2504 1 << (OP_DESTROY_CLIENTID - 32));
2505
2506 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
2507 1 << (OP_OPEN_DOWNGRADE) |
2508 1 << (OP_LOCKU) |
2509 1 << (OP_DELEGRETURN));
2510
2511 exid->spo_must_allow[1] &= (
2512 1 << (OP_TEST_STATEID - 32) |
2513 1 << (OP_FREE_STATEID - 32));
2514 if (!svc_rqst_integrity_protected(rqstp)) {
2515 status = nfserr_inval;
2516 goto out_nolock;
2517 }
2518 /*
2519 * Sometimes userspace doesn't give us a principal.
2520 * Which is a bug, really. Anyway, we can't enforce
2521 * MACH_CRED in that case, better to give up now:
2522 */
2523 if (!new->cl_cred.cr_principal &&
2524 !new->cl_cred.cr_raw_principal) {
2525 status = nfserr_serverfault;
2526 goto out_nolock;
2527 }
		new->cl_mach_cred = true;
		/* fall through */
	case SP4_NONE:
2530 break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
		/* fall through */
	case SP4_SSV:
2534 status = nfserr_encr_alg_unsupp;
2535 goto out_nolock;
2536 }
2537
2538 /* Cases below refer to rfc 5661 section 18.35.4: */
2539 spin_lock(&nn->client_lock);
2540 conf = find_confirmed_client_by_name(&exid->clname, nn);
2541 if (conf) {
2542 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
2543 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
2544
2545 if (update) {
2546 if (!clp_used_exchangeid(conf)) { /* buggy client */
2547 status = nfserr_inval;
2548 goto out;
2549 }
2550 if (!nfsd4_mach_creds_match(conf, rqstp)) {
2551 status = nfserr_wrong_cred;
2552 goto out;
2553 }
2554 if (!creds_match) { /* case 9 */
2555 status = nfserr_perm;
2556 goto out;
2557 }
2558 if (!verfs_match) { /* case 8 */
2559 status = nfserr_not_same;
2560 goto out;
2561 }
2562 /* case 6 */
2563 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
2564 goto out_copy;
2565 }
2566 if (!creds_match) { /* case 3 */
2567 if (client_has_state(conf)) {
2568 status = nfserr_clid_inuse;
2569 goto out;
2570 }
2571 goto out_new;
2572 }
2573 if (verfs_match) { /* case 2 */
2574 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
2575 goto out_copy;
2576 }
2577 /* case 5, client reboot */
2578 conf = NULL;
2579 goto out_new;
2580 }
2581
2582 if (update) { /* case 7 */
2583 status = nfserr_noent;
2584 goto out;
2585 }
2586
2587 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
2588 if (unconf) /* case 4, possible retry or client restart */
2589 unhash_client_locked(unconf);
2590
2591 /* case 1 (normal case) */
2592out_new:
2593 if (conf) {
2594 status = mark_client_expired_locked(conf);
2595 if (status)
2596 goto out;
2597 }
2598 new->cl_minorversion = cstate->minorversion;
2599 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
2600 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
2601
2602 gen_clid(new, nn);
2603 add_to_unconfirmed(new);
2604 swap(new, conf);
2605out_copy:
2606 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
2607 exid->clientid.cl_id = conf->cl_clientid.cl_id;
2608
2609 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
2610 nfsd4_set_ex_flags(conf, exid);
2611
2612 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
2613 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
2614 status = nfs_ok;
2615
2616out:
2617 spin_unlock(&nn->client_lock);
2618out_nolock:
2619 if (new)
2620 expire_client(new);
2621 if (unconf)
2622 expire_client(unconf);
2623 return status;
2624}
2625
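/*
 * Slot seqid rules: seqid == slot_seqid + 1 is the next request on this
 * slot; seqid == slot_seqid is a retransmit, answered from the replay
 * cache (or with jukebox while the original is still in flight);
 * anything else is misordered.
 */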
2626static __be32
2627check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
2628{
2629 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2630 slot_seqid);
2631
2632 /* The slot is in use, and no response has been sent. */
2633 if (slot_inuse) {
2634 if (seqid == slot_seqid)
2635 return nfserr_jukebox;
2636 else
2637 return nfserr_seq_misordered;
2638 }
2639 /* Note unsigned 32-bit arithmetic handles wraparound: */
2640 if (likely(seqid == slot_seqid + 1))
2641 return nfs_ok;
2642 if (seqid == slot_seqid)
2643 return nfserr_replay_cache;
2644 return nfserr_seq_misordered;
2645}
2646
2647/*
2648 * Cache the create session result into the create session single DRC
2649 * slot cache by saving the xdr structure. sl_seqid has been set.
2650 * Do this for solo or embedded create session operations.
2651 */
2652static void
2653nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
2654 struct nfsd4_clid_slot *slot, __be32 nfserr)
2655{
2656 slot->sl_status = nfserr;
2657 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
2658}
2659
2660static __be32
2661nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
2662 struct nfsd4_clid_slot *slot)
2663{
2664 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
2665 return slot->sl_status;
2666}
2667
2668#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
2669 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
2670 1 + /* MIN tag is length with zero, only length */ \
2671 3 + /* version, opcount, opcode */ \
2672 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2673 /* seqid, slotID, slotID, cache */ \
2674 4 ) * sizeof(__be32))
2675
2676#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
2677 2 + /* verifier: AUTH_NULL, length 0 */\
2678 1 + /* status */ \
2679 1 + /* MIN tag is length with zero, only length */ \
2680 3 + /* opcount, opcode, opstatus*/ \
2681 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2682 /* seqid, slotID, slotID, slotID, status */ \
2683 5 ) * sizeof(__be32))
2684
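/*
 * Negotiate the fore channel attributes downward: reject only sizes too
 * small to carry a minimal SEQUENCE compound, clamp everything else to
 * what this server supports, and let available DRC memory bound the
 * final slot count.
 */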
2685static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
2686{
2687 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
2688
2689 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
2690 return nfserr_toosmall;
2691 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
2692 return nfserr_toosmall;
2693 ca->headerpadsz = 0;
2694 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
2695 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
2696 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
2697 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
2698 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
2699 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note that decreasing the slot size below what the client requested
	 * may make it difficult for the client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect performance.
	 * When short on memory we therefore prefer to decrease the number of
	 * slots rather than their size.  Clients that request larger slots
	 * than they need will get poor results:
	 */
2708 ca->maxreqs = nfsd4_get_drc_mem(ca);
2709 if (!ca->maxreqs)
2710 return nfserr_jukebox;
2711
2712 return nfs_ok;
2713}
2714
2715/*
2716 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
2717 * These are based on similar macros in linux/sunrpc/msg_prot.h .
2718 */
2719#define RPC_MAX_HEADER_WITH_AUTH_SYS \
2720 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
2721
2722#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
2723 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
2724
2725#define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
2726 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
2727#define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
2728 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
2729 sizeof(__be32))
2730
2731static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
2732{
2733 ca->headerpadsz = 0;
2734
2735 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
2736 return nfserr_toosmall;
2737 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
2738 return nfserr_toosmall;
2739 ca->maxresp_cached = 0;
2740 if (ca->maxops < 2)
2741 return nfserr_toosmall;
2742
2743 return nfs_ok;
2744}
2745
2746static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2747{
2748 switch (cbs->flavor) {
2749 case RPC_AUTH_NULL:
2750 case RPC_AUTH_UNIX:
2751 return nfs_ok;
2752 default:
2753 /*
2754 * GSS case: the spec doesn't allow us to return this
2755 * error. But it also doesn't allow us not to support
2756 * GSS.
2757 * I'd rather this fail hard than return some error the
2758 * client might think it can already handle:
2759 */
2760 return nfserr_encr_alg_unsupp;
2761 }
2762}
2763
2764__be32
2765nfsd4_create_session(struct svc_rqst *rqstp,
2766 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
2767{
2768 struct nfsd4_create_session *cr_ses = &u->create_session;
2769 struct sockaddr *sa = svc_addr(rqstp);
2770 struct nfs4_client *conf, *unconf;
2771 struct nfs4_client *old = NULL;
2772 struct nfsd4_session *new;
2773 struct nfsd4_conn *conn;
2774 struct nfsd4_clid_slot *cs_slot = NULL;
2775 __be32 status = 0;
2776 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2777
2778 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
2779 return nfserr_inval;
2780 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
2781 if (status)
2782 return status;
2783 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
2784 if (status)
2785 return status;
2786 status = check_backchannel_attrs(&cr_ses->back_channel);
2787 if (status)
2788 goto out_release_drc_mem;
2789 status = nfserr_jukebox;
2790 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
2791 if (!new)
2792 goto out_release_drc_mem;
2793 conn = alloc_conn_from_crses(rqstp, cr_ses);
2794 if (!conn)
2795 goto out_free_session;
2796
2797 spin_lock(&nn->client_lock);
2798 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
2799 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
2800 WARN_ON_ONCE(conf && unconf);
2801
2802 if (conf) {
2803 status = nfserr_wrong_cred;
2804 if (!nfsd4_mach_creds_match(conf, rqstp))
2805 goto out_free_conn;
2806 cs_slot = &conf->cl_cs_slot;
2807 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2808 if (status) {
2809 if (status == nfserr_replay_cache)
2810 status = nfsd4_replay_create_session(cr_ses, cs_slot);
2811 goto out_free_conn;
2812 }
2813 } else if (unconf) {
2814 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
2815 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
2816 status = nfserr_clid_inuse;
2817 goto out_free_conn;
2818 }
2819 status = nfserr_wrong_cred;
2820 if (!nfsd4_mach_creds_match(unconf, rqstp))
2821 goto out_free_conn;
2822 cs_slot = &unconf->cl_cs_slot;
2823 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2824 if (status) {
2825 /* an unconfirmed replay returns misordered */
2826 status = nfserr_seq_misordered;
2827 goto out_free_conn;
2828 }
2829 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2830 if (old) {
2831 status = mark_client_expired_locked(old);
2832 if (status) {
2833 old = NULL;
2834 goto out_free_conn;
2835 }
2836 }
2837 move_to_confirmed(unconf);
2838 conf = unconf;
2839 } else {
2840 status = nfserr_stale_clientid;
2841 goto out_free_conn;
2842 }
2843 status = nfs_ok;
2844 /* Persistent sessions are not supported */
2845 cr_ses->flags &= ~SESSION4_PERSIST;
2846 /* Upshifting from TCP to RDMA is not supported */
2847 cr_ses->flags &= ~SESSION4_RDMA;
2848
2849 init_session(rqstp, new, conf, cr_ses);
2850 nfsd4_get_session_locked(new);
2851
2852 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
2853 NFS4_MAX_SESSIONID_LEN);
2854 cs_slot->sl_seqid++;
2855 cr_ses->seqid = cs_slot->sl_seqid;
2856
2857 /* cache solo and embedded create sessions under the client_lock */
2858 nfsd4_cache_create_session(cr_ses, cs_slot, status);
2859 spin_unlock(&nn->client_lock);
2860 /* init connection and backchannel */
2861 nfsd4_init_conn(rqstp, conn, new);
2862 nfsd4_put_session(new);
2863 if (old)
2864 expire_client(old);
2865 return status;
2866out_free_conn:
2867 spin_unlock(&nn->client_lock);
2868 free_conn(conn);
2869 if (old)
2870 expire_client(old);
2871out_free_session:
2872 __free_session(new);
2873out_release_drc_mem:
2874 nfsd4_put_drc_mem(&cr_ses->fore_channel);
2875 return status;
2876}
2877
2878static __be32 nfsd4_map_bcts_dir(u32 *dir)
2879{
2880 switch (*dir) {
2881 case NFS4_CDFC4_FORE:
2882 case NFS4_CDFC4_BACK:
2883 return nfs_ok;
2884 case NFS4_CDFC4_FORE_OR_BOTH:
2885 case NFS4_CDFC4_BACK_OR_BOTH:
2886 *dir = NFS4_CDFC4_BOTH;
2887 return nfs_ok;
	}
2889 return nfserr_inval;
2890}
2891
2892__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
2893 struct nfsd4_compound_state *cstate,
2894 union nfsd4_op_u *u)
2895{
2896 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
2897 struct nfsd4_session *session = cstate->session;
2898 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2899 __be32 status;
2900
2901 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2902 if (status)
2903 return status;
2904 spin_lock(&nn->client_lock);
2905 session->se_cb_prog = bc->bc_cb_program;
2906 session->se_cb_sec = bc->bc_cb_sec;
2907 spin_unlock(&nn->client_lock);
2908
2909 nfsd4_probe_callback(session->se_client);
2910
2911 return nfs_ok;
2912}
2913
2914__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2915 struct nfsd4_compound_state *cstate,
2916 union nfsd4_op_u *u)
2917{
2918 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
2919 __be32 status;
2920 struct nfsd4_conn *conn;
2921 struct nfsd4_session *session;
2922 struct net *net = SVC_NET(rqstp);
2923 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2924
2925 if (!nfsd4_last_compound_op(rqstp))
2926 return nfserr_not_only_op;
2927 spin_lock(&nn->client_lock);
2928 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2929 spin_unlock(&nn->client_lock);
2930 if (!session)
2931 goto out_no_session;
2932 status = nfserr_wrong_cred;
2933 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
2934 goto out;
2935 status = nfsd4_map_bcts_dir(&bcts->dir);
2936 if (status)
2937 goto out;
2938 conn = alloc_conn(rqstp, bcts->dir);
2939 status = nfserr_jukebox;
2940 if (!conn)
2941 goto out;
2942 nfsd4_init_conn(rqstp, conn, session);
2943 status = nfs_ok;
2944out:
2945 nfsd4_put_session(session);
2946out_no_session:
2947 return status;
2948}
2949
2950static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2951{
2952 if (!session)
		return false;
2954 return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2955}
2956
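/*
 * DESTROY_SESSION may arrive on the very session it destroys, in which
 * case it must be the only operation in the compound and our own
 * reference is accounted for (ref_held_by_me) when deciding whether the
 * session can be killed.
 */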
2957__be32
2958nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
2959 union nfsd4_op_u *u)
2960{
2961 struct nfsd4_destroy_session *sessionid = &u->destroy_session;
2962 struct nfsd4_session *ses;
2963 __be32 status;
2964 int ref_held_by_me = 0;
2965 struct net *net = SVC_NET(r);
2966 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2967
2968 status = nfserr_not_only_op;
2969 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2970 if (!nfsd4_last_compound_op(r))
2971 goto out;
2972 ref_held_by_me++;
2973 }
2974 dump_sessionid(__func__, &sessionid->sessionid);
2975 spin_lock(&nn->client_lock);
2976 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
2977 if (!ses)
2978 goto out_client_lock;
2979 status = nfserr_wrong_cred;
2980 if (!nfsd4_mach_creds_match(ses->se_client, r))
2981 goto out_put_session;
2982 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
2983 if (status)
2984 goto out_put_session;
2985 unhash_session(ses);
2986 spin_unlock(&nn->client_lock);
2987
2988 nfsd4_probe_callback_sync(ses->se_client);
2989
2990 spin_lock(&nn->client_lock);
2991 status = nfs_ok;
2992out_put_session:
2993 nfsd4_put_session_locked(ses);
2994out_client_lock:
2995 spin_unlock(&nn->client_lock);
2996out:
2997 return status;
2998}
2999
3000static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3001{
3002 struct nfsd4_conn *c;
3003
3004 list_for_each_entry(c, &s->se_conns, cn_persession) {
3005 if (c->cn_xprt == xpt) {
3006 return c;
3007 }
3008 }
3009 return NULL;
3010}
3011
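/*
 * Bind the connection this request arrived on to the session, unless it
 * is already bound.  Machine-credential clients must bind connections
 * explicitly with BIND_CONN_TO_SESSION, so for them an unknown
 * connection is an error.
 */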
3012static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3013{
3014 struct nfs4_client *clp = ses->se_client;
3015 struct nfsd4_conn *c;
3016 __be32 status = nfs_ok;
3017 int ret;
3018
3019 spin_lock(&clp->cl_lock);
3020 c = __nfsd4_find_conn(new->cn_xprt, ses);
3021 if (c)
3022 goto out_free;
3023 status = nfserr_conn_not_bound_to_session;
3024 if (clp->cl_mach_cred)
3025 goto out_free;
3026 __nfsd4_hash_conn(new, ses);
3027 spin_unlock(&clp->cl_lock);
3028 ret = nfsd4_register_conn(new);
3029 if (ret)
3030 /* oops; xprt is already down: */
3031 nfsd4_conn_lost(&new->cn_xpt_user);
3032 return nfs_ok;
3033out_free:
3034 spin_unlock(&clp->cl_lock);
3035 free_conn(new);
3036 return status;
3037}
3038
3039static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3040{
3041 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3042
3043 return args->opcnt > session->se_fchannel.maxops;
3044}
3045
3046static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3047 struct nfsd4_session *session)
3048{
3049 struct xdr_buf *xb = &rqstp->rq_arg;
3050
3051 return xb->len > session->se_fchannel.maxreq_sz;
3052}
3053
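/*
 * Decide whether a request with a matching seqid really is a replay of
 * the cached reply.  The call itself is never cached, so only cheap
 * consistency checks are possible: the cachethis flag, the op counts,
 * and the credential.
 */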
3054static bool replay_matches_cache(struct svc_rqst *rqstp,
3055 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3056{
3057 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3058
3059 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3060 (bool)seq->cachethis)
3061 return false;
3062 /*
3063 * If there's an error then the reply can have fewer ops than
3064 * the call.
3065 */
3066 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
3067 return false;
3068 /*
3069 * But if we cached a reply with *more* ops than the call you're
3070 * sending us now, then this new call is clearly not really a
3071 * replay of the old one:
3072 */
3073 if (slot->sl_opcnt > argp->opcnt)
3074 return false;
	/* This is the only check explicitly called for by the spec: */
3076 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3077 return false;
3078 /*
3079 * There may be more comparisons we could actually do, but the
3080 * spec doesn't require us to catch every case where the calls
3081 * don't match (that would require caching the call as well as
3082 * the reply), so we don't bother.
3083 */
3084 return true;
3085}
3086
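/*
 * SEQUENCE must be the first operation of a session compound.  Validate
 * the slot and seqid, answer replays from the DRC, bind the connection
 * to the session if necessary, and cap the reply buffer before the rest
 * of the compound is processed.
 */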
3087__be32
3088nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3089 union nfsd4_op_u *u)
3090{
3091 struct nfsd4_sequence *seq = &u->sequence;
3092 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3093 struct xdr_stream *xdr = &resp->xdr;
3094 struct nfsd4_session *session;
3095 struct nfs4_client *clp;
3096 struct nfsd4_slot *slot;
3097 struct nfsd4_conn *conn;
3098 __be32 status;
3099 int buflen;
3100 struct net *net = SVC_NET(rqstp);
3101 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3102
3103 if (resp->opcnt != 1)
3104 return nfserr_sequence_pos;
3105
3106 /*
	 * Will either be used or freed by nfsd4_sequence_check_conn
3108 * below.
3109 */
3110 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3111 if (!conn)
3112 return nfserr_jukebox;
3113
3114 spin_lock(&nn->client_lock);
3115 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3116 if (!session)
3117 goto out_no_session;
3118 clp = session->se_client;
3119
3120 status = nfserr_too_many_ops;
3121 if (nfsd4_session_too_many_ops(rqstp, session))
3122 goto out_put_session;
3123
3124 status = nfserr_req_too_big;
3125 if (nfsd4_request_too_big(rqstp, session))
3126 goto out_put_session;
3127
3128 status = nfserr_badslot;
3129 if (seq->slotid >= session->se_fchannel.maxreqs)
3130 goto out_put_session;
3131
3132 slot = session->se_slots[seq->slotid];
3133 dprintk("%s: slotid %d\n", __func__, seq->slotid);
3134
	/* We do not negotiate the number of slots yet, so set maxslots to
	 * the session maxreqs, which is used to encode sr_highest_slotid
	 * and sr_target_slotid. */
3138 seq->maxslots = session->se_fchannel.maxreqs;
3139
3140 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3141 slot->sl_flags & NFSD4_SLOT_INUSE);
3142 if (status == nfserr_replay_cache) {
3143 status = nfserr_seq_misordered;
3144 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3145 goto out_put_session;
3146 status = nfserr_seq_false_retry;
3147 if (!replay_matches_cache(rqstp, seq, slot))
3148 goto out_put_session;
3149 cstate->slot = slot;
3150 cstate->session = session;
3151 cstate->clp = clp;
3152 /* Return the cached reply status and set cstate->status
3153 * for nfsd4_proc_compound processing */
3154 status = nfsd4_replay_cache_entry(resp, seq);
3155 cstate->status = nfserr_replay_cache;
3156 goto out;
3157 }
3158 if (status)
3159 goto out_put_session;
3160
3161 status = nfsd4_sequence_check_conn(conn, session);
3162 conn = NULL;
3163 if (status)
3164 goto out_put_session;
3165
3166 buflen = (seq->cachethis) ?
3167 session->se_fchannel.maxresp_cached :
3168 session->se_fchannel.maxresp_sz;
3169 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3170 nfserr_rep_too_big;
3171 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3172 goto out_put_session;
3173 svc_reserve(rqstp, buflen);
3174
3175 status = nfs_ok;
3176 /* Success! bump slot seqid */
3177 slot->sl_seqid = seq->seqid;
3178 slot->sl_flags |= NFSD4_SLOT_INUSE;
3179 if (seq->cachethis)
3180 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3181 else
3182 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3183
3184 cstate->slot = slot;
3185 cstate->session = session;
3186 cstate->clp = clp;
3187
3188out:
3189 switch (clp->cl_cb_state) {
3190 case NFSD4_CB_DOWN:
3191 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3192 break;
3193 case NFSD4_CB_FAULT:
3194 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3195 break;
3196 default:
3197 seq->status_flags = 0;
3198 }
3199 if (!list_empty(&clp->cl_revoked))
3200 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3201out_no_session:
3202 if (conn)
3203 free_conn(conn);
3204 spin_unlock(&nn->client_lock);
3205 return status;
3206out_put_session:
3207 nfsd4_put_session_locked(session);
3208 goto out_no_session;
3209}
3210
3211void
3212nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3213{
3214 struct nfsd4_compound_state *cs = &resp->cstate;
3215
3216 if (nfsd4_has_session(cs)) {
3217 if (cs->status != nfserr_replay_cache) {
3218 nfsd4_store_cache_entry(resp);
3219 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3220 }
3221 /* Drop session reference that was taken in nfsd4_sequence() */
3222 nfsd4_put_session(cs->session);
3223 } else if (cs->clp)
3224 put_client_renew(cs->clp);
3225}
3226
3227__be32
3228nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3229 struct nfsd4_compound_state *cstate,
3230 union nfsd4_op_u *u)
3231{
3232 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
3233 struct nfs4_client *conf, *unconf;
3234 struct nfs4_client *clp = NULL;
3235 __be32 status = 0;
3236 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3237
3238 spin_lock(&nn->client_lock);
3239 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3240 conf = find_confirmed_client(&dc->clientid, true, nn);
3241 WARN_ON_ONCE(conf && unconf);
3242
3243 if (conf) {
3244 if (client_has_state(conf)) {
3245 status = nfserr_clientid_busy;
3246 goto out;
3247 }
3248 status = mark_client_expired_locked(conf);
3249 if (status)
3250 goto out;
3251 clp = conf;
3252 } else if (unconf)
3253 clp = unconf;
3254 else {
3255 status = nfserr_stale_clientid;
3256 goto out;
3257 }
3258 if (!nfsd4_mach_creds_match(clp, rqstp)) {
3259 clp = NULL;
3260 status = nfserr_wrong_cred;
3261 goto out;
3262 }
3263 unhash_client_locked(clp);
3264out:
3265 spin_unlock(&nn->client_lock);
3266 if (clp)
3267 expire_client(clp);
3268 return status;
3269}
3270
3271__be32
3272nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3273 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3274{
3275 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
3276 __be32 status = 0;
3277
3278 if (rc->rca_one_fs) {
3279 if (!cstate->current_fh.fh_dentry)
3280 return nfserr_nofilehandle;
3281 /*
3282 * We don't take advantage of the rca_one_fs case.
3283 * That's OK, it's optional, we can safely ignore it.
3284 */
3285 return nfs_ok;
3286 }
3287
3288 status = nfserr_complete_already;
3289 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
3290 &cstate->session->se_client->cl_flags))
3291 goto out;
3292
3293 status = nfserr_stale_clientid;
3294 if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.  But we only
		 * get here if the client has just explicitly destroyed
		 * itself.  Surely it no longer cares what error it gets
		 * back on an operation for the dead client.
		 */
3302 goto out;
3303
3304 status = nfs_ok;
3305 nfsd4_client_record_create(cstate->session->se_client);
3306out:
3307 return status;
3308}
3309
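/*
 * SETCLIENTID is the NFSv4.0 counterpart of EXCHANGE_ID; the "case"
 * comments below refer to the table in RFC 3530, section 14.2.33.  The
 * new client remains unconfirmed until SETCLIENTID_CONFIRM arrives.
 */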
3310__be32
3311nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3312 union nfsd4_op_u *u)
3313{
3314 struct nfsd4_setclientid *setclid = &u->setclientid;
3315 struct xdr_netobj clname = setclid->se_name;
3316 nfs4_verifier clverifier = setclid->se_verf;
3317 struct nfs4_client *conf, *new;
3318 struct nfs4_client *unconf = NULL;
3319 __be32 status;
3320 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3321
3322 new = create_client(clname, rqstp, &clverifier);
3323 if (new == NULL)
3324 return nfserr_jukebox;
3325 /* Cases below refer to rfc 3530 section 14.2.33: */
3326 spin_lock(&nn->client_lock);
3327 conf = find_confirmed_client_by_name(&clname, nn);
3328 if (conf && client_has_state(conf)) {
3329 /* case 0: */
3330 status = nfserr_clid_inuse;
3331 if (clp_used_exchangeid(conf))
3332 goto out;
3333 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
3334 char addr_str[INET6_ADDRSTRLEN];
3335 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
3336 sizeof(addr_str));
3337 dprintk("NFSD: setclientid: string in use by client "
3338 "at %s\n", addr_str);
3339 goto out;
3340 }
3341 }
3342 unconf = find_unconfirmed_client_by_name(&clname, nn);
3343 if (unconf)
3344 unhash_client_locked(unconf);
3345 if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
3346 /* case 1: probable callback update */
3347 copy_clid(new, conf);
3348 gen_confirm(new, nn);
3349 } else /* case 4 (new client) or cases 2, 3 (client reboot): */
3350 gen_clid(new, nn);
3351 new->cl_minorversion = 0;
3352 gen_callback(new, setclid, rqstp);
3353 add_to_unconfirmed(new);
3354 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
3355 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
3356 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
3357 new = NULL;
3358 status = nfs_ok;
3359out:
3360 spin_unlock(&nn->client_lock);
3361 if (new)
3362 free_client(new);
3363 if (unconf)
3364 expire_client(unconf);
3365 return status;
3366}
3367
3368
3369__be32
3370nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
3371 struct nfsd4_compound_state *cstate,
3372 union nfsd4_op_u *u)
3373{
3374 struct nfsd4_setclientid_confirm *setclientid_confirm =
3375 &u->setclientid_confirm;
3376 struct nfs4_client *conf, *unconf;
3377 struct nfs4_client *old = NULL;
3378 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
3379 clientid_t * clid = &setclientid_confirm->sc_clientid;
3380 __be32 status;
3381 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3382
3383 if (STALE_CLIENTID(clid, nn))
3384 return nfserr_stale_clientid;
3385
3386 spin_lock(&nn->client_lock);
3387 conf = find_confirmed_client(clid, false, nn);
3388 unconf = find_unconfirmed_client(clid, false, nn);
3389 /*
	 * We try hard to give out unique clientids, so if we get an
3391 * attempt to confirm the same clientid with a different cred,
3392 * the client may be buggy; this should never happen.
3393 *
3394 * Nevertheless, RFC 7530 recommends INUSE for this case:
3395 */
3396 status = nfserr_clid_inuse;
3397 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3398 goto out;
3399 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3400 goto out;
3401 /* cases below refer to rfc 3530 section 14.2.34: */
3402 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
3403 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
3404 /* case 2: probable retransmit */
3405 status = nfs_ok;
3406 } else /* case 4: client hasn't noticed we rebooted yet? */
3407 status = nfserr_stale_clientid;
3408 goto out;
3409 }
3410 status = nfs_ok;
3411 if (conf) { /* case 1: callback update */
3412 old = unconf;
3413 unhash_client_locked(old);
3414 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
3415 } else { /* case 3: normal case; new or rebooted client */
3416 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3417 if (old) {
3418 status = nfserr_clid_inuse;
3419 if (client_has_state(old)
3420 && !same_creds(&unconf->cl_cred,
3421 &old->cl_cred))
3422 goto out;
3423 status = mark_client_expired_locked(old);
3424 if (status) {
3425 old = NULL;
3426 goto out;
3427 }
3428 }
3429 move_to_confirmed(unconf);
3430 conf = unconf;
3431 }
3432 get_client_locked(conf);
3433 spin_unlock(&nn->client_lock);
3434 nfsd4_probe_callback(conf);
3435 spin_lock(&nn->client_lock);
3436 put_client_renew_locked(conf);
3437out:
3438 spin_unlock(&nn->client_lock);
3439 if (old)
3440 expire_client(old);
3441 return status;
3442}
3443
3444static struct nfs4_file *nfsd4_alloc_file(void)
3445{
3446 return kmem_cache_alloc(file_slab, GFP_KERNEL);
3447}
3448
3449/* OPEN Share state helper functions */
3450static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3451 struct nfs4_file *fp)
3452{
3453 lockdep_assert_held(&state_lock);
3454
3455 atomic_set(&fp->fi_ref, 1);
3456 spin_lock_init(&fp->fi_lock);
3457 INIT_LIST_HEAD(&fp->fi_stateids);
3458 INIT_LIST_HEAD(&fp->fi_delegations);
3459 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
3460 fh_copy_shallow(&fp->fi_fhandle, fh);
3461 fp->fi_deleg_file = NULL;
3462 fp->fi_had_conflict = false;
3463 fp->fi_share_deny = 0;
3464 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
3465 memset(fp->fi_access, 0, sizeof(fp->fi_access));
3466#ifdef CONFIG_NFSD_PNFS
3467 INIT_LIST_HEAD(&fp->fi_lo_states);
3468 atomic_set(&fp->fi_lo_recalls, 0);
3469#endif
3470 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
3471}
3472
3473void
3474nfsd4_free_slabs(void)
3475{
3476 kmem_cache_destroy(odstate_slab);
3477 kmem_cache_destroy(openowner_slab);
3478 kmem_cache_destroy(lockowner_slab);
3479 kmem_cache_destroy(file_slab);
3480 kmem_cache_destroy(stateid_slab);
3481 kmem_cache_destroy(deleg_slab);
3482}
3483
3484int
3485nfsd4_init_slabs(void)
3486{
3487 openowner_slab = kmem_cache_create("nfsd4_openowners",
3488 sizeof(struct nfs4_openowner), 0, 0, NULL);
3489 if (openowner_slab == NULL)
3490 goto out;
3491 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
3492 sizeof(struct nfs4_lockowner), 0, 0, NULL);
3493 if (lockowner_slab == NULL)
3494 goto out_free_openowner_slab;
3495 file_slab = kmem_cache_create("nfsd4_files",
3496 sizeof(struct nfs4_file), 0, 0, NULL);
3497 if (file_slab == NULL)
3498 goto out_free_lockowner_slab;
3499 stateid_slab = kmem_cache_create("nfsd4_stateids",
3500 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
3501 if (stateid_slab == NULL)
3502 goto out_free_file_slab;
3503 deleg_slab = kmem_cache_create("nfsd4_delegations",
3504 sizeof(struct nfs4_delegation), 0, 0, NULL);
3505 if (deleg_slab == NULL)
3506 goto out_free_stateid_slab;
3507 odstate_slab = kmem_cache_create("nfsd4_odstate",
3508 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
3509 if (odstate_slab == NULL)
3510 goto out_free_deleg_slab;
3511 return 0;
3512
3513out_free_deleg_slab:
3514 kmem_cache_destroy(deleg_slab);
3515out_free_stateid_slab:
3516 kmem_cache_destroy(stateid_slab);
3517out_free_file_slab:
3518 kmem_cache_destroy(file_slab);
3519out_free_lockowner_slab:
3520 kmem_cache_destroy(lockowner_slab);
3521out_free_openowner_slab:
3522 kmem_cache_destroy(openowner_slab);
3523out:
3524 dprintk("nfsd4: out of memory while initializing nfsv4\n");
3525 return -ENOMEM;
3526}
3527
3528static void init_nfs4_replay(struct nfs4_replay *rp)
3529{
3530 rp->rp_status = nfserr_serverfault;
3531 rp->rp_buflen = 0;
3532 rp->rp_buf = rp->rp_ibuf;
3533 mutex_init(&rp->rp_mutex);
3534}
3535
3536static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3537 struct nfs4_stateowner *so)
3538{
3539 if (!nfsd4_has_session(cstate)) {
3540 mutex_lock(&so->so_replay.rp_mutex);
3541 cstate->replay_owner = nfs4_get_stateowner(so);
3542 }
3543}
3544
3545void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3546{
3547 struct nfs4_stateowner *so = cstate->replay_owner;
3548
3549 if (so != NULL) {
3550 cstate->replay_owner = NULL;
3551 mutex_unlock(&so->so_replay.rp_mutex);
3552 nfs4_put_stateowner(so);
3553 }
3554}
3555
3556static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
3557{
3558 struct nfs4_stateowner *sop;
3559
3560 sop = kmem_cache_alloc(slab, GFP_KERNEL);
3561 if (!sop)
3562 return NULL;
3563
3564 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
3565 if (!sop->so_owner.data) {
3566 kmem_cache_free(slab, sop);
3567 return NULL;
3568 }
3569 sop->so_owner.len = owner->len;
3570
3571 INIT_LIST_HEAD(&sop->so_stateids);
3572 sop->so_client = clp;
3573 init_nfs4_replay(&sop->so_replay);
3574 atomic_set(&sop->so_count, 1);
3575 return sop;
3576}
3577
3578static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
3579{
3580 lockdep_assert_held(&clp->cl_lock);
3581
3582 list_add(&oo->oo_owner.so_strhash,
3583 &clp->cl_ownerstr_hashtbl[strhashval]);
3584 list_add(&oo->oo_perclient, &clp->cl_openowners);
3585}
3586
3587static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
3588{
3589 unhash_openowner_locked(openowner(so));
3590}
3591
3592static void nfs4_free_openowner(struct nfs4_stateowner *so)
3593{
3594 struct nfs4_openowner *oo = openowner(so);
3595
3596 kmem_cache_free(openowner_slab, oo);
3597}
3598
3599static const struct nfs4_stateowner_operations openowner_ops = {
3600 .so_unhash = nfs4_unhash_openowner,
3601 .so_free = nfs4_free_openowner,
3602};
3603
3604static struct nfs4_ol_stateid *
3605nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3606{
3607 struct nfs4_ol_stateid *local, *ret = NULL;
3608 struct nfs4_openowner *oo = open->op_openowner;
3609
3610 lockdep_assert_held(&fp->fi_lock);
3611
3612 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3613 /* ignore lock owners */
3614 if (local->st_stateowner->so_is_open_owner == 0)
3615 continue;
3616 if (local->st_stateowner != &oo->oo_owner)
3617 continue;
3618 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
3619 ret = local;
3620 atomic_inc(&ret->st_stid.sc_count);
3621 break;
3622 }
3623 }
3624 return ret;
3625}
3626
3627static __be32
3628nfsd4_verify_open_stid(struct nfs4_stid *s)
3629{
3630 __be32 ret = nfs_ok;
3631
3632 switch (s->sc_type) {
3633 default:
3634 break;
3635 case NFS4_CLOSED_STID:
3636 case NFS4_CLOSED_DELEG_STID:
3637 ret = nfserr_bad_stateid;
3638 break;
3639 case NFS4_REVOKED_DELEG_STID:
3640 ret = nfserr_deleg_revoked;
3641 }
3642 return ret;
3643}
3644
3645/* Lock the stateid st_mutex, and deal with races with CLOSE */
3646static __be32
3647nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
3648{
3649 __be32 ret;
3650
3651 mutex_lock(&stp->st_mutex);
3652 ret = nfsd4_verify_open_stid(&stp->st_stid);
3653 if (ret != nfs_ok)
3654 mutex_unlock(&stp->st_mutex);
3655 return ret;
3656}
3657
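/*
 * Keep retrying the lookup until we get an open stateid whose st_mutex
 * we can take while the stateid is still valid; one that loses a race
 * with CLOSE is dropped and the search is repeated.
 */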
3658static struct nfs4_ol_stateid *
3659nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3660{
	struct nfs4_ol_stateid *stp;

	for (;;) {
3663 spin_lock(&fp->fi_lock);
3664 stp = nfsd4_find_existing_open(fp, open);
3665 spin_unlock(&fp->fi_lock);
3666 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
3667 break;
3668 nfs4_put_stid(&stp->st_stid);
3669 }
3670 return stp;
3671}
3672
3673static struct nfs4_openowner *
3674alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3675 struct nfsd4_compound_state *cstate)
3676{
3677 struct nfs4_client *clp = cstate->clp;
3678 struct nfs4_openowner *oo, *ret;
3679
3680 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
3681 if (!oo)
3682 return NULL;
3683 oo->oo_owner.so_ops = &openowner_ops;
3684 oo->oo_owner.so_is_open_owner = 1;
3685 oo->oo_owner.so_seqid = open->op_seqid;
3686 oo->oo_flags = 0;
3687 if (nfsd4_has_session(cstate))
3688 oo->oo_flags |= NFS4_OO_CONFIRMED;
3689 oo->oo_time = 0;
3690 oo->oo_last_closed_stid = NULL;
3691 INIT_LIST_HEAD(&oo->oo_close_lru);
3692 spin_lock(&clp->cl_lock);
3693 ret = find_openstateowner_str_locked(strhashval, open, clp);
3694 if (ret == NULL) {
3695 hash_openowner(oo, clp, strhashval);
3696 ret = oo;
3697 } else
3698 nfs4_free_stateowner(&oo->oo_owner);
3699
3700 spin_unlock(&clp->cl_lock);
3701 return ret;
3702}
3703
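/*
 * Hash a new open stateid for this file and openowner, unless a
 * concurrent OPEN has already installed one; in that case the existing
 * stateid is locked and returned instead, and open->op_stp is left for
 * the caller to free.
 */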
3704static struct nfs4_ol_stateid *
3705init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{
3708 struct nfs4_openowner *oo = open->op_openowner;
3709 struct nfs4_ol_stateid *retstp = NULL;
3710 struct nfs4_ol_stateid *stp;
3711
3712 stp = open->op_stp;
	/*
	 * Initialize and take st_mutex before the spinlocks: a mutex may
	 * sleep, so it must not be acquired while holding cl_lock or
	 * fi_lock.
	 */
3714 mutex_init(&stp->st_mutex);
3715 mutex_lock(&stp->st_mutex);
3716
3717retry:
3718 spin_lock(&oo->oo_owner.so_client->cl_lock);
3719 spin_lock(&fp->fi_lock);
3720
3721 retstp = nfsd4_find_existing_open(fp, open);
3722 if (retstp)
3723 goto out_unlock;
3724
3725 open->op_stp = NULL;
3726 atomic_inc(&stp->st_stid.sc_count);
3727 stp->st_stid.sc_type = NFS4_OPEN_STID;
3728 INIT_LIST_HEAD(&stp->st_locks);
3729 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
3730 get_nfs4_file(fp);
3731 stp->st_stid.sc_file = fp;
3732 stp->st_access_bmap = 0;
3733 stp->st_deny_bmap = 0;
3734 stp->st_openstp = NULL;
3735 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3736 list_add(&stp->st_perfile, &fp->fi_stateids);
3737
3738out_unlock:
3739 spin_unlock(&fp->fi_lock);
3740 spin_unlock(&oo->oo_owner.so_client->cl_lock);
3741 if (retstp) {
3742 /* Handle races with CLOSE */
3743 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
3744 nfs4_put_stid(&retstp->st_stid);
3745 goto retry;
3746 }
3747 /* To keep mutex tracking happy */
3748 mutex_unlock(&stp->st_mutex);
3749 stp = retstp;
3750 }
3751 return stp;
3752}
3753
3754/*
3755 * In the 4.0 case we need to keep the owners around a little while to handle
 * CLOSE replay.  We still need to release any file access held by them
 * before returning, however.
3758 */
3759static void
3760move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
3761{
3762 struct nfs4_ol_stateid *last;
3763 struct nfs4_openowner *oo = openowner(s->st_stateowner);
3764 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
3765 nfsd_net_id);
3766
3767 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
3768
3769 /*
3770 * We know that we hold one reference via nfsd4_close, and another
3771 * "persistent" reference for the client. If the refcount is higher
3772 * than 2, then there are still calls in progress that are using this
3773 * stateid. We can't put the sc_file reference until they are finished.
3774 * Wait for the refcount to drop to 2. Since it has been unhashed,
3775 * there should be no danger of the refcount going back up again at
3776 * this point.
3777 */
3778 wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);
3779
3780 release_all_access(s);
3781 if (s->st_stid.sc_file) {
3782 put_nfs4_file(s->st_stid.sc_file);
3783 s->st_stid.sc_file = NULL;
3784 }
3785
3786 spin_lock(&nn->client_lock);
3787 last = oo->oo_last_closed_stid;
3788 oo->oo_last_closed_stid = s;
3789 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
3790 oo->oo_time = get_seconds();
3791 spin_unlock(&nn->client_lock);
3792 if (last)
3793 nfs4_put_stid(&last->st_stid);
3794}
3795
3796/* search file_hashtbl[] for file */
3797static struct nfs4_file *
3798find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
3799{
3800 struct nfs4_file *fp;
3801
3802 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
3803 if (fh_match(&fp->fi_fhandle, fh)) {
3804 if (atomic_inc_not_zero(&fp->fi_ref))
3805 return fp;
3806 }
3807 }
3808 return NULL;
3809}
3810
3811struct nfs4_file *
3812find_file(struct knfsd_fh *fh)
3813{
3814 struct nfs4_file *fp;
3815 unsigned int hashval = file_hashval(fh);
3816
3817 rcu_read_lock();
3818 fp = find_file_locked(fh, hashval);
3819 rcu_read_unlock();
3820 return fp;
3821}
3822
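/*
 * Double-checked lookup: try the hash under RCU first, and only take
 * state_lock to re-check and insert the caller's preallocated file when
 * the lockless lookup finds nothing.
 */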
3823static struct nfs4_file *
3824find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3825{
3826 struct nfs4_file *fp;
3827 unsigned int hashval = file_hashval(fh);
3828
3829 rcu_read_lock();
3830 fp = find_file_locked(fh, hashval);
3831 rcu_read_unlock();
3832 if (fp)
3833 return fp;
3834
3835 spin_lock(&state_lock);
3836 fp = find_file_locked(fh, hashval);
3837 if (likely(fp == NULL)) {
3838 nfsd4_init_file(fh, hashval, new);
3839 fp = new;
3840 }
3841 spin_unlock(&state_lock);
3842
3843 return fp;
3844}
3845
3846/*
3847 * Called to check deny when READ with all zero stateid or
3848 * WRITE with all zero or all one stateid
3849 */
3850static __be32
3851nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3852{
3853 struct nfs4_file *fp;
3854 __be32 ret = nfs_ok;
3855
3856 fp = find_file(&current_fh->fh_handle);
3857 if (!fp)
3858 return ret;
3859 /* Check for conflicting share reservations */
3860 spin_lock(&fp->fi_lock);
3861 if (fp->fi_share_deny & deny_type)
3862 ret = nfserr_locked;
3863 spin_unlock(&fp->fi_lock);
3864 put_nfs4_file(fp);
3865 return ret;
3866}
3867
3868static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
3869{
3870 struct nfs4_delegation *dp = cb_to_delegation(cb);
3871 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
3872 nfsd_net_id);
3873
3874 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
3875
3876 /*
3877 * We can't do this in nfsd_break_deleg_cb because it is
3878 * already holding inode->i_lock.
3879 *
3880 * If the dl_time != 0, then we know that it has already been
3881 * queued for a lease break. Don't queue it again.
3882 */
3883 spin_lock(&state_lock);
3884 if (dp->dl_time == 0) {
3885 dp->dl_time = get_seconds();
3886 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
3887 }
3888 spin_unlock(&state_lock);
3889}
3890
3891static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
3892 struct rpc_task *task)
3893{
3894 struct nfs4_delegation *dp = cb_to_delegation(cb);
3895
3896 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
3897 return 1;
3898
3899 switch (task->tk_status) {
3900 case 0:
3901 return 1;
3902 case -EBADHANDLE:
3903 case -NFS4ERR_BAD_STATEID:
3904 /*
3905 * Race: client probably got cb_recall before open reply
3906 * granting delegation.
3907 */
3908 if (dp->dl_retries--) {
3909 rpc_delay(task, 2 * HZ);
3910 return 0;
3911 }
3912 /*FALLTHRU*/
3913 default:
3914 return -1;
3915 }
3916}
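
/*
 * Illustrative sketch (hypothetical helper): the bounded-retry shape used
 * in nfsd4_cb_recall_done() above -- a transient error is retried a fixed
 * number of times with an rpc_delay() backoff before being treated as
 * fatal.
 */
static inline int example_bounded_retry(struct rpc_task *task, int *retries)
{
	if ((*retries)--) {
		rpc_delay(task, 2 * HZ);	/* back off for ~2 seconds */
		return 0;			/* 0: re-run the RPC */
	}
	return -1;				/* retries exhausted: give up */
}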
3917
3918static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
3919{
3920 struct nfs4_delegation *dp = cb_to_delegation(cb);
3921
3922 nfs4_put_stid(&dp->dl_stid);
3923}
3924
3925static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
3926 .prepare = nfsd4_cb_recall_prepare,
3927 .done = nfsd4_cb_recall_done,
3928 .release = nfsd4_cb_recall_release,
3929};
3930
3931static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
3932{
3933 /*
3934 * We're assuming the state code never drops its reference
3935 * without first removing the lease. Since we're in this lease
3936 * callback (and since the lease code is serialized by the kernel
3937 * lock) we know the server hasn't removed the lease yet, so it's
3938 * safe to take a reference.
3939 */
3940 atomic_inc(&dp->dl_stid.sc_count);
3941 nfsd4_run_cb(&dp->dl_recall);
3942}
3943
3944/* Called from break_lease() with i_lock held. */
3945static bool
3946nfsd_break_deleg_cb(struct file_lock *fl)
3947{
3948 bool ret = false;
3949 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
3950 struct nfs4_delegation *dp;
3951
3952 if (!fp) {
3953 WARN(1, "(%p)->fl_owner NULL\n", fl);
3954 return ret;
3955 }
3956 if (fp->fi_had_conflict) {
3957 WARN(1, "duplicate break on %p\n", fp);
3958 return ret;
3959 }
3960 /*
3961 * We don't want the locks code to time out the lease for us;
3962 * we'll remove it ourselves if a delegation isn't returned
3963 * in time:
3964 */
3965 fl->fl_break_time = 0;
3966
3967 spin_lock(&fp->fi_lock);
3968 fp->fi_had_conflict = true;
3969 /*
3970 * If there are no delegations on the list, then return true
3971 * so that the lease code will go ahead and delete it.
3972 */
3973 if (list_empty(&fp->fi_delegations))
3974 ret = true;
3975 else
3976 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
3977 nfsd_break_one_deleg(dp);
3978 spin_unlock(&fp->fi_lock);
3979 return ret;
3980}
3981
3982static int
3983nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
3984 struct list_head *dispose)
3985{
3986 if (arg & F_UNLCK)
3987 return lease_modify(onlist, arg, dispose);
3988 else
3989 return -EAGAIN;
3990}
3991
3992static const struct lock_manager_operations nfsd_lease_mng_ops = {
3993 .lm_break = nfsd_break_deleg_cb,
3994 .lm_change = nfsd_change_deleg_cb,
3995};
3996
3997static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3998{
3999 if (nfsd4_has_session(cstate))
4000 return nfs_ok;
4001 if (seqid == so->so_seqid - 1)
4002 return nfserr_replay_me;
4003 if (seqid == so->so_seqid)
4004 return nfs_ok;
4005 return nfserr_bad_seqid;
4006}
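
/*
 * Illustrative example (hypothetical values, assuming a v4.0 compound with
 * no session): an owner whose next expected seqid (so_seqid) is 6 treats a
 * repeat of 5 as a replay of the previous request, accepts 6 as the current
 * request, and rejects anything else as out of sequence.
 */
static inline void example_seqid_window(struct nfsd4_compound_state *cstate)
{
	struct nfs4_stateowner so = { .so_seqid = 6 };

	WARN_ON(nfsd4_check_seqid(cstate, &so, 5) != nfserr_replay_me);
	WARN_ON(nfsd4_check_seqid(cstate, &so, 6) != nfs_ok);
	WARN_ON(nfsd4_check_seqid(cstate, &so, 7) != nfserr_bad_seqid);
}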
4007
4008static __be32 lookup_clientid(clientid_t *clid,
4009 struct nfsd4_compound_state *cstate,
4010 struct nfsd_net *nn)
4011{
4012 struct nfs4_client *found;
4013
4014 if (cstate->clp) {
4015 found = cstate->clp;
4016 if (!same_clid(&found->cl_clientid, clid))
4017 return nfserr_stale_clientid;
4018 return nfs_ok;
4019 }
4020
4021 if (STALE_CLIENTID(clid, nn))
4022 return nfserr_stale_clientid;
4023
4024 /*
4025 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
4026 * cached already then we know this is for v4.0 and "sessions"
4027 * will be false.
4028 */
4029 WARN_ON_ONCE(cstate->session);
4030 spin_lock(&nn->client_lock);
4031 found = find_confirmed_client(clid, false, nn);
4032 if (!found) {
4033 spin_unlock(&nn->client_lock);
4034 return nfserr_expired;
4035 }
4036 atomic_inc(&found->cl_refcount);
4037 spin_unlock(&nn->client_lock);
4038
4039 /* Cache the nfs4_client in cstate! */
4040 cstate->clp = found;
4041 return nfs_ok;
4042}
4043
4044__be32
4045nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4046 struct nfsd4_open *open, struct nfsd_net *nn)
4047{
4048 clientid_t *clientid = &open->op_clientid;
4049 struct nfs4_client *clp = NULL;
4050 unsigned int strhashval;
4051 struct nfs4_openowner *oo = NULL;
4052 __be32 status;
4053
4054 if (STALE_CLIENTID(&open->op_clientid, nn))
4055 return nfserr_stale_clientid;
4056 /*
4057 * In case we need it later, after we've already created the
4058 * file and don't want to risk a further failure:
4059 */
4060 open->op_file = nfsd4_alloc_file();
4061 if (open->op_file == NULL)
4062 return nfserr_jukebox;
4063
4064 status = lookup_clientid(clientid, cstate, nn);
4065 if (status)
4066 return status;
4067 clp = cstate->clp;
4068
4069 strhashval = ownerstr_hashval(&open->op_owner);
4070 oo = find_openstateowner_str(strhashval, open, clp);
4071 open->op_openowner = oo;
4072 if (!oo) {
4073 goto new_owner;
4074 }
4075 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4076 /* Replace unconfirmed owners without checking for replay. */
4077 release_openowner(oo);
4078 open->op_openowner = NULL;
4079 goto new_owner;
4080 }
4081 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4082 if (status)
4083 return status;
4084 goto alloc_stateid;
4085new_owner:
4086 oo = alloc_init_open_stateowner(strhashval, open, cstate);
4087 if (oo == NULL)
4088 return nfserr_jukebox;
4089 open->op_openowner = oo;
4090alloc_stateid:
4091 open->op_stp = nfs4_alloc_open_stateid(clp);
4092 if (!open->op_stp)
4093 return nfserr_jukebox;
4094
4095 if (nfsd4_has_session(cstate) &&
4096 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4097 open->op_odstate = alloc_clnt_odstate(clp);
4098 if (!open->op_odstate)
4099 return nfserr_jukebox;
4100 }
4101
4102 return nfs_ok;
4103}
4104
4105static inline __be32
4106nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4107{
4108 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4109 return nfserr_openmode;
4110 else
4111 return nfs_ok;
4112}
4113
4114static int share_access_to_flags(u32 share_access)
4115{
4116 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4117}
4118
4119static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4120{
4121 struct nfs4_stid *ret;
4122
4123 ret = find_stateid_by_type(cl, s,
4124 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4125 if (!ret)
4126 return NULL;
4127 return delegstateid(ret);
4128}
4129
4130static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4131{
4132 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4133 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4134}
4135
4136static __be32
4137nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4138 struct nfs4_delegation **dp)
4139{
4140 int flags;
4141 __be32 status = nfserr_bad_stateid;
4142 struct nfs4_delegation *deleg;
4143
4144 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4145 if (deleg == NULL)
4146 goto out;
4147 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4148 nfs4_put_stid(&deleg->dl_stid);
4149 if (cl->cl_minorversion)
4150 status = nfserr_deleg_revoked;
4151 goto out;
4152 }
4153 flags = share_access_to_flags(open->op_share_access);
4154 status = nfs4_check_delegmode(deleg, flags);
4155 if (status) {
4156 nfs4_put_stid(&deleg->dl_stid);
4157 goto out;
4158 }
4159 *dp = deleg;
4160out:
4161 if (!nfsd4_is_deleg_cur(open))
4162 return nfs_ok;
4163 if (status)
4164 return status;
4165 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4166 return nfs_ok;
4167}
4168
4169static inline int nfs4_access_to_access(u32 nfs4_access)
4170{
4171 int flags = 0;
4172
4173 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4174 flags |= NFSD_MAY_READ;
4175 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4176 flags |= NFSD_MAY_WRITE;
4177 return flags;
4178}
4179
4180static inline __be32
4181nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4182 struct nfsd4_open *open)
4183{
4184 struct iattr iattr = {
4185 .ia_valid = ATTR_SIZE,
4186 .ia_size = 0,
4187 };
4188 if (!open->op_truncate)
4189 return 0;
4190 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4191 return nfserr_inval;
4192 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
4193}
4194
4195static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4196 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4197 struct nfsd4_open *open)
4198{
4199 struct file *filp = NULL;
4200 __be32 status;
4201 int oflag = nfs4_access_to_omode(open->op_share_access);
4202 int access = nfs4_access_to_access(open->op_share_access);
4203 unsigned char old_access_bmap, old_deny_bmap;
4204
4205 spin_lock(&fp->fi_lock);
4206
4207 /*
4208 * Are we trying to set a deny mode that would conflict with
4209 * current access?
4210 */
4211 status = nfs4_file_check_deny(fp, open->op_share_deny);
4212 if (status != nfs_ok) {
4213 spin_unlock(&fp->fi_lock);
4214 goto out;
4215 }
4216
4217 /* set access to the file */
4218 status = nfs4_file_get_access(fp, open->op_share_access);
4219 if (status != nfs_ok) {
4220 spin_unlock(&fp->fi_lock);
4221 goto out;
4222 }
4223
4224 /* Set access bits in stateid */
4225 old_access_bmap = stp->st_access_bmap;
4226 set_access(open->op_share_access, stp);
4227
4228 /* Set new deny mask */
4229 old_deny_bmap = stp->st_deny_bmap;
4230 set_deny(open->op_share_deny, stp);
4231 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4232
4233 if (!fp->fi_fds[oflag]) {
4234 spin_unlock(&fp->fi_lock);
4235 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
4236 if (status)
4237 goto out_put_access;
4238 spin_lock(&fp->fi_lock);
4239 if (!fp->fi_fds[oflag]) {
4240 fp->fi_fds[oflag] = filp;
4241 filp = NULL;
4242 }
4243 }
4244 spin_unlock(&fp->fi_lock);
4245 if (filp)
4246 fput(filp);
4247
4248 status = nfsd4_truncate(rqstp, cur_fh, open);
4249 if (status)
4250 goto out_put_access;
4251out:
4252 return status;
4253out_put_access:
4254 stp->st_access_bmap = old_access_bmap;
4255 nfs4_file_put_access(fp, open->op_share_access);
4256 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4257 goto out;
4258}
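
/*
 * Illustrative sketch (hypothetical helper): the save-then-roll-back
 * pattern nfs4_get_vfs_file() uses for the share bitmaps. The old value is
 * recorded before mutating, and restored on any later failure so a failed
 * OPEN leaves the stateid exactly as it found it.
 */
static inline __be32 example_set_access_with_rollback(
		struct nfs4_ol_stateid *stp, u32 access, bool later_failure)
{
	unsigned char old_access_bmap = stp->st_access_bmap;

	set_access(access, stp);
	if (later_failure) {		/* e.g. nfsd_open() failed */
		stp->st_access_bmap = old_access_bmap;	/* undo */
		return nfserr_jukebox;	/* hypothetical error */
	}
	return nfs_ok;
}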
4259
4260static __be32
4261nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4262{
4263 __be32 status;
4264 unsigned char old_deny_bmap = stp->st_deny_bmap;
4265
4266 if (!test_access(open->op_share_access, stp))
4267 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4268
4269 /* test and set deny mode */
4270 spin_lock(&fp->fi_lock);
4271 status = nfs4_file_check_deny(fp, open->op_share_deny);
4272 if (status == nfs_ok) {
4273 set_deny(open->op_share_deny, stp);
4274 fp->fi_share_deny |=
4275 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4276 }
4277 spin_unlock(&fp->fi_lock);
4278
4279 if (status != nfs_ok)
4280 return status;
4281
4282 status = nfsd4_truncate(rqstp, cur_fh, open);
4283 if (status != nfs_ok)
4284 reset_union_bmap_deny(old_deny_bmap, stp);
4285 return status;
4286}
4287
4288/* Should we give out recallable state?: */
4289static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4290{
4291 if (clp->cl_cb_state == NFSD4_CB_UP)
4292 return true;
4293 /*
4294 * In the sessions case, since we don't have to establish a
4295 * separate connection for callbacks, we assume it's OK
4296 * until we hear otherwise:
4297 */
4298 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4299}
4300
4301static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
4302{
4303 struct file_lock *fl;
4304
4305 fl = locks_alloc_lock();
4306 if (!fl)
4307 return NULL;
4308 fl->fl_lmops = &nfsd_lease_mng_ops;
4309 fl->fl_flags = FL_DELEG;
4310 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
4311 fl->fl_end = OFFSET_MAX;
4312 fl->fl_owner = (fl_owner_t)fp;
4313 fl->fl_pid = current->tgid;
4314 return fl;
4315}
4316
4317/**
4318 * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
4319 * @dp: a pointer to the nfs4_delegation we're adding.
4320 *
4321 * Return:
4322 * On success: Return code will be 0 on success.
4323 *
4324 * On error: -EAGAIN if there was an existing delegation.
4325 * nonzero if there is an error in other cases.
4326 *
4327 */
4328
4329static int nfs4_setlease(struct nfs4_delegation *dp)
4330{
4331 struct nfs4_file *fp = dp->dl_stid.sc_file;
4332 struct file_lock *fl;
4333 struct file *filp;
4334 int status = 0;
4335
4336 fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
4337 if (!fl)
4338 return -ENOMEM;
4339 filp = find_readable_file(fp);
4340 if (!filp) {
4341 /* We should always have a readable file here */
4342 WARN_ON_ONCE(1);
4343 locks_free_lock(fl);
4344 return -EBADF;
4345 }
4346 fl->fl_file = filp;
4347 status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
4348 if (fl)
4349 locks_free_lock(fl);
4350 if (status)
4351 goto out_fput;
4352 spin_lock(&state_lock);
4353 spin_lock(&fp->fi_lock);
4354 /* Did the lease get broken before we took the lock? */
4355 status = -EAGAIN;
4356 if (fp->fi_had_conflict)
4357 goto out_unlock;
4358 /* Race breaker */
4359 if (fp->fi_deleg_file) {
4360 status = hash_delegation_locked(dp, fp);
4361 goto out_unlock;
4362 }
4363 fp->fi_deleg_file = filp;
4364 fp->fi_delegees = 0;
4365 status = hash_delegation_locked(dp, fp);
4366 spin_unlock(&fp->fi_lock);
4367 spin_unlock(&state_lock);
4368 if (status) {
4369 /* Should never happen, this is a new fi_deleg_file */
4370 WARN_ON_ONCE(1);
4371 goto out_fput;
4372 }
4373 return 0;
4374out_unlock:
4375 spin_unlock(&fp->fi_lock);
4376 spin_unlock(&state_lock);
4377out_fput:
4378 fput(filp);
4379 return status;
4380}
4381
4382static struct nfs4_delegation *
4383nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
4384 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
4385{
4386 int status;
4387 struct nfs4_delegation *dp;
4388
4389 if (fp->fi_had_conflict)
4390 return ERR_PTR(-EAGAIN);
4391
4392 spin_lock(&state_lock);
4393 spin_lock(&fp->fi_lock);
4394 status = nfs4_get_existing_delegation(clp, fp);
4395 spin_unlock(&fp->fi_lock);
4396 spin_unlock(&state_lock);
4397
4398 if (status)
4399 return ERR_PTR(status);
4400
4401 dp = alloc_init_deleg(clp, fh, odstate);
4402 if (!dp)
4403 return ERR_PTR(-ENOMEM);
4404
4405 get_nfs4_file(fp);
4406 spin_lock(&state_lock);
4407 spin_lock(&fp->fi_lock);
4408 dp->dl_stid.sc_file = fp;
4409 if (!fp->fi_deleg_file) {
4410 spin_unlock(&fp->fi_lock);
4411 spin_unlock(&state_lock);
4412 status = nfs4_setlease(dp);
4413 goto out;
4414 }
4415 if (fp->fi_had_conflict) {
4416 status = -EAGAIN;
4417 goto out_unlock;
4418 }
4419 status = hash_delegation_locked(dp, fp);
4420out_unlock:
4421 spin_unlock(&fp->fi_lock);
4422 spin_unlock(&state_lock);
4423out:
4424 if (status) {
4425 put_clnt_odstate(dp->dl_clnt_odstate);
4426 nfs4_put_stid(&dp->dl_stid);
4427 return ERR_PTR(status);
4428 }
4429 return dp;
4430}
4431
4432static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
4433{
4434 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4435 if (status == -EAGAIN)
4436 open->op_why_no_deleg = WND4_CONTENTION;
4437 else {
4438 open->op_why_no_deleg = WND4_RESOURCE;
4439 switch (open->op_deleg_want) {
4440 case NFS4_SHARE_WANT_READ_DELEG:
4441 case NFS4_SHARE_WANT_WRITE_DELEG:
4442 case NFS4_SHARE_WANT_ANY_DELEG:
4443 break;
4444 case NFS4_SHARE_WANT_CANCEL:
4445 open->op_why_no_deleg = WND4_CANCELLED;
4446 break;
4447 case NFS4_SHARE_WANT_NO_DELEG:
4448 WARN_ON_ONCE(1);
4449 }
4450 }
4451}
4452
4453/*
4454 * Attempt to hand out a delegation.
4455 *
4456 * Note we don't support write delegations, and won't until the vfs has
4457 * proper support for them.
4458 */
4459static void
4460nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
4461 struct nfs4_ol_stateid *stp)
4462{
4463 struct nfs4_delegation *dp;
4464 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
4465 struct nfs4_client *clp = stp->st_stid.sc_client;
4466 int cb_up;
4467 int status = 0;
4468
4469 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
4470 open->op_recall = 0;
4471 switch (open->op_claim_type) {
4472 case NFS4_OPEN_CLAIM_PREVIOUS:
4473 if (!cb_up)
4474 open->op_recall = 1;
4475 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
4476 goto out_no_deleg;
4477 break;
4478 case NFS4_OPEN_CLAIM_NULL:
4479 case NFS4_OPEN_CLAIM_FH:
4480 /*
4481 * Let's not give out any delegations till everyone's
4482 * had the chance to reclaim theirs, *and* until
4483 * NLM locks have all been reclaimed:
4484 */
4485 if (locks_in_grace(clp->net))
4486 goto out_no_deleg;
4487 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
4488 goto out_no_deleg;
4489 /*
4490 * Also, if the file was opened for write or
4491 * create, there's a good chance the client's
4492 * about to write to it, resulting in an
4493 * immediate recall (since we don't support
4494 * write delegations):
4495 */
4496 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
4497 goto out_no_deleg;
4498 if (open->op_create == NFS4_OPEN_CREATE)
4499 goto out_no_deleg;
4500 break;
4501 default:
4502 goto out_no_deleg;
4503 }
4504 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
4505 if (IS_ERR(dp))
4506 goto out_no_deleg;
4507
4508 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
4509
4510 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
4511 STATEID_VAL(&dp->dl_stid.sc_stateid));
4512 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
4513 nfs4_put_stid(&dp->dl_stid);
4514 return;
4515out_no_deleg:
4516 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
4517 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
4518 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
4519 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
4520 open->op_recall = 1;
4521 }
4522
4523 /* 4.1 client asking for a delegation? */
4524 if (open->op_deleg_want)
4525 nfsd4_open_deleg_none_ext(open, status);
4526 return;
4527}
4528
4529static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
4530 struct nfs4_delegation *dp)
4531{
4532 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
4533 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4534 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4535 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
4536 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
4537 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4538 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4539 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
4540 }
4541 /* Otherwise the client must be confused, wanting a delegation
4542 * it already has; in that case we don't return
4543 * NFS4_OPEN_DELEGATE_NONE_EXT or a reason.
4544 */
4545}
4546
4547__be32
4548nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
4549{
4550 struct nfsd4_compoundres *resp = rqstp->rq_resp;
4551 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
4552 struct nfs4_file *fp = NULL;
4553 struct nfs4_ol_stateid *stp = NULL;
4554 struct nfs4_delegation *dp = NULL;
4555 __be32 status;
4556 bool new_stp = false;
4557
4558 /*
4559 * Lookup file; if found, lookup stateid and check open request,
4560 * and check for delegations in the process of being recalled.
4561 * If not found, create the nfs4_file struct
4562 */
4563 fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
4564 if (fp != open->op_file) {
4565 status = nfs4_check_deleg(cl, open, &dp);
4566 if (status)
4567 goto out;
4568 stp = nfsd4_find_and_lock_existing_open(fp, open);
4569 } else {
4570 open->op_file = NULL;
4571 status = nfserr_bad_stateid;
4572 if (nfsd4_is_deleg_cur(open))
4573 goto out;
4574 }
4575
4576 if (!stp) {
4577 stp = init_open_stateid(fp, open);
4578 if (!open->op_stp)
4579 new_stp = true;
4580 }
4581
4582 /*
4583 * OPEN the file, or upgrade an existing OPEN.
4584 * If truncate fails, the OPEN fails.
4585 *
4586 * stp is already locked.
4587 */
4588 if (!new_stp) {
4589 /* Stateid was found, this is an OPEN upgrade */
4590 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
4591 if (status) {
4592 mutex_unlock(&stp->st_mutex);
4593 goto out;
4594 }
4595 } else {
4596 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4597 if (status) {
4598 stp->st_stid.sc_type = NFS4_CLOSED_STID;
4599 release_open_stateid(stp);
4600 mutex_unlock(&stp->st_mutex);
4601 goto out;
4602 }
4603
4604 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
4605 open->op_odstate);
4606 if (stp->st_clnt_odstate == open->op_odstate)
4607 open->op_odstate = NULL;
4608 }
4609
4610 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
4611 mutex_unlock(&stp->st_mutex);
4612
4613 if (nfsd4_has_session(&resp->cstate)) {
4614 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4615 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4616 open->op_why_no_deleg = WND4_NOT_WANTED;
4617 goto nodeleg;
4618 }
4619 }
4620
4621 /*
4622 * Attempt to hand out a delegation. No error return, because the
4623 * OPEN succeeds even if we fail.
4624 */
4625 nfs4_open_delegation(current_fh, open, stp);
4626nodeleg:
4627 status = nfs_ok;
4628
4629 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
4630 STATEID_VAL(&stp->st_stid.sc_stateid));
4631out:
4632 /* 4.1 client trying to upgrade/downgrade delegation? */
4633 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
4634 open->op_deleg_want)
4635 nfsd4_deleg_xgrade_none_ext(open, dp);
4636
4637 if (fp)
4638 put_nfs4_file(fp);
4639 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
4640 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4641 /*
4642 * To finish the open response, we just need to set the rflags.
4643 */
4644 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
4645 if (nfsd4_has_session(&resp->cstate))
4646 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
4647 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
4648 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
4649
4650 if (dp)
4651 nfs4_put_stid(&dp->dl_stid);
4652 if (stp)
4653 nfs4_put_stid(&stp->st_stid);
4654
4655 return status;
4656}
4657
4658void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4659 struct nfsd4_open *open)
4660{
4661 if (open->op_openowner) {
4662 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
4663
4664 nfsd4_cstate_assign_replay(cstate, so);
4665 nfs4_put_stateowner(so);
4666 }
4667 if (open->op_file)
4668 kmem_cache_free(file_slab, open->op_file);
4669 if (open->op_stp)
4670 nfs4_put_stid(&open->op_stp->st_stid);
4671 if (open->op_odstate)
4672 kmem_cache_free(odstate_slab, open->op_odstate);
4673}
4674
4675__be32
4676nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4677 union nfsd4_op_u *u)
4678{
4679 clientid_t *clid = &u->renew;
4680 struct nfs4_client *clp;
4681 __be32 status;
4682 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4683
4684 dprintk("process_renew(%08x/%08x): starting\n",
4685 clid->cl_boot, clid->cl_id);
4686 status = lookup_clientid(clid, cstate, nn);
4687 if (status)
4688 goto out;
4689 clp = cstate->clp;
4690 status = nfserr_cb_path_down;
4691 if (!list_empty(&clp->cl_delegations)
4692 && clp->cl_cb_state != NFSD4_CB_UP)
4693 goto out;
4694 status = nfs_ok;
4695out:
4696 return status;
4697}
4698
4699void
4700nfsd4_end_grace(struct nfsd_net *nn)
4701{
4702 /* do nothing if grace period already ended */
4703 if (nn->grace_ended)
4704 return;
4705
4706 dprintk("NFSD: end of grace period\n");
4707 nn->grace_ended = true;
4708 /*
4709 * If the server goes down again right now, an NFSv4
4710 * client will still be allowed to reclaim after it comes back up,
4711 * even if it hasn't yet had a chance to reclaim state this time.
4712 */
4714 nfsd4_record_grace_done(nn);
4715 /*
4716 * At this point, NFSv4 clients can still reclaim. But if the
4717 * server crashes, any that have not yet reclaimed will be out
4718 * of luck on the next boot.
4719 *
4720 * (NFSv4.1+ clients are considered to have reclaimed once they
4721 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
4722 * have reclaimed after their first OPEN.)
4723 */
4724 locks_end_grace(&nn->nfsd4_manager);
4725 /*
4726 * At this point, and once lockd and/or any other containers
4727 * exit their grace period, further reclaims will fail and
4728 * regular locking can resume.
4729 */
4730}
4731
4732static time_t
4733nfs4_laundromat(struct nfsd_net *nn)
4734{
4735 struct nfs4_client *clp;
4736 struct nfs4_openowner *oo;
4737 struct nfs4_delegation *dp;
4738 struct nfs4_ol_stateid *stp;
4739 struct nfsd4_blocked_lock *nbl;
4740 struct list_head *pos, *next, reaplist;
4741 time_t cutoff = get_seconds() - nn->nfsd4_lease;
4742 time_t t, new_timeo = nn->nfsd4_lease;
4743
4744 dprintk("NFSD: laundromat service - starting\n");
4745 nfsd4_end_grace(nn);
4746 INIT_LIST_HEAD(&reaplist);
4747 spin_lock(&nn->client_lock);
4748 list_for_each_safe(pos, next, &nn->client_lru) {
4749 clp = list_entry(pos, struct nfs4_client, cl_lru);
4750 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
4751 t = clp->cl_time - cutoff;
4752 new_timeo = min(new_timeo, t);
4753 break;
4754 }
4755 if (mark_client_expired_locked(clp)) {
4756 dprintk("NFSD: client in use (clientid %08x)\n",
4757 clp->cl_clientid.cl_id);
4758 continue;
4759 }
4760 list_add(&clp->cl_lru, &reaplist);
4761 }
4762 spin_unlock(&nn->client_lock);
4763 list_for_each_safe(pos, next, &reaplist) {
4764 clp = list_entry(pos, struct nfs4_client, cl_lru);
4765 dprintk("NFSD: purging unused client (clientid %08x)\n",
4766 clp->cl_clientid.cl_id);
4767 list_del_init(&clp->cl_lru);
4768 expire_client(clp);
4769 }
4770 spin_lock(&state_lock);
4771 list_for_each_safe(pos, next, &nn->del_recall_lru) {
4772 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4773 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
4774 t = dp->dl_time - cutoff;
4775 new_timeo = min(new_timeo, t);
4776 break;
4777 }
4778 WARN_ON(!unhash_delegation_locked(dp));
4779 list_add(&dp->dl_recall_lru, &reaplist);
4780 }
4781 spin_unlock(&state_lock);
4782 while (!list_empty(&reaplist)) {
4783 dp = list_first_entry(&reaplist, struct nfs4_delegation,
4784 dl_recall_lru);
4785 list_del_init(&dp->dl_recall_lru);
4786 revoke_delegation(dp);
4787 }
4788
4789 spin_lock(&nn->client_lock);
4790 while (!list_empty(&nn->close_lru)) {
4791 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
4792 oo_close_lru);
4793 if (time_after((unsigned long)oo->oo_time,
4794 (unsigned long)cutoff)) {
4795 t = oo->oo_time - cutoff;
4796 new_timeo = min(new_timeo, t);
4797 break;
4798 }
4799 list_del_init(&oo->oo_close_lru);
4800 stp = oo->oo_last_closed_stid;
4801 oo->oo_last_closed_stid = NULL;
4802 spin_unlock(&nn->client_lock);
4803 nfs4_put_stid(&stp->st_stid);
4804 spin_lock(&nn->client_lock);
4805 }
4806 spin_unlock(&nn->client_lock);
4807
4808 /*
4809 * It's possible for a client to try to acquire a lock that is held
4810 * for a long time, and then lose interest in it.
4811 * So, we clean out any un-revisited request after a lease period
4812 * under the assumption that the client is no longer interested.
4813 *
4814 * RFC5661, sec. 9.6 states that the client must not rely on getting
4815 * notifications and must continue to poll for locks, even when the
4816 * server supports them. Thus this shouldn't lead to clients blocking
4817 * indefinitely once the lock does become free.
4818 */
4819 BUG_ON(!list_empty(&reaplist));
4820 spin_lock(&nn->blocked_locks_lock);
4821 while (!list_empty(&nn->blocked_locks_lru)) {
4822 nbl = list_first_entry(&nn->blocked_locks_lru,
4823 struct nfsd4_blocked_lock, nbl_lru);
4824 if (time_after((unsigned long)nbl->nbl_time,
4825 (unsigned long)cutoff)) {
4826 t = nbl->nbl_time - cutoff;
4827 new_timeo = min(new_timeo, t);
4828 break;
4829 }
4830 list_move(&nbl->nbl_lru, &reaplist);
4831 list_del_init(&nbl->nbl_list);
4832 }
4833 spin_unlock(&nn->blocked_locks_lock);
4834
4835 while (!list_empty(&reaplist)) {
4836 nbl = list_first_entry(&reaplist,
4837 struct nfsd4_blocked_lock, nbl_lru);
4838 list_del_init(&nbl->nbl_lru);
4839 posix_unblock_lock(&nbl->nbl_lock);
4840 free_blocked_lock(nbl);
4841 }
4842
4843 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
4844 return new_timeo;
4845}
4846
4847static struct workqueue_struct *laundry_wq;
4848static void laundromat_main(struct work_struct *);
4849
4850static void
4851laundromat_main(struct work_struct *laundry)
4852{
4853 time_t t;
4854 struct delayed_work *dwork = to_delayed_work(laundry);
4855 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4856 laundromat_work);
4857
4858 t = nfs4_laundromat(nn);
4859 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4860 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4861}
4862
4863static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4864{
4865 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4866 return nfserr_bad_stateid;
4867 return nfs_ok;
4868}
4869
4870static inline int
4871access_permit_read(struct nfs4_ol_stateid *stp)
4872{
4873 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4874 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4875 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
4876}
4877
4878static inline int
4879access_permit_write(struct nfs4_ol_stateid *stp)
4880{
4881 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4882 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
4883}
4884
4885static __be32
4886nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4887{
4888 __be32 status = nfserr_openmode;
4889
4890 /* For lock stateids, we test the parent open, not the lock: */
4891 if (stp->st_openstp)
4892 stp = stp->st_openstp;
4893 if ((flags & WR_STATE) && !access_permit_write(stp))
4894 goto out;
4895 if ((flags & RD_STATE) && !access_permit_read(stp))
4896 goto out;
4897 status = nfs_ok;
4898out:
4899 return status;
4900}
4901
4902static inline __be32
4903check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
4904{
4905 if (ONE_STATEID(stateid) && (flags & RD_STATE))
4906 return nfs_ok;
4907 else if (opens_in_grace(net)) {
4908 /* Answer in remaining cases depends on existence of
4909 * conflicting state; so we must wait out the grace period. */
4910 return nfserr_grace;
4911 } else if (flags & WR_STATE)
4912 return nfs4_share_conflict(current_fh,
4913 NFS4_SHARE_DENY_WRITE);
4914 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4915 return nfs4_share_conflict(current_fh,
4916 NFS4_SHARE_DENY_READ);
4917}
4918
4919/*
4920 * Allow READ/WRITE during grace period on recovered state only for files
4921 * that are not able to provide mandatory locking.
4922 */
4923static inline int
4924grace_disallows_io(struct net *net, struct inode *inode)
4925{
4926 return opens_in_grace(net) && mandatory_lock(inode);
4927}
4928
4929static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
4930{
4931 /*
4932 * When sessions are used the stateid generation number is ignored
4933 * when it is zero.
4934 */
4935 if (has_session && in->si_generation == 0)
4936 return nfs_ok;
4937
4938 if (in->si_generation == ref->si_generation)
4939 return nfs_ok;
4940
4941 /* If the client sends us a stateid from the future, it's buggy: */
4942 if (nfsd4_stateid_generation_after(in, ref))
4943 return nfserr_bad_stateid;
4944 /*
4945 * However, we could see a stateid from the past, even from a
4946 * non-buggy client. For example, if the client sends a lock
4947 * while some IO is outstanding, the lock may bump si_generation
4948 * while the IO is still in flight. The client could avoid that
4949 * situation by waiting for responses on all the IO requests,
4950 * but it may get better performance by simply retrying any IO
4951 * that receives an old_stateid error, as long as requests are
4952 * rarely reordered in flight:
4953 */
4954 return nfserr_old_stateid;
4955}
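
/*
 * Illustrative example (hypothetical generations): with sessions an
 * incoming generation of 0 is a wildcard; otherwise the generations must
 * match exactly, a newer-than-ours value marks a buggy client, and an
 * older value is the benign reordering case described above.
 */
static inline void example_generation_checks(void)
{
	stateid_t ref = { .si_generation = 3 };
	stateid_t in = { .si_generation = 0 };

	WARN_ON(check_stateid_generation(&in, &ref, true) != nfs_ok);
	in.si_generation = 3;	/* exact match */
	WARN_ON(check_stateid_generation(&in, &ref, false) != nfs_ok);
	in.si_generation = 4;	/* "from the future" */
	WARN_ON(check_stateid_generation(&in, &ref, false) !=
		nfserr_bad_stateid);
	in.si_generation = 2;	/* stale but plausible */
	WARN_ON(check_stateid_generation(&in, &ref, false) !=
		nfserr_old_stateid);
}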
4956
4957static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
4958{
4959 if (ols->st_stateowner->so_is_open_owner &&
4960 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4961 return nfserr_bad_stateid;
4962 return nfs_ok;
4963}
4964
4965static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4966{
4967 struct nfs4_stid *s;
4968 __be32 status = nfserr_bad_stateid;
4969
4970 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
4971 CLOSE_STATEID(stateid))
4972 return status;
4973 /* Client debugging aid. */
4974 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
4975 char addr_str[INET6_ADDRSTRLEN];
4976 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
4977 sizeof(addr_str));
4978 pr_warn_ratelimited("NFSD: client %s testing state ID "
4979 "with incorrect client ID\n", addr_str);
4980 return status;
4981 }
4982 spin_lock(&cl->cl_lock);
4983 s = find_stateid_locked(cl, stateid);
4984 if (!s)
4985 goto out_unlock;
4986 status = check_stateid_generation(stateid, &s->sc_stateid, 1);
4987 if (status)
4988 goto out_unlock;
4989 switch (s->sc_type) {
4990 case NFS4_DELEG_STID:
4991 status = nfs_ok;
4992 break;
4993 case NFS4_REVOKED_DELEG_STID:
4994 status = nfserr_deleg_revoked;
4995 break;
4996 case NFS4_OPEN_STID:
4997 case NFS4_LOCK_STID:
4998 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
4999 break;
5000 default:
5001 printk(KERN_WARNING "unknown stateid type %x\n", s->sc_type);
5002 /* Fallthrough */
5003 case NFS4_CLOSED_STID:
5004 case NFS4_CLOSED_DELEG_STID:
5005 status = nfserr_bad_stateid;
5006 }
5007out_unlock:
5008 spin_unlock(&cl->cl_lock);
5009 return status;
5010}
5011
5012__be32
5013nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
5014 stateid_t *stateid, unsigned char typemask,
5015 struct nfs4_stid **s, struct nfsd_net *nn)
5016{
5017 __be32 status;
5018 bool return_revoked = false;
5019
5020 /*
5021 * Only return revoked delegations if explicitly asked;
5022 * otherwise we report a revoked or bad_stateid status.
5023 */
5024 if (typemask & NFS4_REVOKED_DELEG_STID)
5025 return_revoked = true;
5026 else if (typemask & NFS4_DELEG_STID)
5027 typemask |= NFS4_REVOKED_DELEG_STID;
5028
5029 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5030 CLOSE_STATEID(stateid))
5031 return nfserr_bad_stateid;
5032 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
5033 if (status == nfserr_stale_clientid) {
5034 if (cstate->session)
5035 return nfserr_bad_stateid;
5036 return nfserr_stale_stateid;
5037 }
5038 if (status)
5039 return status;
5040 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
5041 if (!*s)
5042 return nfserr_bad_stateid;
5043 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5044 nfs4_put_stid(*s);
5045 if (cstate->minorversion)
5046 return nfserr_deleg_revoked;
5047 return nfserr_bad_stateid;
5048 }
5049 return nfs_ok;
5050}
5051
5052static struct file *
5053nfs4_find_file(struct nfs4_stid *s, int flags)
5054{
5055 if (!s)
5056 return NULL;
5057
5058 switch (s->sc_type) {
5059 case NFS4_DELEG_STID:
5060 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5061 return NULL;
5062 return get_file(s->sc_file->fi_deleg_file);
5063 case NFS4_OPEN_STID:
5064 case NFS4_LOCK_STID:
5065 if (flags & RD_STATE)
5066 return find_readable_file(s->sc_file);
5067 else
5068 return find_writeable_file(s->sc_file);
5069 break;
5070 }
5071
5072 return NULL;
5073}
5074
5075static __be32
5076nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
5077{
5078 __be32 status;
5079
5080 status = nfsd4_check_openowner_confirmed(ols);
5081 if (status)
5082 return status;
5083 return nfs4_check_openmode(ols, flags);
5084}
5085
5086static __be32
5087nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5088 struct file **filpp, bool *tmp_file, int flags)
5089{
5090 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5091 struct file *file;
5092 __be32 status;
5093
5094 file = nfs4_find_file(s, flags);
5095 if (file) {
5096 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5097 acc | NFSD_MAY_OWNER_OVERRIDE);
5098 if (status) {
5099 fput(file);
5100 return status;
5101 }
5102
5103 *filpp = file;
5104 } else {
5105 status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
5106 if (status)
5107 return status;
5108
5109 if (tmp_file)
5110 *tmp_file = true;
5111 }
5112
5113 return 0;
5114}
5115
5116/*
5117 * Checks for stateid operations
5118 */
5119__be32
5120nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
5121 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
5122 stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file)
5123{
5124 struct inode *ino = d_inode(fhp->fh_dentry);
5125 struct net *net = SVC_NET(rqstp);
5126 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5127 struct nfs4_stid *s = NULL;
5128 __be32 status;
5129
5130 if (filpp)
5131 *filpp = NULL;
5132 if (tmp_file)
5133 *tmp_file = false;
5134
5135 if (grace_disallows_io(net, ino))
5136 return nfserr_grace;
5137
5138 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
5139 status = check_special_stateids(net, fhp, stateid, flags);
5140 goto done;
5141 }
5142
5143 status = nfsd4_lookup_stateid(cstate, stateid,
5144 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5145 &s, nn);
5146 if (status)
5147 return status;
5148 status = check_stateid_generation(stateid, &s->sc_stateid,
5149 nfsd4_has_session(cstate));
5150 if (status)
5151 goto out;
5152
5153 switch (s->sc_type) {
5154 case NFS4_DELEG_STID:
5155 status = nfs4_check_delegmode(delegstateid(s), flags);
5156 break;
5157 case NFS4_OPEN_STID:
5158 case NFS4_LOCK_STID:
5159 status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
5160 break;
5161 default:
5162 status = nfserr_bad_stateid;
5163 break;
5164 }
5165 if (status)
5166 goto out;
5167 status = nfs4_check_fh(fhp, s);
5168
5169done:
5170 if (!status && filpp)
5171 status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
5172out:
5173 if (s)
5174 nfs4_put_stid(s);
5175 return status;
5176}
5177
5178/*
5179 * Test if the stateid is valid
5180 */
5181__be32
5182nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5183 union nfsd4_op_u *u)
5184{
5185 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
5186 struct nfsd4_test_stateid_id *stateid;
5187 struct nfs4_client *cl = cstate->session->se_client;
5188
5189 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
5190 stateid->ts_id_status =
5191 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
5192
5193 return nfs_ok;
5194}
5195
5196static __be32
5197nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
5198{
5199 struct nfs4_ol_stateid *stp = openlockstateid(s);
5200 __be32 ret;
5201
5202 mutex_lock(&stp->st_mutex);
5203
5204 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5205 if (ret)
5206 goto out;
5207
5208 ret = nfserr_locks_held;
5209 if (check_for_locks(stp->st_stid.sc_file,
5210 lockowner(stp->st_stateowner)))
5211 goto out;
5212
5213 release_lock_stateid(stp);
5214 ret = nfs_ok;
5215
5216out:
5217 mutex_unlock(&stp->st_mutex);
5218 nfs4_put_stid(s);
5219 return ret;
5220}
5221
5222__be32
5223nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5224 union nfsd4_op_u *u)
5225{
5226 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
5227 stateid_t *stateid = &free_stateid->fr_stateid;
5228 struct nfs4_stid *s;
5229 struct nfs4_delegation *dp;
5230 struct nfs4_client *cl = cstate->session->se_client;
5231 __be32 ret = nfserr_bad_stateid;
5232
5233 spin_lock(&cl->cl_lock);
5234 s = find_stateid_locked(cl, stateid);
5235 if (!s)
5236 goto out_unlock;
5237 switch (s->sc_type) {
5238 case NFS4_DELEG_STID:
5239 ret = nfserr_locks_held;
5240 break;
5241 case NFS4_OPEN_STID:
5242 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5243 if (ret)
5244 break;
5245 ret = nfserr_locks_held;
5246 break;
5247 case NFS4_LOCK_STID:
5248 atomic_inc(&s->sc_count);
5249 spin_unlock(&cl->cl_lock);
5250 ret = nfsd4_free_lock_stateid(stateid, s);
5251 goto out;
5252 case NFS4_REVOKED_DELEG_STID:
5253 dp = delegstateid(s);
5254 list_del_init(&dp->dl_recall_lru);
5255 spin_unlock(&cl->cl_lock);
5256 nfs4_put_stid(s);
5257 ret = nfs_ok;
5258 goto out;
5259 /* Default falls through and returns nfserr_bad_stateid */
5260 }
5261out_unlock:
5262 spin_unlock(&cl->cl_lock);
5263out:
5264 return ret;
5265}
5266
5267static inline int
5268setlkflg(int type)
5269{
5270 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
5271 RD_STATE : WR_STATE;
5272}
5273
5274static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
5275{
5276 struct svc_fh *current_fh = &cstate->current_fh;
5277 struct nfs4_stateowner *sop = stp->st_stateowner;
5278 __be32 status;
5279
5280 status = nfsd4_check_seqid(cstate, sop, seqid);
5281 if (status)
5282 return status;
5283 status = nfsd4_lock_ol_stateid(stp);
5284 if (status != nfs_ok)
5285 return status;
5286 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
5287 if (status == nfs_ok)
5288 status = nfs4_check_fh(current_fh, &stp->st_stid);
5289 if (status != nfs_ok)
5290 mutex_unlock(&stp->st_mutex);
5291 return status;
5292}
5293
5294/*
5295 * Checks for sequence id mutating operations.
5296 */
5297static __be32
5298nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5299 stateid_t *stateid, char typemask,
5300 struct nfs4_ol_stateid **stpp,
5301 struct nfsd_net *nn)
5302{
5303 __be32 status;
5304 struct nfs4_stid *s;
5305 struct nfs4_ol_stateid *stp = NULL;
5306
5307 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
5308 seqid, STATEID_VAL(stateid));
5309
5310 *stpp = NULL;
5311 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
5312 if (status)
5313 return status;
5314 stp = openlockstateid(s);
5315 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
5316
5317 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
5318 if (!status)
5319 *stpp = stp;
5320 else
5321 nfs4_put_stid(&stp->st_stid);
5322 return status;
5323}
5324
5325static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5326 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
5327{
5328 __be32 status;
5329 struct nfs4_openowner *oo;
5330 struct nfs4_ol_stateid *stp;
5331
5332 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
5333 NFS4_OPEN_STID, &stp, nn);
5334 if (status)
5335 return status;
5336 oo = openowner(stp->st_stateowner);
5337 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5338 mutex_unlock(&stp->st_mutex);
5339 nfs4_put_stid(&stp->st_stid);
5340 return nfserr_bad_stateid;
5341 }
5342 *stpp = stp;
5343 return nfs_ok;
5344}
5345
5346__be32
5347nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5348 union nfsd4_op_u *u)
5349{
5350 struct nfsd4_open_confirm *oc = &u->open_confirm;
5351 __be32 status;
5352 struct nfs4_openowner *oo;
5353 struct nfs4_ol_stateid *stp;
5354 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5355
5356 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
5357 cstate->current_fh.fh_dentry);
5358
5359 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
5360 if (status)
5361 return status;
5362
5363 status = nfs4_preprocess_seqid_op(cstate,
5364 oc->oc_seqid, &oc->oc_req_stateid,
5365 NFS4_OPEN_STID, &stp, nn);
5366 if (status)
5367 goto out;
5368 oo = openowner(stp->st_stateowner);
5369 status = nfserr_bad_stateid;
5370 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5371 mutex_unlock(&stp->st_mutex);
5372 goto put_stateid;
5373 }
5374 oo->oo_flags |= NFS4_OO_CONFIRMED;
5375 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5376 mutex_unlock(&stp->st_mutex);
5377 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5378 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5379
5380 nfsd4_client_record_create(oo->oo_owner.so_client);
5381 status = nfs_ok;
5382put_stateid:
5383 nfs4_put_stid(&stp->st_stid);
5384out:
5385 nfsd4_bump_seqid(cstate, status);
5386 return status;
5387}
5388
5389static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
5390{
5391 if (!test_access(access, stp))
5392 return;
5393 nfs4_file_put_access(stp->st_stid.sc_file, access);
5394 clear_access(access, stp);
5395}
5396
5397static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
5398{
5399 switch (to_access) {
5400 case NFS4_SHARE_ACCESS_READ:
5401 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
5402 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5403 break;
5404 case NFS4_SHARE_ACCESS_WRITE:
5405 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
5406 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5407 break;
5408 case NFS4_SHARE_ACCESS_BOTH:
5409 break;
5410 default:
5411 WARN_ON_ONCE(1);
5412 }
5413}
5414
5415__be32
5416nfsd4_open_downgrade(struct svc_rqst *rqstp,
5417 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
5418{
5419 struct nfsd4_open_downgrade *od = &u->open_downgrade;
5420 __be32 status;
5421 struct nfs4_ol_stateid *stp;
5422 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5423
5424 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
5425 cstate->current_fh.fh_dentry);
5426
5427 /* We don't yet support WANT bits: */
5428 if (od->od_deleg_want)
5429 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
5430 od->od_deleg_want);
5431
5432 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
5433 &od->od_stateid, &stp, nn);
5434 if (status)
5435 goto out;
5436 status = nfserr_inval;
5437 if (!test_access(od->od_share_access, stp)) {
5438 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
5439 stp->st_access_bmap, od->od_share_access);
5440 goto put_stateid;
5441 }
5442 if (!test_deny(od->od_share_deny, stp)) {
5443 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
5444 stp->st_deny_bmap, od->od_share_deny);
5445 goto put_stateid;
5446 }
5447 nfs4_stateid_downgrade(stp, od->od_share_access);
5448 reset_union_bmap_deny(od->od_share_deny, stp);
5449 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5450 status = nfs_ok;
5451put_stateid:
5452 mutex_unlock(&stp->st_mutex);
5453 nfs4_put_stid(&stp->st_stid);
5454out:
5455 nfsd4_bump_seqid(cstate, status);
5456 return status;
5457}
5458
5459static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
5460{
5461 struct nfs4_client *clp = s->st_stid.sc_client;
5462 bool unhashed;
5463 LIST_HEAD(reaplist);
5464
5465 spin_lock(&clp->cl_lock);
5466 unhashed = unhash_open_stateid(s, &reaplist);
5467
5468 if (clp->cl_minorversion) {
5469 if (unhashed)
5470 put_ol_stateid_locked(s, &reaplist);
5471 spin_unlock(&clp->cl_lock);
5472 free_ol_stateid_reaplist(&reaplist);
5473 } else {
5474 spin_unlock(&clp->cl_lock);
5475 free_ol_stateid_reaplist(&reaplist);
5476 if (unhashed)
5477 move_to_close_lru(s, clp->net);
5478 }
5479}
5480
5481/*
5482 * nfs4_unlock_state() called after encode
5483 */
5484__be32
5485nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5486 union nfsd4_op_u *u)
5487{
5488 struct nfsd4_close *close = &u->close;
5489 __be32 status;
5490 struct nfs4_ol_stateid *stp;
5491 struct net *net = SVC_NET(rqstp);
5492 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5493
5494 dprintk("NFSD: nfsd4_close on file %pd\n",
5495 cstate->current_fh.fh_dentry);
5496
5497 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
5498 &close->cl_stateid,
5499 NFS4_OPEN_STID|NFS4_CLOSED_STID,
5500 &stp, nn);
5501 nfsd4_bump_seqid(cstate, status);
5502 if (status)
5503 goto out;
5504
5505 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5506 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
5507
5508 nfsd4_close_open_stateid(stp);
5509 mutex_unlock(&stp->st_mutex);
5510
5511 /* See RFC5661 section 18.2.4 */
5512 if (stp->st_stid.sc_client->cl_minorversion)
5513 memcpy(&close->cl_stateid, &close_stateid,
5514 sizeof(close->cl_stateid));
5515
5516 /* put reference from nfs4_preprocess_seqid_op */
5517 nfs4_put_stid(&stp->st_stid);
5518out:
5519 return status;
5520}
5521
5522__be32
5523nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5524 union nfsd4_op_u *u)
5525{
5526 struct nfsd4_delegreturn *dr = &u->delegreturn;
5527 struct nfs4_delegation *dp;
5528 stateid_t *stateid = &dr->dr_stateid;
5529 struct nfs4_stid *s;
5530 __be32 status;
5531 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5532
5533 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
5534 return status;
5535
5536 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
5537 if (status)
5538 goto out;
5539 dp = delegstateid(s);
5540 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
5541 if (status)
5542 goto put_stateid;
5543
5544 destroy_delegation(dp);
5545put_stateid:
5546 nfs4_put_stid(&dp->dl_stid);
5547out:
5548 return status;
5549}
5550
5551static inline u64
5552end_offset(u64 start, u64 len)
5553{
5554 u64 end;
5555
5556 end = start + len;
5557 return end >= start ? end : NFS4_MAX_UINT64;
5558}
5559
5560/* last octet in a range */
5561static inline u64
5562last_byte_offset(u64 start, u64 len)
5563{
5564 u64 end;
5565
5566 WARN_ON_ONCE(!len);
5567 end = start + len;
5568 return end > start ? end - 1 : NFS4_MAX_UINT64;
5569}
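
/*
 * Illustrative examples (hypothetical values) of the saturating range
 * math above: any sum that would wrap past 2^64 - 1 is clamped to
 * NFS4_MAX_UINT64, which NFSv4 uses to mean "through the end of file".
 */
static inline void example_range_math(void)
{
	/* no overflow: [100, 100 + 50) ends at 150; last byte is 149 */
	WARN_ON(end_offset(100, 50) != 150);
	WARN_ON(last_byte_offset(100, 50) != 149);
	/* overflow: a maximum-length lock starting at 100 saturates */
	WARN_ON(end_offset(100, NFS4_MAX_UINT64) != NFS4_MAX_UINT64);
	WARN_ON(last_byte_offset(100, NFS4_MAX_UINT64) != NFS4_MAX_UINT64);
}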
5570
5571/*
5572 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
5573 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
5574 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
5575 * locking, this prevents us from being completely protocol-compliant. The
5576 * real solution to this problem is to start using unsigned file offsets in
5577 * the VFS, but this is a very deep change!
5578 */
5579static inline void
5580nfs4_transform_lock_offset(struct file_lock *lock)
5581{
5582 if (lock->fl_start < 0)
5583 lock->fl_start = OFFSET_MAX;
5584 if (lock->fl_end < 0)
5585 lock->fl_end = OFFSET_MAX;
5586}
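
/*
 * Illustrative example (hypothetical lock): a client byte offset of 2^63
 * does not fit in the VFS's signed loff_t, shows up negative, and is
 * clamped to OFFSET_MAX by the helper above.
 */
static inline void example_transform_offset(void)
{
	struct file_lock fl = {
		.fl_start = (loff_t)(1ULL << 63),	/* negative as loff_t */
		.fl_end = OFFSET_MAX,
	};

	nfs4_transform_lock_offset(&fl);
	WARN_ON(fl.fl_start != OFFSET_MAX);
}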
5587
5588static fl_owner_t
5589nfsd4_fl_get_owner(fl_owner_t owner)
5590{
5591 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5592
5593 nfs4_get_stateowner(&lo->lo_owner);
5594 return owner;
5595}
5596
5597static void
5598nfsd4_fl_put_owner(fl_owner_t owner)
5599{
5600 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5601
5602 if (lo)
5603 nfs4_put_stateowner(&lo->lo_owner);
5604}
5605
5606static void
5607nfsd4_lm_notify(struct file_lock *fl)
5608{
5609 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
5610 struct net *net = lo->lo_owner.so_client->net;
5611 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5612 struct nfsd4_blocked_lock *nbl = container_of(fl,
5613 struct nfsd4_blocked_lock, nbl_lock);
5614 bool queue = false;
5615
5616 /* An empty list means that something else is going to be using it */
5617 spin_lock(&nn->blocked_locks_lock);
5618 if (!list_empty(&nbl->nbl_list)) {
5619 list_del_init(&nbl->nbl_list);
5620 list_del_init(&nbl->nbl_lru);
5621 queue = true;
5622 }
5623 spin_unlock(&nn->blocked_locks_lock);
5624
5625 if (queue)
5626 nfsd4_run_cb(&nbl->nbl_cb);
5627}
5628
5629static const struct lock_manager_operations nfsd_posix_mng_ops = {
5630 .lm_notify = nfsd4_lm_notify,
5631 .lm_get_owner = nfsd4_fl_get_owner,
5632 .lm_put_owner = nfsd4_fl_put_owner,
5633};
5634
5635static inline void
5636nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
5637{
5638 struct nfs4_lockowner *lo;
5639
5640 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
5641 lo = (struct nfs4_lockowner *) fl->fl_owner;
5642 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
5643 lo->lo_owner.so_owner.len, GFP_KERNEL);
5644 if (!deny->ld_owner.data)
5645 /* We just don't care that much */
5646 goto nevermind;
5647 deny->ld_owner.len = lo->lo_owner.so_owner.len;
5648 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
5649 } else {
5650nevermind:
5651 deny->ld_owner.len = 0;
5652 deny->ld_owner.data = NULL;
5653 deny->ld_clientid.cl_boot = 0;
5654 deny->ld_clientid.cl_id = 0;
5655 }
5656 deny->ld_start = fl->fl_start;
5657 deny->ld_length = NFS4_MAX_UINT64;
5658 if (fl->fl_end != NFS4_MAX_UINT64)
5659 deny->ld_length = fl->fl_end - fl->fl_start + 1;
5660 deny->ld_type = NFS4_READ_LT;
5661 if (fl->fl_type != F_RDLCK)
5662 deny->ld_type = NFS4_WRITE_LT;
5663}
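
/*
 * Illustrative example (hypothetical conflicting lock): a POSIX write lock
 * on bytes [100, 199] with a non-nfsd owner is reported back as offset
 * 100, length 100, type NFS4_WRITE_LT, with the owner and clientid fields
 * zeroed by the "nevermind" branch above.
 */
static inline void example_lock_denied(void)
{
	struct file_lock fl = {
		.fl_start = 100,
		.fl_end = 199,
		.fl_type = F_WRLCK,
	};
	struct nfsd4_lock_denied deny;

	nfs4_set_lock_denied(&fl, &deny);
	WARN_ON(deny.ld_start != 100);
	WARN_ON(deny.ld_length != 100);
	WARN_ON(deny.ld_type != NFS4_WRITE_LT);
}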
5664
5665static struct nfs4_lockowner *
5666find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
5667{
5668 unsigned int strhashval = ownerstr_hashval(owner);
5669 struct nfs4_stateowner *so;
5670
5671 lockdep_assert_held(&clp->cl_lock);
5672
5673 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
5674 so_strhash) {
5675 if (so->so_is_open_owner)
5676 continue;
5677 if (same_owner_str(so, owner))
5678 return lockowner(nfs4_get_stateowner(so));
5679 }
5680 return NULL;
5681}
5682
5683static struct nfs4_lockowner *
5684find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
5685{
5686 struct nfs4_lockowner *lo;
5687
5688 spin_lock(&clp->cl_lock);
5689 lo = find_lockowner_str_locked(clp, owner);
5690 spin_unlock(&clp->cl_lock);
5691 return lo;
5692}
5693
5694static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
5695{
5696 unhash_lockowner_locked(lockowner(sop));
5697}
5698
5699static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
5700{
5701 struct nfs4_lockowner *lo = lockowner(sop);
5702
5703 kmem_cache_free(lockowner_slab, lo);
5704}
5705
5706static const struct nfs4_stateowner_operations lockowner_ops = {
5707 .so_unhash = nfs4_unhash_lockowner,
5708 .so_free = nfs4_free_lockowner,
5709};
5710
5711/*
5712 * Alloc a lock owner structure.
5713 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
5714 * occurred.
5715 *
5716 * strhashval = ownerstr_hashval
5717 */
5718static struct nfs4_lockowner *
5719alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5720 struct nfs4_ol_stateid *open_stp,
5721 struct nfsd4_lock *lock)
5722{
5723 struct nfs4_lockowner *lo, *ret;
5724
5725 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
5726 if (!lo)
5727 return NULL;
5728 INIT_LIST_HEAD(&lo->lo_blocked);
5729 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
5730 lo->lo_owner.so_is_open_owner = 0;
5731 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
5732 lo->lo_owner.so_ops = &lockowner_ops;
5733 spin_lock(&clp->cl_lock);
5734 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
5735 if (ret == NULL) {
5736 list_add(&lo->lo_owner.so_strhash,
5737 &clp->cl_ownerstr_hashtbl[strhashval]);
5738 ret = lo;
5739 } else
5740 nfs4_free_stateowner(&lo->lo_owner);
5741
5742 spin_unlock(&clp->cl_lock);
5743 return ret;
5744}
5745
5746static void
5747init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5748 struct nfs4_file *fp, struct inode *inode,
5749 struct nfs4_ol_stateid *open_stp)
5750{
5751 struct nfs4_client *clp = lo->lo_owner.so_client;
5752
5753 lockdep_assert_held(&clp->cl_lock);
5754
5755 atomic_inc(&stp->st_stid.sc_count);
5756 stp->st_stid.sc_type = NFS4_LOCK_STID;
5757 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
5758 get_nfs4_file(fp);
5759 stp->st_stid.sc_file = fp;
5760 stp->st_access_bmap = 0;
5761 stp->st_deny_bmap = open_stp->st_deny_bmap;
5762 stp->st_openstp = open_stp;
5763 mutex_init(&stp->st_mutex);
5764 list_add(&stp->st_locks, &open_stp->st_locks);
5765 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5766 spin_lock(&fp->fi_lock);
5767 list_add(&stp->st_perfile, &fp->fi_stateids);
5768 spin_unlock(&fp->fi_lock);
5769}
5770
5771static struct nfs4_ol_stateid *
5772find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
5773{
5774 struct nfs4_ol_stateid *lst;
5775 struct nfs4_client *clp = lo->lo_owner.so_client;
5776
5777 lockdep_assert_held(&clp->cl_lock);
5778
5779 list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
5780 if (lst->st_stid.sc_file == fp) {
5781 atomic_inc(&lst->st_stid.sc_count);
5782 return lst;
5783 }
5784 }
5785 return NULL;
5786}
5787
5788static struct nfs4_ol_stateid *
5789find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
5790 struct inode *inode, struct nfs4_ol_stateid *ost,
5791 bool *new)
5792{
5793 struct nfs4_stid *ns = NULL;
5794 struct nfs4_ol_stateid *lst;
5795 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5796 struct nfs4_client *clp = oo->oo_owner.so_client;
5797
5798 spin_lock(&clp->cl_lock);
5799 lst = find_lock_stateid(lo, fi);
5800 if (lst == NULL) {
5801 spin_unlock(&clp->cl_lock);
5802 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
5803 if (ns == NULL)
5804 return NULL;
5805
5806 spin_lock(&clp->cl_lock);
5807 lst = find_lock_stateid(lo, fi);
5808 if (likely(!lst)) {
5809 lst = openlockstateid(ns);
5810 init_lock_stateid(lst, lo, fi, inode, ost);
5811 ns = NULL;
5812 *new = true;
5813 }
5814 }
5815 spin_unlock(&clp->cl_lock);
5816 if (ns)
5817 nfs4_put_stid(ns);
5818 return lst;
5819}
5820
5821static int
5822check_lock_length(u64 offset, u64 length)
5823{
5824 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
5825 (length > ~offset)));
5826}
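/*
 * check_lock_length() returns nonzero (invalid) for a zero length and for
 * any non-special length with length > ~offset, i.e. any range whose last
 * byte (offset + length - 1) would reach or wrap past U64_MAX. Worked
 * example: with offset = U64_MAX - 1, ~offset = 1, so length 1 is accepted
 * (last byte U64_MAX - 1) while length 2 is rejected. The special length
 * NFS4_MAX_UINT64 means "lock to end of file" and is always accepted.
 */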
5827
5828static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
5829{
5830 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
5831
5832 lockdep_assert_held(&fp->fi_lock);
5833
5834 if (test_access(access, lock_stp))
5835 return;
5836 __nfs4_file_get_access(fp, access);
5837 set_access(access, lock_stp);
5838}
5839
5840static __be32
5841lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
5842 struct nfs4_ol_stateid *ost,
5843 struct nfsd4_lock *lock,
5844 struct nfs4_ol_stateid **plst, bool *new)
5845{
5846 __be32 status;
5847 struct nfs4_file *fi = ost->st_stid.sc_file;
5848 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5849 struct nfs4_client *cl = oo->oo_owner.so_client;
5850 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
5851 struct nfs4_lockowner *lo;
5852 struct nfs4_ol_stateid *lst;
5853 unsigned int strhashval;
5854 bool hashed;
5855
5856 lo = find_lockowner_str(cl, &lock->lk_new_owner);
5857 if (!lo) {
5858 strhashval = ownerstr_hashval(&lock->lk_new_owner);
5859 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
5860 if (lo == NULL)
5861 return nfserr_jukebox;
5862 } else {
5863 /* with an existing lockowner, seqids must be the same */
5864 status = nfserr_bad_seqid;
5865 if (!cstate->minorversion &&
5866 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
5867 goto out;
5868 }
5869
5870retry:
5871 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
5872 if (lst == NULL) {
5873 status = nfserr_jukebox;
5874 goto out;
5875 }
5876
5877 mutex_lock(&lst->st_mutex);
5878
5879 /* See if it's still hashed to avoid race with FREE_STATEID */
5880 spin_lock(&cl->cl_lock);
5881 hashed = !list_empty(&lst->st_perfile);
5882 spin_unlock(&cl->cl_lock);
5883
5884 if (!hashed) {
5885 mutex_unlock(&lst->st_mutex);
5886 nfs4_put_stid(&lst->st_stid);
5887 goto retry;
5888 }
5889 status = nfs_ok;
5890 *plst = lst;
5891out:
5892 nfs4_put_stateowner(&lo->lo_owner);
5893 return status;
5894}
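/*
 * The retry loop above closes a race with FREE_STATEID: the stateid can
 * be found and then unhashed by another thread before we take st_mutex,
 * so we re-check st_perfile under cl_lock after acquiring the mutex and
 * start over if we lost. This is the general "validate after locking"
 * pattern (names hypothetical):
 *
 *	retry:
 *		obj = lookup_and_get(key);
 *		lock(obj);
 *		if (!still_hashed(obj)) {
 *			unlock(obj);
 *			put(obj);
 *			goto retry;
 *		}
 */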
5895
5896/*
5897 * LOCK operation
5898 */
5899__be32
5900nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5901 union nfsd4_op_u *u)
5902{
5903 struct nfsd4_lock *lock = &u->lock;
5904 struct nfs4_openowner *open_sop = NULL;
5905 struct nfs4_lockowner *lock_sop = NULL;
5906 struct nfs4_ol_stateid *lock_stp = NULL;
5907 struct nfs4_ol_stateid *open_stp = NULL;
5908 struct nfs4_file *fp;
5909 struct file *filp = NULL;
5910 struct nfsd4_blocked_lock *nbl = NULL;
5911 struct file_lock *file_lock = NULL;
5912 struct file_lock *conflock = NULL;
5913 __be32 status = 0;
5914 int lkflg;
5915 int err;
5916 bool new = false;
5917 unsigned char fl_type;
5918 unsigned int fl_flags = FL_POSIX;
5919 struct net *net = SVC_NET(rqstp);
5920 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5921
	dprintk("NFSD: nfsd4_lock: start=%lld length=%lld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);
5925
5926 if (check_lock_length(lock->lk_offset, lock->lk_length))
5927 return nfserr_inval;
5928
	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, NFSD_MAY_LOCK);
	if (status) {
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}
5934
5935 if (lock->lk_is_new) {
5936 if (nfsd4_has_session(cstate))
			/* See RFC 5661, section 18.10.3: the given clientid is ignored: */
5938 memcpy(&lock->lk_new_clientid,
5939 &cstate->session->se_client->cl_clientid,
5940 sizeof(clientid_t));
5941
5942 status = nfserr_stale_clientid;
5943 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
5944 goto out;
5945
5946 /* validate and update open stateid and open seqid */
5947 status = nfs4_preprocess_confirmed_seqid_op(cstate,
5948 lock->lk_new_open_seqid,
5949 &lock->lk_new_open_stateid,
5950 &open_stp, nn);
5951 if (status)
5952 goto out;
5953 mutex_unlock(&open_stp->st_mutex);
5954 open_sop = openowner(open_stp->st_stateowner);
5955 status = nfserr_bad_stateid;
5956 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5957 &lock->lk_new_clientid))
5958 goto out;
5959 status = lookup_or_create_lock_state(cstate, open_stp, lock,
5960 &lock_stp, &new);
5961 } else {
5962 status = nfs4_preprocess_seqid_op(cstate,
5963 lock->lk_old_lock_seqid,
5964 &lock->lk_old_lock_stateid,
5965 NFS4_LOCK_STID, &lock_stp, nn);
5966 }
5967 if (status)
5968 goto out;
5969 lock_sop = lockowner(lock_stp->st_stateowner);
5970
5971 lkflg = setlkflg(lock->lk_type);
5972 status = nfs4_check_openmode(lock_stp, lkflg);
5973 if (status)
5974 goto out;
5975
5976 status = nfserr_grace;
5977 if (locks_in_grace(net) && !lock->lk_reclaim)
5978 goto out;
5979 status = nfserr_no_grace;
5980 if (!locks_in_grace(net) && lock->lk_reclaim)
5981 goto out;
5982
5983 fp = lock_stp->st_stid.sc_file;
5984 switch (lock->lk_type) {
5985 case NFS4_READW_LT:
5986 if (nfsd4_has_session(cstate))
5987 fl_flags |= FL_SLEEP;
5988 /* Fallthrough */
5989 case NFS4_READ_LT:
5990 spin_lock(&fp->fi_lock);
5991 filp = find_readable_file_locked(fp);
5992 if (filp)
5993 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
5994 spin_unlock(&fp->fi_lock);
5995 fl_type = F_RDLCK;
5996 break;
5997 case NFS4_WRITEW_LT:
5998 if (nfsd4_has_session(cstate))
5999 fl_flags |= FL_SLEEP;
6000 /* Fallthrough */
6001 case NFS4_WRITE_LT:
6002 spin_lock(&fp->fi_lock);
6003 filp = find_writeable_file_locked(fp);
6004 if (filp)
6005 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
6006 spin_unlock(&fp->fi_lock);
6007 fl_type = F_WRLCK;
6008 break;
6009 default:
6010 status = nfserr_inval;
6011 goto out;
6012 }
6013
6014 if (!filp) {
6015 status = nfserr_openmode;
6016 goto out;
6017 }
6018
6019 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6020 if (!nbl) {
6021 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6022 status = nfserr_jukebox;
6023 goto out;
6024 }
6025
6026 file_lock = &nbl->nbl_lock;
6027 file_lock->fl_type = fl_type;
6028 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6029 file_lock->fl_pid = current->tgid;
6030 file_lock->fl_file = filp;
6031 file_lock->fl_flags = fl_flags;
6032 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6033 file_lock->fl_start = lock->lk_offset;
6034 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6035 nfs4_transform_lock_offset(file_lock);
6036
6037 conflock = locks_alloc_lock();
6038 if (!conflock) {
6039 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6040 status = nfserr_jukebox;
6041 goto out;
6042 }
6043
6044 if (fl_flags & FL_SLEEP) {
6045 nbl->nbl_time = get_seconds();
6046 spin_lock(&nn->blocked_locks_lock);
6047 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6048 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6049 spin_unlock(&nn->blocked_locks_lock);
6050 }
6051
6052 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
6053 switch (err) {
6054 case 0: /* success! */
6055 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6056 status = 0;
6057 break;
6058 case FILE_LOCK_DEFERRED:
6059 nbl = NULL;
6060 /* Fallthrough */
6061 case -EAGAIN: /* conflock holds conflicting lock */
6062 status = nfserr_denied;
6063 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6064 nfs4_set_lock_denied(conflock, &lock->lk_denied);
6065 break;
6066 case -EDEADLK:
6067 status = nfserr_deadlock;
6068 break;
6069 default:
6070 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
6071 status = nfserrno(err);
6072 break;
6073 }
6074out:
6075 if (nbl) {
6076 /* dequeue it if we queued it before */
6077 if (fl_flags & FL_SLEEP) {
6078 spin_lock(&nn->blocked_locks_lock);
6079 list_del_init(&nbl->nbl_list);
6080 list_del_init(&nbl->nbl_lru);
6081 spin_unlock(&nn->blocked_locks_lock);
6082 }
6083 free_blocked_lock(nbl);
6084 }
6085 if (filp)
6086 fput(filp);
6087 if (lock_stp) {
		/*
		 * Bump the seqid manually if the 4.0 replay owner is the
		 * openowner rather than this lockowner.
		 */
6089 if (cstate->replay_owner &&
6090 cstate->replay_owner != &lock_sop->lo_owner &&
6091 seqid_mutating_err(ntohl(status)))
6092 lock_sop->lo_owner.so_seqid++;
6093
6094 mutex_unlock(&lock_stp->st_mutex);
6095
6096 /*
6097 * If this is a new, never-before-used stateid, and we are
6098 * returning an error, then just go ahead and release it.
6099 */
6100 if (status && new)
6101 release_lock_stateid(lock_stp);
6102
6103 nfs4_put_stid(&lock_stp->st_stid);
6104 }
6105 if (open_stp)
6106 nfs4_put_stid(&open_stp->st_stid);
6107 nfsd4_bump_seqid(cstate, status);
6108 if (conflock)
6109 locks_free_lock(conflock);
6110 return status;
6111}
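/*
 * Note on the blocking-lock path above: for NFSv4.1+ READW/WRITEW the
 * request is queued (FL_SLEEP) and vfs_lock_file() may return
 * FILE_LOCK_DEFERRED. In that case nbl stays on lo_blocked and on the
 * blocked_locks LRU (nbl is set to NULL so the out: path does not free
 * it), and the client is still answered with NFS4ERR_DENIED; it is
 * expected to retry, possibly after a CB_NOTIFY_LOCK callback fires when
 * the conflicting lock goes away.
 */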
6112
/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.  (Arguably, test_lock should instead be an inode
 * operation.)
 */
6119static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
6120{
6121 struct file *file;
6122 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
6123 if (!err) {
6124 err = nfserrno(vfs_test_lock(file, lock));
6125 fput(file);
6126 }
6127 return err;
6128}
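/*
 * The vfs_test_lock() contract, as relied on above: on a zero return the
 * result is left in *lock, where lock->fl_type == F_UNLCK means no
 * conflicting lock was found and anything else describes the conflict.
 * A caller therefore checks:
 *
 *	err = vfs_test_lock(file, lock);
 *	if (!err && lock->fl_type != F_UNLCK)
 *		... *lock now describes the conflicting lock ...
 */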
6129
6130/*
6131 * LOCKT operation
6132 */
6133__be32
6134nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6135 union nfsd4_op_u *u)
6136{
6137 struct nfsd4_lockt *lockt = &u->lockt;
6138 struct file_lock *file_lock = NULL;
6139 struct nfs4_lockowner *lo = NULL;
6140 __be32 status;
6141 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6142
6143 if (locks_in_grace(SVC_NET(rqstp)))
6144 return nfserr_grace;
6145
6146 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
6147 return nfserr_inval;
6148
6149 if (!nfsd4_has_session(cstate)) {
6150 status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
6151 if (status)
6152 goto out;
6153 }
6154
	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		goto out;
6157
6158 file_lock = locks_alloc_lock();
6159 if (!file_lock) {
6160 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6161 status = nfserr_jukebox;
6162 goto out;
6163 }
6164
6165 switch (lockt->lt_type) {
6166 case NFS4_READ_LT:
6167 case NFS4_READW_LT:
6168 file_lock->fl_type = F_RDLCK;
6169 break;
6170 case NFS4_WRITE_LT:
6171 case NFS4_WRITEW_LT:
6172 file_lock->fl_type = F_WRLCK;
6173 break;
6174 default:
6175 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
6176 status = nfserr_inval;
6177 goto out;
6178 }
6179
6180 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
6181 if (lo)
6182 file_lock->fl_owner = (fl_owner_t)lo;
6183 file_lock->fl_pid = current->tgid;
6184 file_lock->fl_flags = FL_POSIX;
6185
6186 file_lock->fl_start = lockt->lt_offset;
6187 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
6188
6189 nfs4_transform_lock_offset(file_lock);
6190
6191 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
6192 if (status)
6193 goto out;
6194
6195 if (file_lock->fl_type != F_UNLCK) {
6196 status = nfserr_denied;
6197 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
6198 }
6199out:
6200 if (lo)
6201 nfs4_put_stateowner(&lo->lo_owner);
6202 if (file_lock)
6203 locks_free_lock(file_lock);
6204 return status;
6205}
6206
6207__be32
6208nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6209 union nfsd4_op_u *u)
6210{
6211 struct nfsd4_locku *locku = &u->locku;
6212 struct nfs4_ol_stateid *stp;
6213 struct file *filp = NULL;
6214 struct file_lock *file_lock = NULL;
6215 __be32 status;
6216 int err;
6217 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6218
	dprintk("NFSD: nfsd4_locku: start=%lld length=%lld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);
6222
6223 if (check_lock_length(locku->lu_offset, locku->lu_length))
6224 return nfserr_inval;
6225
6226 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
6227 &locku->lu_stateid, NFS4_LOCK_STID,
6228 &stp, nn);
6229 if (status)
6230 goto out;
6231 filp = find_any_file(stp->st_stid.sc_file);
6232 if (!filp) {
6233 status = nfserr_lock_range;
6234 goto put_stateid;
6235 }
6236 file_lock = locks_alloc_lock();
6237 if (!file_lock) {
6238 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6239 status = nfserr_jukebox;
6240 goto fput;
6241 }
6242
6243 file_lock->fl_type = F_UNLCK;
6244 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
6245 file_lock->fl_pid = current->tgid;
6246 file_lock->fl_file = filp;
6247 file_lock->fl_flags = FL_POSIX;
6248 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6249 file_lock->fl_start = locku->lu_offset;
6250
6251 file_lock->fl_end = last_byte_offset(locku->lu_offset,
6252 locku->lu_length);
6253 nfs4_transform_lock_offset(file_lock);
6254
6255 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
6256 if (err) {
6257 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
6258 goto out_nfserr;
6259 }
6260 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
6261fput:
6262 fput(filp);
6263put_stateid:
6264 mutex_unlock(&stp->st_mutex);
6265 nfs4_put_stid(&stp->st_stid);
6266out:
6267 nfsd4_bump_seqid(cstate, status);
6268 if (file_lock)
6269 locks_free_lock(file_lock);
6270 return status;
6271
6272out_nfserr:
6273 status = nfserrno(err);
6274 goto fput;
6275}
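/*
 * The (offset, length) pairs from the wire are converted to the
 * [fl_start, fl_end] byte range used by struct file_lock via
 * last_byte_offset(); nfs4_transform_lock_offset() then clamps to
 * OFFSET_MAX. For example, offset 100 with length 10 yields fl_start 100
 * and fl_end 109, while length NFS4_MAX_UINT64 maps to a whole-file lock
 * ending at OFFSET_MAX.
 */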
6276
/*
 * Returns:
 *   true:  the lockowner holds at least one lock on this file
 *   false: the lockowner holds no locks on this file
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
	struct file_lock *fl;
	bool status = false;
6287 struct file *filp = find_any_file(fp);
6288 struct inode *inode;
6289 struct file_lock_context *flctx;
6290
6291 if (!filp) {
6292 /* Any valid lock stateid should have some sort of access */
6293 WARN_ON_ONCE(1);
6294 return status;
6295 }
6296
6297 inode = file_inode(filp);
6298 flctx = inode->i_flctx;
6299
6300 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
6301 spin_lock(&flctx->flc_lock);
6302 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
6303 if (fl->fl_owner == (fl_owner_t)lowner) {
6304 status = true;
6305 break;
6306 }
6307 }
6308 spin_unlock(&flctx->flc_lock);
6309 }
6310 fput(filp);
6311 return status;
6312}
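/*
 * The pointer comparison above works because nfsd4_lock() stores the
 * nfs4_lockowner itself as file_lock->fl_owner when it sets a lock, so a
 * lockowner's locks can be recognized by address equality rather than by
 * re-deriving owner strings.
 */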
6313
6314__be32
6315nfsd4_release_lockowner(struct svc_rqst *rqstp,
6316 struct nfsd4_compound_state *cstate,
6317 union nfsd4_op_u *u)
6318{
6319 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
6320 clientid_t *clid = &rlockowner->rl_clientid;
6321 struct nfs4_stateowner *sop;
6322 struct nfs4_lockowner *lo = NULL;
6323 struct nfs4_ol_stateid *stp;
6324 struct xdr_netobj *owner = &rlockowner->rl_owner;
6325 unsigned int hashval = ownerstr_hashval(owner);
6326 __be32 status;
6327 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6328 struct nfs4_client *clp;
	LIST_HEAD(reaplist);
6330
6331 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
6332 clid->cl_boot, clid->cl_id);
6333
6334 status = lookup_clientid(clid, cstate, nn);
6335 if (status)
6336 return status;
6337
6338 clp = cstate->clp;
6339 /* Find the matching lock stateowner */
6340 spin_lock(&clp->cl_lock);
6341 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
6342 so_strhash) {
6343
6344 if (sop->so_is_open_owner || !same_owner_str(sop, owner))
6345 continue;
6346
6347 /* see if there are still any locks associated with it */
6348 lo = lockowner(sop);
6349 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
6350 if (check_for_locks(stp->st_stid.sc_file, lo)) {
6351 status = nfserr_locks_held;
6352 spin_unlock(&clp->cl_lock);
6353 return status;
6354 }
6355 }
6356
6357 nfs4_get_stateowner(sop);
6358 break;
6359 }
6360 if (!lo) {
6361 spin_unlock(&clp->cl_lock);
6362 return status;
6363 }
6364
6365 unhash_lockowner_locked(lo);
6366 while (!list_empty(&lo->lo_owner.so_stateids)) {
6367 stp = list_first_entry(&lo->lo_owner.so_stateids,
6368 struct nfs4_ol_stateid,
6369 st_perstateowner);
6370 WARN_ON(!unhash_lock_stateid(stp));
6371 put_ol_stateid_locked(stp, &reaplist);
6372 }
6373 spin_unlock(&clp->cl_lock);
6374 free_ol_stateid_reaplist(&reaplist);
6375 remove_blocked_locks(lo);
6376 nfs4_put_stateowner(&lo->lo_owner);
6377
6378 return status;
6379}
6380
6381static inline struct nfs4_client_reclaim *
6382alloc_reclaim(void)
6383{
6384 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
6385}
6386
6387bool
6388nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
6389{
6390 struct nfs4_client_reclaim *crp;
6391
6392 crp = nfsd4_find_reclaim_client(name, nn);
6393 return (crp && crp->cr_clp);
6394}
6395
/*
 * On failure all reclaim bets are off; the client ends up with
 * nfserr_no_grace.
 */
6399struct nfs4_client_reclaim *
6400nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
6401{
6402 unsigned int strhashval;
6403 struct nfs4_client_reclaim *crp;
6404
6405 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
6406 crp = alloc_reclaim();
6407 if (crp) {
6408 strhashval = clientstr_hashval(name);
6409 INIT_LIST_HEAD(&crp->cr_strhash);
6410 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
6411 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
6412 crp->cr_clp = NULL;
6413 nn->reclaim_str_hashtbl_size++;
6414 }
6415 return crp;
6416}
6417
6418void
6419nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
6420{
6421 list_del(&crp->cr_strhash);
6422 kfree(crp);
6423 nn->reclaim_str_hashtbl_size--;
6424}
6425
6426void
6427nfs4_release_reclaim(struct nfsd_net *nn)
6428{
6429 struct nfs4_client_reclaim *crp = NULL;
6430 int i;
6431
6432 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6433 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
6434 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
6435 struct nfs4_client_reclaim, cr_strhash);
6436 nfs4_remove_reclaim_record(crp, nn);
6437 }
6438 }
6439 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
6440}
6441
/*
 * Called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
6444struct nfs4_client_reclaim *
6445nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
6446{
6447 unsigned int strhashval;
6448 struct nfs4_client_reclaim *crp = NULL;
6449
	dprintk("NFSD: %s for recdir %s\n", __func__, recdir);
6451
6452 strhashval = clientstr_hashval(recdir);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (same_name(crp->cr_recdir, recdir))
			return crp;
	}
6458 return NULL;
6459}
6460
/*
 * Called from OPEN. Look for clientid in reclaim list.
 */
6464__be32
6465nfs4_check_open_reclaim(clientid_t *clid,
6466 struct nfsd4_compound_state *cstate,
6467 struct nfsd_net *nn)
6468{
6469 __be32 status;
6470
6471 /* find clientid in conf_id_hashtbl */
6472 status = lookup_clientid(clid, cstate, nn);
6473 if (status)
6474 return nfserr_reclaim_bad;
6475
6476 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
6477 return nfserr_no_grace;
6478
6479 if (nfsd4_client_record_check(cstate->clp))
6480 return nfserr_reclaim_bad;
6481
6482 return nfs_ok;
6483}
6484
6485#ifdef CONFIG_NFSD_FAULT_INJECTION
6486static inline void
6487put_client(struct nfs4_client *clp)
6488{
6489 atomic_dec(&clp->cl_refcount);
6490}
6491
6492static struct nfs4_client *
6493nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
6494{
6495 struct nfs4_client *clp;
6496 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6497 nfsd_net_id);
6498
6499 if (!nfsd_netns_ready(nn))
6500 return NULL;
6501
6502 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6503 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
6504 return clp;
6505 }
6506 return NULL;
6507}
6508
6509u64
6510nfsd_inject_print_clients(void)
6511{
6512 struct nfs4_client *clp;
6513 u64 count = 0;
6514 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6515 nfsd_net_id);
6516 char buf[INET6_ADDRSTRLEN];
6517
6518 if (!nfsd_netns_ready(nn))
6519 return 0;
6520
6521 spin_lock(&nn->client_lock);
6522 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6523 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6524 pr_info("NFS Client: %s\n", buf);
6525 ++count;
6526 }
6527 spin_unlock(&nn->client_lock);
6528
6529 return count;
6530}
6531
6532u64
6533nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
6534{
6535 u64 count = 0;
6536 struct nfs4_client *clp;
6537 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6538 nfsd_net_id);
6539
6540 if (!nfsd_netns_ready(nn))
6541 return count;
6542
6543 spin_lock(&nn->client_lock);
6544 clp = nfsd_find_client(addr, addr_size);
6545 if (clp) {
6546 if (mark_client_expired_locked(clp) == nfs_ok)
6547 ++count;
6548 else
6549 clp = NULL;
6550 }
6551 spin_unlock(&nn->client_lock);
6552
6553 if (clp)
6554 expire_client(clp);
6555
6556 return count;
6557}
6558
6559u64
6560nfsd_inject_forget_clients(u64 max)
6561{
6562 u64 count = 0;
6563 struct nfs4_client *clp, *next;
6564 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6565 nfsd_net_id);
6566 LIST_HEAD(reaplist);
6567
6568 if (!nfsd_netns_ready(nn))
6569 return count;
6570
6571 spin_lock(&nn->client_lock);
6572 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6573 if (mark_client_expired_locked(clp) == nfs_ok) {
6574 list_add(&clp->cl_lru, &reaplist);
6575 if (max != 0 && ++count >= max)
6576 break;
6577 }
6578 }
6579 spin_unlock(&nn->client_lock);
6580
6581 list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
6582 expire_client(clp);
6583
6584 return count;
6585}
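/*
 * nfsd_inject_forget_clients() above and the lock, openowner, and
 * delegation reapers below all follow the same two-phase shape: collect
 * victims onto a private list while holding the relevant lock (taking a
 * reference so they cannot vanish), then drop the lock and do the
 * expensive teardown outside it. A minimal sketch (names hypothetical):
 *
 *	LIST_HEAD(reaplist);
 *
 *	spin_lock(&lock);
 *	for_each_candidate(obj)
 *		move_to_reaplist(obj);		(also grabs a reference)
 *	spin_unlock(&lock);
 *	list_for_each_entry_safe(obj, tmp, &reaplist, node)
 *		teardown(obj);			(may sleep)
 */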
6586
6587static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
6588 const char *type)
6589{
6590 char buf[INET6_ADDRSTRLEN];
6591 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6592 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
6593}
6594
6595static void
6596nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
6597 struct list_head *collect)
6598{
6599 struct nfs4_client *clp = lst->st_stid.sc_client;
6600 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6601 nfsd_net_id);
6602
6603 if (!collect)
6604 return;
6605
6606 lockdep_assert_held(&nn->client_lock);
6607 atomic_inc(&clp->cl_refcount);
6608 list_add(&lst->st_locks, collect);
6609}
6610
6611static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
6612 struct list_head *collect,
6613 bool (*func)(struct nfs4_ol_stateid *))
6614{
6615 struct nfs4_openowner *oop;
6616 struct nfs4_ol_stateid *stp, *st_next;
6617 struct nfs4_ol_stateid *lst, *lst_next;
6618 u64 count = 0;
6619
6620 spin_lock(&clp->cl_lock);
6621 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
6622 list_for_each_entry_safe(stp, st_next,
6623 &oop->oo_owner.so_stateids, st_perstateowner) {
6624 list_for_each_entry_safe(lst, lst_next,
6625 &stp->st_locks, st_locks) {
6626 if (func) {
6627 if (func(lst))
6628 nfsd_inject_add_lock_to_list(lst,
6629 collect);
6630 }
6631 ++count;
				/*
				 * Although "count" is a 64-bit integer, it
				 * must not blow up the 32-bit
				 * clp->cl_refcount, so warn if we start to
				 * approach INT_MAX here.
				 */
6639 WARN_ON_ONCE(count == (INT_MAX / 2));
6640 if (count == max)
6641 goto out;
6642 }
6643 }
6644 }
6645out:
6646 spin_unlock(&clp->cl_lock);
6647
6648 return count;
6649}
6650
6651static u64
6652nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
6653 u64 max)
6654{
6655 return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
6656}
6657
6658static u64
6659nfsd_print_client_locks(struct nfs4_client *clp)
6660{
6661 u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
6662 nfsd_print_count(clp, count, "locked files");
6663 return count;
6664}
6665
6666u64
6667nfsd_inject_print_locks(void)
6668{
6669 struct nfs4_client *clp;
6670 u64 count = 0;
6671 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6672 nfsd_net_id);
6673
6674 if (!nfsd_netns_ready(nn))
6675 return 0;
6676
6677 spin_lock(&nn->client_lock);
6678 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6679 count += nfsd_print_client_locks(clp);
6680 spin_unlock(&nn->client_lock);
6681
6682 return count;
6683}
6684
6685static void
6686nfsd_reap_locks(struct list_head *reaplist)
6687{
6688 struct nfs4_client *clp;
6689 struct nfs4_ol_stateid *stp, *next;
6690
6691 list_for_each_entry_safe(stp, next, reaplist, st_locks) {
6692 list_del_init(&stp->st_locks);
6693 clp = stp->st_stid.sc_client;
6694 nfs4_put_stid(&stp->st_stid);
6695 put_client(clp);
6696 }
6697}
6698
6699u64
6700nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
6701{
	u64 count = 0;
6703 struct nfs4_client *clp;
6704 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6705 nfsd_net_id);
6706 LIST_HEAD(reaplist);
6707
6708 if (!nfsd_netns_ready(nn))
6709 return count;
6710
6711 spin_lock(&nn->client_lock);
6712 clp = nfsd_find_client(addr, addr_size);
6713 if (clp)
6714 count = nfsd_collect_client_locks(clp, &reaplist, 0);
6715 spin_unlock(&nn->client_lock);
6716 nfsd_reap_locks(&reaplist);
6717 return count;
6718}
6719
6720u64
6721nfsd_inject_forget_locks(u64 max)
6722{
6723 u64 count = 0;
6724 struct nfs4_client *clp;
6725 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6726 nfsd_net_id);
6727 LIST_HEAD(reaplist);
6728
6729 if (!nfsd_netns_ready(nn))
6730 return count;
6731
6732 spin_lock(&nn->client_lock);
6733 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6734 count += nfsd_collect_client_locks(clp, &reaplist, max - count);
6735 if (max != 0 && count >= max)
6736 break;
6737 }
6738 spin_unlock(&nn->client_lock);
6739 nfsd_reap_locks(&reaplist);
6740 return count;
6741}
6742
6743static u64
6744nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
6745 struct list_head *collect,
6746 void (*func)(struct nfs4_openowner *))
6747{
6748 struct nfs4_openowner *oop, *next;
6749 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6750 nfsd_net_id);
6751 u64 count = 0;
6752
6753 lockdep_assert_held(&nn->client_lock);
6754
6755 spin_lock(&clp->cl_lock);
6756 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
6757 if (func) {
6758 func(oop);
6759 if (collect) {
6760 atomic_inc(&clp->cl_refcount);
6761 list_add(&oop->oo_perclient, collect);
6762 }
6763 }
6764 ++count;
		/*
		 * Although "count" is a 64-bit integer, it must not blow up
		 * the 32-bit clp->cl_refcount, so warn if we start to
		 * approach INT_MAX here.
		 */
6771 WARN_ON_ONCE(count == (INT_MAX / 2));
6772 if (count == max)
6773 break;
6774 }
6775 spin_unlock(&clp->cl_lock);
6776
6777 return count;
6778}
6779
6780static u64
6781nfsd_print_client_openowners(struct nfs4_client *clp)
6782{
6783 u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
6784
6785 nfsd_print_count(clp, count, "openowners");
6786 return count;
6787}
6788
6789static u64
6790nfsd_collect_client_openowners(struct nfs4_client *clp,
6791 struct list_head *collect, u64 max)
6792{
6793 return nfsd_foreach_client_openowner(clp, max, collect,
6794 unhash_openowner_locked);
6795}
6796
6797u64
6798nfsd_inject_print_openowners(void)
6799{
6800 struct nfs4_client *clp;
6801 u64 count = 0;
6802 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6803 nfsd_net_id);
6804
6805 if (!nfsd_netns_ready(nn))
6806 return 0;
6807
6808 spin_lock(&nn->client_lock);
6809 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6810 count += nfsd_print_client_openowners(clp);
6811 spin_unlock(&nn->client_lock);
6812
6813 return count;
6814}
6815
6816static void
6817nfsd_reap_openowners(struct list_head *reaplist)
6818{
6819 struct nfs4_client *clp;
6820 struct nfs4_openowner *oop, *next;
6821
6822 list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
6823 list_del_init(&oop->oo_perclient);
6824 clp = oop->oo_owner.so_client;
6825 release_openowner(oop);
6826 put_client(clp);
6827 }
6828}
6829
6830u64
6831nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6832 size_t addr_size)
6833{
	u64 count = 0;
6835 struct nfs4_client *clp;
6836 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6837 nfsd_net_id);
6838 LIST_HEAD(reaplist);
6839
6840 if (!nfsd_netns_ready(nn))
6841 return count;
6842
6843 spin_lock(&nn->client_lock);
6844 clp = nfsd_find_client(addr, addr_size);
6845 if (clp)
6846 count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6847 spin_unlock(&nn->client_lock);
6848 nfsd_reap_openowners(&reaplist);
6849 return count;
6850}
6851
6852u64
6853nfsd_inject_forget_openowners(u64 max)
6854{
6855 u64 count = 0;
6856 struct nfs4_client *clp;
6857 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6858 nfsd_net_id);
6859 LIST_HEAD(reaplist);
6860
6861 if (!nfsd_netns_ready(nn))
6862 return count;
6863
6864 spin_lock(&nn->client_lock);
6865 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6866 count += nfsd_collect_client_openowners(clp, &reaplist,
6867 max - count);
6868 if (max != 0 && count >= max)
6869 break;
6870 }
6871 spin_unlock(&nn->client_lock);
6872 nfsd_reap_openowners(&reaplist);
6873 return count;
6874}
6875
6876static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
6877 struct list_head *victims)
6878{
6879 struct nfs4_delegation *dp, *next;
6880 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6881 nfsd_net_id);
6882 u64 count = 0;
6883
6884 lockdep_assert_held(&nn->client_lock);
6885
6886 spin_lock(&state_lock);
6887 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
6888 if (victims) {
6889 /*
6890 * It's not safe to mess with delegations that have a
6891 * non-zero dl_time. They might have already been broken
6892 * and could be processed by the laundromat outside of
6893 * the state_lock. Just leave them be.
6894 */
6895 if (dp->dl_time != 0)
6896 continue;
6897
6898 atomic_inc(&clp->cl_refcount);
6899 WARN_ON(!unhash_delegation_locked(dp));
6900 list_add(&dp->dl_recall_lru, victims);
6901 }
6902 ++count;
		/*
		 * Although "count" is a 64-bit integer, it must not blow up
		 * the 32-bit clp->cl_refcount, so warn if we start to
		 * approach INT_MAX here.
		 */
6909 WARN_ON_ONCE(count == (INT_MAX / 2));
6910 if (count == max)
6911 break;
6912 }
6913 spin_unlock(&state_lock);
6914 return count;
6915}
6916
6917static u64
6918nfsd_print_client_delegations(struct nfs4_client *clp)
6919{
6920 u64 count = nfsd_find_all_delegations(clp, 0, NULL);
6921
6922 nfsd_print_count(clp, count, "delegations");
6923 return count;
6924}
6925
6926u64
6927nfsd_inject_print_delegations(void)
6928{
6929 struct nfs4_client *clp;
6930 u64 count = 0;
6931 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6932 nfsd_net_id);
6933
6934 if (!nfsd_netns_ready(nn))
6935 return 0;
6936
6937 spin_lock(&nn->client_lock);
6938 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6939 count += nfsd_print_client_delegations(clp);
6940 spin_unlock(&nn->client_lock);
6941
6942 return count;
6943}
6944
6945static void
6946nfsd_forget_delegations(struct list_head *reaplist)
6947{
6948 struct nfs4_client *clp;
6949 struct nfs4_delegation *dp, *next;
6950
6951 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6952 list_del_init(&dp->dl_recall_lru);
6953 clp = dp->dl_stid.sc_client;
6954 revoke_delegation(dp);
6955 put_client(clp);
6956 }
6957}
6958
6959u64
6960nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
6961 size_t addr_size)
6962{
6963 u64 count = 0;
6964 struct nfs4_client *clp;
6965 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6966 nfsd_net_id);
6967 LIST_HEAD(reaplist);
6968
6969 if (!nfsd_netns_ready(nn))
6970 return count;
6971
6972 spin_lock(&nn->client_lock);
6973 clp = nfsd_find_client(addr, addr_size);
6974 if (clp)
6975 count = nfsd_find_all_delegations(clp, 0, &reaplist);
6976 spin_unlock(&nn->client_lock);
6977
6978 nfsd_forget_delegations(&reaplist);
6979 return count;
6980}
6981
6982u64
6983nfsd_inject_forget_delegations(u64 max)
6984{
6985 u64 count = 0;
6986 struct nfs4_client *clp;
6987 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6988 nfsd_net_id);
6989 LIST_HEAD(reaplist);
6990
6991 if (!nfsd_netns_ready(nn))
6992 return count;
6993
6994 spin_lock(&nn->client_lock);
6995 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6996 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6997 if (max != 0 && count >= max)
6998 break;
6999 }
7000 spin_unlock(&nn->client_lock);
7001 nfsd_forget_delegations(&reaplist);
7002 return count;
7003}
7004
7005static void
7006nfsd_recall_delegations(struct list_head *reaplist)
7007{
7008 struct nfs4_client *clp;
7009 struct nfs4_delegation *dp, *next;
7010
7011 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
7012 list_del_init(&dp->dl_recall_lru);
7013 clp = dp->dl_stid.sc_client;
		/*
		 * We skipped all entries that had a non-zero dl_time when
		 * collecting, so we can now safely reset dl_time back to 0.
		 * If a delegation break comes in now, it won't make any
		 * difference since we're recalling it either way.
		 */
7020 spin_lock(&state_lock);
7021 dp->dl_time = 0;
7022 spin_unlock(&state_lock);
7023 nfsd_break_one_deleg(dp);
7024 put_client(clp);
7025 }
7026}
7027
7028u64
7029nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
7030 size_t addr_size)
7031{
7032 u64 count = 0;
7033 struct nfs4_client *clp;
7034 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7035 nfsd_net_id);
7036 LIST_HEAD(reaplist);
7037
7038 if (!nfsd_netns_ready(nn))
7039 return count;
7040
7041 spin_lock(&nn->client_lock);
7042 clp = nfsd_find_client(addr, addr_size);
7043 if (clp)
7044 count = nfsd_find_all_delegations(clp, 0, &reaplist);
7045 spin_unlock(&nn->client_lock);
7046
7047 nfsd_recall_delegations(&reaplist);
7048 return count;
7049}
7050
7051u64
7052nfsd_inject_recall_delegations(u64 max)
7053{
7054 u64 count = 0;
7055 struct nfs4_client *clp, *next;
7056 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7057 nfsd_net_id);
7058 LIST_HEAD(reaplist);
7059
7060 if (!nfsd_netns_ready(nn))
7061 return count;
7062
7063 spin_lock(&nn->client_lock);
7064 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
7065 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && count >= max)
7067 break;
7068 }
7069 spin_unlock(&nn->client_lock);
7070 nfsd_recall_delegations(&reaplist);
7071 return count;
7072}
7073#endif /* CONFIG_NFSD_FAULT_INJECTION */
7074
7075/*
7076 * Since the lifetime of a delegation isn't limited to that of an open, a
7077 * client may quite reasonably hang on to a delegation as long as it has
7078 * the inode cached. This becomes an obvious problem the first time a
7079 * client's inode cache approaches the size of the server's total memory.
7080 *
7081 * For now we avoid this problem by imposing a hard limit on the number
7082 * of delegations, which varies according to the server's memory size.
7083 */
7084static void
7085set_max_delegations(void)
7086{
7087 /*
7088 * Allow at most 4 delegations per megabyte of RAM. Quick
7089 * estimates suggest that in the worst case (where every delegation
7090 * is for a different inode), a delegation could take about 1.5K,
7091 * giving a worst case usage of about 6% of memory.
7092 */
7093 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7094}
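/*
 * Worked example of the shift above: nr_free_buffer_pages() counts pages,
 * and pages >> (20 - PAGE_SHIFT) is megabytes, so allowing four
 * delegations per megabyte means shifting two bits less:
 * pages >> (20 - 2 - PAGE_SHIFT). With 4K pages (PAGE_SHIFT == 12) that
 * is pages >> 6, e.g. ~1GB of free buffer pages (262144 pages) yields a
 * cap of 4096 delegations.
 */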
7095
7096static int nfs4_state_create_net(struct net *net)
7097{
7098 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7099 int i;
7100
	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					    sizeof(struct list_head),
					    GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
7111 if (!nn->sessionid_hashtbl)
7112 goto err_sessionid;
7113
7114 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7115 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7116 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7117 }
7118 for (i = 0; i < SESSION_HASH_SIZE; i++)
7119 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7120 nn->conf_name_tree = RB_ROOT;
7121 nn->unconf_name_tree = RB_ROOT;
7122 nn->boot_time = get_seconds();
7123 nn->grace_ended = false;
7124 nn->nfsd4_manager.block_opens = true;
7125 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7126 INIT_LIST_HEAD(&nn->client_lru);
7127 INIT_LIST_HEAD(&nn->close_lru);
7128 INIT_LIST_HEAD(&nn->del_recall_lru);
7129 spin_lock_init(&nn->client_lock);
7130
7131 spin_lock_init(&nn->blocked_locks_lock);
7132 INIT_LIST_HEAD(&nn->blocked_locks_lru);
7133
7134 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7135 get_net(net);
7136
7137 return 0;
7138
7139err_sessionid:
7140 kfree(nn->unconf_id_hashtbl);
7141err_unconf_id:
7142 kfree(nn->conf_id_hashtbl);
7143err:
7144 return -ENOMEM;
7145}
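/*
 * The error path above is the usual kernel "goto unwind" ladder: each
 * allocation gets a label that frees everything allocated before it, so
 * a failure at step N jumps to the label that undoes steps N-1..1 in
 * reverse order. Adding a fourth table would mean one new allocation, one
 * new label above err_sessionid, and a kfree of the third table under it.
 */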
7146
7147static void
7148nfs4_state_destroy_net(struct net *net)
7149{
7150 int i;
7151 struct nfs4_client *clp = NULL;
7152 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7153
7154 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7155 while (!list_empty(&nn->conf_id_hashtbl[i])) {
7156 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7157 destroy_client(clp);
7158 }
7159 }
7160
7161 WARN_ON(!list_empty(&nn->blocked_locks_lru));
7162
7163 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7164 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7165 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7166 destroy_client(clp);
7167 }
7168 }
7169
7170 kfree(nn->sessionid_hashtbl);
7171 kfree(nn->unconf_id_hashtbl);
7172 kfree(nn->conf_id_hashtbl);
7173 put_net(net);
7174}
7175
7176int
7177nfs4_state_start_net(struct net *net)
7178{
7179 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7180 int ret;
7181
7182 ret = nfs4_state_create_net(net);
7183 if (ret)
7184 return ret;
7185 locks_start_grace(net, &nn->nfsd4_manager);
7186 nfsd4_client_tracking_init(net);
7187 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
7188 nn->nfsd4_grace, net);
7189 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7190 return 0;
7191}
7192
7193/* initialization to perform when the nfsd service is started: */
7194
7195int
7196nfs4_state_start(void)
7197{
7198 int ret;
7199
7200 ret = set_callback_cred();
7201 if (ret)
7202 return ret;
7203
7204 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
7205 if (laundry_wq == NULL) {
7206 ret = -ENOMEM;
7207 goto out_cleanup_cred;
7208 }
7209 ret = nfsd4_create_callback_queue();
7210 if (ret)
7211 goto out_free_laundry;
7212
7213 set_max_delegations();
7214 return 0;
7215
7216out_free_laundry:
7217 destroy_workqueue(laundry_wq);
7218out_cleanup_cred:
7219 cleanup_callback_cred();
7220 return ret;
7221}
7222
7223void
7224nfs4_state_shutdown_net(struct net *net)
7225{
7226 struct nfs4_delegation *dp = NULL;
7227 struct list_head *pos, *next, reaplist;
7228 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7229
7230 cancel_delayed_work_sync(&nn->laundromat_work);
7231 locks_end_grace(&nn->nfsd4_manager);
7232
7233 INIT_LIST_HEAD(&reaplist);
7234 spin_lock(&state_lock);
7235 list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7237 WARN_ON(!unhash_delegation_locked(dp));
7238 list_add(&dp->dl_recall_lru, &reaplist);
7239 }
7240 spin_unlock(&state_lock);
7241 list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7243 list_del_init(&dp->dl_recall_lru);
7244 put_clnt_odstate(dp->dl_clnt_odstate);
7245 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
7246 nfs4_put_stid(&dp->dl_stid);
7247 }
7248
7249 nfsd4_client_tracking_exit(net);
7250 nfs4_state_destroy_net(net);
7251}
7252
7253void
7254nfs4_state_shutdown(void)
7255{
7256 destroy_workqueue(laundry_wq);
7257 nfsd4_destroy_callback_queue();
7258 cleanup_callback_cred();
7259}
7260
7261static void
7262get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7263{
7264 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
7265 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7266}
7267
7268static void
7269put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7270{
7271 if (cstate->minorversion) {
7272 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7273 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7274 }
7275}
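/*
 * Together, put_stateid() and get_stateid() implement the NFSv4.1
 * "current stateid" facility (RFC 5661, section 16.2.3.1.2): an op that
 * produces a stateid saves it in the compound state, and a later op in
 * the same compound may pass the special current-stateid value to mean
 * "whatever the previous op produced". For example, a compound of OPEN
 * followed by WRITE against the current stateid avoids the client having
 * to echo the open stateid back over the wire.
 */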
7276
7277void
7278clear_current_stateid(struct nfsd4_compound_state *cstate)
7279{
7280 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7281}
7282
/*
 * Functions to set the current stateid.
 */
7286void
7287nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7288 union nfsd4_op_u *u)
7289{
7290 put_stateid(cstate, &u->open_downgrade.od_stateid);
7291}
7292
7293void
7294nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7295 union nfsd4_op_u *u)
7296{
7297 put_stateid(cstate, &u->open.op_stateid);
7298}
7299
7300void
7301nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7302 union nfsd4_op_u *u)
7303{
7304 put_stateid(cstate, &u->close.cl_stateid);
7305}
7306
7307void
7308nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7309 union nfsd4_op_u *u)
7310{
7311 put_stateid(cstate, &u->lock.lk_resp_stateid);
7312}
7313
/*
 * Functions to consume the current stateid.
 */
7317
7318void
7319nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7320 union nfsd4_op_u *u)
7321{
7322 get_stateid(cstate, &u->open_downgrade.od_stateid);
7323}
7324
7325void
7326nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7327 union nfsd4_op_u *u)
7328{
7329 get_stateid(cstate, &u->delegreturn.dr_stateid);
7330}
7331
7332void
7333nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7334 union nfsd4_op_u *u)
7335{
7336 get_stateid(cstate, &u->free_stateid.fr_stateid);
7337}
7338
7339void
7340nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7341 union nfsd4_op_u *u)
7342{
7343 get_stateid(cstate, &u->setattr.sa_stateid);
7344}
7345
7346void
7347nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7348 union nfsd4_op_u *u)
7349{
7350 get_stateid(cstate, &u->close.cl_stateid);
7351}
7352
7353void
7354nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7355 union nfsd4_op_u *u)
7356{
7357 get_stateid(cstate, &u->locku.lu_stateid);
7358}
7359
7360void
7361nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7362 union nfsd4_op_u *u)
7363{
7364 get_stateid(cstate, &u->read.rd_stateid);
7365}
7366
7367void
7368nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7369 union nfsd4_op_u *u)
7370{
7371 get_stateid(cstate, &u->write.wr_stateid);
7372}