2 * Copyright (c) 2001 The Regents of the University of Michigan.
5 * Kendrick Smith <kmsmith@umich.edu>
6 * Andy Adamson <kandros@umich.edu>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the University nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <linux/file.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include <linux/string_helpers.h>
46 #include <linux/fsnotify.h>
47 #include <linux/nfs_ssc.h>
51 #include "current_stateid.h"
55 #include "filecache.h"
58 #define NFSDDBG_FACILITY NFSDDBG_PROC
60 #define all_ones {{~0,~0},~0}
61 static const stateid_t one_stateid = {
63 .si_opaque = all_ones,
65 static const stateid_t zero_stateid = {
68 static const stateid_t currentstateid = {
71 static const stateid_t close_stateid = {
72 .si_generation = 0xffffffffU,
75 static u64 current_sessionid = 1;
77 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
78 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
79 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
80 #define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
82 /* forward declarations */
83 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
84 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
85 void nfsd4_end_grace(struct nfsd_net *nn);
86 static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
91 * Currently used for the del_recall_lru and file hash table. In an
92 * effort to decrease the scope of the client_mutex, this spinlock may
93 * eventually cover more:
95 static DEFINE_SPINLOCK(state_lock);
97 enum nfsd4_st_mutex_lock_subclass {
98 OPEN_STATEID_MUTEX = 0,
99 LOCK_STATEID_MUTEX = 1,
103 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
104 * the refcount on the open stateid to drop.
106 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
109 * A waitqueue where a writer to clients/#/ctl destroying a client can
110 * wait for cl_rpc_users to drop to 0 and then for the client to be
113 static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
115 static struct kmem_cache *client_slab;
116 static struct kmem_cache *openowner_slab;
117 static struct kmem_cache *lockowner_slab;
118 static struct kmem_cache *file_slab;
119 static struct kmem_cache *stateid_slab;
120 static struct kmem_cache *deleg_slab;
121 static struct kmem_cache *odstate_slab;
123 static void free_session(struct nfsd4_session *);
125 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
126 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
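/* Has this session been marked dead (no new references may be taken)? */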
128 static bool is_session_dead(struct nfsd4_session *ses)
130 return ses->se_flags & NFS4_SESSION_DEAD;
133 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
135 if (atomic_read(&ses->se_ref) > ref_held_by_me)
136 return nfserr_jukebox;
137 ses->se_flags |= NFS4_SESSION_DEAD;
141 static bool is_client_expired(struct nfs4_client *clp)
143 return clp->cl_time == 0;
146 static __be32 get_client_locked(struct nfs4_client *clp)
148 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
150 lockdep_assert_held(&nn->client_lock);
152 if (is_client_expired(clp))
153 return nfserr_expired;
154 atomic_inc(&clp->cl_rpc_users);
158 /* must be called under the client_lock */
160 renew_client_locked(struct nfs4_client *clp)
162 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
164 if (is_client_expired(clp)) {
166 printk("%s: client (clientid %08x/%08x) already expired\n",
168 clp->cl_clientid.cl_boot,
169 clp->cl_clientid.cl_id);
173 list_move_tail(&clp->cl_lru, &nn->client_lru);
174 clp->cl_time = ktime_get_boottime_seconds();
177 static void put_client_renew_locked(struct nfs4_client *clp)
179 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
181 lockdep_assert_held(&nn->client_lock);
183 if (!atomic_dec_and_test(&clp->cl_rpc_users))
185 if (!is_client_expired(clp))
186 renew_client_locked(clp);
188 wake_up_all(&expiry_wq);
191 static void put_client_renew(struct nfs4_client *clp)
193 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
195 if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
197 if (!is_client_expired(clp))
198 renew_client_locked(clp);
200 wake_up_all(&expiry_wq);
201 spin_unlock(&nn->client_lock);
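/*
 * Take a reference on a session for use by an ongoing compound; fails
 * with nfserr_badsession if the session is already dead, or with the
 * client's expiry error if its nfs4_client has expired.
 */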
204 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
208 if (is_session_dead(ses))
209 return nfserr_badsession;
210 status = get_client_locked(ses->se_client);
213 atomic_inc(&ses->se_ref);
217 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
219 struct nfs4_client *clp = ses->se_client;
220 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
222 lockdep_assert_held(&nn->client_lock);
224 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
226 put_client_renew_locked(clp);
229 static void nfsd4_put_session(struct nfsd4_session *ses)
231 struct nfs4_client *clp = ses->se_client;
232 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
234 spin_lock(&nn->client_lock);
235 nfsd4_put_session_locked(ses);
236 spin_unlock(&nn->client_lock);
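/*
 * Find a pending blocked-lock request for this lockowner and filehandle;
 * if found, dequeue it from the owner and LRU lists and cancel the
 * file_lock block before returning it.
 */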
239 static struct nfsd4_blocked_lock *
240 find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
243 struct nfsd4_blocked_lock *cur, *found = NULL;
245 spin_lock(&nn->blocked_locks_lock);
246 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
247 if (fh_match(fh, &cur->nbl_fh)) {
248 list_del_init(&cur->nbl_list);
249 list_del_init(&cur->nbl_lru);
254 spin_unlock(&nn->blocked_locks_lock);
256 locks_delete_block(&found->nbl_lock);
260 static struct nfsd4_blocked_lock *
261 find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
264 struct nfsd4_blocked_lock *nbl;
266 nbl = find_blocked_lock(lo, fh, nn);
268 nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
270 INIT_LIST_HEAD(&nbl->nbl_list);
271 INIT_LIST_HEAD(&nbl->nbl_lru);
272 fh_copy_shallow(&nbl->nbl_fh, fh);
273 locks_init_lock(&nbl->nbl_lock);
274 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
275 &nfsd4_cb_notify_lock_ops,
276 NFSPROC4_CLNT_CB_NOTIFY_LOCK);
283 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
285 locks_delete_block(&nbl->nbl_lock);
286 locks_release_private(&nbl->nbl_lock);
291 remove_blocked_locks(struct nfs4_lockowner *lo)
293 struct nfs4_client *clp = lo->lo_owner.so_client;
294 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
295 struct nfsd4_blocked_lock *nbl;
298 /* Dequeue all blocked locks */
299 spin_lock(&nn->blocked_locks_lock);
300 while (!list_empty(&lo->lo_blocked)) {
301 nbl = list_first_entry(&lo->lo_blocked,
302 struct nfsd4_blocked_lock,
304 list_del_init(&nbl->nbl_list);
305 list_move(&nbl->nbl_lru, &reaplist);
307 spin_unlock(&nn->blocked_locks_lock);
310 while (!list_empty(&reaplist)) {
311 nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
313 list_del_init(&nbl->nbl_lru);
314 free_blocked_lock(nbl);
319 nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
321 struct nfsd4_blocked_lock *nbl = container_of(cb,
322 struct nfsd4_blocked_lock, nbl_cb);
323 locks_delete_block(&nbl->nbl_lock);
327 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
330 * Since this is just an optimization, we don't try very hard if it
331 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
332 * just quit trying on anything else.
334 switch (task->tk_status) {
336 rpc_delay(task, 1 * HZ);
344 nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
346 struct nfsd4_blocked_lock *nbl = container_of(cb,
347 struct nfsd4_blocked_lock, nbl_cb);
349 free_blocked_lock(nbl);
352 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
353 .prepare = nfsd4_cb_notify_lock_prepare,
354 .done = nfsd4_cb_notify_lock_done,
355 .release = nfsd4_cb_notify_lock_release,
359 * We store the NONE, READ, WRITE, and BOTH bits separately in the
360 * st_{access,deny}_bmap field of the stateid, in order to track not
361 * only what share bits are currently in force, but also what
362 * combinations of share bits previous opens have used. This allows us
363 * to enforce the recommendation of rfc 3530 14.2.19 that the server
364 * return an error if the client attempts to downgrade to a combination
365 * of share bits not explicable by closing some of its previous opens.
367 * XXX: This enforcement is actually incomplete, since we don't keep
368 * track of access/deny bit combinations; so, e.g., we allow:
370 * OPEN allow read, deny write
371 * OPEN allow both, deny none
372 * DOWNGRADE allow read, deny none
374 * which we should reject.
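/*
 * Example: an OPEN for READ (bit 1) followed by an OPEN for BOTH (bit 3)
 * leaves bits 1 and 3 set in st_access_bmap, which bmap_to_share_mode()
 * folds back into 1 | 3 == NFS4_SHARE_ACCESS_BOTH.
 */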
377 bmap_to_share_mode(unsigned long bmap)
380 unsigned int access = 0;
382 for (i = 1; i < 4; i++) {
383 if (test_bit(i, &bmap))
389 /* set share access for a given stateid */
391 set_access(u32 access, struct nfs4_ol_stateid *stp)
393 unsigned char mask = 1 << access;
395 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
396 stp->st_access_bmap |= mask;
399 /* clear share access for a given stateid */
401 clear_access(u32 access, struct nfs4_ol_stateid *stp)
403 unsigned char mask = 1 << access;
405 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
406 stp->st_access_bmap &= ~mask;
409 /* test whether a given stateid has access */
411 test_access(u32 access, struct nfs4_ol_stateid *stp)
413 unsigned char mask = 1 << access;
415 return (bool)(stp->st_access_bmap & mask);
418 /* set share deny for a given stateid */
420 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
422 unsigned char mask = 1 << deny;
424 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
425 stp->st_deny_bmap |= mask;
428 /* clear share deny for a given stateid */
430 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
432 unsigned char mask = 1 << deny;
434 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
435 stp->st_deny_bmap &= ~mask;
438 /* test whether a given stateid is denying specific access */
440 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
442 unsigned char mask = 1 << deny;
444 return (bool)(stp->st_deny_bmap & mask);
447 static int nfs4_access_to_omode(u32 access)
449 switch (access & NFS4_SHARE_ACCESS_BOTH) {
450 case NFS4_SHARE_ACCESS_READ:
452 case NFS4_SHARE_ACCESS_WRITE:
454 case NFS4_SHARE_ACCESS_BOTH:
462 access_permit_read(struct nfs4_ol_stateid *stp)
464 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
465 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
466 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
470 access_permit_write(struct nfs4_ol_stateid *stp)
472 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
473 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
476 static inline struct nfs4_stateowner *
477 nfs4_get_stateowner(struct nfs4_stateowner *sop)
479 atomic_inc(&sop->so_count);
484 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
486 return (sop->so_owner.len == owner->len) &&
487 0 == memcmp(sop->so_owner.data, owner->data, owner->len);
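/*
 * Look up an open owner by owner string in the client's owner hash;
 * returns it with an extra reference, or NULL if none is found.
 * The caller must hold clp->cl_lock.
 */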
490 static struct nfs4_openowner *
491 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
492 struct nfs4_client *clp)
494 struct nfs4_stateowner *so;
496 lockdep_assert_held(&clp->cl_lock);
498 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
500 if (!so->so_is_open_owner)
502 if (same_owner_str(so, &open->op_owner))
503 return openowner(nfs4_get_stateowner(so));
508 static struct nfs4_openowner *
509 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
510 struct nfs4_client *clp)
512 struct nfs4_openowner *oo;
514 spin_lock(&clp->cl_lock);
515 oo = find_openstateowner_str_locked(hashval, open, clp);
516 spin_unlock(&clp->cl_lock);
521 opaque_hashval(const void *ptr, int nbytes)
523 unsigned char *cptr = (unsigned char *) ptr;
533 static void nfsd4_free_file_rcu(struct rcu_head *rcu)
535 struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
537 kmem_cache_free(file_slab, fp);
541 put_nfs4_file(struct nfs4_file *fi)
543 might_lock(&state_lock);
545 if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
546 hlist_del_rcu(&fi->fi_hash);
547 spin_unlock(&state_lock);
548 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
549 WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
550 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
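/* Grab a reference to the cached open file for @oflag, if one is present. */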
554 static struct nfsd_file *
555 __nfs4_get_fd(struct nfs4_file *f, int oflag)
557 if (f->fi_fds[oflag])
558 return nfsd_file_get(f->fi_fds[oflag]);
562 static struct nfsd_file *
563 find_writeable_file_locked(struct nfs4_file *f)
565 struct nfsd_file *ret;
567 lockdep_assert_held(&f->fi_lock);
569 ret = __nfs4_get_fd(f, O_WRONLY);
571 ret = __nfs4_get_fd(f, O_RDWR);
575 static struct nfsd_file *
576 find_writeable_file(struct nfs4_file *f)
578 struct nfsd_file *ret;
580 spin_lock(&f->fi_lock);
581 ret = find_writeable_file_locked(f);
582 spin_unlock(&f->fi_lock);
587 static struct nfsd_file *
588 find_readable_file_locked(struct nfs4_file *f)
590 struct nfsd_file *ret;
592 lockdep_assert_held(&f->fi_lock);
594 ret = __nfs4_get_fd(f, O_RDONLY);
596 ret = __nfs4_get_fd(f, O_RDWR);
600 static struct nfsd_file *
601 find_readable_file(struct nfs4_file *f)
603 struct nfsd_file *ret;
605 spin_lock(&f->fi_lock);
606 ret = find_readable_file_locked(f);
607 spin_unlock(&f->fi_lock);
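/*
 * Return any open nfsd_file for this nfs4_file, trying read/write first,
 * then write-only, then read-only.
 */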
613 find_any_file(struct nfs4_file *f)
615 struct nfsd_file *ret;
619 spin_lock(&f->fi_lock);
620 ret = __nfs4_get_fd(f, O_RDWR);
622 ret = __nfs4_get_fd(f, O_WRONLY);
624 ret = __nfs4_get_fd(f, O_RDONLY);
626 spin_unlock(&f->fi_lock);
630 static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
632 struct nfsd_file *ret = NULL;
634 spin_lock(&f->fi_lock);
635 if (f->fi_deleg_file)
636 ret = nfsd_file_get(f->fi_deleg_file);
637 spin_unlock(&f->fi_lock);
641 static atomic_long_t num_delegations;
642 unsigned long max_delegations;
645 * Open owner state (share locks)
648 /* hash tables for lock and open owners */
649 #define OWNER_HASH_BITS 8
650 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
651 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
653 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
657 ret = opaque_hashval(ownername->data, ownername->len);
658 return ret & OWNER_HASH_MASK;
661 /* hash table for nfs4_file */
662 #define FILE_HASH_BITS 8
663 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
665 static unsigned int file_hashval(struct svc_fh *fh)
667 struct inode *inode = d_inode(fh->fh_dentry);
669 /* XXX: why not (here & in file cache) use inode? */
670 return (unsigned int)hash_long(inode->i_ino, FILE_HASH_BITS);
673 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
676 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
678 lockdep_assert_held(&fp->fi_lock);
680 if (access & NFS4_SHARE_ACCESS_WRITE)
681 atomic_inc(&fp->fi_access[O_WRONLY]);
682 if (access & NFS4_SHARE_ACCESS_READ)
683 atomic_inc(&fp->fi_access[O_RDONLY]);
687 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
689 lockdep_assert_held(&fp->fi_lock);
691 /* Does this access mode make sense? */
692 if (access & ~NFS4_SHARE_ACCESS_BOTH)
695 /* Does it conflict with a deny mode already set? */
696 if ((access & fp->fi_share_deny) != 0)
697 return nfserr_share_denied;
699 __nfs4_file_get_access(fp, access);
703 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
705 /* Common case is that there is no deny mode. */
707 /* Does this deny mode make sense? */
708 if (deny & ~NFS4_SHARE_DENY_BOTH)
711 if ((deny & NFS4_SHARE_DENY_READ) &&
712 atomic_read(&fp->fi_access[O_RDONLY]))
713 return nfserr_share_denied;
715 if ((deny & NFS4_SHARE_DENY_WRITE) &&
716 atomic_read(&fp->fi_access[O_WRONLY]))
717 return nfserr_share_denied;
722 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
724 might_lock(&fp->fi_lock);
726 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
727 struct nfsd_file *f1 = NULL;
728 struct nfsd_file *f2 = NULL;
730 swap(f1, fp->fi_fds[oflag]);
731 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
732 swap(f2, fp->fi_fds[O_RDWR]);
733 spin_unlock(&fp->fi_lock);
741 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
743 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
745 if (access & NFS4_SHARE_ACCESS_WRITE)
746 __nfs4_file_put_access(fp, O_WRONLY);
747 if (access & NFS4_SHARE_ACCESS_READ)
748 __nfs4_file_put_access(fp, O_RDONLY);
752 * Allocate a new open/delegation state counter. This is needed for
753 * pNFS for proper return on close semantics.
755 * Note that we only allocate it for pNFS-enabled exports, otherwise
756 * all pointers to struct nfs4_clnt_odstate are always NULL.
758 static struct nfs4_clnt_odstate *
759 alloc_clnt_odstate(struct nfs4_client *clp)
761 struct nfs4_clnt_odstate *co;
763 co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
766 refcount_set(&co->co_odcount, 1);
772 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
774 struct nfs4_file *fp = co->co_file;
776 lockdep_assert_held(&fp->fi_lock);
777 list_add(&co->co_perfile, &fp->fi_clnt_odstate);
781 get_clnt_odstate(struct nfs4_clnt_odstate *co)
784 refcount_inc(&co->co_odcount);
788 put_clnt_odstate(struct nfs4_clnt_odstate *co)
790 struct nfs4_file *fp;
796 if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
797 list_del(&co->co_perfile);
798 spin_unlock(&fp->fi_lock);
800 nfsd4_return_all_file_layouts(co->co_client, fp);
801 kmem_cache_free(odstate_slab, co);
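/*
 * Return an existing odstate for this client/file pair if one is already
 * hashed (taking a reference on it), otherwise hash and return @new.
 */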
805 static struct nfs4_clnt_odstate *
806 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
808 struct nfs4_clnt_odstate *co;
809 struct nfs4_client *cl;
816 spin_lock(&fp->fi_lock);
817 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
818 if (co->co_client == cl) {
819 get_clnt_odstate(co);
825 hash_clnt_odstate_locked(new);
827 spin_unlock(&fp->fi_lock);
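/*
 * Allocate a generic stateid and install it in the client's stateid idr.
 * IDs are handed out cyclically so values are not reused immediately, and
 * the stateid is returned holding a single reference.
 */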
831 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
832 void (*sc_free)(struct nfs4_stid *))
834 struct nfs4_stid *stid;
837 stid = kmem_cache_zalloc(slab, GFP_KERNEL);
841 idr_preload(GFP_KERNEL);
842 spin_lock(&cl->cl_lock);
843 /* Reserving 0 for start of file in nfsdfs "states" file: */
844 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
845 spin_unlock(&cl->cl_lock);
850 stid->sc_free = sc_free;
851 stid->sc_client = cl;
852 stid->sc_stateid.si_opaque.so_id = new_id;
853 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
854 /* Will be incremented before return to client: */
855 refcount_set(&stid->sc_count, 1);
856 spin_lock_init(&stid->sc_lock);
857 INIT_LIST_HEAD(&stid->sc_cp_list);
860 * It shouldn't be a problem to reuse an opaque stateid value.
861 * I don't think it is for 4.1. But with 4.0 I worry that, for
862 * example, a stray write retransmission could be accepted by
863 * the server when it should have been rejected. Therefore,
864 * adopt a trick from the sctp code to attempt to maximize the
865 * amount of time until an id is reused, by ensuring they always
866 * "increase" (mod INT_MAX):
870 kmem_cache_free(slab, stid);
875 * Create a unique stateid_t to represent each COPY.
877 static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
878 unsigned char sc_type)
882 stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
883 stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
884 stid->sc_type = sc_type;
886 idr_preload(GFP_KERNEL);
887 spin_lock(&nn->s2s_cp_lock);
888 new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
889 stid->stid.si_opaque.so_id = new_id;
890 stid->stid.si_generation = 1;
891 spin_unlock(&nn->s2s_cp_lock);
898 int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
900 return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
903 struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
904 struct nfs4_stid *p_stid)
906 struct nfs4_cpntf_state *cps;
908 cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
911 cps->cpntf_time = ktime_get_boottime_seconds();
912 refcount_set(&cps->cp_stateid.sc_count, 1);
913 if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
915 spin_lock(&nn->s2s_cp_lock);
916 list_add(&cps->cp_list, &p_stid->sc_cp_list);
917 spin_unlock(&nn->s2s_cp_lock);
924 void nfs4_free_copy_state(struct nfsd4_copy *copy)
928 WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
929 nn = net_generic(copy->cp_clp->net, nfsd_net_id);
930 spin_lock(&nn->s2s_cp_lock);
931 idr_remove(&nn->s2s_cp_stateids,
932 copy->cp_stateid.stid.si_opaque.so_id);
933 spin_unlock(&nn->s2s_cp_lock);
936 static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
938 struct nfs4_cpntf_state *cps;
941 nn = net_generic(net, nfsd_net_id);
942 spin_lock(&nn->s2s_cp_lock);
943 while (!list_empty(&stid->sc_cp_list)) {
944 cps = list_first_entry(&stid->sc_cp_list,
945 struct nfs4_cpntf_state, cp_list);
946 _free_cpntf_state_locked(nn, cps);
948 spin_unlock(&nn->s2s_cp_lock);
951 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
953 struct nfs4_stid *stid;
955 stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
959 return openlockstateid(stid);
962 static void nfs4_free_deleg(struct nfs4_stid *stid)
964 kmem_cache_free(deleg_slab, stid);
965 atomic_long_dec(&num_delegations);
969 * When we recall a delegation, we should be careful not to hand it
970 * out again straight away.
971 * To ensure this we keep a pair of bloom filters ('new' and 'old')
972 * in which the filehandles of recalled delegations are "stored".
973 * If a filehandle appears in either filter, a delegation is blocked.
974 * When a delegation is recalled, the filehandle is stored in the "new"
976 * Every 30 seconds we swap the filters and clear the "new" one,
977 * unless both are empty of course.
979 * Each filter is 256 bits. We hash the filehandle to 32bit and use the
980 * low 3 bytes as hash-table indices.
982 * 'blocked_delegations_lock', which is always taken in block_delegations(),
983 * is used to manage concurrent access. Testing does not need the lock
984 * except when swapping the two filters.
986 static DEFINE_SPINLOCK(blocked_delegations_lock);
987 static struct bloom_pair {
988 int entries, old_entries;
990 int new; /* index into 'set' */
991 DECLARE_BITMAP(set[2], 256);
992 } blocked_delegations;
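/*
 * Check both bloom filters for this filehandle; a hit in either one means
 * a delegation on it was recalled recently and must not be handed out.
 */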
994 static int delegation_blocked(struct knfsd_fh *fh)
997 struct bloom_pair *bd = &blocked_delegations;
999 if (bd->entries == 0)
1001 if (ktime_get_seconds() - bd->swap_time > 30) {
1002 spin_lock(&blocked_delegations_lock);
1003 if (ktime_get_seconds() - bd->swap_time > 30) {
1004 bd->entries -= bd->old_entries;
1005 bd->old_entries = bd->entries;
1006 memset(bd->set[bd->new], 0,
1007 sizeof(bd->set[0]));
1008 bd->new = 1-bd->new;
1009 bd->swap_time = ktime_get_seconds();
1011 spin_unlock(&blocked_delegations_lock);
1013 hash = jhash(&fh->fh_base, fh->fh_size, 0);
1014 if (test_bit(hash&255, bd->set[0]) &&
1015 test_bit((hash>>8)&255, bd->set[0]) &&
1016 test_bit((hash>>16)&255, bd->set[0]))
1019 if (test_bit(hash&255, bd->set[1]) &&
1020 test_bit((hash>>8)&255, bd->set[1]) &&
1021 test_bit((hash>>16)&255, bd->set[1]))
1027 static void block_delegations(struct knfsd_fh *fh)
1030 struct bloom_pair *bd = &blocked_delegations;
1032 hash = jhash(&fh->fh_base, fh->fh_size, 0);
1034 spin_lock(&blocked_delegations_lock);
1035 __set_bit(hash&255, bd->set[bd->new]);
1036 __set_bit((hash>>8)&255, bd->set[bd->new]);
1037 __set_bit((hash>>16)&255, bd->set[bd->new]);
1038 if (bd->entries == 0)
1039 bd->swap_time = ktime_get_seconds();
1041 spin_unlock(&blocked_delegations_lock);
1044 static struct nfs4_delegation *
1045 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
1046 struct svc_fh *current_fh,
1047 struct nfs4_clnt_odstate *odstate)
1049 struct nfs4_delegation *dp;
1052 dprintk("NFSD alloc_init_deleg\n");
1053 n = atomic_long_inc_return(&num_delegations);
1054 if (n < 0 || n > max_delegations)
1056 if (delegation_blocked(&current_fh->fh_handle))
1058 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
1063 * delegation seqids are never incremented. The 4.1 special
1064 * meaning of seqid 0 isn't meaningful, really, but let's avoid
1065 * 0 anyway just for consistency and use 1:
1067 dp->dl_stid.sc_stateid.si_generation = 1;
1068 INIT_LIST_HEAD(&dp->dl_perfile);
1069 INIT_LIST_HEAD(&dp->dl_perclnt);
1070 INIT_LIST_HEAD(&dp->dl_recall_lru);
1071 dp->dl_clnt_odstate = odstate;
1072 get_clnt_odstate(odstate);
1073 dp->dl_type = NFS4_OPEN_DELEGATE_READ;
1075 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
1076 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
1078 dp->dl_stid.sc_file = fp;
1081 atomic_long_dec(&num_delegations);
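/*
 * Drop a reference on a generic stateid; the final put removes it from
 * the client's stateid idr and releases any associated copy-notify state.
 */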
1086 nfs4_put_stid(struct nfs4_stid *s)
1088 struct nfs4_file *fp = s->sc_file;
1089 struct nfs4_client *clp = s->sc_client;
1091 might_lock(&clp->cl_lock);
1093 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
1094 wake_up_all(&close_wq);
1097 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1098 nfs4_free_cpntf_statelist(clp->net, s);
1099 spin_unlock(&clp->cl_lock);
1106 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
1108 stateid_t *src = &stid->sc_stateid;
1110 spin_lock(&stid->sc_lock);
1111 if (unlikely(++src->si_generation == 0))
1112 src->si_generation = 1;
1113 memcpy(dst, src, sizeof(*dst));
1114 spin_unlock(&stid->sc_lock);
1117 static void put_deleg_file(struct nfs4_file *fp)
1119 struct nfsd_file *nf = NULL;
1121 spin_lock(&fp->fi_lock);
1122 if (--fp->fi_delegees == 0)
1123 swap(nf, fp->fi_deleg_file);
1124 spin_unlock(&fp->fi_lock);
1130 static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
1132 struct nfs4_file *fp = dp->dl_stid.sc_file;
1133 struct nfsd_file *nf = fp->fi_deleg_file;
1135 WARN_ON_ONCE(!fp->fi_delegees);
1137 vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
1141 static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
1143 put_clnt_odstate(dp->dl_clnt_odstate);
1144 nfs4_unlock_deleg_lease(dp);
1145 nfs4_put_stid(&dp->dl_stid);
1148 void nfs4_unhash_stid(struct nfs4_stid *s)
1154 * nfs4_delegation_exists - Discover if this delegation already exists
1155 * @clp: a pointer to the nfs4_client we're granting a delegation to
1156 * @fp: a pointer to the nfs4_file we're granting a delegation on
1159 * On success: true iff an existing delegation is found
1163 nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
1165 struct nfs4_delegation *searchdp = NULL;
1166 struct nfs4_client *searchclp = NULL;
1168 lockdep_assert_held(&state_lock);
1169 lockdep_assert_held(&fp->fi_lock);
1171 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
1172 searchclp = searchdp->dl_stid.sc_client;
1173 if (clp == searchclp) {
1181 * hash_delegation_locked - Add a delegation to the appropriate lists
1182 * @dp: a pointer to the nfs4_delegation we are adding.
1183 * @fp: a pointer to the nfs4_file we're granting a delegation on
1186 * On success: 0 if the delegation was successfully hashed.
1188 * On error: -EAGAIN if one was previously granted to this
1189 * nfs4_client for this nfs4_file. Delegation is not hashed.
1194 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1196 struct nfs4_client *clp = dp->dl_stid.sc_client;
1198 lockdep_assert_held(&state_lock);
1199 lockdep_assert_held(&fp->fi_lock);
1201 if (nfs4_delegation_exists(clp, fp))
1203 refcount_inc(&dp->dl_stid.sc_count);
1204 dp->dl_stid.sc_type = NFS4_DELEG_STID;
1205 list_add(&dp->dl_perfile, &fp->fi_delegations);
1206 list_add(&dp->dl_perclnt, &clp->cl_delegations);
1211 unhash_delegation_locked(struct nfs4_delegation *dp)
1213 struct nfs4_file *fp = dp->dl_stid.sc_file;
1215 lockdep_assert_held(&state_lock);
1217 if (list_empty(&dp->dl_perfile))
1220 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
1221 /* Ensure that deleg break won't try to requeue it */
1223 spin_lock(&fp->fi_lock);
1224 list_del_init(&dp->dl_perclnt);
1225 list_del_init(&dp->dl_recall_lru);
1226 list_del_init(&dp->dl_perfile);
1227 spin_unlock(&fp->fi_lock);
1231 static void destroy_delegation(struct nfs4_delegation *dp)
1235 spin_lock(&state_lock);
1236 unhashed = unhash_delegation_locked(dp);
1237 spin_unlock(&state_lock);
1239 destroy_unhashed_deleg(dp);
1242 static void revoke_delegation(struct nfs4_delegation *dp)
1244 struct nfs4_client *clp = dp->dl_stid.sc_client;
1246 WARN_ON(!list_empty(&dp->dl_recall_lru));
1248 if (clp->cl_minorversion) {
1249 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
1250 refcount_inc(&dp->dl_stid.sc_count);
1251 spin_lock(&clp->cl_lock);
1252 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1253 spin_unlock(&clp->cl_lock);
1255 destroy_unhashed_deleg(dp);
1262 static unsigned int clientid_hashval(u32 id)
1264 return id & CLIENT_HASH_MASK;
1267 static unsigned int clientstr_hashval(struct xdr_netobj name)
1269 return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
1273 * A stateid that had a deny mode associated with it is being released
1274 * or downgraded. Recalculate the deny mode on the file.
1277 recalculate_deny_mode(struct nfs4_file *fp)
1279 struct nfs4_ol_stateid *stp;
1281 spin_lock(&fp->fi_lock);
1282 fp->fi_share_deny = 0;
1283 list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1284 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1285 spin_unlock(&fp->fi_lock);
1289 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1292 bool change = false;
1294 for (i = 1; i < 4; i++) {
1295 if ((i & deny) != i) {
1301 /* Recalculate per-file deny mode if there was a change */
1303 recalculate_deny_mode(stp->st_stid.sc_file);
1306 /* release all access and file references for a given stateid */
1308 release_all_access(struct nfs4_ol_stateid *stp)
1311 struct nfs4_file *fp = stp->st_stid.sc_file;
1313 if (fp && stp->st_deny_bmap != 0)
1314 recalculate_deny_mode(fp);
1316 for (i = 1; i < 4; i++) {
1317 if (test_access(i, stp))
1318 nfs4_file_put_access(stp->st_stid.sc_file, i);
1319 clear_access(i, stp);
1323 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1325 kfree(sop->so_owner.data);
1326 sop->so_ops->so_free(sop);
1329 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1331 struct nfs4_client *clp = sop->so_client;
1333 might_lock(&clp->cl_lock);
1335 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1337 sop->so_ops->so_unhash(sop);
1338 spin_unlock(&clp->cl_lock);
1339 nfs4_free_stateowner(sop);
1343 nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
1345 return list_empty(&stp->st_perfile);
1348 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1350 struct nfs4_file *fp = stp->st_stid.sc_file;
1352 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1354 if (list_empty(&stp->st_perfile))
1357 spin_lock(&fp->fi_lock);
1358 list_del_init(&stp->st_perfile);
1359 spin_unlock(&fp->fi_lock);
1360 list_del(&stp->st_perstateowner);
1364 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1366 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1368 put_clnt_odstate(stp->st_clnt_odstate);
1369 release_all_access(stp);
1370 if (stp->st_stateowner)
1371 nfs4_put_stateowner(stp->st_stateowner);
1372 kmem_cache_free(stateid_slab, stid);
1375 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1377 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1378 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1379 struct nfsd_file *nf;
1381 nf = find_any_file(stp->st_stid.sc_file);
1383 get_file(nf->nf_file);
1384 filp_close(nf->nf_file, (fl_owner_t)lo);
1387 nfs4_free_ol_stateid(stid);
1391 * Put the persistent reference to an already unhashed generic stateid, while
1392 * holding the cl_lock. If it's the last reference, then put it onto the
1393 * reaplist for later destruction.
1395 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1396 struct list_head *reaplist)
1398 struct nfs4_stid *s = &stp->st_stid;
1399 struct nfs4_client *clp = s->sc_client;
1401 lockdep_assert_held(&clp->cl_lock);
1403 WARN_ON_ONCE(!list_empty(&stp->st_locks));
1405 if (!refcount_dec_and_test(&s->sc_count)) {
1406 wake_up_all(&close_wq);
1410 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1411 list_add(&stp->st_locks, reaplist);
1414 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1416 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1418 if (!unhash_ol_stateid(stp))
1420 list_del_init(&stp->st_locks);
1421 nfs4_unhash_stid(&stp->st_stid);
1425 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1427 struct nfs4_client *clp = stp->st_stid.sc_client;
1430 spin_lock(&clp->cl_lock);
1431 unhashed = unhash_lock_stateid(stp);
1432 spin_unlock(&clp->cl_lock);
1434 nfs4_put_stid(&stp->st_stid);
1437 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1439 struct nfs4_client *clp = lo->lo_owner.so_client;
1441 lockdep_assert_held(&clp->cl_lock);
1443 list_del_init(&lo->lo_owner.so_strhash);
1447 * Free a list of generic stateids that were collected earlier after being
1451 free_ol_stateid_reaplist(struct list_head *reaplist)
1453 struct nfs4_ol_stateid *stp;
1454 struct nfs4_file *fp;
1458 while (!list_empty(reaplist)) {
1459 stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1461 list_del(&stp->st_locks);
1462 fp = stp->st_stid.sc_file;
1463 stp->st_stid.sc_free(&stp->st_stid);
1469 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1470 struct list_head *reaplist)
1472 struct nfs4_ol_stateid *stp;
1474 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1476 while (!list_empty(&open_stp->st_locks)) {
1477 stp = list_entry(open_stp->st_locks.next,
1478 struct nfs4_ol_stateid, st_locks);
1479 WARN_ON(!unhash_lock_stateid(stp));
1480 put_ol_stateid_locked(stp, reaplist);
1484 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1485 struct list_head *reaplist)
1487 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1489 if (!unhash_ol_stateid(stp))
1491 release_open_stateid_locks(stp, reaplist);
1495 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1497 LIST_HEAD(reaplist);
1499 spin_lock(&stp->st_stid.sc_client->cl_lock);
1500 if (unhash_open_stateid(stp, &reaplist))
1501 put_ol_stateid_locked(stp, &reaplist);
1502 spin_unlock(&stp->st_stid.sc_client->cl_lock);
1503 free_ol_stateid_reaplist(&reaplist);
1506 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1508 struct nfs4_client *clp = oo->oo_owner.so_client;
1510 lockdep_assert_held(&clp->cl_lock);
1512 list_del_init(&oo->oo_owner.so_strhash);
1513 list_del_init(&oo->oo_perclient);
1516 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1518 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1520 struct nfs4_ol_stateid *s;
1522 spin_lock(&nn->client_lock);
1523 s = oo->oo_last_closed_stid;
1525 list_del_init(&oo->oo_close_lru);
1526 oo->oo_last_closed_stid = NULL;
1528 spin_unlock(&nn->client_lock);
1530 nfs4_put_stid(&s->st_stid);
1533 static void release_openowner(struct nfs4_openowner *oo)
1535 struct nfs4_ol_stateid *stp;
1536 struct nfs4_client *clp = oo->oo_owner.so_client;
1537 struct list_head reaplist;
1539 INIT_LIST_HEAD(&reaplist);
1541 spin_lock(&clp->cl_lock);
1542 unhash_openowner_locked(oo);
1543 while (!list_empty(&oo->oo_owner.so_stateids)) {
1544 stp = list_first_entry(&oo->oo_owner.so_stateids,
1545 struct nfs4_ol_stateid, st_perstateowner);
1546 if (unhash_open_stateid(stp, &reaplist))
1547 put_ol_stateid_locked(stp, &reaplist);
1549 spin_unlock(&clp->cl_lock);
1550 free_ol_stateid_reaplist(&reaplist);
1551 release_last_closed_stateid(oo);
1552 nfs4_put_stateowner(&oo->oo_owner);
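/* Hash a sessionid into sessionid_hashtbl by its sequence number. */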
1556 hash_sessionid(struct nfs4_sessionid *sessionid)
1558 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1560 return sid->sequence % SESSION_HASH_SIZE;
1563 #ifdef CONFIG_SUNRPC_DEBUG
1565 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1567 u32 *ptr = (u32 *)(&sessionid->data[0]);
1568 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1572 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1578 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1579 * won't be used for replay.
1581 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1583 struct nfs4_stateowner *so = cstate->replay_owner;
1585 if (nfserr == nfserr_replay_me)
1588 if (!seqid_mutating_err(ntohl(nfserr))) {
1589 nfsd4_cstate_clear_replay(cstate);
1594 if (so->so_is_open_owner)
1595 release_last_closed_stateid(openowner(so));
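/* Build a new sessionid from the clientid plus a global sequence counter. */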
1601 gen_sessionid(struct nfsd4_session *ses)
1603 struct nfs4_client *clp = ses->se_client;
1604 struct nfsd4_sessionid *sid;
1606 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1607 sid->clientid = clp->cl_clientid;
1608 sid->sequence = current_sessionid++;
1614 * The protocol defines ca_maxresponsesize_cached to include the size of
1614 * the rpc header, but all we need to cache is the data starting after
1615 * the end of the initial SEQUENCE operation--the rest we regenerate
1617 * each time. Therefore we can advertise a ca_maxresponsesize_cached
1617 * value that is the number of bytes in our cache plus a few additional
1618 * bytes. In order to stay on the safe side, and not promise more than
1619 * we can cache, those additional bytes must be the minimum possible: 24
1620 * bytes of rpc header (xid through accept state, with AUTH_NULL
1621 * verifier), 12 for the compound header (with zero-length tag), and 44
1622 * for the SEQUENCE op response:
1624 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
1627 free_session_slots(struct nfsd4_session *ses)
1631 for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1632 free_svc_cred(&ses->se_slots[i]->sl_cred);
1633 kfree(ses->se_slots[i]);
1638 * We don't actually need to cache the rpc and session headers, so we
1639 * can allocate a little less for each slot:
1641 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1645 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1648 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1649 return size + sizeof(struct nfsd4_slot);
1653 * XXX: If we run out of reserved DRC memory we could (up to a point)
1654 * re-negotiate active sessions and reduce their slot usage to make
1655 * room for new connections. For now we just fail the create session.
1657 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1659 u32 slotsize = slot_bytes(ca);
1660 u32 num = ca->maxreqs;
1661 unsigned long avail, total_avail;
1662 unsigned int scale_factor;
1664 spin_lock(&nfsd_drc_lock);
1665 if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1666 total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1668 /* We have handed out more space than we chose in
1669 * set_max_drc() to allow. That isn't really a
1670 * problem as long as that doesn't make us think we
1671 * have lots more due to integer overflow.
1674 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1676 * Never use more than a fraction of the remaining memory,
1677 * unless it's the only way to give this client a slot.
1678 * The chosen fraction is either 1/8 or 1/number of threads,
1679 * whichever is smaller. This ensures there are adequate
1680 * slots to support multiple clients per thread.
1681 * Give the client one slot even if that would require
1682 * over-allocation--it is better than failure.
1684 scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1686 avail = clamp_t(unsigned long, avail, slotsize,
1687 total_avail/scale_factor);
1688 num = min_t(int, num, avail / slotsize);
1689 num = max_t(int, num, 1);
1690 nfsd_drc_mem_used += num * slotsize;
1691 spin_unlock(&nfsd_drc_lock);
1696 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1698 int slotsize = slot_bytes(ca);
1700 spin_lock(&nfsd_drc_lock);
1701 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1702 spin_unlock(&nfsd_drc_lock);
1705 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1706 struct nfsd4_channel_attrs *battrs)
1708 int numslots = fattrs->maxreqs;
1709 int slotsize = slot_bytes(fattrs);
1710 struct nfsd4_session *new;
1713 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1714 + sizeof(struct nfsd4_session) > PAGE_SIZE);
1715 mem = numslots * sizeof(struct nfsd4_slot *);
1717 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1720 /* allocate each struct nfsd4_slot and data cache in one piece */
1721 for (i = 0; i < numslots; i++) {
1722 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1723 if (!new->se_slots[i])
1727 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1728 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1733 kfree(new->se_slots[i]);
1738 static void free_conn(struct nfsd4_conn *c)
1740 svc_xprt_put(c->cn_xprt);
1744 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1746 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1747 struct nfs4_client *clp = c->cn_session->se_client;
1749 trace_nfsd_cb_lost(clp);
1751 spin_lock(&clp->cl_lock);
1752 if (!list_empty(&c->cn_persession)) {
1753 list_del(&c->cn_persession);
1756 nfsd4_probe_callback(clp);
1757 spin_unlock(&clp->cl_lock);
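/*
 * Allocate a connection-tracking structure for this request's transport,
 * taking a reference on the xprt.
 */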
1760 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1762 struct nfsd4_conn *conn;
1764 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1767 svc_xprt_get(rqstp->rq_xprt);
1768 conn->cn_xprt = rqstp->rq_xprt;
1769 conn->cn_flags = flags;
1770 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1774 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1776 conn->cn_session = ses;
1777 list_add(&conn->cn_persession, &ses->se_conns);
1780 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1782 struct nfs4_client *clp = ses->se_client;
1784 spin_lock(&clp->cl_lock);
1785 __nfsd4_hash_conn(conn, ses);
1786 spin_unlock(&clp->cl_lock);
1789 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1791 conn->cn_xpt_user.callback = nfsd4_conn_lost;
1792 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1795 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1799 nfsd4_hash_conn(conn, ses);
1800 ret = nfsd4_register_conn(conn);
1802 /* oops; xprt is already down: */
1803 nfsd4_conn_lost(&conn->cn_xpt_user);
1804 /* We may have gained or lost a callback channel: */
1805 nfsd4_probe_callback_sync(ses->se_client);
1808 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1810 u32 dir = NFS4_CDFC4_FORE;
1812 if (cses->flags & SESSION4_BACK_CHAN)
1813 dir |= NFS4_CDFC4_BACK;
1814 return alloc_conn(rqstp, dir);
1817 /* must be called under client_lock */
1818 static void nfsd4_del_conns(struct nfsd4_session *s)
1820 struct nfs4_client *clp = s->se_client;
1821 struct nfsd4_conn *c;
1823 spin_lock(&clp->cl_lock);
1824 while (!list_empty(&s->se_conns)) {
1825 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1826 list_del_init(&c->cn_persession);
1827 spin_unlock(&clp->cl_lock);
1829 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1832 spin_lock(&clp->cl_lock);
1834 spin_unlock(&clp->cl_lock);
1837 static void __free_session(struct nfsd4_session *ses)
1839 free_session_slots(ses);
1843 static void free_session(struct nfsd4_session *ses)
1845 nfsd4_del_conns(ses);
1846 nfsd4_put_drc_mem(&ses->se_fchannel);
1847 __free_session(ses);
1850 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1853 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1855 new->se_client = clp;
1858 INIT_LIST_HEAD(&new->se_conns);
1860 new->se_cb_seq_nr = 1;
1861 new->se_flags = cses->flags;
1862 new->se_cb_prog = cses->callback_prog;
1863 new->se_cb_sec = cses->cb_sec;
1864 atomic_set(&new->se_ref, 0);
1865 idx = hash_sessionid(&new->se_sessionid);
1866 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1867 spin_lock(&clp->cl_lock);
1868 list_add(&new->se_perclnt, &clp->cl_sessions);
1869 spin_unlock(&clp->cl_lock);
1872 struct sockaddr *sa = svc_addr(rqstp);
1874 * This is a little silly; with sessions there's no real
1875 * use for the callback address. Use the peer address
1876 * as a reasonable default for now, but consider fixing
1877 * the rpc client not to require an address in the
1880 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1881 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1885 /* caller must hold client_lock */
1886 static struct nfsd4_session *
1887 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1889 struct nfsd4_session *elem;
1891 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1893 lockdep_assert_held(&nn->client_lock);
1895 dump_sessionid(__func__, sessionid);
1896 idx = hash_sessionid(sessionid);
1897 /* Search in the appropriate list */
1898 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1899 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1900 NFS4_MAX_SESSIONID_LEN)) {
1905 dprintk("%s: session not found\n", __func__);
1909 static struct nfsd4_session *
1910 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1913 struct nfsd4_session *session;
1914 __be32 status = nfserr_badsession;
1916 session = __find_in_sessionid_hashtbl(sessionid, net);
1919 status = nfsd4_get_session_locked(session);
1927 /* caller must hold client_lock */
1929 unhash_session(struct nfsd4_session *ses)
1931 struct nfs4_client *clp = ses->se_client;
1932 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1934 lockdep_assert_held(&nn->client_lock);
1936 list_del(&ses->se_hash);
1937 spin_lock(&ses->se_client->cl_lock);
1938 list_del(&ses->se_perclnt);
1939 spin_unlock(&ses->se_client->cl_lock);
1942 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1944 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1947 * We're assuming the clid was not given out from a boot
1948 * precisely 2^32 (about 136 years) before this one. That seems
1949 * a safe assumption:
1951 if (clid->cl_boot == (u32)nn->boot_time)
1953 trace_nfsd_clid_stale(clid);
1958 * XXX Should we use a slab cache ?
1959 * This type of memory management is somewhat inefficient, but we use it
1960 * anyway since SETCLIENTID is not a common operation.
1962 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1964 struct nfs4_client *clp;
1967 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
1970 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
1971 if (clp->cl_name.data == NULL)
1973 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
1974 sizeof(struct list_head),
1976 if (!clp->cl_ownerstr_hashtbl)
1977 goto err_no_hashtbl;
1978 for (i = 0; i < OWNER_HASH_SIZE; i++)
1979 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1980 INIT_LIST_HEAD(&clp->cl_sessions);
1981 idr_init(&clp->cl_stateids);
1982 atomic_set(&clp->cl_rpc_users, 0);
1983 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1984 INIT_LIST_HEAD(&clp->cl_idhash);
1985 INIT_LIST_HEAD(&clp->cl_openowners);
1986 INIT_LIST_HEAD(&clp->cl_delegations);
1987 INIT_LIST_HEAD(&clp->cl_lru);
1988 INIT_LIST_HEAD(&clp->cl_revoked);
1989 #ifdef CONFIG_NFSD_PNFS
1990 INIT_LIST_HEAD(&clp->cl_lo_states);
1992 INIT_LIST_HEAD(&clp->async_copies);
1993 spin_lock_init(&clp->async_lock);
1994 spin_lock_init(&clp->cl_lock);
1995 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1998 kfree(clp->cl_name.data);
2000 kmem_cache_free(client_slab, clp);
2004 static void __free_client(struct kref *k)
2006 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
2007 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
2009 free_svc_cred(&clp->cl_cred);
2010 kfree(clp->cl_ownerstr_hashtbl);
2011 kfree(clp->cl_name.data);
2012 kfree(clp->cl_nii_domain.data);
2013 kfree(clp->cl_nii_name.data);
2014 idr_destroy(&clp->cl_stateids);
2015 kmem_cache_free(client_slab, clp);
2018 static void drop_client(struct nfs4_client *clp)
2020 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2024 free_client(struct nfs4_client *clp)
2026 while (!list_empty(&clp->cl_sessions)) {
2027 struct nfsd4_session *ses;
2028 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2030 list_del(&ses->se_perclnt);
2031 WARN_ON_ONCE(atomic_read(&ses->se_ref));
2034 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2035 if (clp->cl_nfsd_dentry) {
2036 nfsd_client_rmdir(clp->cl_nfsd_dentry);
2037 clp->cl_nfsd_dentry = NULL;
2038 wake_up_all(&expiry_wq);
2043 /* must be called under the client_lock */
2045 unhash_client_locked(struct nfs4_client *clp)
2047 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2048 struct nfsd4_session *ses;
2050 lockdep_assert_held(&nn->client_lock);
2052 /* Mark the client as expired! */
2054 /* Make it invisible */
2055 if (!list_empty(&clp->cl_idhash)) {
2056 list_del_init(&clp->cl_idhash);
2057 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2058 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2060 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2062 list_del_init(&clp->cl_lru);
2063 spin_lock(&clp->cl_lock);
2064 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2065 list_del_init(&ses->se_hash);
2066 spin_unlock(&clp->cl_lock);
2070 unhash_client(struct nfs4_client *clp)
2072 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2074 spin_lock(&nn->client_lock);
2075 unhash_client_locked(clp);
2076 spin_unlock(&nn->client_lock);
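/*
 * Expire the client unless it still has RPCs in flight; in that case
 * return nfserr_jukebox instead.
 */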
2079 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2081 if (atomic_read(&clp->cl_rpc_users))
2082 return nfserr_jukebox;
2083 unhash_client_locked(clp);
2088 __destroy_client(struct nfs4_client *clp)
2091 struct nfs4_openowner *oo;
2092 struct nfs4_delegation *dp;
2093 struct list_head reaplist;
2095 INIT_LIST_HEAD(&reaplist);
2096 spin_lock(&state_lock);
2097 while (!list_empty(&clp->cl_delegations)) {
2098 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2099 WARN_ON(!unhash_delegation_locked(dp));
2100 list_add(&dp->dl_recall_lru, &reaplist);
2102 spin_unlock(&state_lock);
2103 while (!list_empty(&reaplist)) {
2104 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2105 list_del_init(&dp->dl_recall_lru);
2106 destroy_unhashed_deleg(dp);
2108 while (!list_empty(&clp->cl_revoked)) {
2109 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2110 list_del_init(&dp->dl_recall_lru);
2111 nfs4_put_stid(&dp->dl_stid);
2113 while (!list_empty(&clp->cl_openowners)) {
2114 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2115 nfs4_get_stateowner(&oo->oo_owner);
2116 release_openowner(oo);
2118 for (i = 0; i < OWNER_HASH_SIZE; i++) {
2119 struct nfs4_stateowner *so, *tmp;
2121 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2123 /* Should be no openowners at this point */
2124 WARN_ON_ONCE(so->so_is_open_owner);
2125 remove_blocked_locks(lockowner(so));
2128 nfsd4_return_all_client_layouts(clp);
2129 nfsd4_shutdown_copy(clp);
2130 nfsd4_shutdown_callback(clp);
2131 if (clp->cl_cb_conn.cb_xprt)
2132 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2134 wake_up_all(&expiry_wq);
2138 destroy_client(struct nfs4_client *clp)
2141 __destroy_client(clp);
2144 static void inc_reclaim_complete(struct nfs4_client *clp)
2146 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2148 if (!nn->track_reclaim_completes)
2150 if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2152 if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2153 nn->reclaim_str_hashtbl_size) {
2154 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2156 nfsd4_end_grace(nn);
2160 static void expire_client(struct nfs4_client *clp)
2163 nfsd4_client_record_remove(clp);
2164 __destroy_client(clp);
2167 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2169 memcpy(target->cl_verifier.data, source->data,
2170 sizeof(target->cl_verifier.data));
2173 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2175 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2176 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
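/*
 * Duplicate an svc_cred: principal strings are copied, and references are
 * taken on the group info and gss mechanism.
 */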
2179 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2181 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2182 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2184 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2185 if ((source->cr_principal && !target->cr_principal) ||
2186 (source->cr_raw_principal && !target->cr_raw_principal) ||
2187 (source->cr_targ_princ && !target->cr_targ_princ))
2190 target->cr_flavor = source->cr_flavor;
2191 target->cr_uid = source->cr_uid;
2192 target->cr_gid = source->cr_gid;
2193 target->cr_group_info = source->cr_group_info;
2194 get_group_info(target->cr_group_info);
2195 target->cr_gss_mech = source->cr_gss_mech;
2196 if (source->cr_gss_mech)
2197 gss_mech_get(source->cr_gss_mech);
2202 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2204 if (o1->len < o2->len)
2206 if (o1->len > o2->len)
2208 return memcmp(o1->data, o2->data, o1->len);
2212 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2214 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2218 same_clid(clientid_t *cl1, clientid_t *cl2)
2220 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2223 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2227 if (g1->ngroups != g2->ngroups)
2229 for (i=0; i<g1->ngroups; i++)
2230 if (!gid_eq(g1->gid[i], g2->gid[i]))
2236 * RFC 3530 language requires clid_inuse be returned when the
2237 * "principal" associated with a requests differs from that previously
2238 * used. We use uid, gid's, and gss principal string as our best
2239 * approximation. We also don't want to allow non-gss use of a client
2240 * established using gss: in theory cr_principal should catch that
2241 * change, but in practice cr_principal can be null even in the gss case
2242 * since gssd doesn't always pass down a principal string.
2244 static bool is_gss_cred(struct svc_cred *cr)
2246 /* Is cr_flavor one of the gss "pseudoflavors"?: */
2247 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2252 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2254 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2255 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2256 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2257 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2259 /* XXX: check that cr_targ_princ fields match ? */
2260 if (cr1->cr_principal == cr2->cr_principal)
2262 if (!cr1->cr_principal || !cr2->cr_principal)
2264 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2267 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2269 struct svc_cred *cr = &rqstp->rq_cred;
2272 if (!cr->cr_gss_mech)
2274 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2275 return service == RPC_GSS_SVC_INTEGRITY ||
2276 service == RPC_GSS_SVC_PRIVACY;
2279 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2281 struct svc_cred *cr = &rqstp->rq_cred;
2283 if (!cl->cl_mach_cred)
2285 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2287 if (!svc_rqst_integrity_protected(rqstp))
2289 if (cl->cl_cred.cr_raw_principal)
2290 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2291 cr->cr_raw_principal);
2292 if (!cr->cr_principal)
2294 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
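/*
 * Generate a confirm verifier for the client from the current
 * wall-clock time and a per-net counter; the result is opaque to the
 * client.
 */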
2297 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2302 * This is opaque to client, so no need to byte-swap. Use
2303 * __force to keep sparse happy
2305 verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2306 verf[1] = (__force __be32)nn->clverifier_counter++;
2307 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2310 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2312 clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2313 clp->cl_clientid.cl_id = nn->clientid_counter++;
2314 gen_confirm(clp, nn);
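/*
 * Look up a stateid in the client's IDR by its opaque id; presumably
 * callers hold cl_lock.  A stid whose type has been cleared is treated
 * as not found.
 */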
2317 static struct nfs4_stid *
2318 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2320 struct nfs4_stid *ret;
2322 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2323 if (!ret || !ret->sc_type)
2328 static struct nfs4_stid *
2329 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2331 struct nfs4_stid *s;
2333 spin_lock(&cl->cl_lock);
2334 s = find_stateid_locked(cl, t);
2336 if (typemask & s->sc_type)
2337 refcount_inc(&s->sc_count);
2341 spin_unlock(&cl->cl_lock);
2345 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2347 struct nfsdfs_client *nc;
2348 nc = get_nfsdfs_client(inode);
2351 return container_of(nc, struct nfs4_client, cl_nfsdfs);
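/* Emit "data" to the seq_file as a double-quoted, escaped string. */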
2354 static void seq_quote_mem(struct seq_file *m, char *data, int len)
2356 seq_printf(m, "\"");
2357 seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
2358 seq_printf(m, "\"");
2361 static const char *cb_state2str(int state)
2366 case NFSD4_CB_UNKNOWN:
2370 case NFSD4_CB_FAULT:
2376 static int client_info_show(struct seq_file *m, void *v)
2378 struct inode *inode = m->private;
2379 struct nfs4_client *clp;
2382 clp = get_nfsdfs_clp(inode);
2385 memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2386 seq_printf(m, "clientid: 0x%llx\n", clid);
2387 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2388 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2389 seq_puts(m, "status: confirmed\n");
2391 seq_puts(m, "status: unconfirmed\n");
2392 seq_printf(m, "name: ");
2393 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2394 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2395 if (clp->cl_nii_domain.data) {
2396 seq_printf(m, "Implementation domain: ");
2397 seq_quote_mem(m, clp->cl_nii_domain.data,
2398 clp->cl_nii_domain.len);
2399 seq_printf(m, "\nImplementation name: ");
2400 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2401 seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2402 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2404 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
2405 seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
2411 static int client_info_open(struct inode *inode, struct file *file)
2413 return single_open(file, client_info_show, inode);
2416 static const struct file_operations client_info_fops = {
2417 .open = client_info_open,
2419 .llseek = seq_lseek,
2420 .release = single_release,
2423 static void *states_start(struct seq_file *s, loff_t *pos)
2424 __acquires(&clp->cl_lock)
2426 struct nfs4_client *clp = s->private;
2427 unsigned long id = *pos;
2430 spin_lock(&clp->cl_lock);
2431 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2436 static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2438 struct nfs4_client *clp = s->private;
2439 unsigned long id = *pos;
2444 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2449 static void states_stop(struct seq_file *s, void *v)
2450 __releases(&clp->cl_lock)
2452 struct nfs4_client *clp = s->private;
2454 spin_unlock(&clp->cl_lock);
2457 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2459 seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2462 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2464 struct inode *inode = f->nf_inode;
2466 seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2467 MAJOR(inode->i_sb->s_dev),
2468 MINOR(inode->i_sb->s_dev),
2472 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2474 seq_printf(s, "owner: ");
2475 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2478 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2480 seq_printf(s, "0x%.8x", stid->si_generation);
2481 seq_printf(s, "%12phN", &stid->si_opaque);
2484 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2486 struct nfs4_ol_stateid *ols;
2487 struct nfs4_file *nf;
2488 struct nfsd_file *file;
2489 struct nfs4_stateowner *oo;
2490 unsigned int access, deny;
2492 if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
2493 return 0; /* XXX: or SEQ_SKIP? */
2494 ols = openlockstateid(st);
2495 oo = ols->st_stateowner;
2497 file = find_any_file(nf);
2501 seq_printf(s, "- ");
2502 nfs4_show_stateid(s, &st->sc_stateid);
2503 seq_printf(s, ": { type: open, ");
2505 access = bmap_to_share_mode(ols->st_access_bmap);
2506 deny = bmap_to_share_mode(ols->st_deny_bmap);
2508 seq_printf(s, "access: %s%s, ",
2509 access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2510 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2511 seq_printf(s, "deny: %s%s, ",
2512 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2513 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2515 nfs4_show_superblock(s, file);
2516 seq_printf(s, ", ");
2517 nfs4_show_fname(s, file);
2518 seq_printf(s, ", ");
2519 nfs4_show_owner(s, oo);
2520 seq_printf(s, " }\n");
2521 nfsd_file_put(file);
2526 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2528 struct nfs4_ol_stateid *ols;
2529 struct nfs4_file *nf;
2530 struct nfsd_file *file;
2531 struct nfs4_stateowner *oo;
2533 ols = openlockstateid(st);
2534 oo = ols->st_stateowner;
2536 file = find_any_file(nf);
2540 seq_printf(s, "- ");
2541 nfs4_show_stateid(s, &st->sc_stateid);
2542 seq_printf(s, ": { type: lock, ");
2545 * Note: a lock stateid isn't really the same thing as a lock,
2546 * it's the locking state held by one owner on a file, and there
2547 * may be multiple (or no) lock ranges associated with it.
 * (The same is true of open stateids.)
2551 nfs4_show_superblock(s, file);
2552 /* XXX: open stateid? */
2553 seq_printf(s, ", ");
2554 nfs4_show_fname(s, file);
2555 seq_printf(s, ", ");
2556 nfs4_show_owner(s, oo);
2557 seq_printf(s, " }\n");
2558 nfsd_file_put(file);
2563 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2565 struct nfs4_delegation *ds;
2566 struct nfs4_file *nf;
2567 struct nfsd_file *file;
2569 ds = delegstateid(st);
2571 file = find_deleg_file(nf);
2575 seq_printf(s, "- ");
2576 nfs4_show_stateid(s, &st->sc_stateid);
2577 seq_printf(s, ": { type: deleg, ");
2579 /* Kinda dead code as long as we only support read delegs: */
2580 seq_printf(s, "access: %s, ",
2581 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2583 /* XXX: lease time, whether it's being recalled. */
2585 nfs4_show_superblock(s, file);
2586 seq_printf(s, ", ");
2587 nfs4_show_fname(s, file);
2588 seq_printf(s, " }\n");
2589 nfsd_file_put(file);
2594 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2596 struct nfs4_layout_stateid *ls;
2597 struct nfsd_file *file;
2599 ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2602 seq_printf(s, "- ");
2603 nfs4_show_stateid(s, &st->sc_stateid);
2604 seq_printf(s, ": { type: layout, ");
2606 /* XXX: What else would be useful? */
2608 nfs4_show_superblock(s, file);
2609 seq_printf(s, ", ");
2610 nfs4_show_fname(s, file);
2611 seq_printf(s, " }\n");
2616 static int states_show(struct seq_file *s, void *v)
2618 struct nfs4_stid *st = v;
2620 switch (st->sc_type) {
2621 case NFS4_OPEN_STID:
2622 return nfs4_show_open(s, st);
2623 case NFS4_LOCK_STID:
2624 return nfs4_show_lock(s, st);
2625 case NFS4_DELEG_STID:
2626 return nfs4_show_deleg(s, st);
2627 case NFS4_LAYOUT_STID:
2628 return nfs4_show_layout(s, st);
2630 return 0; /* XXX: or SEQ_SKIP? */
2632 /* XXX: copy stateids? */
2635 static struct seq_operations states_seq_ops = {
2636 .start = states_start,
2637 .next = states_next,
2638 .stop = states_stop,
2642 static int client_states_open(struct inode *inode, struct file *file)
2645 struct nfs4_client *clp;
2648 clp = get_nfsdfs_clp(inode);
2652 ret = seq_open(file, &states_seq_ops);
2655 s = file->private_data;
2660 static int client_opens_release(struct inode *inode, struct file *file)
2662 struct seq_file *m = file->private_data;
2663 struct nfs4_client *clp = m->private;
2665 /* XXX: alternatively, we could get/drop in seq start/stop */
2670 static const struct file_operations client_states_fops = {
2671 .open = client_states_open,
2673 .llseek = seq_lseek,
2674 .release = client_opens_release,
2678 * Normally we refuse to destroy clients that are in use, but here the
2679 * administrator is telling us to just do it. We also want to wait
2680 * so the caller has a guarantee that the client's locks are gone by
2681 * the time the write returns:
2683 static void force_expire_client(struct nfs4_client *clp)
2685 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2686 bool already_expired;
2688 trace_nfsd_clid_admin_expired(&clp->cl_clientid);
2690 spin_lock(&nn->client_lock);
2692 spin_unlock(&nn->client_lock);
2694 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2695 spin_lock(&nn->client_lock);
2696 already_expired = list_empty(&clp->cl_lru);
2697 if (!already_expired)
2698 unhash_client_locked(clp);
2699 spin_unlock(&nn->client_lock);
2701 if (!already_expired)
2704 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2707 static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2708 size_t size, loff_t *pos)
2711 struct nfs4_client *clp;
2713 data = simple_transaction_get(file, buf, size);
2715 return PTR_ERR(data);
2716 if (size != 7 || 0 != memcmp(data, "expire\n", 7))
2718 clp = get_nfsdfs_clp(file_inode(file));
2721 force_expire_client(clp);
2726 static const struct file_operations client_ctl_fops = {
2727 .write = client_ctl_write,
2728 .release = simple_transaction_release,
2731 static const struct tree_descr client_files[] = {
2732 [0] = {"info", &client_info_fops, S_IRUSR},
2733 [1] = {"states", &client_states_fops, S_IRUSR},
2734 [2] = {"ctl", &client_ctl_fops, S_IWUSR},
2738 static struct nfs4_client *create_client(struct xdr_netobj name,
2739 struct svc_rqst *rqstp, nfs4_verifier *verf)
2741 struct nfs4_client *clp;
2742 struct sockaddr *sa = svc_addr(rqstp);
2744 struct net *net = SVC_NET(rqstp);
2745 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2746 struct dentry *dentries[ARRAY_SIZE(client_files)];
2748 clp = alloc_client(name);
2752 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2758 kref_init(&clp->cl_nfsdfs.cl_ref);
2759 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2760 clp->cl_time = ktime_get_boottime_seconds();
2761 clear_bit(0, &clp->cl_cb_slot_busy);
2762 copy_verf(clp, verf);
2763 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2764 clp->cl_cb_session = NULL;
2766 clp->cl_nfsd_dentry = nfsd_client_mkdir(
2767 nn, &clp->cl_nfsdfs,
2768 clp->cl_clientid.cl_id - nn->clientid_base,
2769 client_files, dentries);
2770 clp->cl_nfsd_info_dentry = dentries[0];
2771 if (!clp->cl_nfsd_dentry) {
2779 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2781 struct rb_node **new = &(root->rb_node), *parent = NULL;
2782 struct nfs4_client *clp;
2785 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2788 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2789 new = &((*new)->rb_left);
2791 new = &((*new)->rb_right);
2794 rb_link_node(&new_clp->cl_namenode, parent, new);
2795 rb_insert_color(&new_clp->cl_namenode, root);
2798 static struct nfs4_client *
2799 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2802 struct rb_node *node = root->rb_node;
2803 struct nfs4_client *clp;
2806 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2807 cmp = compare_blob(&clp->cl_name, name);
2809 node = node->rb_left;
2811 node = node->rb_right;
2819 add_to_unconfirmed(struct nfs4_client *clp)
2821 unsigned int idhashval;
2822 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2824 lockdep_assert_held(&nn->client_lock);
2826 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2827 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2828 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2829 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2830 renew_client_locked(clp);
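/*
 * Move a client from the unconfirmed id hash and name tree to the
 * confirmed ones; the caller holds nn->client_lock.
 */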
2834 move_to_confirmed(struct nfs4_client *clp)
2836 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2837 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2839 lockdep_assert_held(&nn->client_lock);
2841 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2842 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2843 add_clp_to_name_tree(clp, &nn->conf_name_tree);
2844 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2845 trace_nfsd_clid_confirmed(&clp->cl_clientid);
2846 renew_client_locked(clp);
2849 static struct nfs4_client *
2850 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2852 struct nfs4_client *clp;
2853 unsigned int idhashval = clientid_hashval(clid->cl_id);
2855 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2856 if (same_clid(&clp->cl_clientid, clid)) {
2857 if ((bool)clp->cl_minorversion != sessions)
2859 renew_client_locked(clp);
2866 static struct nfs4_client *
2867 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2869 struct list_head *tbl = nn->conf_id_hashtbl;
2871 lockdep_assert_held(&nn->client_lock);
2872 return find_client_in_id_table(tbl, clid, sessions);
2875 static struct nfs4_client *
2876 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2878 struct list_head *tbl = nn->unconf_id_hashtbl;
2880 lockdep_assert_held(&nn->client_lock);
2881 return find_client_in_id_table(tbl, clid, sessions);
2884 static bool clp_used_exchangeid(struct nfs4_client *clp)
2886 return clp->cl_exchange_flags != 0;
2889 static struct nfs4_client *
2890 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2892 lockdep_assert_held(&nn->client_lock);
2893 return find_clp_in_name_tree(name, &nn->conf_name_tree);
2896 static struct nfs4_client *
2897 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2899 lockdep_assert_held(&nn->client_lock);
2900 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
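/*
 * Parse the callback channel parameters (netid, universal address,
 * program, ident) from a SETCLIENTID request.  On any parse failure
 * the callback address is cleared, so no callbacks (and hence no
 * delegations) will be attempted for this client.
 */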
2904 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2906 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2907 struct sockaddr *sa = svc_addr(rqstp);
2908 u32 scopeid = rpc_get_scope_id(sa);
2909 unsigned short expected_family;
2911 /* Currently, we only support tcp and tcp6 for the callback channel */
2912 if (se->se_callback_netid_len == 3 &&
2913 !memcmp(se->se_callback_netid_val, "tcp", 3))
2914 expected_family = AF_INET;
2915 else if (se->se_callback_netid_len == 4 &&
2916 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2917 expected_family = AF_INET6;
2921 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2922 se->se_callback_addr_len,
2923 (struct sockaddr *)&conn->cb_addr,
2924 sizeof(conn->cb_addr));
2926 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2929 if (conn->cb_addr.ss_family == AF_INET6)
2930 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2932 conn->cb_prog = se->se_callback_prog;
2933 conn->cb_ident = se->se_callback_ident;
2934 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2935 trace_nfsd_cb_args(clp, conn);
2938 conn->cb_addr.ss_family = AF_UNSPEC;
2939 conn->cb_addrlen = 0;
2940 trace_nfsd_cb_nodelegs(clp);
2945 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2948 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2950 struct xdr_buf *buf = resp->xdr->buf;
2951 struct nfsd4_slot *slot = resp->cstate.slot;
2954 dprintk("--> %s slot %p\n", __func__, slot);
2956 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2957 slot->sl_opcnt = resp->opcnt;
2958 slot->sl_status = resp->cstate.status;
2959 free_svc_cred(&slot->sl_cred);
2960 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2962 if (!nfsd4_cache_this(resp)) {
2963 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2966 slot->sl_flags |= NFSD4_SLOT_CACHED;
2968 base = resp->cstate.data_offset;
2969 slot->sl_datalen = buf->len - base;
2970 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2971 WARN(1, "%s: sessions DRC could not cache compound\n",
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE, encode the uncached rep error on the next
 * operation, which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
2984 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2985 struct nfsd4_compoundres *resp)
2987 struct nfsd4_op *op;
2988 struct nfsd4_slot *slot = resp->cstate.slot;
2990 /* Encode the replayed sequence operation */
2991 op = &args->ops[resp->opcnt - 1];
2992 nfsd4_encode_operation(resp, op);
2994 if (slot->sl_flags & NFSD4_SLOT_CACHED)
2996 if (args->opcnt == 1) {
2998 * The original operation wasn't a solo sequence--we
 * always cache those--so this retry must not match the original:
3002 op->status = nfserr_seq_false_retry;
3004 op = &args->ops[resp->opcnt++];
3005 op->status = nfserr_retry_uncached_rep;
3006 nfsd4_encode_operation(resp, op);
 * The sequence operation is not cached because we can use the slot and
 * session values.
3016 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
3017 struct nfsd4_sequence *seq)
3019 struct nfsd4_slot *slot = resp->cstate.slot;
3020 struct xdr_stream *xdr = resp->xdr;
3024 dprintk("--> %s slot %p\n", __func__, slot);
3026 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
3030 p = xdr_reserve_space(xdr, slot->sl_datalen);
3033 return nfserr_serverfault;
3035 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
3036 xdr_commit_encode(xdr);
3038 resp->opcnt = slot->sl_opcnt;
3039 return slot->sl_status;
3043 * Set the exchange_id flags returned by the server.
3046 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3048 #ifdef CONFIG_NFSD_PNFS
3049 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3051 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3054 /* Referrals are supported, Migration is not. */
3055 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3057 /* set the wire flags to return to client. */
3058 clid->flags = new->cl_exchange_flags;
3061 static bool client_has_openowners(struct nfs4_client *clp)
3063 struct nfs4_openowner *oo;
3065 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3066 if (!list_empty(&oo->oo_owner.so_stateids))
3072 static bool client_has_state(struct nfs4_client *clp)
3074 return client_has_openowners(clp)
3075 #ifdef CONFIG_NFSD_PNFS
3076 || !list_empty(&clp->cl_lo_states)
3078 || !list_empty(&clp->cl_delegations)
3079 || !list_empty(&clp->cl_sessions)
3080 || !list_empty(&clp->async_copies);
3083 static __be32 copy_impl_id(struct nfs4_client *clp,
3084 struct nfsd4_exchange_id *exid)
3086 if (!exid->nii_domain.data)
3088 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3089 if (!clp->cl_nii_domain.data)
3090 return nfserr_jukebox;
3091 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3092 if (!clp->cl_nii_name.data)
3093 return nfserr_jukebox;
3094 clp->cl_nii_time = exid->nii_time;
3099 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3100 union nfsd4_op_u *u)
3102 struct nfsd4_exchange_id *exid = &u->exchange_id;
3103 struct nfs4_client *conf, *new;
3104 struct nfs4_client *unconf = NULL;
3106 char addr_str[INET6_ADDRSTRLEN];
3107 nfs4_verifier verf = exid->verifier;
3108 struct sockaddr *sa = svc_addr(rqstp);
3109 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3110 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3112 rpc_ntop(sa, addr_str, sizeof(addr_str));
3113 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3114 "ip_addr=%s flags %x, spa_how %u\n",
3115 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
3116 addr_str, exid->flags, exid->spa_how);
3118 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3119 return nfserr_inval;
3121 new = create_client(exid->clname, rqstp, &verf);
3123 return nfserr_jukebox;
3124 status = copy_impl_id(new, exid);
3128 switch (exid->spa_how) {
3130 exid->spo_must_enforce[0] = 0;
3131 exid->spo_must_enforce[1] = (
3132 1 << (OP_BIND_CONN_TO_SESSION - 32) |
3133 1 << (OP_EXCHANGE_ID - 32) |
3134 1 << (OP_CREATE_SESSION - 32) |
3135 1 << (OP_DESTROY_SESSION - 32) |
3136 1 << (OP_DESTROY_CLIENTID - 32));
3138 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3139 1 << (OP_OPEN_DOWNGRADE) |
3141 1 << (OP_DELEGRETURN));
3143 exid->spo_must_allow[1] &= (
3144 1 << (OP_TEST_STATEID - 32) |
3145 1 << (OP_FREE_STATEID - 32));
3146 if (!svc_rqst_integrity_protected(rqstp)) {
3147 status = nfserr_inval;
3151 * Sometimes userspace doesn't give us a principal.
3152 * Which is a bug, really. Anyway, we can't enforce
3153 * MACH_CRED in that case, better to give up now:
3155 if (!new->cl_cred.cr_principal &&
3156 !new->cl_cred.cr_raw_principal) {
3157 status = nfserr_serverfault;
3160 new->cl_mach_cred = true;
3164 default: /* checked by xdr code */
3168 status = nfserr_encr_alg_unsupp;
3172 /* Cases below refer to rfc 5661 section 18.35.4: */
3173 spin_lock(&nn->client_lock);
3174 conf = find_confirmed_client_by_name(&exid->clname, nn);
3176 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3177 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3180 if (!clp_used_exchangeid(conf)) { /* buggy client */
3181 status = nfserr_inval;
3184 if (!nfsd4_mach_creds_match(conf, rqstp)) {
3185 status = nfserr_wrong_cred;
3188 if (!creds_match) { /* case 9 */
3189 status = nfserr_perm;
3192 if (!verfs_match) { /* case 8 */
3193 status = nfserr_not_same;
3197 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3198 trace_nfsd_clid_confirmed_r(conf);
3201 if (!creds_match) { /* case 3 */
3202 if (client_has_state(conf)) {
3203 status = nfserr_clid_inuse;
3204 trace_nfsd_clid_cred_mismatch(conf, rqstp);
3209 if (verfs_match) { /* case 2 */
3210 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3211 trace_nfsd_clid_confirmed_r(conf);
3214 /* case 5, client reboot */
3215 trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
3220 if (update) { /* case 7 */
3221 status = nfserr_noent;
3225 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
3226 if (unconf) /* case 4, possible retry or client restart */
3227 unhash_client_locked(unconf);
3229 /* case 1, new owner ID */
3230 trace_nfsd_clid_fresh(new);
3234 status = mark_client_expired_locked(conf);
3237 trace_nfsd_clid_replaced(&conf->cl_clientid);
3239 new->cl_minorversion = cstate->minorversion;
3240 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3241 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3243 add_to_unconfirmed(new);
3246 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3247 exid->clientid.cl_id = conf->cl_clientid.cl_id;
3249 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3250 nfsd4_set_ex_flags(conf, exid);
3252 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3253 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3257 spin_unlock(&nn->client_lock);
3262 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
3263 expire_client(unconf);
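/*
 * Compare a request's sequence id against the slot's recorded seqid:
 * the next seqid in order is accepted, a repeat of the current seqid
 * is a replay (or jukebox while the slot is still in use), anything
 * else is misordered.
 */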
3269 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
3271 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
3274 /* The slot is in use, and no response has been sent. */
3276 if (seqid == slot_seqid)
3277 return nfserr_jukebox;
3279 return nfserr_seq_misordered;
3281 /* Note unsigned 32-bit arithmetic handles wraparound: */
3282 if (likely(seqid == slot_seqid + 1))
3284 if (seqid == slot_seqid)
3285 return nfserr_replay_cache;
3286 return nfserr_seq_misordered;
3290 * Cache the create session result into the create session single DRC
3291 * slot cache by saving the xdr structure. sl_seqid has been set.
3292 * Do this for solo or embedded create session operations.
3295 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3296 struct nfsd4_clid_slot *slot, __be32 nfserr)
3298 slot->sl_status = nfserr;
3299 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3303 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3304 struct nfsd4_clid_slot *slot)
3306 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3307 return slot->sl_status;
3310 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
3311 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3312 1 + /* MIN tag is length with zero, only length */ \
3313 3 + /* version, opcount, opcode */ \
3314 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3315 /* seqid, slotID, slotID, cache */ \
3316 4 ) * sizeof(__be32))
3318 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3319 2 + /* verifier: AUTH_NULL, length 0 */\
3321 1 + /* MIN tag is length with zero, only length */ \
3322 3 + /* opcount, opcode, opstatus*/ \
3323 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3324 /* seqid, slotID, slotID, slotID, status */ \
3325 5 ) * sizeof(__be32))
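/*
 * Clamp the client's requested fore-channel attributes to what this
 * server supports, and reserve DRC memory for the slot table.
 */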
3327 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3329 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3331 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3332 return nfserr_toosmall;
3333 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3334 return nfserr_toosmall;
3335 ca->headerpadsz = 0;
3336 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3337 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3338 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3339 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3340 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3341 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3343 * Note decreasing slot size below client's request may make it
3344 * difficult for client to function correctly, whereas
3345 * decreasing the number of slots will (just?) affect
3346 * performance. When short on memory we therefore prefer to
3347 * decrease number of slots instead of their size. Clients that
3348 * request larger slots than they need will get poor results:
3349 * Note that we always allow at least one slot, because our
3350 * accounting is soft and provides no guarantees either way.
3352 ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3358 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
 * These are based on similar macros in linux/sunrpc/msg_prot.h.
3361 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
3362 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3364 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3365 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3367 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
3368 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3369 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
3370 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3373 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3375 ca->headerpadsz = 0;
3377 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3378 return nfserr_toosmall;
3379 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3380 return nfserr_toosmall;
3381 ca->maxresp_cached = 0;
3383 return nfserr_toosmall;
3388 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3390 switch (cbs->flavor) {
3396 * GSS case: the spec doesn't allow us to return this
 * error.  But it also doesn't allow us not to support GSS.
3399 * I'd rather this fail hard than return some error the
3400 * client might think it can already handle:
3402 return nfserr_encr_alg_unsupp;
3407 nfsd4_create_session(struct svc_rqst *rqstp,
3408 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3410 struct nfsd4_create_session *cr_ses = &u->create_session;
3411 struct sockaddr *sa = svc_addr(rqstp);
3412 struct nfs4_client *conf, *unconf;
3413 struct nfs4_client *old = NULL;
3414 struct nfsd4_session *new;
3415 struct nfsd4_conn *conn;
3416 struct nfsd4_clid_slot *cs_slot = NULL;
3418 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3420 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3421 return nfserr_inval;
3422 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3425 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3428 status = check_backchannel_attrs(&cr_ses->back_channel);
3430 goto out_release_drc_mem;
3431 status = nfserr_jukebox;
3432 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3434 goto out_release_drc_mem;
3435 conn = alloc_conn_from_crses(rqstp, cr_ses);
3437 goto out_free_session;
3439 spin_lock(&nn->client_lock);
3440 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3441 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3442 WARN_ON_ONCE(conf && unconf);
3445 status = nfserr_wrong_cred;
3446 if (!nfsd4_mach_creds_match(conf, rqstp))
3448 cs_slot = &conf->cl_cs_slot;
3449 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3451 if (status == nfserr_replay_cache)
3452 status = nfsd4_replay_create_session(cr_ses, cs_slot);
3455 } else if (unconf) {
3456 status = nfserr_clid_inuse;
3457 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3458 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3459 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
3462 status = nfserr_wrong_cred;
3463 if (!nfsd4_mach_creds_match(unconf, rqstp))
3465 cs_slot = &unconf->cl_cs_slot;
3466 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3468 /* an unconfirmed replay returns misordered */
3469 status = nfserr_seq_misordered;
3472 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3474 status = mark_client_expired_locked(old);
3479 trace_nfsd_clid_replaced(&old->cl_clientid);
3481 move_to_confirmed(unconf);
3484 status = nfserr_stale_clientid;
3488 /* Persistent sessions are not supported */
3489 cr_ses->flags &= ~SESSION4_PERSIST;
3490 /* Upshifting from TCP to RDMA is not supported */
3491 cr_ses->flags &= ~SESSION4_RDMA;
3493 init_session(rqstp, new, conf, cr_ses);
3494 nfsd4_get_session_locked(new);
3496 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3497 NFS4_MAX_SESSIONID_LEN);
3498 cs_slot->sl_seqid++;
3499 cr_ses->seqid = cs_slot->sl_seqid;
3501 /* cache solo and embedded create sessions under the client_lock */
3502 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3503 spin_unlock(&nn->client_lock);
3505 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
3506 /* init connection and backchannel */
3507 nfsd4_init_conn(rqstp, conn, new);
3508 nfsd4_put_session(new);
3513 spin_unlock(&nn->client_lock);
3518 __free_session(new);
3519 out_release_drc_mem:
3520 nfsd4_put_drc_mem(&cr_ses->fore_channel);
3524 static __be32 nfsd4_map_bcts_dir(u32 *dir)
3527 case NFS4_CDFC4_FORE:
3528 case NFS4_CDFC4_BACK:
3530 case NFS4_CDFC4_FORE_OR_BOTH:
3531 case NFS4_CDFC4_BACK_OR_BOTH:
3532 *dir = NFS4_CDFC4_BOTH;
3535 return nfserr_inval;
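/*
 * BACKCHANNEL_CTL: record the new callback program and security
 * parameters on the session, then re-probe the callback channel.
 */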
3538 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3539 struct nfsd4_compound_state *cstate,
3540 union nfsd4_op_u *u)
3542 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3543 struct nfsd4_session *session = cstate->session;
3544 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3547 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3550 spin_lock(&nn->client_lock);
3551 session->se_cb_prog = bc->bc_cb_program;
3552 session->se_cb_sec = bc->bc_cb_sec;
3553 spin_unlock(&nn->client_lock);
3555 nfsd4_probe_callback(session->se_client);
3560 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3562 struct nfsd4_conn *c;
3564 list_for_each_entry(c, &s->se_conns, cn_persession) {
3565 if (c->cn_xprt == xpt) {
3572 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
3573 struct nfsd4_session *session, u32 req)
3575 struct nfs4_client *clp = session->se_client;
3576 struct svc_xprt *xpt = rqst->rq_xprt;
3577 struct nfsd4_conn *c;
3580 /* Following the last paragraph of RFC 5661 Section 18.34.3: */
3581 spin_lock(&clp->cl_lock);
3582 c = __nfsd4_find_conn(xpt, session);
3584 status = nfserr_noent;
3585 else if (req == c->cn_flags)
3587 else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
3588 c->cn_flags != NFS4_CDFC4_BACK)
3590 else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
3591 c->cn_flags != NFS4_CDFC4_FORE)
3594 status = nfserr_inval;
3595 spin_unlock(&clp->cl_lock);
3599 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
3600 struct nfsd4_compound_state *cstate,
3601 union nfsd4_op_u *u)
3603 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
3605 struct nfsd4_conn *conn;
3606 struct nfsd4_session *session;
3607 struct net *net = SVC_NET(rqstp);
3608 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3610 if (!nfsd4_last_compound_op(rqstp))
3611 return nfserr_not_only_op;
3612 spin_lock(&nn->client_lock);
3613 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
3614 spin_unlock(&nn->client_lock);
3616 goto out_no_session;
3617 status = nfserr_wrong_cred;
3618 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
3620 status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
3621 if (status == nfs_ok || status == nfserr_inval)
3623 status = nfsd4_map_bcts_dir(&bcts->dir);
3626 conn = alloc_conn(rqstp, bcts->dir);
3627 status = nfserr_jukebox;
3630 nfsd4_init_conn(rqstp, conn, session);
3633 nfsd4_put_session(session);
3638 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
3640 if (!cstate->session)
3642 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
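/*
 * DESTROY_SESSION: check machine credentials, mark the session dead
 * and unhash it, then probe the callback channel synchronously before
 * dropping the remaining references.
 */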
3646 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
3647 union nfsd4_op_u *u)
3649 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
3650 struct nfsd4_session *ses;
3652 int ref_held_by_me = 0;
3653 struct net *net = SVC_NET(r);
3654 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3656 status = nfserr_not_only_op;
3657 if (nfsd4_compound_in_session(cstate, sessionid)) {
3658 if (!nfsd4_last_compound_op(r))
3662 dump_sessionid(__func__, sessionid);
3663 spin_lock(&nn->client_lock);
3664 ses = find_in_sessionid_hashtbl(sessionid, net, &status);
3666 goto out_client_lock;
3667 status = nfserr_wrong_cred;
3668 if (!nfsd4_mach_creds_match(ses->se_client, r))
3669 goto out_put_session;
3670 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
3672 goto out_put_session;
3673 unhash_session(ses);
3674 spin_unlock(&nn->client_lock);
3676 nfsd4_probe_callback_sync(ses->se_client);
3678 spin_lock(&nn->client_lock);
3681 nfsd4_put_session_locked(ses);
3683 spin_unlock(&nn->client_lock);
3688 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3690 struct nfs4_client *clp = ses->se_client;
3691 struct nfsd4_conn *c;
3692 __be32 status = nfs_ok;
3695 spin_lock(&clp->cl_lock);
3696 c = __nfsd4_find_conn(new->cn_xprt, ses);
3699 status = nfserr_conn_not_bound_to_session;
3700 if (clp->cl_mach_cred)
3702 __nfsd4_hash_conn(new, ses);
3703 spin_unlock(&clp->cl_lock);
3704 ret = nfsd4_register_conn(new);
3706 /* oops; xprt is already down: */
3707 nfsd4_conn_lost(&new->cn_xpt_user);
3710 spin_unlock(&clp->cl_lock);
3715 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3717 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3719 return args->opcnt > session->se_fchannel.maxops;
3722 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3723 struct nfsd4_session *session)
3725 struct xdr_buf *xb = &rqstp->rq_arg;
3727 return xb->len > session->se_fchannel.maxreq_sz;
3730 static bool replay_matches_cache(struct svc_rqst *rqstp,
3731 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3733 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3735 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3736 (bool)seq->cachethis)
 * If there's an error then the reply can have fewer ops than
 * the call.
3742 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
3745 * But if we cached a reply with *more* ops than the call you're
3746 * sending us now, then this new call is clearly not really a
3747 * replay of the old one:
3749 if (slot->sl_opcnt > argp->opcnt)
3751 /* This is the only check explicitly called by spec: */
3752 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3755 * There may be more comparisons we could actually do, but the
3756 * spec doesn't require us to catch every case where the calls
3757 * don't match (that would require caching the call as well as
3758 * the reply), so we don't bother.
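/*
 * SEQUENCE: look up the session, validate the slot and sequence id
 * (replaying the cached reply where appropriate), bind this connection
 * to the session, and reserve reply space per the channel attributes.
 */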
3764 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3765 union nfsd4_op_u *u)
3767 struct nfsd4_sequence *seq = &u->sequence;
3768 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3769 struct xdr_stream *xdr = resp->xdr;
3770 struct nfsd4_session *session;
3771 struct nfs4_client *clp;
3772 struct nfsd4_slot *slot;
3773 struct nfsd4_conn *conn;
3776 struct net *net = SVC_NET(rqstp);
3777 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3779 if (resp->opcnt != 1)
3780 return nfserr_sequence_pos;
3783 * Will be either used or freed by nfsd4_sequence_check_conn
3786 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3788 return nfserr_jukebox;
3790 spin_lock(&nn->client_lock);
3791 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3793 goto out_no_session;
3794 clp = session->se_client;
3796 status = nfserr_too_many_ops;
3797 if (nfsd4_session_too_many_ops(rqstp, session))
3798 goto out_put_session;
3800 status = nfserr_req_too_big;
3801 if (nfsd4_request_too_big(rqstp, session))
3802 goto out_put_session;
3804 status = nfserr_badslot;
3805 if (seq->slotid >= session->se_fchannel.maxreqs)
3806 goto out_put_session;
3808 slot = session->se_slots[seq->slotid];
3809 dprintk("%s: slotid %d\n", __func__, seq->slotid);
3811 /* We do not negotiate the number of slots yet, so set the
3812 * maxslots to the session maxreqs which is used to encode
 * sr_highest_slotid and the sr_target_slotid to maxslots */
3814 seq->maxslots = session->se_fchannel.maxreqs;
3816 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3817 slot->sl_flags & NFSD4_SLOT_INUSE);
3818 if (status == nfserr_replay_cache) {
3819 status = nfserr_seq_misordered;
3820 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3821 goto out_put_session;
3822 status = nfserr_seq_false_retry;
3823 if (!replay_matches_cache(rqstp, seq, slot))
3824 goto out_put_session;
3825 cstate->slot = slot;
3826 cstate->session = session;
3828 /* Return the cached reply status and set cstate->status
3829 * for nfsd4_proc_compound processing */
3830 status = nfsd4_replay_cache_entry(resp, seq);
3831 cstate->status = nfserr_replay_cache;
3835 goto out_put_session;
3837 status = nfsd4_sequence_check_conn(conn, session);
3840 goto out_put_session;
3842 buflen = (seq->cachethis) ?
3843 session->se_fchannel.maxresp_cached :
3844 session->se_fchannel.maxresp_sz;
3845 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3847 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3848 goto out_put_session;
3849 svc_reserve(rqstp, buflen);
3852 /* Success! bump slot seqid */
3853 slot->sl_seqid = seq->seqid;
3854 slot->sl_flags |= NFSD4_SLOT_INUSE;
3856 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3858 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3860 cstate->slot = slot;
3861 cstate->session = session;
3865 switch (clp->cl_cb_state) {
3867 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3869 case NFSD4_CB_FAULT:
3870 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3873 seq->status_flags = 0;
3875 if (!list_empty(&clp->cl_revoked))
3876 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3880 spin_unlock(&nn->client_lock);
3883 nfsd4_put_session_locked(session);
3884 goto out_no_session;
3888 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3890 struct nfsd4_compound_state *cs = &resp->cstate;
3892 if (nfsd4_has_session(cs)) {
3893 if (cs->status != nfserr_replay_cache) {
3894 nfsd4_store_cache_entry(resp);
3895 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3897 /* Drop session reference that was taken in nfsd4_sequence() */
3898 nfsd4_put_session(cs->session);
3900 put_client_renew(cs->clp);
3904 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3905 struct nfsd4_compound_state *cstate,
3906 union nfsd4_op_u *u)
3908 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
3909 struct nfs4_client *conf, *unconf;
3910 struct nfs4_client *clp = NULL;
3912 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3914 spin_lock(&nn->client_lock);
3915 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3916 conf = find_confirmed_client(&dc->clientid, true, nn);
3917 WARN_ON_ONCE(conf && unconf);
3920 if (client_has_state(conf)) {
3921 status = nfserr_clientid_busy;
3924 status = mark_client_expired_locked(conf);
3931 status = nfserr_stale_clientid;
3934 if (!nfsd4_mach_creds_match(clp, rqstp)) {
3936 status = nfserr_wrong_cred;
3939 trace_nfsd_clid_destroyed(&clp->cl_clientid);
3940 unhash_client_locked(clp);
3942 spin_unlock(&nn->client_lock);
3949 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3950 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3952 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
3953 struct nfs4_client *clp = cstate->clp;
3956 if (rc->rca_one_fs) {
3957 if (!cstate->current_fh.fh_dentry)
3958 return nfserr_nofilehandle;
3960 * We don't take advantage of the rca_one_fs case.
3961 * That's OK, it's optional, we can safely ignore it.
3966 status = nfserr_complete_already;
3967 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
3970 status = nfserr_stale_clientid;
3971 if (is_client_expired(clp))
3973 * The following error isn't really legal.
3974 * But we only get here if the client just explicitly
3975 * destroyed the client. Surely it no longer cares what
 * error it gets back on an operation for the dead client.
3982 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
3983 nfsd4_client_record_create(clp);
3984 inc_reclaim_complete(clp);
3990 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3991 union nfsd4_op_u *u)
3993 struct nfsd4_setclientid *setclid = &u->setclientid;
3994 struct xdr_netobj clname = setclid->se_name;
3995 nfs4_verifier clverifier = setclid->se_verf;
3996 struct nfs4_client *conf, *new;
3997 struct nfs4_client *unconf = NULL;
3999 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4001 new = create_client(clname, rqstp, &clverifier);
4003 return nfserr_jukebox;
4004 spin_lock(&nn->client_lock);
4005 conf = find_confirmed_client_by_name(&clname, nn);
4006 if (conf && client_has_state(conf)) {
4007 status = nfserr_clid_inuse;
4008 if (clp_used_exchangeid(conf))
4010 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4011 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4015 unconf = find_unconfirmed_client_by_name(&clname, nn);
4017 unhash_client_locked(unconf);
4019 if (same_verf(&conf->cl_verifier, &clverifier)) {
4020 copy_clid(new, conf);
4021 gen_confirm(new, nn);
4023 trace_nfsd_clid_verf_mismatch(conf, rqstp,
4026 trace_nfsd_clid_fresh(new);
4027 new->cl_minorversion = 0;
4028 gen_callback(new, setclid, rqstp);
4029 add_to_unconfirmed(new);
4030 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
4031 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
4032 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
4036 spin_unlock(&nn->client_lock);
4040 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
4041 expire_client(unconf);
4047 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
4048 struct nfsd4_compound_state *cstate,
4049 union nfsd4_op_u *u)
4051 struct nfsd4_setclientid_confirm *setclientid_confirm =
4052 &u->setclientid_confirm;
4053 struct nfs4_client *conf, *unconf;
4054 struct nfs4_client *old = NULL;
4055 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
4056 clientid_t * clid = &setclientid_confirm->sc_clientid;
4058 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4060 if (STALE_CLIENTID(clid, nn))
4061 return nfserr_stale_clientid;
4063 spin_lock(&nn->client_lock);
4064 conf = find_confirmed_client(clid, false, nn);
4065 unconf = find_unconfirmed_client(clid, false, nn);
4067 * We try hard to give out unique clientid's, so if we get an
4068 * attempt to confirm the same clientid with a different cred,
4069 * the client may be buggy; this should never happen.
4071 * Nevertheless, RFC 7530 recommends INUSE for this case:
4073 status = nfserr_clid_inuse;
4074 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
4075 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
4078 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4079 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4082 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
4083 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
4086 status = nfserr_stale_clientid;
4092 unhash_client_locked(old);
4093 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
4095 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
4097 status = nfserr_clid_inuse;
4098 if (client_has_state(old)
4099 && !same_creds(&unconf->cl_cred,
4102 status = mark_client_expired_locked(old);
4107 trace_nfsd_clid_replaced(&old->cl_clientid);
4109 move_to_confirmed(unconf);
4112 get_client_locked(conf);
4113 spin_unlock(&nn->client_lock);
4115 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
4116 nfsd4_probe_callback(conf);
4117 spin_lock(&nn->client_lock);
4118 put_client_renew_locked(conf);
4120 spin_unlock(&nn->client_lock);
4126 static struct nfs4_file *nfsd4_alloc_file(void)
4128 return kmem_cache_alloc(file_slab, GFP_KERNEL);
4131 /* OPEN Share state helper functions */
4132 static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval,
4133 struct nfs4_file *fp)
4135 lockdep_assert_held(&state_lock);
4137 refcount_set(&fp->fi_ref, 1);
4138 spin_lock_init(&fp->fi_lock);
4139 INIT_LIST_HEAD(&fp->fi_stateids);
4140 INIT_LIST_HEAD(&fp->fi_delegations);
4141 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4142 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
4143 fp->fi_deleg_file = NULL;
4144 fp->fi_had_conflict = false;
4145 fp->fi_share_deny = 0;
4146 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4147 memset(fp->fi_access, 0, sizeof(fp->fi_access));
4148 fp->fi_aliased = false;
4149 fp->fi_inode = d_inode(fh->fh_dentry);
4150 #ifdef CONFIG_NFSD_PNFS
4151 INIT_LIST_HEAD(&fp->fi_lo_states);
4152 atomic_set(&fp->fi_lo_recalls, 0);
4154 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
4158 nfsd4_free_slabs(void)
4160 kmem_cache_destroy(client_slab);
4161 kmem_cache_destroy(openowner_slab);
4162 kmem_cache_destroy(lockowner_slab);
4163 kmem_cache_destroy(file_slab);
4164 kmem_cache_destroy(stateid_slab);
4165 kmem_cache_destroy(deleg_slab);
4166 kmem_cache_destroy(odstate_slab);
4170 nfsd4_init_slabs(void)
4172 client_slab = kmem_cache_create("nfsd4_clients",
4173 sizeof(struct nfs4_client), 0, 0, NULL);
4174 if (client_slab == NULL)
4176 openowner_slab = kmem_cache_create("nfsd4_openowners",
4177 sizeof(struct nfs4_openowner), 0, 0, NULL);
4178 if (openowner_slab == NULL)
4179 goto out_free_client_slab;
4180 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
4181 sizeof(struct nfs4_lockowner), 0, 0, NULL);
4182 if (lockowner_slab == NULL)
4183 goto out_free_openowner_slab;
4184 file_slab = kmem_cache_create("nfsd4_files",
4185 sizeof(struct nfs4_file), 0, 0, NULL);
4186 if (file_slab == NULL)
4187 goto out_free_lockowner_slab;
4188 stateid_slab = kmem_cache_create("nfsd4_stateids",
4189 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
4190 if (stateid_slab == NULL)
4191 goto out_free_file_slab;
4192 deleg_slab = kmem_cache_create("nfsd4_delegations",
4193 sizeof(struct nfs4_delegation), 0, 0, NULL);
4194 if (deleg_slab == NULL)
4195 goto out_free_stateid_slab;
4196 odstate_slab = kmem_cache_create("nfsd4_odstate",
4197 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
4198 if (odstate_slab == NULL)
4199 goto out_free_deleg_slab;
4202 out_free_deleg_slab:
4203 kmem_cache_destroy(deleg_slab);
4204 out_free_stateid_slab:
4205 kmem_cache_destroy(stateid_slab);
4207 kmem_cache_destroy(file_slab);
4208 out_free_lockowner_slab:
4209 kmem_cache_destroy(lockowner_slab);
4210 out_free_openowner_slab:
4211 kmem_cache_destroy(openowner_slab);
4212 out_free_client_slab:
4213 kmem_cache_destroy(client_slab);
4218 static void init_nfs4_replay(struct nfs4_replay *rp)
4220 rp->rp_status = nfserr_serverfault;
4222 rp->rp_buf = rp->rp_ibuf;
4223 mutex_init(&rp->rp_mutex);
4226 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4227 struct nfs4_stateowner *so)
4229 if (!nfsd4_has_session(cstate)) {
4230 mutex_lock(&so->so_replay.rp_mutex);
4231 cstate->replay_owner = nfs4_get_stateowner(so);
4235 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4237 struct nfs4_stateowner *so = cstate->replay_owner;
4240 cstate->replay_owner = NULL;
4241 mutex_unlock(&so->so_replay.rp_mutex);
4242 nfs4_put_stateowner(so);
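/*
 * Allocate a stateowner from the given slab and duplicate the owner's
 * opaque name; returns NULL if either allocation fails.
 */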
4246 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4248 struct nfs4_stateowner *sop;
4250 sop = kmem_cache_alloc(slab, GFP_KERNEL);
4254 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4255 if (!sop->so_owner.data) {
4256 kmem_cache_free(slab, sop);
4260 INIT_LIST_HEAD(&sop->so_stateids);
4261 sop->so_client = clp;
4262 init_nfs4_replay(&sop->so_replay);
4263 atomic_set(&sop->so_count, 1);
4267 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4269 lockdep_assert_held(&clp->cl_lock);
4271 list_add(&oo->oo_owner.so_strhash,
4272 &clp->cl_ownerstr_hashtbl[strhashval]);
4273 list_add(&oo->oo_perclient, &clp->cl_openowners);
4276 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4278 unhash_openowner_locked(openowner(so));
4281 static void nfs4_free_openowner(struct nfs4_stateowner *so)
4283 struct nfs4_openowner *oo = openowner(so);
4285 kmem_cache_free(openowner_slab, oo);
4288 static const struct nfs4_stateowner_operations openowner_ops = {
4289 .so_unhash = nfs4_unhash_openowner,
4290 .so_free = nfs4_free_openowner,
4293 static struct nfs4_ol_stateid *
4294 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4296 struct nfs4_ol_stateid *local, *ret = NULL;
4297 struct nfs4_openowner *oo = open->op_openowner;
4299 lockdep_assert_held(&fp->fi_lock);
4301 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4302 /* ignore lock owners */
4303 if (local->st_stateowner->so_is_open_owner == 0)
4305 if (local->st_stateowner != &oo->oo_owner)
4307 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
4309 refcount_inc(&ret->st_stid.sc_count);
4317 nfsd4_verify_open_stid(struct nfs4_stid *s)
4319 __be32 ret = nfs_ok;
4321 switch (s->sc_type) {
4325 case NFS4_CLOSED_STID:
4326 case NFS4_CLOSED_DELEG_STID:
4327 ret = nfserr_bad_stateid;
4329 case NFS4_REVOKED_DELEG_STID:
4330 ret = nfserr_deleg_revoked;
4335 /* Lock the stateid st_mutex, and deal with races with CLOSE */
4337 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4341 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4342 ret = nfsd4_verify_open_stid(&stp->st_stid);
4344 mutex_unlock(&stp->st_mutex);
4348 static struct nfs4_ol_stateid *
4349 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4351 struct nfs4_ol_stateid *stp;
4353 spin_lock(&fp->fi_lock);
4354 stp = nfsd4_find_existing_open(fp, open);
4355 spin_unlock(&fp->fi_lock);
4356 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4358 nfs4_put_stid(&stp->st_stid);
4363 static struct nfs4_openowner *
4364 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4365 struct nfsd4_compound_state *cstate)
4367 struct nfs4_client *clp = cstate->clp;
4368 struct nfs4_openowner *oo, *ret;
4370 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4373 oo->oo_owner.so_ops = &openowner_ops;
4374 oo->oo_owner.so_is_open_owner = 1;
4375 oo->oo_owner.so_seqid = open->op_seqid;
4377 if (nfsd4_has_session(cstate))
4378 oo->oo_flags |= NFS4_OO_CONFIRMED;
4380 oo->oo_last_closed_stid = NULL;
4381 INIT_LIST_HEAD(&oo->oo_close_lru);
4382 spin_lock(&clp->cl_lock);
4383 ret = find_openstateowner_str_locked(strhashval, open, clp);
4385 hash_openowner(oo, clp, strhashval);
4388 nfs4_free_stateowner(&oo->oo_owner);
4390 spin_unlock(&clp->cl_lock);
4394 static struct nfs4_ol_stateid *
4395 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4398 struct nfs4_openowner *oo = open->op_openowner;
4399 struct nfs4_ol_stateid *retstp = NULL;
4400 struct nfs4_ol_stateid *stp;
4403 /* We are moving these outside of the spinlocks to avoid the warnings */
4404 mutex_init(&stp->st_mutex);
4405 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4408 spin_lock(&oo->oo_owner.so_client->cl_lock);
4409 spin_lock(&fp->fi_lock);
4411 retstp = nfsd4_find_existing_open(fp, open);
4415 open->op_stp = NULL;
4416 refcount_inc(&stp->st_stid.sc_count);
4417 stp->st_stid.sc_type = NFS4_OPEN_STID;
4418 INIT_LIST_HEAD(&stp->st_locks);
4419 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4421 stp->st_stid.sc_file = fp;
4422 stp->st_access_bmap = 0;
4423 stp->st_deny_bmap = 0;
4424 stp->st_openstp = NULL;
4425 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4426 list_add(&stp->st_perfile, &fp->fi_stateids);
4429 spin_unlock(&fp->fi_lock);
4430 spin_unlock(&oo->oo_owner.so_client->cl_lock);
4432 /* Handle races with CLOSE */
4433 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4434 nfs4_put_stid(&retstp->st_stid);
4437 /* To keep mutex tracking happy */
4438 mutex_unlock(&stp->st_mutex);
4445 * In the 4.0 case we need to keep the owners around a little while to handle
4446 * CLOSE replay. We still do need to release any file access that is held by
4447 * them before returning however.
4450 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4452 struct nfs4_ol_stateid *last;
4453 struct nfs4_openowner *oo = openowner(s->st_stateowner);
4454 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4457 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4460 * We know that we hold one reference via nfsd4_close, and another
4461 * "persistent" reference for the client. If the refcount is higher
4462 * than 2, then there are still calls in progress that are using this
4463 * stateid. We can't put the sc_file reference until they are finished.
4464 * Wait for the refcount to drop to 2. Since it has been unhashed,
4465 * there should be no danger of the refcount going back up again at
4468 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
4470 release_all_access(s);
4471 if (s->st_stid.sc_file) {
4472 put_nfs4_file(s->st_stid.sc_file);
4473 s->st_stid.sc_file = NULL;
4476 spin_lock(&nn->client_lock);
4477 last = oo->oo_last_closed_stid;
4478 oo->oo_last_closed_stid = s;
4479 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
4480 oo->oo_time = ktime_get_boottime_seconds();
4481 spin_unlock(&nn->client_lock);
4483 nfs4_put_stid(&last->st_stid);
4486 /* search file_hashtbl[] for file */
4487 static struct nfs4_file *
4488 find_file_locked(struct svc_fh *fh, unsigned int hashval)
4490 struct nfs4_file *fp;
4492 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
4493 lockdep_is_held(&state_lock)) {
4494 if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
4495 if (refcount_inc_not_zero(&fp->fi_ref))
4502 static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh,
4503 unsigned int hashval)
4505 struct nfs4_file *fp;
4506 struct nfs4_file *ret = NULL;
4507 bool alias_found = false;
4509 spin_lock(&state_lock);
4510 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
4511 lockdep_is_held(&state_lock)) {
4512 if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
4513 if (refcount_inc_not_zero(&fp->fi_ref))
4515 } else if (d_inode(fh->fh_dentry) == fp->fi_inode)
4516 fp->fi_aliased = alias_found = true;
4518 if (likely(ret == NULL)) {
4519 nfsd4_init_file(fh, hashval, new);
4520 new->fi_aliased = alias_found;
4523 spin_unlock(&state_lock);
4527 static struct nfs4_file * find_file(struct svc_fh *fh)
4529 struct nfs4_file *fp;
4530 unsigned int hashval = file_hashval(fh);
4533 fp = find_file_locked(fh, hashval);
4538 static struct nfs4_file *
4539 find_or_add_file(struct nfs4_file *new, struct svc_fh *fh)
4541 struct nfs4_file *fp;
4542 unsigned int hashval = file_hashval(fh);
4545 fp = find_file_locked(fh, hashval);
4550 return insert_file(new, fh, hashval);
4554 * Called to check deny when READ with all zero stateid or
4555 * WRITE with all zero or all one stateid
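 *
 * Editor's note: the check below is only a lookup; if any existing open
 * has denied the requested access mode the anonymous I/O is refused with
 * nfserr_locked, otherwise it proceeds without creating any state.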
4558 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
4560 struct nfs4_file *fp;
4561 __be32 ret = nfs_ok;
4563 fp = find_file(current_fh);
4566 /* Check for conflicting share reservations */
4567 spin_lock(&fp->fi_lock);
4568 if (fp->fi_share_deny & deny_type)
4569 ret = nfserr_locked;
4570 spin_unlock(&fp->fi_lock);
4575 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
4577 struct nfs4_delegation *dp = cb_to_delegation(cb);
4578 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
4581 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
4584 * We can't do this in nfsd_break_deleg_cb because it is
4585 * already holding inode->i_lock.
4587 * If the dl_time != 0, then we know that it has already been
4588 * queued for a lease break. Don't queue it again.
4590 spin_lock(&state_lock);
4591 if (dp->dl_time == 0) {
4592 dp->dl_time = ktime_get_boottime_seconds();
4593 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
4595 spin_unlock(&state_lock);
4598 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
4599 struct rpc_task *task)
4601 struct nfs4_delegation *dp = cb_to_delegation(cb);
4603 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
4604 dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4607 switch (task->tk_status) {
4610 case -NFS4ERR_DELAY:
4611 rpc_delay(task, 2 * HZ);
4614 case -NFS4ERR_BAD_STATEID:
4616 * Race: client probably got cb_recall before open reply
4617 * granting delegation.
4619 if (dp->dl_retries--) {
4620 rpc_delay(task, 2 * HZ);
4629 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
4631 struct nfs4_delegation *dp = cb_to_delegation(cb);
4633 nfs4_put_stid(&dp->dl_stid);
4636 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
4637 .prepare = nfsd4_cb_recall_prepare,
4638 .done = nfsd4_cb_recall_done,
4639 .release = nfsd4_cb_recall_release,
4642 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
4645 * We're assuming the state code never drops its reference
4646 * without first removing the lease. Since we're in this lease
4647 * callback (and since the lease code is serialized by the
4648 * i_lock) we know the server hasn't removed the lease yet, and
4649 * we know it's safe to take a reference.
4651 refcount_inc(&dp->dl_stid.sc_count);
4652 nfsd4_run_cb(&dp->dl_recall);
4655 /* Called from break_lease() with i_lock held. */
4657 nfsd_break_deleg_cb(struct file_lock *fl)
4660 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
4661 struct nfs4_file *fp = dp->dl_stid.sc_file;
4663 trace_nfsd_cb_recall(&dp->dl_stid);
4666 * We don't want the locks code to timeout the lease for us;
4667 * we'll remove it ourselves if a delegation isn't returned in time.
4670 fl->fl_break_time = 0;
4672 spin_lock(&fp->fi_lock);
4673 fp->fi_had_conflict = true;
4674 nfsd_break_one_deleg(dp);
4675 spin_unlock(&fp->fi_lock);
4679 static bool nfsd_breaker_owns_lease(struct file_lock *fl)
4681 struct nfs4_delegation *dl = fl->fl_owner;
4682 struct svc_rqst *rqst;
4683 struct nfs4_client *clp;
4687 rqst = kthread_data(current);
4688 /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
4689 if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
4691 clp = *(rqst->rq_lease_breaker);
4692 return dl->dl_stid.sc_client == clp;
4696 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
4697 struct list_head *dispose)
4700 return lease_modify(onlist, arg, dispose);
4705 static const struct lock_manager_operations nfsd_lease_mng_ops = {
4706 .lm_breaker_owns_lease = nfsd_breaker_owns_lease,
4707 .lm_break = nfsd_break_deleg_cb,
4708 .lm_change = nfsd_change_deleg_cb,
4711 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
4713 if (nfsd4_has_session(cstate))
4715 if (seqid == so->so_seqid - 1)
4716 return nfserr_replay_me;
4717 if (seqid == so->so_seqid)
4719 return nfserr_bad_seqid;
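/*
 * Editor's note: lookup_clientid() takes a cl_rpc_users reference on the
 * confirmed client under nn->client_lock; a caller that gets a non-NULL
 * result is responsible for dropping it again, e.g. via put_client_renew()
 * as the find_cpntf_state() caller below does.
 */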
4722 static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
4723 struct nfsd_net *nn)
4725 struct nfs4_client *found;
4727 spin_lock(&nn->client_lock);
4728 found = find_confirmed_client(clid, sessions, nn);
4730 atomic_inc(&found->cl_rpc_users);
4731 spin_unlock(&nn->client_lock);
4735 static __be32 set_client(clientid_t *clid,
4736 struct nfsd4_compound_state *cstate,
4737 struct nfsd_net *nn)
4740 if (!same_clid(&cstate->clp->cl_clientid, clid))
4741 return nfserr_stale_clientid;
4744 if (STALE_CLIENTID(clid, nn))
4745 return nfserr_stale_clientid;
4747 * We're in the 4.0 case (otherwise the SEQUENCE op would have
4748 * set cstate->clp), so session = false:
4750 cstate->clp = lookup_clientid(clid, false, nn);
4752 return nfserr_expired;
4757 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4758 struct nfsd4_open *open, struct nfsd_net *nn)
4760 clientid_t *clientid = &open->op_clientid;
4761 struct nfs4_client *clp = NULL;
4762 unsigned int strhashval;
4763 struct nfs4_openowner *oo = NULL;
4767 * In case we need it later, after we've already created the
4768 * file and don't want to risk a further failure:
4770 open->op_file = nfsd4_alloc_file();
4771 if (open->op_file == NULL)
4772 return nfserr_jukebox;
4774 status = set_client(clientid, cstate, nn);
4779 strhashval = ownerstr_hashval(&open->op_owner);
4780 oo = find_openstateowner_str(strhashval, open, clp);
4781 open->op_openowner = oo;
4785 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4786 /* Replace unconfirmed owners without checking for replay. */
4787 release_openowner(oo);
4788 open->op_openowner = NULL;
4791 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4796 oo = alloc_init_open_stateowner(strhashval, open, cstate);
4798 return nfserr_jukebox;
4799 open->op_openowner = oo;
4801 open->op_stp = nfs4_alloc_open_stateid(clp);
4803 return nfserr_jukebox;
4805 if (nfsd4_has_session(cstate) &&
4806 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4807 open->op_odstate = alloc_clnt_odstate(clp);
4808 if (!open->op_odstate)
4809 return nfserr_jukebox;
4815 static inline __be32
4816 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4818 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4819 return nfserr_openmode;
4824 static int share_access_to_flags(u32 share_access)
4826 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4829 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4831 struct nfs4_stid *ret;
4833 ret = find_stateid_by_type(cl, s,
4834 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4837 return delegstateid(ret);
4840 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4842 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4843 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4847 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4848 struct nfs4_delegation **dp)
4851 __be32 status = nfserr_bad_stateid;
4852 struct nfs4_delegation *deleg;
4854 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4857 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4858 nfs4_put_stid(&deleg->dl_stid);
4859 if (cl->cl_minorversion)
4860 status = nfserr_deleg_revoked;
4863 flags = share_access_to_flags(open->op_share_access);
4864 status = nfs4_check_delegmode(deleg, flags);
4866 nfs4_put_stid(&deleg->dl_stid);
4871 if (!nfsd4_is_deleg_cur(open))
4875 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4879 static inline int nfs4_access_to_access(u32 nfs4_access)
4883 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4884 flags |= NFSD_MAY_READ;
4885 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4886 flags |= NFSD_MAY_WRITE;
4890 static inline __be32
4891 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4892 struct nfsd4_open *open)
4894 struct iattr iattr = {
4895 .ia_valid = ATTR_SIZE,
4898 if (!open->op_truncate)
4900 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4901 return nfserr_inval;
4902 return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
4905 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4906 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4907 struct nfsd4_open *open)
4909 struct nfsd_file *nf = NULL;
4911 int oflag = nfs4_access_to_omode(open->op_share_access);
4912 int access = nfs4_access_to_access(open->op_share_access);
4913 unsigned char old_access_bmap, old_deny_bmap;
4915 spin_lock(&fp->fi_lock);
4918 * Are we trying to set a deny mode that would conflict with existing access?
4921 status = nfs4_file_check_deny(fp, open->op_share_deny);
4922 if (status != nfs_ok) {
4923 spin_unlock(&fp->fi_lock);
4927 /* set access to the file */
4928 status = nfs4_file_get_access(fp, open->op_share_access);
4929 if (status != nfs_ok) {
4930 spin_unlock(&fp->fi_lock);
4934 /* Set access bits in stateid */
4935 old_access_bmap = stp->st_access_bmap;
4936 set_access(open->op_share_access, stp);
4938 /* Set new deny mask */
4939 old_deny_bmap = stp->st_deny_bmap;
4940 set_deny(open->op_share_deny, stp);
4941 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4943 if (!fp->fi_fds[oflag]) {
4944 spin_unlock(&fp->fi_lock);
4945 status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
4947 goto out_put_access;
4948 spin_lock(&fp->fi_lock);
4949 if (!fp->fi_fds[oflag]) {
4950 fp->fi_fds[oflag] = nf;
4954 spin_unlock(&fp->fi_lock);
4958 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
4961 goto out_put_access;
4963 status = nfsd4_truncate(rqstp, cur_fh, open);
4965 goto out_put_access;
4969 stp->st_access_bmap = old_access_bmap;
4970 nfs4_file_put_access(fp, open->op_share_access);
4971 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4976 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4979 unsigned char old_deny_bmap = stp->st_deny_bmap;
4981 if (!test_access(open->op_share_access, stp))
4982 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4984 /* test and set deny mode */
4985 spin_lock(&fp->fi_lock);
4986 status = nfs4_file_check_deny(fp, open->op_share_deny);
4987 if (status == nfs_ok) {
4988 set_deny(open->op_share_deny, stp);
4989 fp->fi_share_deny |=
4990 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4992 spin_unlock(&fp->fi_lock);
4994 if (status != nfs_ok)
4997 status = nfsd4_truncate(rqstp, cur_fh, open);
4998 if (status != nfs_ok)
4999 reset_union_bmap_deny(old_deny_bmap, stp);
5003 /* Should we give out recallable state?: */
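/*
 * Editor's note: "good" means the backchannel is confirmed UP, or, for
 * NFSv4.1+ sessions (where callbacks share the client's connection), still
 * UNKNOWN, i.e. not yet proven broken.
 */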
5004 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
5006 if (clp->cl_cb_state == NFSD4_CB_UP)
5009 * In the sessions case, since we don't have to establish a
5010 * separate connection for callbacks, we assume it's OK
5011 * until we hear otherwise:
5013 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
5016 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
5019 struct file_lock *fl;
5021 fl = locks_alloc_lock();
5024 fl->fl_lmops = &nfsd_lease_mng_ops;
5025 fl->fl_flags = FL_DELEG;
5026 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
5027 fl->fl_end = OFFSET_MAX;
5028 fl->fl_owner = (fl_owner_t)dp;
5029 fl->fl_pid = current->tgid;
5030 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
5034 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
5035 struct nfs4_file *fp)
5037 struct nfs4_ol_stateid *st;
5038 struct file *f = fp->fi_deleg_file->nf_file;
5039 struct inode *ino = locks_inode(f);
5042 writes = atomic_read(&ino->i_writecount);
5046 * There could be multiple filehandles (hence multiple
5047 * nfs4_files) referencing this file, but that's not too
5048 * common; let's just give up in that case rather than
5049 * trying to go look up all the clients using that other
5050 * nfs4_file as well:
5055 * If there's a close in progress, make sure that we see it
5056 * clear any fi_fds[] entries before we see it decrement i_writecount:
5059 smp_mb__after_atomic();
5061 if (fp->fi_fds[O_WRONLY])
5063 if (fp->fi_fds[O_RDWR])
5066 return -EAGAIN; /* There may be non-NFSv4 writers */
5068 * It's possible there are non-NFSv4 write opens in progress,
5069 * but if they haven't incremented i_writecount yet then they
5070 * also haven't called break lease yet; so, they'll break this
5071 * lease soon enough. So, all that's left to check for is NFSv4 opens:
5074 spin_lock(&fp->fi_lock);
5075 list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
5076 if (st->st_openstp == NULL /* it's an open */ &&
5077 access_permit_write(st) &&
5078 st->st_stid.sc_client != clp) {
5079 spin_unlock(&fp->fi_lock);
5083 spin_unlock(&fp->fi_lock);
5085 * There's a small chance that we could be racing with another
5086 * NFSv4 open. However, any open that hasn't added itself to
5087 * the fi_stateids list also hasn't called break_lease yet; so,
5088 * they'll break this lease soon enough.
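/*
 * Editor's summary of the delegation setup below (not authoritative): the
 * lease is installed with vfs_setlease() first, conflicting opens are then
 * re-checked, and only after that is the delegation hashed under state_lock
 * and fi_lock with fi_had_conflict re-tested, so a lease break that races
 * with setup is still caught before the delegation becomes visible.
 */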
5093 static struct nfs4_delegation *
5094 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
5095 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
5098 struct nfs4_delegation *dp;
5099 struct nfsd_file *nf;
5100 struct file_lock *fl;
5103 * The fi_had_conflict and nfs_get_existing_delegation checks
5104 * here are just optimizations; we'll need to recheck them at hash time.
5107 if (fp->fi_had_conflict)
5108 return ERR_PTR(-EAGAIN);
5110 nf = find_readable_file(fp);
5113 * We probably could attempt another open and get a read
5114 * delegation, but for now, don't bother until the
5115 * client actually sends us one.
5117 return ERR_PTR(-EAGAIN);
5119 spin_lock(&state_lock);
5120 spin_lock(&fp->fi_lock);
5121 if (nfs4_delegation_exists(clp, fp))
5123 else if (!fp->fi_deleg_file) {
5124 fp->fi_deleg_file = nf;
5125 /* increment early to prevent fi_deleg_file from being cleared */
5127 fp->fi_delegees = 1;
5131 spin_unlock(&fp->fi_lock);
5132 spin_unlock(&state_lock);
5136 return ERR_PTR(status);
5139 dp = alloc_init_deleg(clp, fp, fh, odstate);
5143 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
5145 goto out_clnt_odstate;
5147 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
5149 locks_free_lock(fl);
5151 goto out_clnt_odstate;
5152 status = nfsd4_check_conflicting_opens(clp, fp);
5156 spin_lock(&state_lock);
5157 spin_lock(&fp->fi_lock);
5158 if (fp->fi_had_conflict)
5161 status = hash_delegation_locked(dp, fp);
5162 spin_unlock(&fp->fi_lock);
5163 spin_unlock(&state_lock);
5170 vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5172 put_clnt_odstate(dp->dl_clnt_odstate);
5173 nfs4_put_stid(&dp->dl_stid);
5176 return ERR_PTR(status);
5179 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5181 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5182 if (status == -EAGAIN)
5183 open->op_why_no_deleg = WND4_CONTENTION;
5185 open->op_why_no_deleg = WND4_RESOURCE;
5186 switch (open->op_deleg_want) {
5187 case NFS4_SHARE_WANT_READ_DELEG:
5188 case NFS4_SHARE_WANT_WRITE_DELEG:
5189 case NFS4_SHARE_WANT_ANY_DELEG:
5191 case NFS4_SHARE_WANT_CANCEL:
5192 open->op_why_no_deleg = WND4_CANCELLED;
5194 case NFS4_SHARE_WANT_NO_DELEG:
5201 * Attempt to hand out a delegation.
5203 * Note we don't support write delegations, and won't until the vfs has
5204 * proper support for them.
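 *
 * Editor's note: in practice only read delegations are offered. For
 * CLAIM_NULL/CLAIM_FH opens that also requires a usable callback channel,
 * a confirmed open owner, and the grace period to be over; a CLAIM_PREVIOUS
 * reclaim without a callback channel is granted but flagged for immediate
 * recall via op_recall.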
5207 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
5208 struct nfs4_ol_stateid *stp)
5210 struct nfs4_delegation *dp;
5211 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
5212 struct nfs4_client *clp = stp->st_stid.sc_client;
5216 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
5217 open->op_recall = 0;
5218 switch (open->op_claim_type) {
5219 case NFS4_OPEN_CLAIM_PREVIOUS:
5221 open->op_recall = 1;
5222 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
5225 case NFS4_OPEN_CLAIM_NULL:
5226 case NFS4_OPEN_CLAIM_FH:
5228 * Let's not give out any delegations till everyone's
5229 * had the chance to reclaim theirs, *and* until
5230 * NLM locks have all been reclaimed:
5232 if (locks_in_grace(clp->net))
5234 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
5240 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
5244 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
5246 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
5247 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
5248 nfs4_put_stid(&dp->dl_stid);
5251 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
5252 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
5253 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
5254 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
5255 open->op_recall = 1;
5258 /* 4.1 client asking for a delegation? */
5259 if (open->op_deleg_want)
5260 nfsd4_open_deleg_none_ext(open, status);
5264 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
5265 struct nfs4_delegation *dp)
5267 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
5268 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5269 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5270 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
5271 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
5272 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5273 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5274 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
5276 /* Otherwise the client must be confused wanting a delegation
5277 * it already has, therefore we don't return
5278 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
5283 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
5285 struct nfsd4_compoundres *resp = rqstp->rq_resp;
5286 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
5287 struct nfs4_file *fp = NULL;
5288 struct nfs4_ol_stateid *stp = NULL;
5289 struct nfs4_delegation *dp = NULL;
5291 bool new_stp = false;
5294 * Lookup file; if found, lookup stateid and check open request,
5295 * and check for delegations in the process of being recalled.
5296 * If not found, create the nfs4_file struct
5298 fp = find_or_add_file(open->op_file, current_fh);
5299 if (fp != open->op_file) {
5300 status = nfs4_check_deleg(cl, open, &dp);
5303 stp = nfsd4_find_and_lock_existing_open(fp, open);
5305 open->op_file = NULL;
5306 status = nfserr_bad_stateid;
5307 if (nfsd4_is_deleg_cur(open))
5312 stp = init_open_stateid(fp, open);
5318 * OPEN the file, or upgrade an existing OPEN.
5319 * If truncate fails, the OPEN fails.
5321 * stp is already locked.
5324 /* Stateid was found, this is an OPEN upgrade */
5325 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5327 mutex_unlock(&stp->st_mutex);
5331 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
5333 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5334 release_open_stateid(stp);
5335 mutex_unlock(&stp->st_mutex);
5339 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
5341 if (stp->st_clnt_odstate == open->op_odstate)
5342 open->op_odstate = NULL;
5345 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5346 mutex_unlock(&stp->st_mutex);
5348 if (nfsd4_has_session(&resp->cstate)) {
5349 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5350 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5351 open->op_why_no_deleg = WND4_NOT_WANTED;
5357 * Attempt to hand out a delegation. No error return, because the
5358 * OPEN succeeds even if we fail.
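/*
 * Editor's note: a refused or failed delegation is reported to NFSv4.1+
 * clients only through op_delegate_type and op_why_no_deleg.
 */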
5360 nfs4_open_delegation(current_fh, open, stp);
5363 trace_nfsd_open(&stp->st_stid.sc_stateid);
5365 /* 4.1 client trying to upgrade/downgrade delegation? */
5366 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
5367 open->op_deleg_want)
5368 nfsd4_deleg_xgrade_none_ext(open, dp);
5372 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
5373 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5375 * To finish the open response, we just need to set the rflags.
5377 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
5378 if (nfsd4_has_session(&resp->cstate))
5379 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
5380 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
5381 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
5384 nfs4_put_stid(&dp->dl_stid);
5386 nfs4_put_stid(&stp->st_stid);
5391 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5392 struct nfsd4_open *open)
5394 if (open->op_openowner) {
5395 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5397 nfsd4_cstate_assign_replay(cstate, so);
5398 nfs4_put_stateowner(so);
5401 kmem_cache_free(file_slab, open->op_file);
5403 nfs4_put_stid(&open->op_stp->st_stid);
5404 if (open->op_odstate)
5405 kmem_cache_free(odstate_slab, open->op_odstate);
5409 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5410 union nfsd4_op_u *u)
5412 clientid_t *clid = &u->renew;
5413 struct nfs4_client *clp;
5415 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5417 trace_nfsd_clid_renew(clid);
5418 status = set_client(clid, cstate, nn);
5422 if (!list_empty(&clp->cl_delegations)
5423 && clp->cl_cb_state != NFSD4_CB_UP)
5424 return nfserr_cb_path_down;
5429 nfsd4_end_grace(struct nfsd_net *nn)
5431 /* do nothing if grace period already ended */
5432 if (nn->grace_ended)
5435 trace_nfsd_grace_complete(nn);
5436 nn->grace_ended = true;
5438 * If the server goes down again right now, an NFSv4
5439 * client will still be allowed to reclaim after it comes back up,
5440 * even if it hasn't yet had a chance to reclaim state this time.
5443 nfsd4_record_grace_done(nn);
5445 * At this point, NFSv4 clients can still reclaim. But if the
5446 * server crashes, any that have not yet reclaimed will be out
5447 * of luck on the next boot.
5449 * (NFSv4.1+ clients are considered to have reclaimed once they
5450 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
5451 * have reclaimed after their first OPEN.)
5453 locks_end_grace(&nn->nfsd4_manager);
5455 * At this point, and once lockd and/or any other containers
5456 * exit their grace period, further reclaims will fail and
5457 * regular locking can resume.
5462 * If we've waited a lease period but there are still clients trying to
5463 * reclaim, wait a little longer to give them a chance to finish.
5465 static bool clients_still_reclaiming(struct nfsd_net *nn)
5467 time64_t double_grace_period_end = nn->boot_time +
5468 2 * nn->nfsd4_lease;
5470 if (nn->track_reclaim_completes &&
5471 atomic_read(&nn->nr_reclaim_complete) ==
5472 nn->reclaim_str_hashtbl_size)
5474 if (!nn->somebody_reclaimed)
5476 nn->somebody_reclaimed = false;
5478 * If we've given them *two* lease times to reclaim, and they're
5479 * still not done, give up:
5481 if (ktime_get_boottime_seconds() > double_grace_period_end)
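/*
 * Editor's note: laundry_time carries the laundromat's expiry cutoff and
 * accumulates, in new_timeo, the shortest remaining lifetime seen; that
 * value becomes the delay before the next laundromat run.
 */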
struct laundry_time {
	time64_t cutoff;
	time64_t new_timeo;
};
5491 static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
5493 time64_t time_remaining;
5495 if (last_refresh < lt->cutoff)
5497 time_remaining = last_refresh - lt->cutoff;
5498 lt->new_timeo = min(lt->new_timeo, time_remaining);
5502 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
5503 void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
5505 spin_lock_init(&nn->nfsd_ssc_lock);
5506 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
5507 init_waitqueue_head(&nn->nfsd_ssc_waitq);
5509 EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
5512 * This is called when nfsd is being shut down, after all inter_ssc
5513 * cleanup is done, to destroy the ssc delayed unmount list.
5515 static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
5517 struct nfsd4_ssc_umount_item *ni = NULL;
5518 struct nfsd4_ssc_umount_item *tmp;
5520 spin_lock(&nn->nfsd_ssc_lock);
5521 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5522 list_del(&ni->nsui_list);
5523 spin_unlock(&nn->nfsd_ssc_lock);
5524 mntput(ni->nsui_vfsmount);
5526 spin_lock(&nn->nfsd_ssc_lock);
5528 spin_unlock(&nn->nfsd_ssc_lock);
5531 static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
5533 bool do_wakeup = false;
5534 struct nfsd4_ssc_umount_item *ni = NULL;
5535 struct nfsd4_ssc_umount_item *tmp;
5537 spin_lock(&nn->nfsd_ssc_lock);
5538 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5539 if (time_after(jiffies, ni->nsui_expire)) {
5540 if (refcount_read(&ni->nsui_refcnt) > 1)
5543 /* mark as being unmounted */
5544 ni->nsui_busy = true;
5545 spin_unlock(&nn->nfsd_ssc_lock);
5546 mntput(ni->nsui_vfsmount);
5547 spin_lock(&nn->nfsd_ssc_lock);
5549 /* waiters need to start from the beginning of the list */
5550 list_del(&ni->nsui_list);
5553 /* wakeup ssc_connect waiters */
5560 wake_up_all(&nn->nfsd_ssc_waitq);
5561 spin_unlock(&nn->nfsd_ssc_lock);
5566 nfs4_laundromat(struct nfsd_net *nn)
5568 struct nfs4_client *clp;
5569 struct nfs4_openowner *oo;
5570 struct nfs4_delegation *dp;
5571 struct nfs4_ol_stateid *stp;
5572 struct nfsd4_blocked_lock *nbl;
5573 struct list_head *pos, *next, reaplist;
5574 struct laundry_time lt = {
5575 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
5576 .new_timeo = nn->nfsd4_lease
5578 struct nfs4_cpntf_state *cps;
5579 copy_stateid_t *cps_t;
5582 if (clients_still_reclaiming(nn)) {
5586 nfsd4_end_grace(nn);
5587 INIT_LIST_HEAD(&reaplist);
5589 spin_lock(&nn->s2s_cp_lock);
5590 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
5591 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
5592 if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
5593 state_expired(<, cps->cpntf_time))
5594 _free_cpntf_state_locked(nn, cps);
5596 spin_unlock(&nn->s2s_cp_lock);
5598 spin_lock(&nn->client_lock);
5599 list_for_each_safe(pos, next, &nn->client_lru) {
5600 clp = list_entry(pos, struct nfs4_client, cl_lru);
5601 if (!state_expired(<, clp->cl_time))
5603 if (mark_client_expired_locked(clp))
5605 list_add(&clp->cl_lru, &reaplist);
5607 spin_unlock(&nn->client_lock);
5608 list_for_each_safe(pos, next, &reaplist) {
5609 clp = list_entry(pos, struct nfs4_client, cl_lru);
5610 trace_nfsd_clid_purged(&clp->cl_clientid);
5611 list_del_init(&clp->cl_lru);
5614 spin_lock(&state_lock);
5615 list_for_each_safe(pos, next, &nn->del_recall_lru) {
5616 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
5617 if (!state_expired(<, dp->dl_time))
5619 WARN_ON(!unhash_delegation_locked(dp));
5620 list_add(&dp->dl_recall_lru, &reaplist);
5622 spin_unlock(&state_lock);
5623 while (!list_empty(&reaplist)) {
5624 dp = list_first_entry(&reaplist, struct nfs4_delegation,
5626 list_del_init(&dp->dl_recall_lru);
5627 revoke_delegation(dp);
5630 spin_lock(&nn->client_lock);
5631 while (!list_empty(&nn->close_lru)) {
5632 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
5634 if (!state_expired(<, oo->oo_time))
5636 list_del_init(&oo->oo_close_lru);
5637 stp = oo->oo_last_closed_stid;
5638 oo->oo_last_closed_stid = NULL;
5639 spin_unlock(&nn->client_lock);
5640 nfs4_put_stid(&stp->st_stid);
5641 spin_lock(&nn->client_lock);
5643 spin_unlock(&nn->client_lock);
5646 * It's possible for a client to try and acquire an already held lock
5647 * that is being held for a long time, and then lose interest in it.
5648 * So, we clean out any un-revisited request after a lease period
5649 * under the assumption that the client is no longer interested.
5651 * RFC5661, sec. 9.6 states that the client must not rely on getting
5652 * notifications and must continue to poll for locks, even when the
5653 * server supports them. Thus this shouldn't lead to clients blocking
5654 * indefinitely once the lock does become free.
5656 BUG_ON(!list_empty(&reaplist));
5657 spin_lock(&nn->blocked_locks_lock);
5658 while (!list_empty(&nn->blocked_locks_lru)) {
5659 nbl = list_first_entry(&nn->blocked_locks_lru,
5660 struct nfsd4_blocked_lock, nbl_lru);
5661 if (!state_expired(<, nbl->nbl_time))
5663 list_move(&nbl->nbl_lru, &reaplist);
5664 list_del_init(&nbl->nbl_list);
5666 spin_unlock(&nn->blocked_locks_lock);
5668 while (!list_empty(&reaplist)) {
5669 nbl = list_first_entry(&reaplist,
5670 struct nfsd4_blocked_lock, nbl_lru);
5671 list_del_init(&nbl->nbl_lru);
5672 free_blocked_lock(nbl);
5674 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
5675 /* service the server-to-server copy delayed unmount list */
5676 nfsd4_ssc_expire_umount(nn);
5679 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
5682 static struct workqueue_struct *laundry_wq;
5683 static void laundromat_main(struct work_struct *);
5686 laundromat_main(struct work_struct *laundry)
5689 struct delayed_work *dwork = to_delayed_work(laundry);
5690 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
5693 t = nfs4_laundromat(nn);
5694 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
5697 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
5699 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
5700 return nfserr_bad_stateid;
5705 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
5707 __be32 status = nfserr_openmode;
5709 /* For lock stateids, we test the parent open, not the lock: */
5710 if (stp->st_openstp)
5711 stp = stp->st_openstp;
5712 if ((flags & WR_STATE) && !access_permit_write(stp))
5714 if ((flags & RD_STATE) && !access_permit_read(stp))
5721 static inline __be32
5722 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
5724 if (ONE_STATEID(stateid) && (flags & RD_STATE))
5726 else if (opens_in_grace(net)) {
5727 /* Answer in remaining cases depends on existence of
5728 * conflicting state; so we must wait out the grace period. */
5729 return nfserr_grace;
5730 } else if (flags & WR_STATE)
5731 return nfs4_share_conflict(current_fh,
5732 NFS4_SHARE_DENY_WRITE);
5733 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
5734 return nfs4_share_conflict(current_fh,
5735 NFS4_SHARE_DENY_READ);
5738 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
5741 * When sessions are used the stateid generation number is ignored when it is zero.
5744 if (has_session && in->si_generation == 0)
5747 if (in->si_generation == ref->si_generation)
5750 /* If the client sends us a stateid from the future, it's buggy: */
5751 if (nfsd4_stateid_generation_after(in, ref))
5752 return nfserr_bad_stateid;
5754 * However, we could see a stateid from the past, even from a
5755 * non-buggy client. For example, if the client sends a lock
5756 * while some IO is outstanding, the lock may bump si_generation
5757 * while the IO is still in flight. The client could avoid that
5758 * situation by waiting for responses on all the IO requests,
5759 * but better performance may result in retrying IO that
5760 * receives an old_stateid error if requests are rarely
5761 * reordered in flight:
5763 return nfserr_old_stateid;
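/*
 * Editor's note: the helper below performs the same generation check, but
 * under the stid's sc_lock and only after nfsd4_verify_open_stid() has
 * vetted the stateid's type (e.g. not already closed or revoked).
 */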
5766 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
5770 spin_lock(&s->sc_lock);
5771 ret = nfsd4_verify_open_stid(s);
5773 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
5774 spin_unlock(&s->sc_lock);
5778 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
5780 if (ols->st_stateowner->so_is_open_owner &&
5781 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
5782 return nfserr_bad_stateid;
5786 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
5788 struct nfs4_stid *s;
5789 __be32 status = nfserr_bad_stateid;
5791 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5792 CLOSE_STATEID(stateid))
5794 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
5796 spin_lock(&cl->cl_lock);
5797 s = find_stateid_locked(cl, stateid);
5800 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
5803 switch (s->sc_type) {
5804 case NFS4_DELEG_STID:
5807 case NFS4_REVOKED_DELEG_STID:
5808 status = nfserr_deleg_revoked;
5810 case NFS4_OPEN_STID:
5811 case NFS4_LOCK_STID:
5812 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
5815 printk("unknown stateid type %x\n", s->sc_type);
5817 case NFS4_CLOSED_STID:
5818 case NFS4_CLOSED_DELEG_STID:
5819 status = nfserr_bad_stateid;
5822 spin_unlock(&cl->cl_lock);
5827 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
5828 stateid_t *stateid, unsigned char typemask,
5829 struct nfs4_stid **s, struct nfsd_net *nn)
5832 bool return_revoked = false;
5835 * only return revoked delegations if explicitly asked.
5836 * otherwise we report revoked or bad_stateid status.
5838 if (typemask & NFS4_REVOKED_DELEG_STID)
5839 return_revoked = true;
5840 else if (typemask & NFS4_DELEG_STID)
5841 typemask |= NFS4_REVOKED_DELEG_STID;
5843 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5844 CLOSE_STATEID(stateid))
5845 return nfserr_bad_stateid;
5846 status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
5847 if (status == nfserr_stale_clientid) {
5848 if (cstate->session)
5849 return nfserr_bad_stateid;
5850 return nfserr_stale_stateid;
5854 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
5856 return nfserr_bad_stateid;
5857 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5859 if (cstate->minorversion)
5860 return nfserr_deleg_revoked;
5861 return nfserr_bad_stateid;
5866 static struct nfsd_file *
5867 nfs4_find_file(struct nfs4_stid *s, int flags)
5872 switch (s->sc_type) {
5873 case NFS4_DELEG_STID:
5874 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5876 return nfsd_file_get(s->sc_file->fi_deleg_file);
5877 case NFS4_OPEN_STID:
5878 case NFS4_LOCK_STID:
5879 if (flags & RD_STATE)
5880 return find_readable_file(s->sc_file);
5882 return find_writeable_file(s->sc_file);
5889 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
5893 status = nfsd4_check_openowner_confirmed(ols);
5896 return nfs4_check_openmode(ols, flags);
5900 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5901 struct nfsd_file **nfp, int flags)
5903 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5904 struct nfsd_file *nf;
5907 nf = nfs4_find_file(s, flags);
5909 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5910 acc | NFSD_MAY_OWNER_OVERRIDE);
5916 status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
5925 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
5927 WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
5928 if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
5930 list_del(&cps->cp_list);
5931 idr_remove(&nn->s2s_cp_stateids,
5932 cps->cp_stateid.stid.si_opaque.so_id);
5936 * A READ from an inter server to server COPY will have a
5937 * copy stateid. Look up the copy notify stateid from the
5938 * idr structure and take a reference on it.
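 *
 * Editor's note: a stateid whose clientid is not the server's own
 * copy-notify clientid is rejected with nfserr_bad_stateid; when called
 * without a client pointer, a reference is taken on the copy-notify state
 * and returned through *cps.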
5940 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
5941 struct nfs4_client *clp,
5942 struct nfs4_cpntf_state **cps)
5944 copy_stateid_t *cps_t;
5945 struct nfs4_cpntf_state *state = NULL;
5947 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
5948 return nfserr_bad_stateid;
5949 spin_lock(&nn->s2s_cp_lock);
5950 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
5952 state = container_of(cps_t, struct nfs4_cpntf_state,
5954 if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
5959 refcount_inc(&state->cp_stateid.sc_count);
5961 _free_cpntf_state_locked(nn, state);
5964 spin_unlock(&nn->s2s_cp_lock);
5966 return nfserr_bad_stateid;
5972 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
5973 struct nfs4_stid **stid)
5976 struct nfs4_cpntf_state *cps = NULL;
5977 struct nfs4_client *found;
5979 status = manage_cpntf_state(nn, st, NULL, &cps);
5983 cps->cpntf_time = ktime_get_boottime_seconds();
5985 status = nfserr_expired;
5986 found = lookup_clientid(&cps->cp_p_clid, true, nn);
5990 *stid = find_stateid_by_type(found, &cps->cp_p_stateid,
5991 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
5995 status = nfserr_bad_stateid;
5997 put_client_renew(found);
5999 nfs4_put_cpntf_state(nn, cps);
6003 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6005 spin_lock(&nn->s2s_cp_lock);
6006 _free_cpntf_state_locked(nn, cps);
6007 spin_unlock(&nn->s2s_cp_lock);
6011 * Checks for stateid operations
6014 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
6015 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
6016 stateid_t *stateid, int flags, struct nfsd_file **nfp,
6017 struct nfs4_stid **cstid)
6019 struct net *net = SVC_NET(rqstp);
6020 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6021 struct nfs4_stid *s = NULL;
6027 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
6028 status = check_special_stateids(net, fhp, stateid, flags);
6032 status = nfsd4_lookup_stateid(cstate, stateid,
6033 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
6035 if (status == nfserr_bad_stateid)
6036 status = find_cpntf_state(nn, stateid, &s);
6039 status = nfsd4_stid_check_stateid_generation(stateid, s,
6040 nfsd4_has_session(cstate));
6044 switch (s->sc_type) {
6045 case NFS4_DELEG_STID:
6046 status = nfs4_check_delegmode(delegstateid(s), flags);
6048 case NFS4_OPEN_STID:
6049 case NFS4_LOCK_STID:
6050 status = nfs4_check_olstateid(openlockstateid(s), flags);
6053 status = nfserr_bad_stateid;
6058 status = nfs4_check_fh(fhp, s);
6061 if (status == nfs_ok && nfp)
6062 status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
6065 if (!status && cstid)
6074 * Test if the stateid is valid
6077 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6078 union nfsd4_op_u *u)
6080 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
6081 struct nfsd4_test_stateid_id *stateid;
6082 struct nfs4_client *cl = cstate->clp;
6084 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
6085 stateid->ts_id_status =
6086 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
6092 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
6094 struct nfs4_ol_stateid *stp = openlockstateid(s);
6097 ret = nfsd4_lock_ol_stateid(stp);
6101 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6105 ret = nfserr_locks_held;
6106 if (check_for_locks(stp->st_stid.sc_file,
6107 lockowner(stp->st_stateowner)))
6110 release_lock_stateid(stp);
6114 mutex_unlock(&stp->st_mutex);
6121 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6122 union nfsd4_op_u *u)
6124 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
6125 stateid_t *stateid = &free_stateid->fr_stateid;
6126 struct nfs4_stid *s;
6127 struct nfs4_delegation *dp;
6128 struct nfs4_client *cl = cstate->clp;
6129 __be32 ret = nfserr_bad_stateid;
6131 spin_lock(&cl->cl_lock);
6132 s = find_stateid_locked(cl, stateid);
6135 spin_lock(&s->sc_lock);
6136 switch (s->sc_type) {
6137 case NFS4_DELEG_STID:
6138 ret = nfserr_locks_held;
6140 case NFS4_OPEN_STID:
6141 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6144 ret = nfserr_locks_held;
6146 case NFS4_LOCK_STID:
6147 spin_unlock(&s->sc_lock);
6148 refcount_inc(&s->sc_count);
6149 spin_unlock(&cl->cl_lock);
6150 ret = nfsd4_free_lock_stateid(stateid, s);
6152 case NFS4_REVOKED_DELEG_STID:
6153 spin_unlock(&s->sc_lock);
6154 dp = delegstateid(s);
6155 list_del_init(&dp->dl_recall_lru);
6156 spin_unlock(&cl->cl_lock);
6160 /* Default falls through and returns nfserr_bad_stateid */
6162 spin_unlock(&s->sc_lock);
6164 spin_unlock(&cl->cl_lock);
static inline int
setlkflg(int type)
{
	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
		RD_STATE : WR_STATE;
}
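/*
 * Editor's note: nfs4_seqid_op_checks() bundles the checks every
 * seqid-mutating operation needs: owner seqid (NFSv4.0 replay detection),
 * taking st_mutex, stateid generation, and a match against the current
 * filehandle; on failure after the mutex is taken, it is dropped again.
 */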
6176 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
6178 struct svc_fh *current_fh = &cstate->current_fh;
6179 struct nfs4_stateowner *sop = stp->st_stateowner;
6182 status = nfsd4_check_seqid(cstate, sop, seqid);
6185 status = nfsd4_lock_ol_stateid(stp);
6186 if (status != nfs_ok)
6188 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
6189 if (status == nfs_ok)
6190 status = nfs4_check_fh(current_fh, &stp->st_stid);
6191 if (status != nfs_ok)
6192 mutex_unlock(&stp->st_mutex);
6197 * Checks for sequence id mutating operations.
6200 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6201 stateid_t *stateid, char typemask,
6202 struct nfs4_ol_stateid **stpp,
6203 struct nfsd_net *nn)
6206 struct nfs4_stid *s;
6207 struct nfs4_ol_stateid *stp = NULL;
6209 trace_nfsd_preprocess(seqid, stateid);
6212 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
6215 stp = openlockstateid(s);
6216 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
6218 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
6222 nfs4_put_stid(&stp->st_stid);
6226 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6227 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
6230 struct nfs4_openowner *oo;
6231 struct nfs4_ol_stateid *stp;
6233 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
6234 NFS4_OPEN_STID, &stp, nn);
6237 oo = openowner(stp->st_stateowner);
6238 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
6239 mutex_unlock(&stp->st_mutex);
6240 nfs4_put_stid(&stp->st_stid);
6241 return nfserr_bad_stateid;
6248 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6249 union nfsd4_op_u *u)
6251 struct nfsd4_open_confirm *oc = &u->open_confirm;
6253 struct nfs4_openowner *oo;
6254 struct nfs4_ol_stateid *stp;
6255 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6257 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
6258 cstate->current_fh.fh_dentry);
6260 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
6264 status = nfs4_preprocess_seqid_op(cstate,
6265 oc->oc_seqid, &oc->oc_req_stateid,
6266 NFS4_OPEN_STID, &stp, nn);
6269 oo = openowner(stp->st_stateowner);
6270 status = nfserr_bad_stateid;
6271 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
6272 mutex_unlock(&stp->st_mutex);
6275 oo->oo_flags |= NFS4_OO_CONFIRMED;
6276 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
6277 mutex_unlock(&stp->st_mutex);
6278 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
6279 nfsd4_client_record_create(oo->oo_owner.so_client);
6282 nfs4_put_stid(&stp->st_stid);
6284 nfsd4_bump_seqid(cstate, status);
6288 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
6290 if (!test_access(access, stp))
6292 nfs4_file_put_access(stp->st_stid.sc_file, access);
6293 clear_access(access, stp);
6296 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
6298 switch (to_access) {
6299 case NFS4_SHARE_ACCESS_READ:
6300 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
6301 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6303 case NFS4_SHARE_ACCESS_WRITE:
6304 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
6305 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6307 case NFS4_SHARE_ACCESS_BOTH:
6315 nfsd4_open_downgrade(struct svc_rqst *rqstp,
6316 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
6318 struct nfsd4_open_downgrade *od = &u->open_downgrade;
6320 struct nfs4_ol_stateid *stp;
6321 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6323 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
6324 cstate->current_fh.fh_dentry);
6326 /* We don't yet support WANT bits: */
6327 if (od->od_deleg_want)
6328 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
6331 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
6332 &od->od_stateid, &stp, nn);
6335 status = nfserr_inval;
6336 if (!test_access(od->od_share_access, stp)) {
6337 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
6338 stp->st_access_bmap, od->od_share_access);
6341 if (!test_deny(od->od_share_deny, stp)) {
6342 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
6343 stp->st_deny_bmap, od->od_share_deny);
6346 nfs4_stateid_downgrade(stp, od->od_share_access);
6347 reset_union_bmap_deny(od->od_share_deny, stp);
6348 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
6351 mutex_unlock(&stp->st_mutex);
6352 nfs4_put_stid(&stp->st_stid);
6354 nfsd4_bump_seqid(cstate, status);
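/*
 * Editor's note: for NFSv4.1+ clients the closed stateid can be freed
 * immediately; for NFSv4.0 it is parked on the close LRU (see
 * move_to_close_lru()) so a retransmitted CLOSE can still be replayed.
 */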
6358 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
6360 struct nfs4_client *clp = s->st_stid.sc_client;
6362 LIST_HEAD(reaplist);
6364 spin_lock(&clp->cl_lock);
6365 unhashed = unhash_open_stateid(s, &reaplist);
6367 if (clp->cl_minorversion) {
6369 put_ol_stateid_locked(s, &reaplist);
6370 spin_unlock(&clp->cl_lock);
6371 free_ol_stateid_reaplist(&reaplist);
6373 spin_unlock(&clp->cl_lock);
6374 free_ol_stateid_reaplist(&reaplist);
6376 move_to_close_lru(s, clp->net);
6381 * nfs4_unlock_state() called after encode
6384 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6385 union nfsd4_op_u *u)
6387 struct nfsd4_close *close = &u->close;
6389 struct nfs4_ol_stateid *stp;
6390 struct net *net = SVC_NET(rqstp);
6391 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6393 dprintk("NFSD: nfsd4_close on file %pd\n",
6394 cstate->current_fh.fh_dentry);
6396 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
6398 NFS4_OPEN_STID|NFS4_CLOSED_STID,
6400 nfsd4_bump_seqid(cstate, status);
6404 stp->st_stid.sc_type = NFS4_CLOSED_STID;
6407 * Technically we don't _really_ have to increment or copy it, since
6408 * it should just be gone after this operation and we clobber the
6409 * copied value below, but we continue to do so here just to ensure
6410 * that racing ops see that there was a state change.
6412 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
6414 nfsd4_close_open_stateid(stp);
6415 mutex_unlock(&stp->st_mutex);
6417 /* v4.1+ suggests that we send a special stateid in here, since the
6418 * clients should just ignore this anyway. Since this is not useful
6419 * for v4.0 clients either, we set it to the special close_stateid universally.
6422 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
6424 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
6426 /* put reference from nfs4_preprocess_seqid_op */
6427 nfs4_put_stid(&stp->st_stid);
6433 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6434 union nfsd4_op_u *u)
6436 struct nfsd4_delegreturn *dr = &u->delegreturn;
6437 struct nfs4_delegation *dp;
6438 stateid_t *stateid = &dr->dr_stateid;
6439 struct nfs4_stid *s;
6441 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6443 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6446 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
6449 dp = delegstateid(s);
6450 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
6454 destroy_delegation(dp);
6456 nfs4_put_stid(&dp->dl_stid);
6461 /* last octet in a range */
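/*
 * Editor's note: for example start=0, len=10 covers bytes 0..9, so the last
 * octet is start + len - 1; a length that wraps past the end of the u64
 * range is treated as "to end of file" and reported as NFS4_MAX_UINT64.
 */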
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}
6473 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
6474 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
6475 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
6476 * locking, this prevents us from being completely protocol-compliant. The
6477 * real solution to this problem is to start using unsigned file offsets in
6478 * the VFS, but this is a very deep change!
6481 nfs4_transform_lock_offset(struct file_lock *lock)
6483 if (lock->fl_start < 0)
6484 lock->fl_start = OFFSET_MAX;
6485 if (lock->fl_end < 0)
6486 lock->fl_end = OFFSET_MAX;
6490 nfsd4_fl_get_owner(fl_owner_t owner)
6492 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6494 nfs4_get_stateowner(&lo->lo_owner);
6499 nfsd4_fl_put_owner(fl_owner_t owner)
6501 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6504 nfs4_put_stateowner(&lo->lo_owner);
6508 nfsd4_lm_notify(struct file_lock *fl)
6510 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
6511 struct net *net = lo->lo_owner.so_client->net;
6512 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6513 struct nfsd4_blocked_lock *nbl = container_of(fl,
6514 struct nfsd4_blocked_lock, nbl_lock);
6517 /* An empty list means that something else is going to be using it */
6518 spin_lock(&nn->blocked_locks_lock);
6519 if (!list_empty(&nbl->nbl_list)) {
6520 list_del_init(&nbl->nbl_list);
6521 list_del_init(&nbl->nbl_lru);
6524 spin_unlock(&nn->blocked_locks_lock);
6527 trace_nfsd_cb_notify_lock(lo, nbl);
6528 nfsd4_run_cb(&nbl->nbl_cb);
6532 static const struct lock_manager_operations nfsd_posix_mng_ops = {
6533 .lm_notify = nfsd4_lm_notify,
6534 .lm_get_owner = nfsd4_fl_get_owner,
6535 .lm_put_owner = nfsd4_fl_put_owner,
6539 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
6541 struct nfs4_lockowner *lo;
6543 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
6544 lo = (struct nfs4_lockowner *) fl->fl_owner;
6545 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
6547 if (!deny->ld_owner.data)
6548 /* We just don't care that much */
6550 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
6553 deny->ld_owner.len = 0;
6554 deny->ld_owner.data = NULL;
6555 deny->ld_clientid.cl_boot = 0;
6556 deny->ld_clientid.cl_id = 0;
6558 deny->ld_start = fl->fl_start;
6559 deny->ld_length = NFS4_MAX_UINT64;
6560 if (fl->fl_end != NFS4_MAX_UINT64)
6561 deny->ld_length = fl->fl_end - fl->fl_start + 1;
6562 deny->ld_type = NFS4_READ_LT;
6563 if (fl->fl_type != F_RDLCK)
6564 deny->ld_type = NFS4_WRITE_LT;
6567 static struct nfs4_lockowner *
6568 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
6570 unsigned int strhashval = ownerstr_hashval(owner);
6571 struct nfs4_stateowner *so;
6573 lockdep_assert_held(&clp->cl_lock);
6575 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
6577 if (so->so_is_open_owner)
6579 if (same_owner_str(so, owner))
6580 return lockowner(nfs4_get_stateowner(so));
6585 static struct nfs4_lockowner *
6586 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
6588 struct nfs4_lockowner *lo;
6590 spin_lock(&clp->cl_lock);
6591 lo = find_lockowner_str_locked(clp, owner);
6592 spin_unlock(&clp->cl_lock);
6596 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
6598 unhash_lockowner_locked(lockowner(sop));
6601 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
6603 struct nfs4_lockowner *lo = lockowner(sop);
6605 kmem_cache_free(lockowner_slab, lo);
6608 static const struct nfs4_stateowner_operations lockowner_ops = {
6609 .so_unhash = nfs4_unhash_lockowner,
6610 .so_free = nfs4_free_lockowner,
6614 * Alloc a lock owner structure.
6615 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have occurred.
6618 * strhashval = ownerstr_hashval
6620 static struct nfs4_lockowner *
6621 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
6622 struct nfs4_ol_stateid *open_stp,
6623 struct nfsd4_lock *lock)
6625 struct nfs4_lockowner *lo, *ret;
6627 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
6630 INIT_LIST_HEAD(&lo->lo_blocked);
6631 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
6632 lo->lo_owner.so_is_open_owner = 0;
6633 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
6634 lo->lo_owner.so_ops = &lockowner_ops;
6635 spin_lock(&clp->cl_lock);
6636 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
6638 list_add(&lo->lo_owner.so_strhash,
6639 &clp->cl_ownerstr_hashtbl[strhashval]);
6642 nfs4_free_stateowner(&lo->lo_owner);
6644 spin_unlock(&clp->cl_lock);
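/*
 * Editor's note: returns the lock stateid this lockowner already holds on
 * the given open stateid, with a reference taken, or NULL; the client's
 * cl_lock must be held, as asserted below.
 */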
6648 static struct nfs4_ol_stateid *
6649 find_lock_stateid(const struct nfs4_lockowner *lo,
6650 const struct nfs4_ol_stateid *ost)
6652 struct nfs4_ol_stateid *lst;
6654 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
6656 /* If ost is not hashed, ost->st_locks will not be valid */
6657 if (!nfs4_ol_stateid_unhashed(ost))
6658 list_for_each_entry(lst, &ost->st_locks, st_locks) {
6659 if (lst->st_stateowner == &lo->lo_owner) {
6660 refcount_inc(&lst->st_stid.sc_count);
6667 static struct nfs4_ol_stateid *
6668 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
6669 struct nfs4_file *fp, struct inode *inode,
6670 struct nfs4_ol_stateid *open_stp)
6672 struct nfs4_client *clp = lo->lo_owner.so_client;
6673 struct nfs4_ol_stateid *retstp;
6675 mutex_init(&stp->st_mutex);
6676 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
6678 spin_lock(&clp->cl_lock);
6679 if (nfs4_ol_stateid_unhashed(open_stp))
6681 retstp = find_lock_stateid(lo, open_stp);
6684 refcount_inc(&stp->st_stid.sc_count);
6685 stp->st_stid.sc_type = NFS4_LOCK_STID;
6686 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
6688 stp->st_stid.sc_file = fp;
6689 stp->st_access_bmap = 0;
6690 stp->st_deny_bmap = open_stp->st_deny_bmap;
6691 stp->st_openstp = open_stp;
6692 spin_lock(&fp->fi_lock);
6693 list_add(&stp->st_locks, &open_stp->st_locks);
6694 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
6695 list_add(&stp->st_perfile, &fp->fi_stateids);
6696 spin_unlock(&fp->fi_lock);
6697 spin_unlock(&clp->cl_lock);
6700 spin_unlock(&clp->cl_lock);
6701 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
6702 nfs4_put_stid(&retstp->st_stid);
6705 /* To keep mutex tracking happy */
6706 mutex_unlock(&stp->st_mutex);
6709 spin_unlock(&clp->cl_lock);
6710 mutex_unlock(&stp->st_mutex);
6714 static struct nfs4_ol_stateid *
6715 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
6716 struct inode *inode, struct nfs4_ol_stateid *ost,
6719 struct nfs4_stid *ns = NULL;
6720 struct nfs4_ol_stateid *lst;
6721 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6722 struct nfs4_client *clp = oo->oo_owner.so_client;
6725 spin_lock(&clp->cl_lock);
6726 lst = find_lock_stateid(lo, ost);
6727 spin_unlock(&clp->cl_lock);
6729 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
6731 nfs4_put_stid(&lst->st_stid);
6733 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
6737 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
6738 if (lst == openlockstateid(ns))
6747 check_lock_length(u64 offset, u64 length)
6749 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
6750 (length > ~offset)));
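/*
 * Editor's note: get_lock_access() mirrors the requested access mode onto
 * the lock stateid, taking a file access reference the first time a given
 * mode is used for locking; fi_lock must be held, as the lockdep assertion
 * below insists.
 */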
6753 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
6755 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
6757 lockdep_assert_held(&fp->fi_lock);
6759 if (test_access(access, lock_stp))
6761 __nfs4_file_get_access(fp, access);
6762 set_access(access, lock_stp);
6766 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
6767 struct nfs4_ol_stateid *ost,
6768 struct nfsd4_lock *lock,
6769 struct nfs4_ol_stateid **plst, bool *new)
6772 struct nfs4_file *fi = ost->st_stid.sc_file;
6773 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6774 struct nfs4_client *cl = oo->oo_owner.so_client;
6775 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
6776 struct nfs4_lockowner *lo;
6777 struct nfs4_ol_stateid *lst;
6778 unsigned int strhashval;
6780 lo = find_lockowner_str(cl, &lock->lk_new_owner);
6782 strhashval = ownerstr_hashval(&lock->lk_new_owner);
6783 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
6785 return nfserr_jukebox;
6787 /* with an existing lockowner, seqids must be the same */
6788 status = nfserr_bad_seqid;
6789 if (!cstate->minorversion &&
6790 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
6794 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
6796 status = nfserr_jukebox;
6803 nfs4_put_stateowner(&lo->lo_owner);
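/*
 * LOCK operation.
 *
 * Editor's note: for a new lock (lk_is_new) the open stateid is validated
 * and a lock stateid is created on demand via lookup_or_create_lock_state();
 * otherwise the existing lock stateid is looked up by seqid. Blocking
 * (READW/WRITEW) requests from sessions clients may be queued as
 * nfsd4_blocked_locks and completed later through the CB_NOTIFY_LOCK
 * callback.
 */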
6811 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6812 union nfsd4_op_u *u)
6814 struct nfsd4_lock *lock = &u->lock;
6815 struct nfs4_openowner *open_sop = NULL;
6816 struct nfs4_lockowner *lock_sop = NULL;
6817 struct nfs4_ol_stateid *lock_stp = NULL;
6818 struct nfs4_ol_stateid *open_stp = NULL;
6819 struct nfs4_file *fp;
6820 struct nfsd_file *nf = NULL;
6821 struct nfsd4_blocked_lock *nbl = NULL;
6822 struct file_lock *file_lock = NULL;
6823 struct file_lock *conflock = NULL;
6824 struct super_block *sb;
6829 unsigned char fl_type;
6830 unsigned int fl_flags = FL_POSIX;
6831 struct net *net = SVC_NET(rqstp);
6832 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6834 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
6835 (long long) lock->lk_offset,
6836 (long long) lock->lk_length);
6838 if (check_lock_length(lock->lk_offset, lock->lk_length))
6839 return nfserr_inval;
6841 if ((status = fh_verify(rqstp, &cstate->current_fh,
6842 S_IFREG, NFSD_MAY_LOCK))) {
6843 dprintk("NFSD: nfsd4_lock: permission denied!\n");
6846 sb = cstate->current_fh.fh_dentry->d_sb;
6848 if (lock->lk_is_new) {
6849 if (nfsd4_has_session(cstate))
6850 /* See rfc 5661 18.10.3: given clientid is ignored: */
6851 memcpy(&lock->lk_new_clientid,
6852 &cstate->clp->cl_clientid,
6853 sizeof(clientid_t));
6855 /* validate and update open stateid and open seqid */
6856 status = nfs4_preprocess_confirmed_seqid_op(cstate,
6857 lock->lk_new_open_seqid,
6858 &lock->lk_new_open_stateid,
&open_stp, nn);
6862 mutex_unlock(&open_stp->st_mutex);
6863 open_sop = openowner(open_stp->st_stateowner);
6864 status = nfserr_bad_stateid;
6865 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
6866 &lock->lk_new_clientid))
6868 status = lookup_or_create_lock_state(cstate, open_stp, lock,
&lock_stp, &new);
} else {
6871 status = nfs4_preprocess_seqid_op(cstate,
6872 lock->lk_old_lock_seqid,
6873 &lock->lk_old_lock_stateid,
6874 NFS4_LOCK_STID, &lock_stp, nn);
6878 lock_sop = lockowner(lock_stp->st_stateowner);
6880 lkflg = setlkflg(lock->lk_type);
6881 status = nfs4_check_openmode(lock_stp, lkflg);
6885 status = nfserr_grace;
6886 if (locks_in_grace(net) && !lock->lk_reclaim)
6888 status = nfserr_no_grace;
6889 if (!locks_in_grace(net) && lock->lk_reclaim)
6892 if (lock->lk_reclaim)
6893 fl_flags |= FL_RECLAIM;
6895 fp = lock_stp->st_stid.sc_file;
6896 switch (lock->lk_type) {
6898 if (nfsd4_has_session(cstate) &&
6899 !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
6900 fl_flags |= FL_SLEEP;
6903 spin_lock(&fp->fi_lock);
6904 nf = find_readable_file_locked(fp);
6906 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
6907 spin_unlock(&fp->fi_lock);
6910 case NFS4_WRITEW_LT:
6911 if (nfsd4_has_session(cstate) &&
6912 !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
6913 fl_flags |= FL_SLEEP;
6916 spin_lock(&fp->fi_lock);
6917 nf = find_writeable_file_locked(fp);
6919 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
6920 spin_unlock(&fp->fi_lock);
6924 status = nfserr_inval;
6929 status = nfserr_openmode;
6933 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6935 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6936 status = nfserr_jukebox;
6940 file_lock = &nbl->nbl_lock;
6941 file_lock->fl_type = fl_type;
6942 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6943 file_lock->fl_pid = current->tgid;
6944 file_lock->fl_file = nf->nf_file;
6945 file_lock->fl_flags = fl_flags;
6946 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6947 file_lock->fl_start = lock->lk_offset;
6948 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6949 nfs4_transform_lock_offset(file_lock);
6951 conflock = locks_alloc_lock();
6953 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6954 status = nfserr_jukebox;
6958 if (fl_flags & FL_SLEEP) {
6959 nbl->nbl_time = ktime_get_boottime_seconds();
6960 spin_lock(&nn->blocked_locks_lock);
6961 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6962 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6963 spin_unlock(&nn->blocked_locks_lock);
6966 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
6968 case 0: /* success! */
6969 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6971 if (lock->lk_reclaim)
6972 nn->somebody_reclaimed = true;
6974 case FILE_LOCK_DEFERRED:
6977 case -EAGAIN: /* conflock holds conflicting lock */
6978 status = nfserr_denied;
6979 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6980 nfs4_set_lock_denied(conflock, &lock->lk_denied);
6983 status = nfserr_deadlock;
6986 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
6987 status = nfserrno(err);
6992 /* dequeue it if we queued it before */
6993 if (fl_flags & FL_SLEEP) {
6994 spin_lock(&nn->blocked_locks_lock);
6995 list_del_init(&nbl->nbl_list);
6996 list_del_init(&nbl->nbl_lru);
6997 spin_unlock(&nn->blocked_locks_lock);
6999 free_blocked_lock(nbl);
7004 /* Bump seqid manually if the 4.0 replay owner is openowner */
7005 if (cstate->replay_owner &&
7006 cstate->replay_owner != &lock_sop->lo_owner &&
7007 seqid_mutating_err(ntohl(status)))
7008 lock_sop->lo_owner.so_seqid++;
7011 * If this is a new, never-before-used stateid, and we are
7012 * returning an error, then just go ahead and release it.
 */
if (status && new)
7015 release_lock_stateid(lock_stp);
7017 mutex_unlock(&lock_stp->st_mutex);
7019 nfs4_put_stid(&lock_stp->st_stid);
7022 nfs4_put_stid(&open_stp->st_stid);
7023 nfsd4_bump_seqid(cstate, status);
7025 locks_free_lock(conflock);
7030 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
7031 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock:
 */
7034 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
7036 struct nfsd_file *nf;
7039 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
7042 fh_lock(fhp); /* to block new leases till after test_lock: */
7043 err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
7047 lock->fl_file = nf->nf_file;
7048 err = nfserrno(vfs_test_lock(nf->nf_file, lock));
7049 lock->fl_file = NULL;
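/*
 * LOCKT: test for a conflicting lock without acquiring anything.  No stateid
 * is involved, so the lockowner is looked up by name only and the test is
 * done via nfsd_test_lock()/vfs_test_lock(); if a conflict is found the
 * denied lock is encoded into lt_denied and nfserr_denied is returned.
 */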
7060 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7061 union nfsd4_op_u *u)
7063 struct nfsd4_lockt *lockt = &u->lockt;
7064 struct file_lock *file_lock = NULL;
7065 struct nfs4_lockowner *lo = NULL;
7067 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7069 if (locks_in_grace(SVC_NET(rqstp)))
7070 return nfserr_grace;
7072 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
7073 return nfserr_inval;
7075 if (!nfsd4_has_session(cstate)) {
7076 status = set_client(&lockt->lt_clientid, cstate, nn);
7081 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
7084 file_lock = locks_alloc_lock();
7086 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7087 status = nfserr_jukebox;
7091 switch (lockt->lt_type) {
7094 file_lock->fl_type = F_RDLCK;
7097 case NFS4_WRITEW_LT:
7098 file_lock->fl_type = F_WRLCK;
7101 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
7102 status = nfserr_inval;
7106 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
7108 file_lock->fl_owner = (fl_owner_t)lo;
7109 file_lock->fl_pid = current->tgid;
7110 file_lock->fl_flags = FL_POSIX;
7112 file_lock->fl_start = lockt->lt_offset;
7113 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
7115 nfs4_transform_lock_offset(file_lock);
7117 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
7121 if (file_lock->fl_type != F_UNLCK) {
7122 status = nfserr_denied;
7123 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
7127 nfs4_put_stateowner(&lo->lo_owner);
7129 locks_free_lock(file_lock);
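/*
 * LOCKU: release a byte range.  The lock stateid and seqid are validated
 * first, then an F_UNLCK request covering lu_offset/lu_length is handed to
 * vfs_lock_file(); on success the stateid generation is bumped and copied
 * into the reply.
 */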
7134 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7135 union nfsd4_op_u *u)
7137 struct nfsd4_locku *locku = &u->locku;
7138 struct nfs4_ol_stateid *stp;
7139 struct nfsd_file *nf = NULL;
7140 struct file_lock *file_lock = NULL;
7143 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7145 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
7146 (long long) locku->lu_offset,
7147 (long long) locku->lu_length);
7149 if (check_lock_length(locku->lu_offset, locku->lu_length))
7150 return nfserr_inval;
7152 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
7153 &locku->lu_stateid, NFS4_LOCK_STID,
&stp, nn);
7157 nf = find_any_file(stp->st_stid.sc_file);
7159 status = nfserr_lock_range;
7162 file_lock = locks_alloc_lock();
7164 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7165 status = nfserr_jukebox;
7169 file_lock->fl_type = F_UNLCK;
7170 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
7171 file_lock->fl_pid = current->tgid;
7172 file_lock->fl_file = nf->nf_file;
7173 file_lock->fl_flags = FL_POSIX;
7174 file_lock->fl_lmops = &nfsd_posix_mng_ops;
7175 file_lock->fl_start = locku->lu_offset;
7177 file_lock->fl_end = last_byte_offset(locku->lu_offset,
locku->lu_length);
7179 nfs4_transform_lock_offset(file_lock);
7181 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
7183 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
7186 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
7190 mutex_unlock(&stp->st_mutex);
7191 nfs4_put_stid(&stp->st_stid);
7193 nfsd4_bump_seqid(cstate, status);
7195 locks_free_lock(file_lock);
7199 status = nfserrno(err);
/*
7205 * true: locks held by lockowner
7206 * false: no locks held by lockowner
 */
7209 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
7211 struct file_lock *fl;
7213 struct nfsd_file *nf = find_any_file(fp);
7214 struct inode *inode;
7215 struct file_lock_context *flctx;
7218 /* Any valid lock stateid should have some sort of access */
7223 inode = locks_inode(nf->nf_file);
7224 flctx = inode->i_flctx;
7226 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
7227 spin_lock(&flctx->flc_lock);
7228 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
7229 if (fl->fl_owner == (fl_owner_t)lowner) {
7234 spin_unlock(&flctx->flc_lock);
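/*
 * RELEASE_LOCKOWNER: the (v4.0) client tells us it will no longer use this
 * lockowner.  If check_for_locks() finds any lock still held under it the
 * operation fails with nfserr_locks_held; otherwise the owner is unhashed
 * and all of its lock stateids are reaped.
 */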
7241 nfsd4_release_lockowner(struct svc_rqst *rqstp,
7242 struct nfsd4_compound_state *cstate,
7243 union nfsd4_op_u *u)
7245 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
7246 clientid_t *clid = &rlockowner->rl_clientid;
7247 struct nfs4_stateowner *sop;
7248 struct nfs4_lockowner *lo = NULL;
7249 struct nfs4_ol_stateid *stp;
7250 struct xdr_netobj *owner = &rlockowner->rl_owner;
7251 unsigned int hashval = ownerstr_hashval(owner);
7253 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7254 struct nfs4_client *clp;
7255 LIST_HEAD (reaplist);
7257 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
7258 clid->cl_boot, clid->cl_id);
7260 status = set_client(clid, cstate, nn);
7265 /* Find the matching lock stateowner */
7266 spin_lock(&clp->cl_lock);
7267 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
7270 if (sop->so_is_open_owner || !same_owner_str(sop, owner))
7273 /* see if there are still any locks associated with it */
7274 lo = lockowner(sop);
7275 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
7276 if (check_for_locks(stp->st_stid.sc_file, lo)) {
7277 status = nfserr_locks_held;
7278 spin_unlock(&clp->cl_lock);
7283 nfs4_get_stateowner(sop);
7287 spin_unlock(&clp->cl_lock);
7291 unhash_lockowner_locked(lo);
7292 while (!list_empty(&lo->lo_owner.so_stateids)) {
7293 stp = list_first_entry(&lo->lo_owner.so_stateids,
7294 struct nfs4_ol_stateid,
7296 WARN_ON(!unhash_lock_stateid(stp));
7297 put_ol_stateid_locked(stp, &reaplist);
7299 spin_unlock(&clp->cl_lock);
7300 free_ol_stateid_reaplist(&reaplist);
7301 remove_blocked_locks(lo);
7302 nfs4_put_stateowner(&lo->lo_owner);
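/*
 * Reclaim-record helpers.  Each nfs4_client_reclaim ties a client identifier
 * (and, where available, a hash of its principal) to an entry in
 * reclaim_str_hashtbl, so that after a restart only clients that held state
 * before the crash are allowed to reclaim it during the grace period.  The
 * records are populated by the client-tracking code at startup.
 */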
7307 static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
7310 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
7314 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
7316 struct nfs4_client_reclaim *crp;
7318 crp = nfsd4_find_reclaim_client(name, nn);
7319 return (crp && crp->cr_clp);
7323 * failure => all reset bets are off, nfserr_no_grace...
7325 * The caller is responsible for freeing name.data if NULL is returned (it
7326 * will be freed in nfs4_remove_reclaim_record in the normal case).
7328 struct nfs4_client_reclaim *
7329 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
7330 struct nfsd_net *nn)
7332 unsigned int strhashval;
7333 struct nfs4_client_reclaim *crp;
7335 crp = alloc_reclaim();
7337 strhashval = clientstr_hashval(name);
7338 INIT_LIST_HEAD(&crp->cr_strhash);
7339 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
7340 crp->cr_name.data = name.data;
7341 crp->cr_name.len = name.len;
7342 crp->cr_princhash.data = princhash.data;
7343 crp->cr_princhash.len = princhash.len;
7345 nn->reclaim_str_hashtbl_size++;
7351 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
7353 list_del(&crp->cr_strhash);
7354 kfree(crp->cr_name.data);
7355 kfree(crp->cr_princhash.data);
7357 nn->reclaim_str_hashtbl_size--;
7361 nfs4_release_reclaim(struct nfsd_net *nn)
7363 struct nfs4_client_reclaim *crp = NULL;
7366 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7367 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
7368 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
7369 struct nfs4_client_reclaim, cr_strhash);
7370 nfs4_remove_reclaim_record(crp, nn);
7373 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
7377 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
7378 struct nfs4_client_reclaim *
7379 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
7381 unsigned int strhashval;
7382 struct nfs4_client_reclaim *crp = NULL;
7384 strhashval = clientstr_hashval(name);
7385 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
7386 if (compare_blob(&crp->cr_name, &name) == 0) {
7394 nfs4_check_open_reclaim(struct nfs4_client *clp)
7396 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
7397 return nfserr_no_grace;
7399 if (nfsd4_client_record_check(clp))
7400 return nfserr_reclaim_bad;
7406 * Since the lifetime of a delegation isn't limited to that of an open, a
7407 * client may quite reasonably hang on to a delegation as long as it has
7408 * the inode cached. This becomes an obvious problem the first time a
7409 * client's inode cache approaches the size of the server's total memory.
7411 * For now we avoid this problem by imposing a hard limit on the number
7412 * of delegations, which varies according to the server's memory size.
 */
static void
7415 set_max_delegations(void)
7418 * Allow at most 4 delegations per megabyte of RAM. Quick
7419 * estimates suggest that in the worst case (where every delegation
7420 * is for a different inode), a delegation could take about 1.5K,
7421 * giving a worst case usage of about 6% of memory.
7423 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
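/*
 * Worked example for the shift above (illustrative, assuming 4KB pages so
 * PAGE_SHIFT == 12): the shift is 20 - 2 - 12 = 6, i.e. one delegation per
 * 64 free buffer pages.  A machine with about 1GB free would therefore get
 * roughly 262144 / 64 = 4096 delegations, matching the advertised limit of
 * 4 per megabyte.
 */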
7426 static int nfs4_state_create_net(struct net *net)
7428 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7431 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7432 sizeof(struct list_head),
7434 if (!nn->conf_id_hashtbl)
7436 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7437 sizeof(struct list_head),
7439 if (!nn->unconf_id_hashtbl)
7441 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
7442 sizeof(struct list_head),
7444 if (!nn->sessionid_hashtbl)
7447 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7448 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7449 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7451 for (i = 0; i < SESSION_HASH_SIZE; i++)
7452 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7453 nn->conf_name_tree = RB_ROOT;
7454 nn->unconf_name_tree = RB_ROOT;
7455 nn->boot_time = ktime_get_real_seconds();
7456 nn->grace_ended = false;
7457 nn->nfsd4_manager.block_opens = true;
7458 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7459 INIT_LIST_HEAD(&nn->client_lru);
7460 INIT_LIST_HEAD(&nn->close_lru);
7461 INIT_LIST_HEAD(&nn->del_recall_lru);
7462 spin_lock_init(&nn->client_lock);
7463 spin_lock_init(&nn->s2s_cp_lock);
7464 idr_init(&nn->s2s_cp_stateids);
7466 spin_lock_init(&nn->blocked_locks_lock);
7467 INIT_LIST_HEAD(&nn->blocked_locks_lru);
7469 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7475 kfree(nn->unconf_id_hashtbl);
7477 kfree(nn->conf_id_hashtbl);
7483 nfs4_state_destroy_net(struct net *net)
7486 struct nfs4_client *clp = NULL;
7487 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7489 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7490 while (!list_empty(&nn->conf_id_hashtbl[i])) {
7491 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7492 destroy_client(clp);
7496 WARN_ON(!list_empty(&nn->blocked_locks_lru));
7498 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7499 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7500 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7501 destroy_client(clp);
7505 kfree(nn->sessionid_hashtbl);
7506 kfree(nn->unconf_id_hashtbl);
7507 kfree(nn->conf_id_hashtbl);
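/*
 * Per-network-namespace startup: build the client and session hash tables,
 * start the grace-period manager and schedule the laundromat.  If reclaim
 * tracking is in use and shows there are no clients to reclaim, the grace
 * period is skipped and the laundromat is scheduled a full lease period out
 * instead.
 */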
7512 nfs4_state_start_net(struct net *net)
7514 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7517 ret = nfs4_state_create_net(net);
7520 locks_start_grace(net, &nn->nfsd4_manager);
7521 nfsd4_client_tracking_init(net);
7522 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
7524 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
7525 nn->nfsd4_grace, net->ns.inum);
7526 trace_nfsd_grace_start(nn);
7527 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7531 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
7533 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
7534 nfsd4_end_grace(nn);
7538 /* initialization to perform when the nfsd service is started: */
7541 nfs4_state_start(void)
7545 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
7546 if (laundry_wq == NULL) {
7550 ret = nfsd4_create_callback_queue();
7552 goto out_free_laundry;
7554 set_max_delegations();
7558 destroy_workqueue(laundry_wq);
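/*
 * Per-network-namespace teardown: stop the laundromat, end the grace period,
 * unhash and destroy any delegations still on del_recall_lru, then drop the
 * client-tracking state and free the hash tables.
 */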
7564 nfs4_state_shutdown_net(struct net *net)
7566 struct nfs4_delegation *dp = NULL;
7567 struct list_head *pos, *next, reaplist;
7568 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7570 cancel_delayed_work_sync(&nn->laundromat_work);
7571 locks_end_grace(&nn->nfsd4_manager);
7573 INIT_LIST_HEAD(&reaplist);
7574 spin_lock(&state_lock);
7575 list_for_each_safe(pos, next, &nn->del_recall_lru) {
7576 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7577 WARN_ON(!unhash_delegation_locked(dp));
7578 list_add(&dp->dl_recall_lru, &reaplist);
7580 spin_unlock(&state_lock);
7581 list_for_each_safe(pos, next, &reaplist) {
7582 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7583 list_del_init(&dp->dl_recall_lru);
7584 destroy_unhashed_deleg(dp);
7587 nfsd4_client_tracking_exit(net);
7588 nfs4_state_destroy_net(net);
7589 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
7590 nfsd4_ssc_shutdown_umount(nn);
7595 nfs4_state_shutdown(void)
7597 destroy_workqueue(laundry_wq);
7598 nfsd4_destroy_callback_queue();
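/*
 * Helpers for the NFSv4.1+ "current stateid": put_stateid() records the
 * stateid an operation just produced in the compound state (minorversion
 * only), and get_stateid() substitutes it when a later operation in the same
 * compound passes the special current-stateid value.  Illustrative compound:
 * PUTFH; OPEN; WRITE with the current stateid, where nfsd4_set_openstateid()
 * saves the OPEN result and nfsd4_get_writestateid() fills it back in for
 * the WRITE.
 */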
7602 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7604 if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
7605 CURRENT_STATEID(stateid))
7606 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7610 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7612 if (cstate->minorversion) {
7613 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7614 SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
7619 clear_current_stateid(struct nfsd4_compound_state *cstate)
7621 CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
7625 * functions to set current state id
7628 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7629 union nfsd4_op_u *u)
7631 put_stateid(cstate, &u->open_downgrade.od_stateid);
7635 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7636 union nfsd4_op_u *u)
7638 put_stateid(cstate, &u->open.op_stateid);
7642 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7643 union nfsd4_op_u *u)
7645 put_stateid(cstate, &u->close.cl_stateid);
7649 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7650 union nfsd4_op_u *u)
7652 put_stateid(cstate, &u->lock.lk_resp_stateid);
7656 * functions to consume current state id
7660 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7661 union nfsd4_op_u *u)
7663 get_stateid(cstate, &u->open_downgrade.od_stateid);
7667 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7668 union nfsd4_op_u *u)
7670 get_stateid(cstate, &u->delegreturn.dr_stateid);
7674 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7675 union nfsd4_op_u *u)
7677 get_stateid(cstate, &u->free_stateid.fr_stateid);
7681 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7682 union nfsd4_op_u *u)
7684 get_stateid(cstate, &u->setattr.sa_stateid);
7688 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7689 union nfsd4_op_u *u)
7691 get_stateid(cstate, &u->close.cl_stateid);
7695 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7696 union nfsd4_op_u *u)
7698 get_stateid(cstate, &u->locku.lu_stateid);
7702 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7703 union nfsd4_op_u *u)
7705 get_stateid(cstate, &u->read.rd_stateid);
7709 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7710 union nfsd4_op_u *u)
7712 get_stateid(cstate, &u->write.wr_stateid);