Merge tag 'nfsd-4.10' of git://linux-nfs.org/~bfields/linux
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 16 Dec 2016 18:48:28 +0000 (10:48 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 16 Dec 2016 18:48:28 +0000 (10:48 -0800)
Pull nfsd updates from Bruce Fields:
 "The one new feature is support for a new NFSv4.2 mode_umask attribute
  that makes ACL inheritance a little more useful in environments that
  default to restrictive umasks. Requires client-side support, also on
  its way for 4.10.

  Other than that, miscellaneous smaller fixes and cleanup, especially
  to the server rdma code"

[ The client side of the umask attribute was merged yesterday ]
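
For context on the new attribute: with mode_umask, an NFSv4.2 client sends
both the requested create mode and its umask in a single attribute, and the
server applies the umask only when the new object will not inherit its
permissions from a default ACL on the parent directory. A minimal sketch of
that decision, using hypothetical helper names rather than the actual nfsd
code:

    #include <linux/types.h>

    /*
     * Illustrative sketch only: apply_create_mode() and the
     * parent_has_inheritable_acl flag are made-up names, not
     * the actual nfsd helpers.
     */
    static umode_t apply_create_mode(umode_t mode, umode_t umask,
                                     bool parent_has_inheritable_acl)
    {
            /*
             * If a default ACL on the parent will supply the new
             * object's permissions, ignore the umask so a restrictive
             * umask does not clobber ACL inheritance.
             */
            if (parent_has_inheritable_acl)
                    return mode;

            /* Otherwise, traditional POSIX behaviour applies. */
            return mode & ~umask;
    }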

* tag 'nfsd-4.10' of git://linux-nfs.org/~bfields/linux:
  nfsd: add support for the umask attribute
  sunrpc: use DEFINE_SPINLOCK()
  svcrdma: Further clean-up of svc_rdma_get_inv_rkey()
  svcrdma: Break up dprintk format in svc_rdma_accept()
  svcrdma: Remove unused variable in rdma_copy_tail()
  svcrdma: Remove unused variables in xprt_rdma_bc_allocate()
  svcrdma: Remove svc_rdma_op_ctxt::wc_status
  svcrdma: Remove DMA map accounting
  svcrdma: Remove BH-disabled spin locking in svc_rdma_send()
  svcrdma: Renovate sendto chunk list parsing
  svcauth_gss: Close connection when dropping an incoming message
  svcrdma: Clear xpt_bc_xps in xprt_setup_rdma_bc() error exit arm
  nfsd: constify reply_cache_stats_operations structure
  nfsd: update workqueue creation
  sunrpc: GFP_KERNEL should be GFP_NOFS in crypto code
  nfsd: catch errors in decode_fattr earlier
  nfsd: clean up supported attribute handling
  nfsd: fix error handling for clients that fail to return the layout
  nfsd: more robust allocation failure handling in nfsd_reply_cache_init

fs/nfsd/nfsctl.c
net/sunrpc/xprtrdma/svc_rdma_transport.c

diff --combined fs/nfsd/nfsctl.c
@@@ -217,7 -217,7 +217,7 @@@ static const struct file_operations pool_stats_operations = {
        .release        = nfsd_pool_stats_release,
  };
  
- static struct file_operations reply_cache_stats_operations = {
+ static const struct file_operations reply_cache_stats_operations = {
        .open           = nfsd_reply_cache_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@@ -1201,7 -1201,7 +1201,7 @@@ static int create_proc_exports_entry(void)
  }
  #endif
  
 -int nfsd_net_id;
 +unsigned int nfsd_net_id;
  
  static __net_init int nfsd_init_net(struct net *net)
  {
diff --combined net/sunrpc/xprtrdma/svc_rdma_transport.c
@@@ -41,6 -41,7 +41,7 @@@
   */
  
  #include <linux/sunrpc/svc_xprt.h>
+ #include <linux/sunrpc/addr.h>
  #include <linux/sunrpc/debug.h>
  #include <linux/sunrpc/rpc_rdma.h>
  #include <linux/interrupt.h>
@@@ -67,7 -68,6 +68,7 @@@ static void svc_rdma_detach(struct svc_xprt *xprt);
  static void svc_rdma_free(struct svc_xprt *xprt);
  static int svc_rdma_has_wspace(struct svc_xprt *xprt);
  static int svc_rdma_secure_port(struct svc_rqst *);
 +static void svc_rdma_kill_temp_xprt(struct svc_xprt *);
  
  static struct svc_xprt_ops svc_rdma_ops = {
        .xpo_create = svc_rdma_create,
@@@ -80,7 -80,6 +81,7 @@@
        .xpo_has_wspace = svc_rdma_has_wspace,
        .xpo_accept = svc_rdma_accept,
        .xpo_secure_port = svc_rdma_secure_port,
 +      .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
  };
  
  struct svc_xprt_class svc_rdma_class = {
@@@ -226,25 -225,22 +227,22 @@@ void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
        struct svcxprt_rdma *xprt = ctxt->xprt;
        struct ib_device *device = xprt->sc_cm_id->device;
        u32 lkey = xprt->sc_pd->local_dma_lkey;
-       unsigned int i, count;
+       unsigned int i;
  
-       for (count = 0, i = 0; i < ctxt->mapped_sges; i++) {
+       for (i = 0; i < ctxt->mapped_sges; i++) {
                /*
                 * Unmap the DMA addr in the SGE if the lkey matches
                 * the local_dma_lkey, otherwise, ignore it since it is
                 * an FRMR lkey and will be unmapped later when the
                 * last WR that uses it completes.
                 */
-               if (ctxt->sge[i].lkey == lkey) {
-                       count++;
+               if (ctxt->sge[i].lkey == lkey)
                        ib_dma_unmap_page(device,
                                            ctxt->sge[i].addr,
                                            ctxt->sge[i].length,
                                            ctxt->direction);
-               }
        }
        ctxt->mapped_sges = 0;
-       atomic_sub(count, &xprt->sc_dma_used);
  }
  
  void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
@@@ -398,7 -394,6 +396,6 @@@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
  
        /* WARNING: Only wc->wr_cqe and wc->status are reliable */
        ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
-       ctxt->wc_status = wc->status;
        svc_rdma_unmap_dma(ctxt);
  
        if (wc->status != IB_WC_SUCCESS)
@@@ -436,7 -431,7 +433,7 @@@ static void svc_rdma_send_wc_common(struct svcxprt_rdma *xprt,
                goto err;
  
  out:
-       atomic_dec(&xprt->sc_sq_count);
+       atomic_inc(&xprt->sc_sq_avail);
        wake_up(&xprt->sc_send_wait);
        return;
  
@@@ -946,7 -941,6 +943,6 @@@ void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
        if (frmr) {
                ib_dma_unmap_sg(rdma->sc_cm_id->device,
                                frmr->sg, frmr->sg_nents, frmr->direction);
-               atomic_dec(&rdma->sc_dma_used);
                spin_lock_bh(&rdma->sc_frmr_q_lock);
                WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
                list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
@@@ -973,6 -967,7 +969,7 @@@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        struct rpcrdma_connect_private pmsg;
        struct ib_qp_init_attr qp_attr;
        struct ib_device *dev;
+       struct sockaddr *sap;
        unsigned int i;
        int ret = 0;
  
        newxprt->sc_rq_depth = newxprt->sc_max_requests +
                               newxprt->sc_max_bc_requests;
        newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_rq_depth;
+       atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);
  
        if (!svc_rdma_prealloc_ctxts(newxprt))
                goto errout;
        qp_attr.qp_type = IB_QPT_RC;
        qp_attr.send_cq = newxprt->sc_sq_cq;
        qp_attr.recv_cq = newxprt->sc_rq_cq;
-       dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
-               "    cm_id->device=%p, sc_pd->device=%p\n"
-               "    cap.max_send_wr = %d\n"
-               "    cap.max_recv_wr = %d\n"
-               "    cap.max_send_sge = %d\n"
-               "    cap.max_recv_sge = %d\n",
-               newxprt->sc_cm_id, newxprt->sc_pd,
-               dev, newxprt->sc_pd->device,
-               qp_attr.cap.max_send_wr,
-               qp_attr.cap.max_recv_wr,
-               qp_attr.cap.max_send_sge,
-               qp_attr.cap.max_recv_sge);
+       dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
+               newxprt->sc_cm_id, newxprt->sc_pd);
+       dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
+               qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
+       dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
+               qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);
  
        ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
        if (ret) {
                goto errout;
        }
  
-       dprintk("svcrdma: new connection %p accepted with the following "
-               "attributes:\n"
-               "    local_ip        : %pI4\n"
-               "    local_port      : %d\n"
-               "    remote_ip       : %pI4\n"
-               "    remote_port     : %d\n"
-               "    max_sge         : %d\n"
-               "    max_sge_rd      : %d\n"
-               "    sq_depth        : %d\n"
-               "    max_requests    : %d\n"
-               "    ord             : %d\n",
-               newxprt,
-               &((struct sockaddr_in *)&newxprt->sc_cm_id->
-                        route.addr.src_addr)->sin_addr.s_addr,
-               ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
-                      route.addr.src_addr)->sin_port),
-               &((struct sockaddr_in *)&newxprt->sc_cm_id->
-                        route.addr.dst_addr)->sin_addr.s_addr,
-               ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
-                      route.addr.dst_addr)->sin_port),
-               newxprt->sc_max_sge,
-               newxprt->sc_max_sge_rd,
-               newxprt->sc_sq_depth,
-               newxprt->sc_max_requests,
-               newxprt->sc_ord);
+       dprintk("svcrdma: new connection %p accepted:\n", newxprt);
+       sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
+       dprintk("    local address   : %pIS:%u\n", sap, rpc_get_port(sap));
+       sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
+       dprintk("    remote address  : %pIS:%u\n", sap, rpc_get_port(sap));
+       dprintk("    max_sge         : %d\n", newxprt->sc_max_sge);
+       dprintk("    max_sge_rd      : %d\n", newxprt->sc_max_sge_rd);
+       dprintk("    sq_depth        : %d\n", newxprt->sc_sq_depth);
+       dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
+       dprintk("    ord             : %d\n", newxprt->sc_ord);
  
        return &newxprt->sc_xprt;
  
@@@ -1257,9 -1232,6 +1234,6 @@@ static void __svc_rdma_free(struct work_struct *work)
        if (rdma->sc_ctxt_used != 0)
                pr_err("svcrdma: ctxt still in use? (%d)\n",
                       rdma->sc_ctxt_used);
-       if (atomic_read(&rdma->sc_dma_used) != 0)
-               pr_err("svcrdma: dma still in use? (%d)\n",
-                      atomic_read(&rdma->sc_dma_used));
  
        /* Final put of backchannel client transport */
        if (xprt->xpt_bc_xprt) {
@@@ -1319,10 -1291,6 +1293,10 @@@ static int svc_rdma_secure_port(struct svc_rqst *rqstp)
        return 1;
  }
  
 +static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
 +{
 +}
 +
  int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
  {
        struct ib_send_wr *bad_wr, *n_wr;
  
        /* If the SQ is full, wait until an SQ entry is available */
        while (1) {
-               spin_lock_bh(&xprt->sc_lock);
-               if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
-                       spin_unlock_bh(&xprt->sc_lock);
+               if ((atomic_sub_return(wr_count, &xprt->sc_sq_avail) < 0)) {
                        atomic_inc(&rdma_stat_sq_starve);
  
                        /* Wait until SQ WR available if SQ still full */
+                       atomic_add(wr_count, &xprt->sc_sq_avail);
                        wait_event(xprt->sc_send_wait,
-                                  atomic_read(&xprt->sc_sq_count) <
-                                  xprt->sc_sq_depth);
+                                  atomic_read(&xprt->sc_sq_avail) > wr_count);
                        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                                return -ENOTCONN;
                        continue;
                        svc_xprt_get(&xprt->sc_xprt);
  
                /* Bump used SQ WR count and post */
-               atomic_add(wr_count, &xprt->sc_sq_count);
                ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
                if (ret) {
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
-                       atomic_sub(wr_count, &xprt->sc_sq_count);
                        for (i = 0; i < wr_count; i ++)
                                svc_xprt_put(&xprt->sc_xprt);
-                       dprintk("svcrdma: failed to post SQ WR rc=%d, "
-                              "sc_sq_count=%d, sc_sq_depth=%d\n",
-                              ret, atomic_read(&xprt->sc_sq_count),
-                              xprt->sc_sq_depth);
-               }
-               spin_unlock_bh(&xprt->sc_lock);
-               if (ret)
+                       dprintk("svcrdma: failed to post SQ WR rc=%d\n", ret);
+                       dprintk("    sc_sq_avail=%d, sc_sq_depth=%d\n",
+                               atomic_read(&xprt->sc_sq_avail),
+                               xprt->sc_sq_depth);
                        wake_up(&xprt->sc_send_wait);
+               }
                break;
        }
        return ret;
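
A note on the svc_rdma_send() rework above: the BH-disabled spinlock and the
sc_sq_count counter are replaced by a single atomic count of available
send-queue slots. A sender reserves slots with atomic_sub_return(); if that
drives the count negative, it returns the credit and sleeps until send
completions replenish it. A stand-alone sketch of the same pattern, with
illustrative names (sq_avail, sq_wait, reserve_sq_slots and release_sq_slot
are not part of the sunrpc code):

    #include <linux/atomic.h>
    #include <linux/wait.h>

    static atomic_t sq_avail = ATOMIC_INIT(16);   /* send-queue depth */
    static DECLARE_WAIT_QUEUE_HEAD(sq_wait);

    /* Reserve wr_count send-queue slots, sleeping while the SQ is full. */
    static void reserve_sq_slots(int wr_count)
    {
            while (atomic_sub_return(wr_count, &sq_avail) < 0) {
                    /* Over-committed: give the credit back and wait
                     * for completions to make room. */
                    atomic_add(wr_count, &sq_avail);
                    wait_event(sq_wait,
                               atomic_read(&sq_avail) > wr_count);
            }
    }

    /* Completion handler side: return one slot and wake any waiters. */
    static void release_sq_slot(void)
    {
            atomic_inc(&sq_avail);
            wake_up(&sq_wait);
    }

Because reservation and release touch only one atomic_t, no lock is held
across ib_post_send(), which is what lets the patch drop the BH-disabled
spin_lock_bh()/spin_unlock_bh() pair around the posting path.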