Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 1 Nov 2019 16:21:48 +0000 (09:21 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 1 Nov 2019 16:21:48 +0000 (09:21 -0700)
Pull rdma fixes from Jason Gunthorpe:
 "A number of bug fixes and a regression fix:

   - Various issues from static analysis in hfi1, uverbs, hns, and cxgb4

   - Fix for a deadlock that can occur when the new automatic RDMA module
     loading is used

   - Missing _irq notation in a prior -rc patch found by lockdep

   - Fix a locking and lifetime issue in siw

   - Minor functional bug fixes in cxgb4, mlx5, qedr

   - Fix a regression where vlan interfaces no longer worked with RDMA
     CM in some cases"
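
As a companion to the module-loading deadlock item in the message above, here is a rough, hedged userspace sketch of the lookup pattern the netlink.c hunk further down switches to: a per-client reader/writer lock, a callback table published with release semantics, and the read lock dropped around the module load before retrying the lookup once. It is an illustration only; pthread_rwlock_t stands in for the kernel rw_semaphore, and load_client(), register_client() and cb_table are made-up names, not the kernel's API.

/*
 * Illustrative userspace sketch only -- not kernel code.  pthread_rwlock_t
 * stands in for the per-client rw_semaphore, load_client() for
 * request_module(), and cb_table for the client's callback-table slot.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_rwlock_t client_sem = PTHREAD_RWLOCK_INITIALIZER;
static _Atomic(const char *) cb_table;  /* NULL until the client registers */

/* Called from the loaded client's init path; takes no lock of its own. */
static void register_client(const char *table)
{
        atomic_store_explicit(&cb_table, table, memory_order_release);
}

/* Stand-in for request_module(): loading ends with the client registering. */
static void load_client(void)
{
        register_client("client callback table");
}

/* Caller already holds client_sem for reading. */
static const char *get_cb_table(void)
{
        const char *tbl = atomic_load_explicit(&cb_table, memory_order_acquire);

        if (!tbl) {
                /*
                 * Drop the read lock before triggering the load, so nothing
                 * the loading/registering path does can wait on a lock we
                 * hold; then retry the lookup exactly once.
                 */
                pthread_rwlock_unlock(&client_sem);
                load_client();
                pthread_rwlock_rdlock(&client_sem);
                tbl = atomic_load_explicit(&cb_table, memory_order_acquire);
        }
        return tbl;
}

int main(void)
{
        pthread_rwlock_rdlock(&client_sem);
        printf("resolved: %s\n", get_cb_table());
        pthread_rwlock_unlock(&client_sem);
        return 0;
}

Compiled with -pthread this just prints the resolved table; the point of the shape is that an unregister path (rdma_nl_unregister() in the real patch) can take the semaphore for writing and know that no in-flight handler still dereferences the table.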

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/hns: Prevent memory leaks of eq->buf_list
  RDMA/iw_cxgb4: Avoid freeing skb twice in arp failure case
  RDMA/mlx5: Use irq xarray locking for mkey_table
  IB/core: Avoid deadlock during netlink message handling
  RDMA/nldev: Skip counter if port doesn't match
  RDMA/uverbs: Prevent potential underflow
  IB/core: Use rdma_read_gid_l2_fields to compare GID L2 fields
  RDMA/qedr: Fix reported firmware version
  RDMA/siw: free siw_base_qp in kref release routine
  RDMA/iwcm: move iw_rem_ref() calls out of spinlock
  iw_cxgb4: fix ECN check on the passive accept
  IB/hfi1: Use a common pad buffer for 9B and 16B packets
  IB/hfi1: Avoid excessive retry for TID RDMA READ request
  RDMA/mlx5: Clear old rate limit when closing QP

18 files changed:
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/device.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/netlink.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/sw/siw/siw_qp.c
drivers/infiniband/sw/siw/siw_verbs.c
include/rdma/ib_verbs.h

diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 3a8b091..9d07378 100644
@@ -199,6 +199,7 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
+void rdma_nl_init(void);
 void rdma_nl_exit(void);
 
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 2dd2cfe..50a9244 100644
@@ -2716,6 +2716,8 @@ static int __init ib_core_init(void)
                goto err_comp_unbound;
        }
 
+       rdma_nl_init();
+
        ret = addr_init();
        if (ret) {
                pr_warn("Could't init IB address resolution\n");
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 72141c5..ade7182 100644
@@ -372,6 +372,7 @@ EXPORT_SYMBOL(iw_cm_disconnect);
 static void destroy_cm_id(struct iw_cm_id *cm_id)
 {
        struct iwcm_id_private *cm_id_priv;
+       struct ib_qp *qp;
        unsigned long flags;
 
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
@@ -389,6 +390,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
        set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
 
        spin_lock_irqsave(&cm_id_priv->lock, flags);
+       qp = cm_id_priv->qp;
+       cm_id_priv->qp = NULL;
+
        switch (cm_id_priv->state) {
        case IW_CM_STATE_LISTEN:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
@@ -401,7 +405,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* Abrupt close of the connection */
-               (void)iwcm_modify_qp_err(cm_id_priv->qp);
+               (void)iwcm_modify_qp_err(qp);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_IDLE:
@@ -426,11 +430,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
                BUG();
                break;
        }
-       if (cm_id_priv->qp) {
-               cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
-               cm_id_priv->qp = NULL;
-       }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       if (qp)
+               cm_id_priv->id.device->ops.iw_rem_ref(qp);
 
        if (cm_id->mapped) {
                iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
@@ -671,11 +673,11 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
-               if (cm_id_priv->qp) {
-                       cm_id->device->ops.iw_rem_ref(qp);
-                       cm_id_priv->qp = NULL;
-               }
+               qp = cm_id_priv->qp;
+               cm_id_priv->qp = NULL;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+               if (qp)
+                       cm_id->device->ops.iw_rem_ref(qp);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }
@@ -696,7 +698,7 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
        struct iwcm_id_private *cm_id_priv;
        int ret;
        unsigned long flags;
-       struct ib_qp *qp;
+       struct ib_qp *qp = NULL;
 
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
@@ -730,13 +732,13 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
                return 0;       /* success */
 
        spin_lock_irqsave(&cm_id_priv->lock, flags);
-       if (cm_id_priv->qp) {
-               cm_id->device->ops.iw_rem_ref(qp);
-               cm_id_priv->qp = NULL;
-       }
+       qp = cm_id_priv->qp;
+       cm_id_priv->qp = NULL;
        cm_id_priv->state = IW_CM_STATE_IDLE;
 err:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       if (qp)
+               cm_id->device->ops.iw_rem_ref(qp);
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);
        return ret;
@@ -878,6 +880,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
 static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
 {
+       struct ib_qp *qp = NULL;
        unsigned long flags;
        int ret;
 
@@ -896,11 +899,13 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        } else {
                /* REJECTED or RESET */
-               cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
+               qp = cm_id_priv->qp;
                cm_id_priv->qp = NULL;
                cm_id_priv->state = IW_CM_STATE_IDLE;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       if (qp)
+               cm_id_priv->id.device->ops.iw_rem_ref(qp);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
 
        if (iw_event->private_data_len)
@@ -942,21 +947,18 @@ static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
 static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
                                  struct iw_cm_event *iw_event)
 {
+       struct ib_qp *qp;
        unsigned long flags;
-       int ret = 0;
+       int ret = 0, notify_event = 0;
        spin_lock_irqsave(&cm_id_priv->lock, flags);
+       qp = cm_id_priv->qp;
+       cm_id_priv->qp = NULL;
 
-       if (cm_id_priv->qp) {
-               cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
-               cm_id_priv->qp = NULL;
-       }
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_IDLE;
-               spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-               ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
-               spin_lock_irqsave(&cm_id_priv->lock, flags);
+               notify_event = 1;
                break;
        case IW_CM_STATE_DESTROYING:
                break;
@@ -965,6 +967,10 @@ static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
+       if (qp)
+               cm_id_priv->id.device->ops.iw_rem_ref(qp);
+       if (notify_event)
+               ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
        return ret;
 }
 
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 81dbd5f..8cd31ef 100644
 #include <linux/module.h>
 #include "core_priv.h"
 
-static DEFINE_MUTEX(rdma_nl_mutex);
 static struct {
-       const struct rdma_nl_cbs   *cb_table;
+       const struct rdma_nl_cbs *cb_table;
+       /* Synchronizes between ongoing netlink commands and netlink client
+        * unregistration.
+        */
+       struct rw_semaphore sem;
 } rdma_nl_types[RDMA_NL_NUM_CLIENTS];
 
 bool rdma_nl_chk_listeners(unsigned int group)
@@ -75,70 +78,53 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
        return (op < max_num_ops[type]) ? true : false;
 }
 
-static bool
-is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op)
+static const struct rdma_nl_cbs *
+get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
 {
        const struct rdma_nl_cbs *cb_table;
 
-       if (!is_nl_msg_valid(type, op))
-               return false;
-
        /*
         * Currently only NLDEV client is supporting netlink commands in
         * non init_net net namespace.
         */
        if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
-               return false;
+               return NULL;
 
-       if (!rdma_nl_types[type].cb_table) {
-               mutex_unlock(&rdma_nl_mutex);
-               request_module("rdma-netlink-subsys-%d", type);
-               mutex_lock(&rdma_nl_mutex);
-       }
+       cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+       if (!cb_table) {
+               /*
+                * Didn't get valid reference of the table, attempt module
+                * load once.
+                */
+               up_read(&rdma_nl_types[type].sem);
 
-       cb_table = rdma_nl_types[type].cb_table;
+               request_module("rdma-netlink-subsys-%d", type);
 
+               down_read(&rdma_nl_types[type].sem);
+               cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+       }
        if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
-               return false;
-       return true;
+               return NULL;
+       return cb_table;
 }
 
 void rdma_nl_register(unsigned int index,
                      const struct rdma_nl_cbs cb_table[])
 {
-       mutex_lock(&rdma_nl_mutex);
-       if (!is_nl_msg_valid(index, 0)) {
-               /*
-                * All clients are not interesting in success/failure of
-                * this call. They want to see the print to error log and
-                * continue their initialization. Print warning for them,
-                * because it is programmer's error to be here.
-                */
-               mutex_unlock(&rdma_nl_mutex);
-               WARN(true,
-                    "The not-valid %u index was supplied to RDMA netlink\n",
-                    index);
+       if (WARN_ON(!is_nl_msg_valid(index, 0)) ||
+           WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table)))
                return;
-       }
-
-       if (rdma_nl_types[index].cb_table) {
-               mutex_unlock(&rdma_nl_mutex);
-               WARN(true,
-                    "The %u index is already registered in RDMA netlink\n",
-                    index);
-               return;
-       }
 
-       rdma_nl_types[index].cb_table = cb_table;
-       mutex_unlock(&rdma_nl_mutex);
+       /* Pairs with the READ_ONCE in is_nl_valid() */
+       smp_store_release(&rdma_nl_types[index].cb_table, cb_table);
 }
 EXPORT_SYMBOL(rdma_nl_register);
 
 void rdma_nl_unregister(unsigned int index)
 {
-       mutex_lock(&rdma_nl_mutex);
+       down_write(&rdma_nl_types[index].sem);
        rdma_nl_types[index].cb_table = NULL;
-       mutex_unlock(&rdma_nl_mutex);
+       up_write(&rdma_nl_types[index].sem);
 }
 EXPORT_SYMBOL(rdma_nl_unregister);
 
@@ -170,15 +156,21 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
        unsigned int index = RDMA_NL_GET_CLIENT(type);
        unsigned int op = RDMA_NL_GET_OP(type);
        const struct rdma_nl_cbs *cb_table;
+       int err = -EINVAL;
 
-       if (!is_nl_valid(skb, index, op))
+       if (!is_nl_msg_valid(index, op))
                return -EINVAL;
 
-       cb_table = rdma_nl_types[index].cb_table;
+       down_read(&rdma_nl_types[index].sem);
+       cb_table = get_cb_table(skb, index, op);
+       if (!cb_table)
+               goto done;
 
        if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
-           !netlink_capable(skb, CAP_NET_ADMIN))
-               return -EPERM;
+           !netlink_capable(skb, CAP_NET_ADMIN)) {
+               err = -EPERM;
+               goto done;
+       }
 
        /*
         * LS responses overload the 0x100 (NLM_F_ROOT) flag.  Don't
@@ -186,8 +178,8 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
         */
        if (index == RDMA_NL_LS) {
                if (cb_table[op].doit)
-                       return cb_table[op].doit(skb, nlh, extack);
-               return -EINVAL;
+                       err = cb_table[op].doit(skb, nlh, extack);
+               goto done;
        }
        /* FIXME: Convert IWCM to properly handle doit callbacks */
        if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
@@ -195,14 +187,15 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                        .dump = cb_table[op].dump,
                };
                if (c.dump)
-                       return netlink_dump_start(skb->sk, skb, nlh, &c);
-               return -EINVAL;
+                       err = netlink_dump_start(skb->sk, skb, nlh, &c);
+               goto done;
        }
 
        if (cb_table[op].doit)
-               return cb_table[op].doit(skb, nlh, extack);
-
-       return 0;
+               err = cb_table[op].doit(skb, nlh, extack);
+done:
+       up_read(&rdma_nl_types[index].sem);
+       return err;
 }
 
 /*
@@ -263,9 +256,7 @@ skip:
 
 static void rdma_nl_rcv(struct sk_buff *skb)
 {
-       mutex_lock(&rdma_nl_mutex);
        rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
-       mutex_unlock(&rdma_nl_mutex);
 }
 
 int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
@@ -297,6 +288,14 @@ int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(rdma_nl_multicast);
 
+void rdma_nl_init(void)
+{
+       int idx;
+
+       for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+               init_rwsem(&rdma_nl_types[idx].sem);
+}
+
 void rdma_nl_exit(void)
 {
        int idx;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 65b3654..c03af08 100644
@@ -778,7 +778,7 @@ static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
                container_of(res, struct rdma_counter, res);
 
        if (port && port != counter->port)
-               return 0;
+               return -EAGAIN;
 
        /* Dump it even query failed */
        rdma_counter_query_stats(counter);
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 1e5aeb3..63f7f7d 100644
@@ -98,7 +98,7 @@ ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
 
 struct ib_uverbs_device {
        atomic_t                                refcount;
-       int                                     num_comp_vectors;
+       u32                                     num_comp_vectors;
        struct completion                       comp;
        struct device                           dev;
        /* First group for device attributes, NULL terminated array */
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index f974b68..35c2841 100644
@@ -662,16 +662,17 @@ static bool find_gid_index(const union ib_gid *gid,
                           void *context)
 {
        struct find_gid_index_context *ctx = context;
+       u16 vlan_id = 0xffff;
+       int ret;
 
        if (ctx->gid_type != gid_attr->gid_type)
                return false;
 
-       if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
-           (is_vlan_dev(gid_attr->ndev) &&
-            vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
+       ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
+       if (ret)
                return false;
 
-       return true;
+       return ctx->vlan_id == vlan_id;
 }
 
 static const struct ib_gid_attr *
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index e87fc04..347dc24 100644
@@ -495,7 +495,6 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
 
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        release_ep_resources(ep);
-       kfree_skb(skb);
        return 0;
 }
 
@@ -506,7 +505,6 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        c4iw_put_ep(&ep->parent_ep->com);
        release_ep_resources(ep);
-       kfree_skb(skb);
        return 0;
 }
 
@@ -2424,20 +2422,6 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
        enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
 
        pr_debug("ep %p tid %u\n", ep, ep->hwtid);
-
-       skb_get(skb);
-       rpl = cplhdr(skb);
-       if (!is_t4(adapter_type)) {
-               skb_trim(skb, roundup(sizeof(*rpl5), 16));
-               rpl5 = (void *)rpl;
-               INIT_TP_WR(rpl5, ep->hwtid);
-       } else {
-               skb_trim(skb, sizeof(*rpl));
-               INIT_TP_WR(rpl, ep->hwtid);
-       }
-       OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
-                                                   ep->hwtid));
-
        cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
                      enable_tcp_timestamps && req->tcpopt.tstamp,
                      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
@@ -2483,6 +2467,20 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                if (tcph->ece && tcph->cwr)
                        opt2 |= CCTRL_ECN_V(1);
        }
+
+       skb_get(skb);
+       rpl = cplhdr(skb);
+       if (!is_t4(adapter_type)) {
+               skb_trim(skb, roundup(sizeof(*rpl5), 16));
+               rpl5 = (void *)rpl;
+               INIT_TP_WR(rpl5, ep->hwtid);
+       } else {
+               skb_trim(skb, sizeof(*rpl));
+               INIT_TP_WR(rpl, ep->hwtid);
+       }
+       OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+                                                   ep->hwtid));
+
        if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
                u32 isn = (prandom_u32() & ~7UL) - 1;
                opt2 |= T5_OPT_2_VALID_F;
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 2ed7bfd..c61b602 100644
@@ -65,6 +65,7 @@
 #define SDMA_DESCQ_CNT 2048
 #define SDMA_DESC_INTR 64
 #define INVALID_TAIL 0xffff
+#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))
 
 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
 module_param(sdma_descq_cnt, uint, S_IRUGO);
@@ -1296,7 +1297,7 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
        struct sdma_engine *sde;
 
        if (dd->sdma_pad_dma) {
-               dma_free_coherent(&dd->pcidev->dev, 4,
+               dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
                                  (void *)dd->sdma_pad_dma,
                                  dd->sdma_pad_phys);
                dd->sdma_pad_dma = NULL;
@@ -1491,7 +1492,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
        }
 
        /* Allocate memory for pad */
-       dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+       dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
                                              &dd->sdma_pad_phys, GFP_KERNEL);
        if (!dd->sdma_pad_dma) {
                dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index b4dcc4d..f21fca3 100644
@@ -2736,11 +2736,6 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
                                diff = cmp_psn(psn,
                                               flow->flow_state.r_next_psn);
                                if (diff > 0) {
-                                       if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
-                                               restart_tid_rdma_read_req(rcd,
-                                                                         qp,
-                                                                         wqe);
-
                                        /* Drop the packet.*/
                                        goto s_unlock;
                                } else if (diff < 0) {
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 7bff0a1..089e201 100644
@@ -147,9 +147,6 @@ static int pio_wait(struct rvt_qp *qp,
 /* Length of buffer to create verbs txreq cache name */
 #define TXREQ_NAME_LEN 24
 
-/* 16B trailing buffer */
-static const u8 trail_buf[MAX_16B_PADDING];
-
 static uint wss_threshold = 80;
 module_param(wss_threshold, uint, S_IRUGO);
 MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
@@ -820,8 +817,8 @@ static int build_verbs_tx_desc(
 
        /* add icrc, lt byte, and padding to flit */
        if (extra_bytes)
-               ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
-                                       (void *)trail_buf, extra_bytes);
+               ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
+                                      sde->dd->sdma_pad_phys, extra_bytes);
 
 bail_txadd:
        return ret;
@@ -1089,7 +1086,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                }
                /* add icrc, lt byte, and padding to flit */
                if (extra_bytes)
-                       seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
+                       seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
+                                        extra_bytes);
 
                seg_pio_copy_end(pbuf);
        }
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 7a89d66..e82567f 100644
@@ -5389,9 +5389,9 @@ static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
                return;
        }
 
-       if (eq->buf_list)
-               dma_free_coherent(hr_dev->dev, buf_chk_sz,
-                                 eq->buf_list->buf, eq->buf_list->map);
+       dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
+                         eq->buf_list->map);
+       kfree(eq->buf_list);
 }
 
 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 6305993..7019c12 100644
@@ -1967,8 +1967,8 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
        int err;
 
        if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-               xa_erase(&dev->mdev->priv.mkey_table,
-                        mlx5_base_mkey(mmw->mmkey.key));
+               xa_erase_irq(&dev->mdev->priv.mkey_table,
+                            mlx5_base_mkey(mmw->mmkey.key));
                /*
                 * pagefault_single_data_segment() may be accessing mmw under
                 * SRCU if the user bound an ODP MR to this MW.
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8937d72..5fd071c 100644
@@ -3249,10 +3249,12 @@ static int modify_raw_packet_qp_sq(
        }
 
        /* Only remove the old rate after new rate was set */
-       if ((old_rl.rate &&
-            !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
-           (new_state != MLX5_SQC_STATE_RDY))
+       if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
+           (new_state != MLX5_SQC_STATE_RDY)) {
                mlx5_rl_remove_rate(dev, &old_rl);
+               if (new_state != MLX5_SQC_STATE_RDY)
+                       memset(&new_rl, 0, sizeof(new_rl));
+       }
 
        ibqp->rl = new_rl;
        sq->state = new_state;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 5136b83..dc71b6e 100644
@@ -76,7 +76,7 @@ static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
        struct qedr_dev *qedr = get_qedr_dev(ibdev);
        u32 fw_ver = (u32)qedr->attr.fw_ver;
 
-       snprintf(str, IB_FW_VERSION_NAME_MAX, "%d. %d. %d. %d",
+       snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
                 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
                 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
 }
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 52d402f..b431748 100644
@@ -1312,6 +1312,7 @@ int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
 void siw_free_qp(struct kref *ref)
 {
        struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
+       struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp);
        struct siw_device *sdev = qp->sdev;
        unsigned long flags;
 
@@ -1334,4 +1335,5 @@ void siw_free_qp(struct kref *ref)
        atomic_dec(&sdev->num_qp);
        siw_dbg_qp(qp, "free QP\n");
        kfree_rcu(qp, rcu);
+       kfree(siw_base_qp);
 }
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 869e02b..b18a677 100644
@@ -604,7 +604,6 @@ out:
 int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
 {
        struct siw_qp *qp = to_siw_qp(base_qp);
-       struct siw_base_qp *siw_base_qp = to_siw_base_qp(base_qp);
        struct siw_ucontext *uctx =
                rdma_udata_to_drv_context(udata, struct siw_ucontext,
                                          base_ucontext);
@@ -641,7 +640,6 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
        qp->scq = qp->rcq = NULL;
 
        siw_qp_put(qp);
-       kfree(siw_base_qp);
 
        return 0;
 }
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6a47ba8..e7e733a 100644
@@ -366,7 +366,7 @@ struct ib_tm_caps {
 
 struct ib_cq_init_attr {
        unsigned int    cqe;
-       int             comp_vector;
+       u32             comp_vector;
        u32             flags;
 };