RDMA/cxgb4: Use refcount_t instead of atomic_t for reference counting
author: Weihang Li <liweihang@huawei.com>
date: Fri, 28 May 2021 09:37:42 +0000 (17:37 +0800)
committer: Jason Gunthorpe <jgg@nvidia.com>
date: Tue, 8 Jun 2021 17:59:05 +0000 (14:59 -0300)
The refcount_t API will WARN on underflow and overflow of a reference
counter, and avoid use-after-free risks.

Link: https://lore.kernel.org/r/1622194663-2383-12-git-send-email-liweihang@huawei.com
Cc: Potnuri Bharat Teja <bharat@chelsio.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h

index 44c2416588d423995540cfc5fd895986e9e8814b..6c8c910f4e86df396d8aa6e315ebb7505ad7ad7f 100644 (file)
@@ -976,8 +976,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
        chp = to_c4iw_cq(ib_cq);
 
        xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
-       atomic_dec(&chp->refcnt);
-       wait_event(chp->wait, !atomic_read(&chp->refcnt));
+       refcount_dec(&chp->refcnt);
+       wait_event(chp->wait, !refcount_read(&chp->refcnt));
 
        ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
                                             ibucontext);
@@ -1080,7 +1080,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
        chp->ibcq.cqe = entries - 2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
-       atomic_set(&chp->refcnt, 1);
+       refcount_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
        if (ret)
index 4cd877bd2f564c1d0e6929813ea6cdfaab5ef08a..7798d090888bb94aeb1854ca11865fe9da450283 100644 (file)
@@ -151,7 +151,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
        }
 
        c4iw_qp_add_ref(&qhp->ibqp);
-       atomic_inc(&chp->refcnt);
+       refcount_inc(&chp->refcnt);
        xa_unlock_irq(&dev->qps);
 
        /* Bad incoming write */
@@ -213,7 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
                break;
        }
 done:
-       if (atomic_dec_and_test(&chp->refcnt))
+       if (refcount_dec_and_test(&chp->refcnt))
                wake_up(&chp->wait);
        c4iw_qp_rem_ref(&qhp->ibqp);
 out:
@@ -228,13 +228,13 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
        xa_lock_irqsave(&dev->cqs, flag);
        chp = xa_load(&dev->cqs, qid);
        if (chp) {
-               atomic_inc(&chp->refcnt);
+               refcount_inc(&chp->refcnt);
                xa_unlock_irqrestore(&dev->cqs, flag);
                t4_clear_cq_armed(&chp->cq);
                spin_lock_irqsave(&chp->comp_handler_lock, flag);
                (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
                spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-               if (atomic_dec_and_test(&chp->refcnt))
+               if (refcount_dec_and_test(&chp->refcnt))
                        wake_up(&chp->wait);
        } else {
                pr_debug("unknown cqid 0x%x\n", qid);
index cdec5deb37a1f8e3756a98034e25329dc9f62637..3883af3d2312d1fc44570646b8c3d0e21e6c65f9 100644 (file)
@@ -427,7 +427,7 @@ struct c4iw_cq {
        struct t4_cq cq;
        spinlock_t lock;
        spinlock_t comp_handler_lock;
-       atomic_t refcnt;
+       refcount_t refcnt;
        wait_queue_head_t wait;
        struct c4iw_wr_wait *wr_waitp;
 };