RDMA/irdma: Use list_last_entry/list_first_entry
author: Shiraz Saleem <shiraz.saleem@intel.com>
Tue, 8 Jun 2021 21:14:16 +0000 (16:14 -0500)
committer: Jason Gunthorpe <jgg@nvidia.com>
Tue, 8 Jun 2021 23:04:00 +0000 (20:04 -0300)
Use list_last_entry and list_first_entry instead of using prev and next
pointers.

Link: https://lore.kernel.org/r/20210608211415.680-1-shiraz.saleem@intel.com
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/irdma/puda.c
drivers/infiniband/hw/irdma/utils.c

index c0be6e3..58e7d87 100644 (file)
@@ -1419,7 +1419,7 @@ irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq, struct irdma_pfpdu *pfpdu,
 
 error:
        while (!list_empty(&pbufl)) {
-               buf = (struct irdma_puda_buf *)(pbufl.prev);
+               buf = list_last_entry(&pbufl, struct irdma_puda_buf, list);
                list_move(&buf->list, rxlist);
        }
        if (txbuf)
index 8f04347..b4b91cb 100644 (file)
@@ -425,8 +425,8 @@ struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
 
        spin_lock_irqsave(&cqp->req_lock, flags);
        if (!list_empty(&cqp->cqp_avail_reqs)) {
-               cqp_request = list_entry(cqp->cqp_avail_reqs.next,
-                                        struct irdma_cqp_request, list);
+               cqp_request = list_first_entry(&cqp->cqp_avail_reqs,
+                                              struct irdma_cqp_request, list);
                list_del_init(&cqp_request->list);
        }
        spin_unlock_irqrestore(&cqp->req_lock, flags);