RDMA/rxe: Make requester support atomic write on RC service
authorXiao Yang <yangx.jy@fujitsu.com>
Thu, 1 Dec 2022 14:39:25 +0000 (14:39 +0000)
committerJason Gunthorpe <jgg@nvidia.com>
Thu, 1 Dec 2022 23:51:09 +0000 (19:51 -0400)
Make the requester process and send atomic write requests on the RC service.

Link: https://lore.kernel.org/r/1669905568-62-1-git-send-email-yangx.jy@fujitsu.com
Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/sw/rxe/rxe_req.c

index 4d45f50..2713e90 100644 (file)
@@ -258,6 +258,10 @@ static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
                else
                        return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
                                IB_OPCODE_RC_SEND_FIRST;
+
+       case IB_WR_ATOMIC_WRITE:
+               return IB_OPCODE_RC_ATOMIC_WRITE;
+
        case IB_WR_REG_MR:
        case IB_WR_LOCAL_INV:
                return opcode;
@@ -486,6 +490,11 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
                }
        }
 
+       if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
+               memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload);
+               wqe->dma.resid -= payload;
+       }
+
        return 0;
 }
 
@@ -709,13 +718,15 @@ int rxe_requester(void *arg)
        }
 
        mask = rxe_opcode[opcode].mask;
-       if (unlikely(mask & RXE_READ_OR_ATOMIC_MASK)) {
+       if (unlikely(mask & (RXE_READ_OR_ATOMIC_MASK |
+                       RXE_ATOMIC_WRITE_MASK))) {
                if (check_init_depth(qp, wqe))
                        goto exit;
        }
 
        mtu = get_mtu(qp);
-       payload = (mask & RXE_WRITE_OR_SEND_MASK) ? wqe->dma.resid : 0;
+       payload = (mask & (RXE_WRITE_OR_SEND_MASK | RXE_ATOMIC_WRITE_MASK)) ?
+                       wqe->dma.resid : 0;
        if (payload > mtu) {
                if (qp_type(qp) == IB_QPT_UD) {
                        /* C10-93.1.1: If the total sum of all the buffer lengths specified for a