From 24f0ab0102115c7569379a606f9a26826577522f Mon Sep 17 00:00:00 2001
From: Bob Pearson
Date: Mon, 6 Jun 2022 09:38:33 -0500
Subject: [PATCH] RDMA/rxe: Move code to rxe_prepare_atomic_res()

Separate the code that prepares the atomic responder resource into a
subroutine. This is preparation for merging the normal and retry atomic
responder flows.

Link: https://lore.kernel.org/r/20220606143836.3323-2-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/sw/rxe/rxe_resp.c | 43 ++++++++++++++++++++++-------------
 1 file changed, 27 insertions(+), 16 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index c45c9d9..bc84aad 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1017,38 +1017,49 @@ err1:
 	return err;
 }
 
-static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
+static struct resp_res *rxe_prepare_atomic_res(struct rxe_qp *qp,
+					       struct rxe_pkt_info *pkt)
+{
+	struct resp_res *res;
+
+	res = &qp->resp.resources[qp->resp.res_head];
+	rxe_advance_resp_resource(qp);
+	free_rd_atomic_resource(qp, res);
+
+	res->type = RXE_ATOMIC_MASK;
+	res->first_psn = pkt->psn;
+	res->last_psn = pkt->psn;
+	res->cur_psn = pkt->psn;
+
+	return res;
+}
+
+static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+			   u8 syndrome)
 {
-	int rc = 0;
+	int err = 0;
 	struct rxe_pkt_info ack_pkt;
 	struct sk_buff *skb;
 	struct resp_res *res;
 
 	skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE,
-				 0, psn, syndrome);
+				 0, pkt->psn, syndrome);
 	if (!skb) {
-		rc = -ENOMEM;
+		err = -ENOMEM;
 		goto out;
 	}
 
-	res = &qp->resp.resources[qp->resp.res_head];
-	free_rd_atomic_resource(qp, res);
-	rxe_advance_resp_resource(qp);
-
 	skb_get(skb);
-	res->type = RXE_ATOMIC_MASK;
+
+	res = rxe_prepare_atomic_res(qp, pkt);
 	res->atomic.skb = skb;
-	res->first_psn = ack_pkt.psn;
-	res->last_psn = ack_pkt.psn;
-	res->cur_psn = ack_pkt.psn;
-
-	rc = rxe_xmit_packet(qp, &ack_pkt, skb);
-	if (rc) {
+	err = rxe_xmit_packet(qp, &ack_pkt, skb);
+	if (err) {
 		pr_err_ratelimited("Failed sending ack\n");
 		rxe_put(qp);
 	}
 out:
-	return rc;
+	return err;
 }
 
 static enum resp_states acknowledge(struct rxe_qp *qp,
@@ -1060,7 +1071,7 @@ static enum resp_states acknowledge(struct rxe_qp *qp,
 	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
 		send_ack(qp, qp->resp.aeth_syndrome, pkt->psn);
 	else if (pkt->mask & RXE_ATOMIC_MASK)
-		send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
+		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
 	else if (bth_ack(pkt))
 		send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
 
-- 
2.7.4