/*
 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
/*
 * Convert an IB-specific error code to an RDS error code and call the
 * core completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}
static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
				   struct rm_data_op *op,
				   int wc_status)
{
	if (op->op_nents)
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				DMA_TO_DEVICE);
}
static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rm_rdma_op *op,
				   int wc_status)
{
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	/* If the user asked for a completion notification on this
	 * message, we can implement three different semantics:
	 *  1.	Notify when we received the ACK on the RDS message
	 *	that was queued with the RDMA. This provides reliable
	 *	notification of RDMA status at the expense of a one-way
	 *	packet delay.
	 *  2.	Notify when the IB stack gives us the completion event for
	 *	the RDMA operation.
	 *  3.	Notify when the IB stack gives us the completion event for
	 *	the accompanying RDS message.
	 * Here, we implement approach #3. To implement approach #2,
	 * we would need to take an event for the rdma WR (a sketch of
	 * this follows the function below). To implement #1, don't call
	 * rds_rdma_send_complete at all, and fall back to the notify
	 * handling in the ACK processing code.
	 *
	 * Note: There's no need to explicitly sync any RDMA buffers using
	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
	 * operation itself unmapped the RDMA buffers, which takes care
	 * of syncing.
	 */
	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
			     wc_status, rds_rdma_send_complete);

	if (op->op_write)
		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
	else
		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}
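/*
 * Illustrative sketch of approach #2 above, which this file does not
 * implement: signal the RDMA WR itself and notify from its own CQE.
 * This is hypothetical code, assuming the RDMA WR is posted with a
 * completion requested:
 *
 *	send->s_rdma_wr.wr.send_flags |= IB_SEND_SIGNALED;
 *	...
 *	// later, in the send CQE handler, on the RDMA WR's completion:
 *	rds_ib_send_complete(container_of(send->s_op, struct rds_message, rdma),
 *			     wc->status, rds_rdma_send_complete);
 */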
static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
				     struct rm_atomic_op *op,
				     int wc_status)
{
	/* unmap atomic recvbuf */
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
				DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
			     wc_status, rds_atomic_send_complete);

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
		rds_ib_stats_inc(s_ib_atomic_cswp);
	else
		rds_ib_stats_inc(s_ib_atomic_fadd);
}
/*
 * Unmap the resources associated with a struct send_work.
 *
 * Returns the rm because the caller (the completion event handler)
 * needs it, and currently there is no way to obtain it other than by
 * switching on wr.opcode.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
						struct rds_ib_send_work *send,
						int wc_status)
{
	struct rds_message *rm = NULL;

	/* In the error case, wc.opcode sometimes contains garbage */
	switch (send->s_wr.opcode) {
	case IB_WR_SEND:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, data);
			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, rdma);
			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
	case IB_WR_ATOMIC_CMP_AND_SWP:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, atomic);
			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
		}
		break;
	default:
		printk_ratelimited(KERN_NOTICE
				   "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
				   __func__, send->s_wr.opcode);
		break;
	}

	send->s_wr.opcode = 0xdead;

	return rm;
}
void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma[i];

		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_pd->local_dma_lkey;

		send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
	}
}
void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (send->s_op && send->s_wr.opcode != 0xdead)
			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}
/*
 * The only fast path caller always has a non-zero nr, so we don't
 * bother testing nr before performing the atomic sub.
 */
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
	    waitqueue_active(&rds_ib_ring_empty_wait))
		wake_up(&rds_ib_ring_empty_wait);
	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}
/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
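/*
 * Worked example (a sketch; the exact helper lives in ib_ring.c): with a
 * ring of w_nr == 1024 entries, oldest == 1022 and a completion for
 * wr_id == 1, the completed span wraps around the ring. Entries 1022,
 * 1023, 0 and 1 are done, so rds_ib_ring_completed() should yield 4 and
 * the loop below unmaps each of them in order.
 */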
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
	struct rds_message *rm = NULL;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int nr_sig = 0;

	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
		 (unsigned long long)wc->wr_id, wc->status,
		 ib_wc_status_msg(wc->status), wc->byte_len,
		 be32_to_cpu(wc->ex.imm_data));
	rds_ib_stats_inc(s_ib_tx_cq_event);

	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
		if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);
		rds_ib_ack_send_complete(ic);
		return;
	}

	oldest = rds_ib_ring_oldest(&ic->i_send_ring);

	completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);

	for (i = 0; i < completed; i++) {
		send = &ic->i_sends[oldest];
		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rm = rds_ib_send_unmap_op(ic, send, wc->status);

		if (time_after(jiffies, send->s_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);

		if (send->s_op) {
			if (send->s_op == rm->m_final_op) {
				/* If anyone waited for this message to get
				 * flushed out, wake them up now
				 */
				rds_message_unmapped(rm);
			}
			rds_message_put(rm);
			send->s_op = NULL;
		}

		oldest = (oldest + 1) % ic->i_send_ring.w_nr;
	}

	rds_ib_ring_free(&ic->i_send_ring, completed);
	rds_ib_sub_signaled(ic, nr_sig);

	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	/* We expect errors as the qp is drained during shutdown */
	if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
		rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
				  &conn->c_laddr, &conn->c_faddr,
				  conn->c_tos, wc->status,
				  ib_wc_status_msg(wc->status), wc->vendor_err);
	}
}
/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  -	send credits: this tells us how many WRs we're allowed
 *	to submit without overrunning the receiver's queue. For
 *	each SEND WR we post, we decrement this by one.
 *
 *  -	posted credits: this tells us how many WRs we recently
 *	posted to the receive queue. This value is transferred
 *	to the peer as a "credit update" in an RDS header field.
 *	Every time we transmit credits to the peer, we subtract
 *	the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
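/*
 * Illustrative sketch (assumed layout, not quoted from ib.h): both
 * counters live in the single atomic_t ic->i_credits, e.g. send credits
 * in the low 16 bits and posted credits in the high 16 bits:
 *
 *	#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_SET_POST_CREDITS(v)	((v) << 16)
 *	#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_GET_POST_CREDITS(v)	((v) >> 16)
 *
 * With such a layout, atomic_add(IB_SET_POST_CREDITS(n), &ic->i_credits)
 * bumps only the posted half, while a single atomic_cmpxchg below can
 * debit both halves at once.
 */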
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("wanted=%u credits=%u posted=%u\n",
		 wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants the posted
	 * credits advertised regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("credits=%u current=%u%s\n",
		 credits,
		 IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
		 test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 * (A worked example follows this function.)
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
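/*
 * Worked example (a sketch of the flow above): refilling the receive
 * ring in batches of 8 calls rds_ib_advertise_credits(conn, 8) twice;
 * after the second batch the posted half of ic->i_credits reaches 16,
 * IB_ACK_REQUESTED is set, and the next outgoing packet carries the
 * update in its header (rds_ib_xmit copies adv_credits into
 * hdr->h_credit and recomputes the header checksum).
 */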
static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
					     struct rds_ib_send_work *send,
					     bool notify)
{
	/*
	 * We want to delay signaling completions just enough to get
	 * the batching benefits but not so much that we create dead time
	 * on the wire. (See the usage sketch after this function.)
	 */
	if (ic->i_unsignaled_wrs-- == 0 || notify) {
		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		send->s_wr.send_flags |= IB_SEND_SIGNALED;
		return 1;
	}
	return 0;
}
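/*
 * Usage sketch (hypothetical caller, not from this file): with
 * rds_ib_sysctl_max_unsig_wrs == 16, a stream of posts signals roughly
 * one WR in seventeen:
 *
 *	int nr_sig = 0;
 *
 *	for (i = 0; i < n; i++)
 *		nr_sig += rds_ib_set_wr_signal_state(ic, &sends[i], false);
 *	atomic_add(nr_sig, &ic->i_signaled_sends);
 *
 * Tracking nr_sig matters because rds_ib_sub_signaled() must later
 * balance i_signaled_sends as the signaled completions drain.
 */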
/*
 * This can be called multiple times for a given message. The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests. We translate the scatterlist into a series
 * of work requests that fragment the message. These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection. This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
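/*
 * Fragmentation sketch (illustrative numbers, assuming the usual
 * RDS_FRAG_SIZE of 4096 bytes): a message with h_len == 10000 needs
 * DIV_ROUND_UP(10000, 4096) == 3 work requests; each WR carries one
 * rds_header SGE plus up to one RDS_FRAG_SIZE data SGE, and only the
 * final WR hands ownership of the message to the completion handler.
 */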
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	const struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc = 0;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int bytes_sent = 0;
	int ret;
	int flow_controlled = 0;
	int nr_sig = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		scat = &rm->data.op_sg[sg];
		ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
		return sizeof(struct rds_header) + ret;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled = 1;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_data_op) {
		if (rm->data.op_nents) {
			rm->data.op_count = ib_dma_map_sg(dev,
							  rm->data.op_sg,
							  rm->data.op_nents,
							  DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
			if (rm->data.op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.op_count = 0;
		}

		rds_message_addref(rm);
		rm->data.op_dmasg = 0;
		rm->data.op_dmaoff = 0;
		ic->i_data_op = &rm->data;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has a RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.op_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
			rds_message_add_extension(&rm->m_inc.i_hdr,
						  RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		if (ic->i_flowctl) {
			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
			adv_credits += posted;
			BUG_ON(adv_credits > 255);
		}
	}

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.op_active && rm->rdma.op_fence)
		send_flags = IB_SEND_FENCE;

	/* Each frag gets a header. Msgs may be 0 bytes */
	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
	i = 0;
	do {
		unsigned int len = 0;

		/* Set up the header */
		send->s_wr.send_flags = send_flags;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.num_sge = 1;
		send->s_wr.next = NULL;
		send->s_queued = jiffies;
		send->s_op = NULL;

		send->s_sge[0].addr = ic->i_send_hdrs_dma[pos];

		send->s_sge[0].length = sizeof(struct rds_header);
		send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

		memcpy(ic->i_send_hdrs[pos], &rm->m_inc.i_hdr,
		       sizeof(struct rds_header));

		/* Set up the data, if present */
		if (i < work_alloc
		    && scat != &rm->data.op_sg[rm->data.op_count]) {
			len = min(RDS_FRAG_SIZE,
				  sg_dma_len(scat) - rm->data.op_dmaoff);
			send->s_wr.num_sge = 2;

			send->s_sge[1].addr = sg_dma_address(scat);
			send->s_sge[1].addr += rm->data.op_dmaoff;
			send->s_sge[1].length = len;
			send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;

			bytes_sent += len;
			rm->data.op_dmaoff += len;
			if (rm->data.op_dmaoff == sg_dma_len(scat)) {
				scat++;
				rm->data.op_dmasg++;
				rm->data.op_dmaoff = 0;
			}
		}

		rds_ib_set_wr_signal_state(ic, send, false);

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
			rds_ib_set_wr_signal_state(ic, send, true);
			send->s_wr.send_flags |= IB_SEND_SOLICITED;
		}

		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		if (ic->i_flowctl && adv_credits) {
			struct rds_header *hdr = ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
		send = &ic->i_sends[pos];
		i++;

	} while (i < work_alloc
		 && scat != &rm->data.op_sg[rm->data.op_count]);

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		bytes_sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.op_sg[rm->data.op_count]) {
		prev->s_op = ic->i_data_op;
		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
		if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
			nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
		ic->i_data_op = NULL;
	}

	/* Put back wrs & credits we didn't use */
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		if (prev->s_op) {
			ic->i_data_op = prev->s_op;
			prev->s_op = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = bytes_sent;
out:
	BUG_ON(adv_credits);
	return ret;
}
/*
 * Issue an atomic operation.
 * This is a simplified version of the rdma case: we always map 1 SG,
 * and only 8 bytes, for the return value from the atomic operation.
 */
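/*
 * Masked-atomic sketch (illustrative values; mask semantics assumed from
 * the IB masked-atomic extensions): the *_mask fields let one 64-bit
 * word hold several independent fields. To compare-and-swap only the
 * low 32 bits, a hypothetical caller could set
 *
 *	op->op_m_cswp.compare      = 0x1234;
 *	op->op_m_cswp.compare_mask = 0x00000000ffffffffULL;
 *	op->op_m_cswp.swap         = 0x5678;
 *	op->op_m_cswp.swap_mask    = 0x00000000ffffffffULL;
 *
 * leaving the high 32 bits of the remote word untouched.
 */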
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	const struct ib_send_wr *failed_wr;
	u32 pos;
	u32 work_alloc;
	int ret;
	int nr_sig = 0;

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
		send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
		send->s_atomic_wr.swap = op->op_m_cswp.swap;
		send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
		send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
	} else { /* FADD */
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
		send->s_atomic_wr.compare_add = op->op_m_fadd.add;
		send->s_atomic_wr.swap = 0;
		send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
		send->s_atomic_wr.swap_mask = 0;
	}
	send->s_wr.send_flags = 0;
	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
	send->s_atomic_wr.wr.num_sge = 1;
	send->s_atomic_wr.wr.next = NULL;
	send->s_atomic_wr.remote_addr = op->op_remote_addr;
	send->s_atomic_wr.rkey = op->op_rkey;
	send->s_op = op;
	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = sg_dma_address(op->op_sg);
	send->s_sge[0].length = sg_dma_len(op->op_sg);
	send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &send->s_atomic_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_atomic_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	}

out:
	return ret;
}
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	const struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->op_remote_addr;
	u32 max_sge = ic->rds_ibdev->max_sge;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;
	int nr_sig = 0;
	u64 odp_addr = op->op_odp_addr;
	u32 odp_lkey = 0;

	/* map the op the first time we see it */
	if (!op->op_odp_mr) {
		if (!op->op_mapped) {
			op->op_count =
				ib_dma_map_sg(ic->i_cm_id->device, op->op_sg,
					      op->op_nents,
					      (op->op_write) ? DMA_TO_DEVICE :
							       DMA_FROM_DEVICE);
			rdsdebug("ic %p mapping op %p: %d\n", ic, op,
				 op->op_count);
			if (op->op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
			op->op_mapped = 1;
		}
	} else {
		op->op_count = op->op_nents;
		odp_lkey = rds_ib_get_lkey(op->op_odp_mr->r_trans_private);
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = DIV_ROUND_UP(op->op_count, max_sge);
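	/* e.g. an op_count of 70 mapped SGEs with max_sge == 30 needs
	 * DIV_ROUND_UP(70, 30) == 3 RDMA work requests (30 + 30 + 10).
	 */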
	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->op_sg[0];
	sent = 0;
	num_sge = op->op_count;

	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		send->s_op = NULL;

		if (!op->op_notify)
			nr_sig += rds_ib_set_wr_signal_state(ic, send,
							     op->op_notify);

		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_rdma_wr.remote_addr = remote_addr;
		send->s_rdma_wr.rkey = op->op_rkey;

		if (num_sge > max_sge) {
			send->s_rdma_wr.wr.num_sge = max_sge;
			num_sge -= max_sge;
		} else {
			send->s_rdma_wr.wr.num_sge = num_sge;
		}

		send->s_rdma_wr.wr.next = NULL;

		if (prev)
			prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;

		for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
		     scat != &op->op_sg[op->op_count]; j++) {
			len = sg_dma_len(scat);
			if (!op->op_odp_mr) {
				send->s_sge[j].addr = sg_dma_address(scat);
				send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
			} else {
				send->s_sge[j].addr = odp_addr;
				send->s_sge[j].lkey = odp_lkey;
			}
			send->s_sge[j].length = len;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			odp_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_rdma_wr.wr,
			 send->s_rdma_wr.wr.num_sge,
			 send->s_rdma_wr.wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* give a reference to the last op */
	if (scat == &op->op_sg[op->op_count]) {
		prev->s_op = op;
		rds_message_addref(container_of(op, struct rds_message, rdma));
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &first->s_rdma_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_rdma_wr.wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	}

out:
	return ret;
}
void rds_ib_xmit_path_complete(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}