// SPDX-License-Identifier: GPL-2.0
/* Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#include <linux/spinlock.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"

/********************************** send *************************************/

/* handler for send/transmission completion of a CDC msg */
static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
	struct smc_connection *conn = cdcpend->conn;
	struct smc_sock *smc;
	int diff;

	smc = container_of(conn, struct smc_sock, conn);
	bh_lock_sock(&smc->sk);
	if (!wc_status) {
		diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
				     &cdcpend->conn->tx_curs_fin,
				     &cdcpend->cursor);
		/* sndbuf_space is decreased in smc_sendmsg */
		smp_mb__before_atomic();
		atomic_add(diff, &cdcpend->conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		smc_curs_copy(&conn->tx_curs_fin, &cdcpend->cursor, conn);
		smc_curs_copy(&conn->local_tx_ctrl_fin, &cdcpend->p_cursor,
			      conn);
		conn->tx_cdc_seq_fin = cdcpend->ctrl_seq;
	}

	if (atomic_dec_and_test(&conn->cdc_pend_tx_wr)) {
		/* If user owns the sock_lock, mark that the connection needs
		 * sending. User context will later try to send when it
		 * releases the sock_lock in smc_release_cb().
		 */
		if (sock_owned_by_user(&smc->sk))
			conn->tx_in_release_sock = true;
		else
			smc_tx_pending(conn);

		if (unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq)))
			wake_up(&conn->cdc_pend_tx_wq);
	}
	WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0);

	smc_tx_sndbuf_nonfull(smc);
	bh_unlock_sock(&smc->sk);
}

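/* Reserve a send work request buffer and a CDC tx pend descriptor on the
 * given link. If the connection has already been killed, a reserved slot is
 * put back again and -EPIPE is returned.
 */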
int smc_cdc_get_free_slot(struct smc_connection *conn,
			  struct smc_link *link,
			  struct smc_wr_buf **wr_buf,
			  struct smc_rdma_wr **wr_rdma_buf,
			  struct smc_cdc_tx_pend **pend)
{
	int rc;

	rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
				     wr_rdma_buf,
				     (struct smc_wr_tx_pend_priv **)pend);
	if (conn->killed) {
		/* abnormal termination */
		if (!rc)
			smc_wr_tx_put_slot(link,
					   (struct smc_wr_tx_pend_priv *)(*pend));
		rc = -EPIPE;
	}
	return rc;
}

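/* Snapshot the current send cursors and CDC sequence number in the tx pend
 * descriptor, so the completion handler can account exactly the bytes that
 * this CDC message confirms.
 */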
static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
					    struct smc_cdc_tx_pend *pend)
{
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
		"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
	BUILD_BUG_ON_MSG(
		offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
		"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_cdc_tx_pend)");
	pend->conn = conn;
	pend->cursor = conn->tx_curs_sent;
	pend->p_cursor = conn->local_tx_ctrl.prod;
	pend->ctrl_seq = conn->tx_cdc_seq;
}

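/* Build the CDC message from the connection's host-format control data and
 * post it on the current link; on send failure the sequence number and the
 * pending work request counter are rolled back.
 */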
int smc_cdc_msg_send(struct smc_connection *conn,
		     struct smc_wr_buf *wr_buf,
		     struct smc_cdc_tx_pend *pend)
{
	struct smc_link *link = conn->lnk;
	union smc_host_cursor cfed;
	int rc;

	smc_cdc_add_pending_send(conn, pend);

	conn->tx_cdc_seq++;
	conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
	smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);

	atomic_inc(&conn->cdc_pend_tx_wr);
	smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */

	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
	if (!rc) {
		smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
	} else {
		conn->tx_cdc_seq--;
		conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
		atomic_dec(&conn->cdc_pend_tx_wr);
	}

	return rc;
}

/* send a validation msg indicating the move of a conn to another QP link */
int smcr_cdc_msg_send_validation(struct smc_connection *conn,
				 struct smc_cdc_tx_pend *pend,
				 struct smc_wr_buf *wr_buf)
{
	struct smc_host_cdc_msg *local = &conn->local_tx_ctrl;
	struct smc_link *link = conn->lnk;
	struct smc_cdc_msg *peer;
	int rc;

	peer = (struct smc_cdc_msg *)wr_buf;
	peer->common.type = local->common.type;
	peer->len = local->len;
	peer->seqno = htons(conn->tx_cdc_seq_fin); /* seqno last compl. tx */
	peer->token = htonl(local->token);
	peer->prod_flags.failover_validation = 1;

	/* We need to set pend->conn here so that smc_cdc_tx_handler() can
	 * handle the completion properly.
	 */
	smc_cdc_add_pending_send(conn, pend);

	atomic_inc(&conn->cdc_pend_tx_wr);
	smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */

	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
	if (unlikely(rc))
		atomic_dec(&conn->cdc_pend_tx_wr);

	return rc;
}

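/* Reserve a send slot on the connection's current link and send the CDC
 * message under the send_lock. If the connection moved to another link in
 * the meantime, the slot is released and the send is retried once.
 */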
static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_link *link;
	bool again = false;
	int rc;

again:
	link = conn->lnk;
	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_cdc_get_free_slot(conn, link, &wr_buf, NULL, &pend);
	if (rc)
		goto put_out;

	spin_lock_bh(&conn->send_lock);
	if (link != conn->lnk) {
		/* link of connection changed, try again one time */
		spin_unlock_bh(&conn->send_lock);
		smc_wr_tx_put_slot(link,
				   (struct smc_wr_tx_pend_priv *)pend);
		smc_wr_tx_link_put(link);
		if (again)
			return -ENOLINK;
		again = true;
		goto again;
	}
	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	spin_unlock_bh(&conn->send_lock);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}

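/* Entry point for sending a CDC message on a connection: uses an ISM write
 * for SMC-D link groups and a send work request for SMC-R link groups.
 * Returns -EPIPE if the link group is no longer usable.
 */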
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
	int rc;

	if (!smc_conn_lgr_valid(conn) ||
	    (conn->lgr->is_smcd && conn->lgr->peer_shutdown))
		return -EPIPE;

	if (conn->lgr->is_smcd) {
		spin_lock_bh(&conn->send_lock);
		rc = smcd_cdc_msg_send(conn);
		spin_unlock_bh(&conn->send_lock);
	} else {
		rc = smcr_cdc_get_slot_and_msg_send(conn);
	}

	return rc;
}

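/* Wait until all CDC send work requests pending on this connection have
 * completed; typically used before the connection is torn down.
 */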
void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn)
{
	wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr));
}

/* Send an SMC-D CDC header.
 * This increments the free space available in our send buffer.
 * Also update the confirmed receive buffer with what was sent to the peer.
 */
int smcd_cdc_msg_send(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	union smc_host_cursor curs;
	struct smcd_cdc_msg cdc;
	int rc, diff;

	memset(&cdc, 0, sizeof(cdc));
	cdc.common.type = SMC_CDC_MSG_TYPE;
	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
	cdc.prod.wrap = curs.wrap;
	cdc.prod.count = curs.count;
	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
	cdc.cons.wrap = curs.wrap;
	cdc.cons.count = curs.count;
	cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
	cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
	rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
	if (rc)
		return rc;
	smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
	conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
	/* Calculate transmitted data and increment free send buffer space */
	diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
			     &conn->tx_curs_sent);
	/* increased by confirmed number of bytes */
	smp_mb__before_atomic();
	atomic_add(diff, &conn->sndbuf_space);
	/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
	smp_mb__after_atomic();
	smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn);

	smc_tx_sndbuf_nonfull(smc);
	return rc;
}

/********************************* receive ***********************************/

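/* Return true if seq1 is "before" seq2 in wrapping 16-bit sequence-number
 * arithmetic, i.e. the signed 16-bit difference is negative.
 */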
static inline bool smc_cdc_before(u16 seq1, u16 seq2)
{
	return (s16)(seq1 - seq2) < 0;
}

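/* The peer announced urgent data: remember the producer cursor of the urgent
 * byte, copy the byte itself out of the RMB, and signal SIGURG to the socket
 * owner. Unless SOCK_URGINLINE is set, the urgent byte is excluded from the
 * regular receive byte count.
 */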
static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc,
					    int *diff_prod)
{
	struct smc_connection *conn = &smc->conn;
	char *base;

	/* new data included urgent business */
	smc_curs_copy(&conn->urg_curs, &conn->local_rx_ctrl.prod, conn);
	conn->urg_state = SMC_URG_VALID;
	if (!sock_flag(&smc->sk, SOCK_URGINLINE))
		/* we'll skip the urgent byte, so don't account for it */
		(*diff_prod)--;
	base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off;
	if (conn->urg_curs.count)
		conn->urg_rx_byte = *(base + conn->urg_curs.count - 1);
	else
		conn->urg_rx_byte = *(base + conn->rmb_desc->len - 1);
	sk_send_sigurg(&smc->sk);
}

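/* Check a failover validation message: the received sequence number must not
 * be newer than the last one seen on this connection; otherwise CDC messages
 * were lost during link failover and the connection is aborted.
 */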
static void smc_cdc_msg_validate(struct smc_sock *smc, struct smc_cdc_msg *cdc,
				 struct smc_link *link)
{
	struct smc_connection *conn = &smc->conn;
	u16 recv_seq = ntohs(cdc->seqno);
	s16 diff;

	/* check that seqnum was seen before */
	diff = conn->local_rx_ctrl.seqno - recv_seq;
	if (diff < 0) { /* diff larger than 0x7fff */
		/* drop connection */
		conn->out_of_sync = 1; /* prevent any further receives */
		spin_lock_bh(&conn->send_lock);
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
		conn->lnk = link;
		spin_unlock_bh(&conn->send_lock);
		sock_hold(&smc->sk); /* sock_put in abort_work */
		if (!queue_work(smc_close_wq, &conn->abort_work))
			sock_put(&smc->sk);
	}
}

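/* Apply a received CDC message to the connection: update the local copy of
 * the peer's cursors, adjust peer_rmbe_space and bytes_to_rcv, wake up
 * readers and writers as needed, and handle urgent data and close
 * indications.
 */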
static void smc_cdc_msg_recv_action(struct smc_sock *smc,
				    struct smc_cdc_msg *cdc)
{
	union smc_host_cursor cons_old, prod_old;
	struct smc_connection *conn = &smc->conn;
	int diff_cons, diff_prod;

	smc_curs_copy(&prod_old, &conn->local_rx_ctrl.prod, conn);
	smc_curs_copy(&cons_old, &conn->local_rx_ctrl.cons, conn);
	smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn);

	diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old,
				  &conn->local_rx_ctrl.cons);
	if (diff_cons) {
		/* peer_rmbe_space is decreased during data transfer with RDMA
		 * write
		 */
		smp_mb__before_atomic();
		atomic_add(diff_cons, &conn->peer_rmbe_space);
		/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
		smp_mb__after_atomic();
	}

	diff_prod = smc_curs_diff(conn->rmb_desc->len, &prod_old,
				  &conn->local_rx_ctrl.prod);
	if (diff_prod) {
		if (conn->local_rx_ctrl.prod_flags.urg_data_present)
			smc_cdc_handle_urg_data_arrival(smc, &diff_prod);
		/* bytes_to_rcv is decreased in smc_recvmsg */
		smp_mb__before_atomic();
		atomic_add(diff_prod, &conn->bytes_to_rcv);
		/* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
		smp_mb__after_atomic();
		smc->sk.sk_data_ready(&smc->sk);
	} else {
		if (conn->local_rx_ctrl.prod_flags.write_blocked)
			smc->sk.sk_data_ready(&smc->sk);
		if (conn->local_rx_ctrl.prod_flags.urg_data_pending)
			conn->urg_state = SMC_URG_NOTYET;
	}

	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
	if ((diff_cons && smc_tx_prepared_sends(conn)) ||
	    conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    conn->local_rx_ctrl.prod_flags.urg_data_pending) {
		if (!sock_owned_by_user(&smc->sk))
			smc_tx_pending(conn);
		else
			conn->tx_in_release_sock = true;
	}

	if (diff_cons && conn->urg_tx_pend &&
	    atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) {
		/* urg data confirmed by peer, indicate we're ready for more */
		conn->urg_tx_pend = false;
		smc->sk.sk_write_space(&smc->sk);
	}

	if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
		smc->sk.sk_err = ECONNRESET;
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	}
	if (smc_cdc_rxed_any_close_or_senddone(conn)) {
		smc->sk.sk_shutdown |= RCV_SHUTDOWN;
		if (smc->clcsock && smc->clcsock->sk)
			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
		sock_set_flag(&smc->sk, SOCK_DONE);
		sock_hold(&smc->sk); /* sock_put in close_work */
		if (!queue_work(smc_close_wq, &conn->close_work))
			sock_put(&smc->sk);
	}
}

/* called under tasklet context */
static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
{
	sock_hold(&smc->sk);
	bh_lock_sock(&smc->sk);
	smc_cdc_msg_recv_action(smc, cdc);
	bh_unlock_sock(&smc->sk);
	sock_put(&smc->sk); /* no free sk in softirq-context */
}

/* Schedule a tasklet for this connection. Triggered from the ISM device IRQ
 * handler to indicate update in the DMBE.
 */
static void smcd_cdc_rx_tsklet(struct tasklet_struct *t)
{
	struct smc_connection *conn = from_tasklet(conn, t, rx_tsklet);
	struct smcd_cdc_msg *data_cdc;
	struct smcd_cdc_msg cdc;
	struct smc_sock *smc;

	if (!conn || conn->killed)
		return;

	data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
	smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
	smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
	smc = container_of(conn, struct smc_sock, conn);
	smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
}

/* Initialize receive tasklet. Called from ISM device IRQ handler to start
 * receiver side.
 */
void smcd_cdc_rx_init(struct smc_connection *conn)
{
	tasklet_setup(&conn->rx_tsklet, smcd_cdc_rx_tsklet);
}

/***************************** init, exit, misc ******************************/

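/* Completion handler for received CDC messages (SMC-R): look up the
 * connection by the token carried in the message and feed the message into
 * the receive path; short, invalid, or stale messages are dropped.
 */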
static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_cdc_msg *cdc = buf;
	struct smc_connection *conn;
	struct smc_link_group *lgr;
	struct smc_sock *smc;

	if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved))
		return; /* short message */
	if (cdc->len != SMC_WR_TX_SIZE)
		return; /* invalid message */

	/* lookup connection */
	lgr = smc_get_lgr(link);
	read_lock_bh(&lgr->conns_lock);
	conn = smc_lgr_find_conn(ntohl(cdc->token), lgr);
	read_unlock_bh(&lgr->conns_lock);
	if (!conn || conn->out_of_sync)
		return;
	smc = container_of(conn, struct smc_sock, conn);

	if (cdc->prod_flags.failover_validation) {
		smc_cdc_msg_validate(smc, cdc, link);
		return;
	}
	if (smc_cdc_before(ntohs(cdc->seqno),
			   conn->local_rx_ctrl.seqno))
		/* received seqno is old */
		return;

	smc_cdc_msg_recv(smc, cdc);
}

static struct smc_wr_rx_handler smc_cdc_rx_handlers[] = {
	{
		.handler	= smc_cdc_rx_handler,
		.type		= SMC_CDC_MSG_TYPE
	},
	{
		.handler	= NULL,
	}
};

int __init smc_cdc_init(void)
{
	struct smc_wr_rx_handler *handler;
	int rc = 0;

	for (handler = smc_cdc_rx_handlers; handler->handler; handler++) {
		INIT_HLIST_NODE(&handler->list);
		rc = smc_wr_rx_register_handler(handler);
		if (rc)
			break;
	}
	return rc;
}