// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/*          Fredy Neeser */
/*          Greg Joyce <greg@opengridcomputing.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
/* Copyright (c) 2017, Open Grid Computing, Inc. */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/tcp.h>
#include <trace/events/sock.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "siw.h"
#include "siw_cm.h"

/*
 * Set to any combination of
 * MPA_V2_RDMA_NO_RTR, MPA_V2_RDMA_READ_RTR, MPA_V2_RDMA_WRITE_RTR
 */
static __be16 rtr_type = MPA_V2_RDMA_READ_RTR | MPA_V2_RDMA_WRITE_RTR;
static const bool relaxed_ird_negotiation = true;

static void siw_cm_llp_state_change(struct sock *s);
static void siw_cm_llp_data_ready(struct sock *s);
static void siw_cm_llp_write_space(struct sock *s);
static void siw_cm_llp_error_report(struct sock *s);
static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
			 int status);

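/*
 * siw drives connection setup off the TCP socket's callback vector:
 * the helpers below swap a socket's sk_state_change/sk_data_ready/
 * sk_write_space/sk_error_report upcalls in three phases. CM upcalls
 * serve the MPA handshake, an RTR-wait upcall covers the window where
 * the peer's first RDMA frame is expected, and the QP fastpath upcalls
 * take over once in RDMA mode. All swaps happen under sk_callback_lock.
 */
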
static void siw_sk_assign_cm_upcalls(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = siw_cm_llp_state_change;
	sk->sk_data_ready = siw_cm_llp_data_ready;
	sk->sk_write_space = siw_cm_llp_write_space;
	sk->sk_error_report = siw_cm_llp_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void siw_sk_save_upcalls(struct sock *sk)
{
	struct siw_cep *cep = sk_to_cep(sk);

	write_lock_bh(&sk->sk_callback_lock);
	cep->sk_state_change = sk->sk_state_change;
	cep->sk_data_ready = sk->sk_data_ready;
	cep->sk_write_space = sk->sk_write_space;
	cep->sk_error_report = sk->sk_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void siw_sk_restore_upcalls(struct sock *sk, struct siw_cep *cep)
{
	sk->sk_state_change = cep->sk_state_change;
	sk->sk_data_ready = cep->sk_data_ready;
	sk->sk_write_space = cep->sk_write_space;
	sk->sk_error_report = cep->sk_error_report;
	sk->sk_user_data = NULL;
}

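/*
 * Unlike the save/assign helpers above, the restore helper takes no
 * lock itself: its caller (siw_socket_disassoc()) already holds
 * sk_callback_lock in write mode when the upcalls are restored.
 */
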
static void siw_qp_socket_assoc(struct siw_cep *cep, struct siw_qp *qp)
{
	struct socket *s = cep->sock;
	struct sock *sk = s->sk;

	write_lock_bh(&sk->sk_callback_lock);

	qp->attrs.sk = s;
	sk->sk_data_ready = siw_qp_llp_data_ready;
	sk->sk_write_space = siw_qp_llp_write_space;

	write_unlock_bh(&sk->sk_callback_lock);
}

static void siw_socket_disassoc(struct socket *s)
{
	struct sock *sk = s->sk;
	struct siw_cep *cep;

	if (sk) {
		write_lock_bh(&sk->sk_callback_lock);
		cep = sk_to_cep(sk);
		if (cep) {
			siw_sk_restore_upcalls(sk, cep);
			siw_cep_put(cep);
		} else {
			pr_warn("siw: cannot restore sk callbacks: no ep\n");
		}
		write_unlock_bh(&sk->sk_callback_lock);
	} else {
		pr_warn("siw: cannot restore sk callbacks: no sk\n");
	}
}

static void siw_rtr_data_ready(struct sock *sk)
{
	struct siw_cep *cep;
	struct siw_qp *qp = NULL;
	read_descriptor_t rd_desc;

	trace_sk_data_ready(sk);

	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (!cep) {
		WARN(1, "No connection endpoint\n");
		goto out;
	}
	qp = sk_to_qp(sk);

	memset(&rd_desc, 0, sizeof(rd_desc));
	rd_desc.arg.data = qp;
	rd_desc.count = 1;

	tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);
	/*
	 * Check if first frame was successfully processed.
	 * Signal connection full establishment if yes.
	 * Failed data processing would have already scheduled
	 * connection drop.
	 */
	if (!qp->rx_stream.rx_suspend)
		siw_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
out:
	read_unlock(&sk->sk_callback_lock);
	if (qp)
		siw_qp_socket_assoc(cep, qp);
}

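/*
 * The RTR-wait path: when the MPA reply advertised a peer-to-peer RTR
 * mode, the accepting side defers the ESTABLISHED upcall until the
 * peer's first (zero length) RDMA frame arrives. siw_rtr_data_ready()
 * feeds that frame into siw_tcp_rx_data() and, if receive processing
 * did not suspend the stream, completes establishment and hands the
 * socket over to the QP fastpath upcalls.
 */
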
static void siw_sk_assign_rtr_upcalls(struct siw_cep *cep)
{
	struct sock *sk = cep->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_data_ready = siw_rtr_data_ready;
	sk->sk_write_space = siw_qp_llp_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void siw_cep_socket_assoc(struct siw_cep *cep, struct socket *s)
{
	cep->sock = s;
	siw_cep_get(cep);
	s->sk->sk_user_data = cep;

	siw_sk_save_upcalls(s->sk);
	siw_sk_assign_cm_upcalls(s->sk);
}

static struct siw_cep *siw_cep_alloc(struct siw_device *sdev)
{
	struct siw_cep *cep = kzalloc(sizeof(*cep), GFP_KERNEL);
	unsigned long flags;

	if (!cep)
		return NULL;

	INIT_LIST_HEAD(&cep->listenq);
	INIT_LIST_HEAD(&cep->devq);
	INIT_LIST_HEAD(&cep->work_freelist);

	kref_init(&cep->ref);
	cep->state = SIW_EPSTATE_IDLE;
	init_waitqueue_head(&cep->waitq);
	spin_lock_init(&cep->lock);
	cep->sdev = sdev;
	cep->enhanced_rdma_conn_est = false;

	spin_lock_irqsave(&sdev->lock, flags);
	list_add_tail(&cep->devq, &sdev->cep_list);
	spin_unlock_irqrestore(&sdev->lock, flags);

	siw_dbg_cep(cep, "new endpoint\n");
	return cep;
}

static void siw_cm_free_work(struct siw_cep *cep)
{
	struct list_head *w, *tmp;
	struct siw_cm_work *work;

	list_for_each_safe(w, tmp, &cep->work_freelist) {
		work = list_entry(w, struct siw_cm_work, list);
		list_del(&work->list);
		kfree(work);
	}
}

static void siw_cancel_mpatimer(struct siw_cep *cep)
{
	spin_lock_bh(&cep->lock);
	if (cep->mpa_timer) {
		if (cancel_delayed_work(&cep->mpa_timer->work)) {
			siw_cep_put(cep);
			kfree(cep->mpa_timer); /* not needed again */
		}
		cep->mpa_timer = NULL;
	}
	spin_unlock_bh(&cep->lock);
}

static void siw_put_work(struct siw_cm_work *work)
{
	INIT_LIST_HEAD(&work->list);
	spin_lock_bh(&work->cep->lock);
	list_add(&work->list, &work->cep->work_freelist);
	spin_unlock_bh(&work->cep->lock);
}

static void siw_cep_set_inuse(struct siw_cep *cep)
{
	unsigned long flags;
retry:
	spin_lock_irqsave(&cep->lock, flags);

	if (cep->in_use) {
		spin_unlock_irqrestore(&cep->lock, flags);
		wait_event_interruptible(cep->waitq, !cep->in_use);
		if (signal_pending(current))
			flush_signals(current);
		goto retry;
	} else {
		cep->in_use = 1;
		spin_unlock_irqrestore(&cep->lock, flags);
	}
}

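/*
 * The in_use flag implements sleeping mutual exclusion on a CEP:
 * contenders loop through lock/unlock cycles around an interruptible
 * wait until the current owner calls siw_cep_set_free(), which clears
 * the flag and wakes the waitqueue. Pending signals are flushed,
 * apparently so that a signal cannot turn the interruptible wait into
 * a busy retry loop.
 */
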
static void siw_cep_set_free(struct siw_cep *cep)
{
	unsigned long flags;

	spin_lock_irqsave(&cep->lock, flags);
	cep->in_use = 0;
	spin_unlock_irqrestore(&cep->lock, flags);

	wake_up(&cep->waitq);
}

static void __siw_cep_dealloc(struct kref *ref)
{
	struct siw_cep *cep = container_of(ref, struct siw_cep, ref);
	struct siw_device *sdev = cep->sdev;
	unsigned long flags;

	WARN_ON(cep->listen_cep);

	/* kfree(NULL) is safe */
	kfree(cep->mpa.pdata);
	spin_lock_bh(&cep->lock);
	if (!list_empty(&cep->work_freelist))
		siw_cm_free_work(cep);
	spin_unlock_bh(&cep->lock);

	spin_lock_irqsave(&sdev->lock, flags);
	list_del(&cep->devq);
	spin_unlock_irqrestore(&sdev->lock, flags);

	siw_dbg_cep(cep, "free endpoint\n");
	kfree(cep);
}

static struct siw_cm_work *siw_get_work(struct siw_cep *cep)
{
	struct siw_cm_work *work = NULL;

	spin_lock_bh(&cep->lock);
	if (!list_empty(&cep->work_freelist)) {
		work = list_entry(cep->work_freelist.next, struct siw_cm_work,
				  list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&cep->lock);
	return work;
}

static int siw_cm_alloc_work(struct siw_cep *cep, int num)
{
	struct siw_cm_work *work;

	while (num--) {
		work = kmalloc(sizeof(*work), GFP_KERNEL);
		if (!work) {
			if (!(list_empty(&cep->work_freelist)))
				siw_cm_free_work(cep);
			return -ENOMEM;
		}
		work->cep = cep;
		INIT_LIST_HEAD(&work->list);
		list_add(&work->list, &cep->work_freelist);
	}
	return 0;
}

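/*
 * CM work elements are preallocated per CEP into work_freelist and
 * recycled via siw_get_work()/siw_put_work(), so queueing work from
 * socket callbacks never has to allocate. A failed allocation here
 * drains whatever was already reserved and returns -ENOMEM.
 */
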
/*
 * siw_cm_upcall()
 *
 * Upcall to IWCM to inform about async connection events
 */
static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
			 int status)
{
	struct iw_cm_event event;
	struct iw_cm_id *id;

	memset(&event, 0, sizeof(event));
	event.status = status;
	event.event = reason;

	if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
		event.provider_data = cep;
		id = cep->listen_cep->cm_id;
	} else {
		id = cep->cm_id;
	}
	/* Signal IRD and ORD */
	if (reason == IW_CM_EVENT_ESTABLISHED ||
	    reason == IW_CM_EVENT_CONNECT_REPLY) {
		/* Signal negotiated IRD/ORD values we will use */
		event.ird = cep->ird;
		event.ord = cep->ord;
	} else if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
		event.ird = cep->ord;
		event.ord = cep->ird;
	}
	/* Signal private data and address information */
	if (reason == IW_CM_EVENT_CONNECT_REQUEST ||
	    reason == IW_CM_EVENT_CONNECT_REPLY) {
		u16 pd_len = be16_to_cpu(cep->mpa.hdr.params.pd_len);

		if (pd_len) {
			/*
			 * hand over MPA private data
			 */
			event.private_data_len = pd_len;
			event.private_data = cep->mpa.pdata;

			/* Hide MPA V2 IRD/ORD control */
			if (cep->enhanced_rdma_conn_est) {
				event.private_data_len -=
					sizeof(struct mpa_v2_data);
				event.private_data +=
					sizeof(struct mpa_v2_data);
			}
		}
		getname_local(cep->sock, &event.local_addr);
		getname_peer(cep->sock, &event.remote_addr);
	}
	siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
		    cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);

	return id->event_handler(id, &event);
}

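/*
 * Event/state summary for the upcall: CONNECT_REQUEST is delivered on
 * the listening CEP's cm_id with local IRD/ORD mirrored (the peer's
 * ORD becomes our IRD and vice versa), while CONNECT_REPLY and
 * ESTABLISHED report the values this side will actually use. Private
 * data is passed through for request/reply events only, with the MPA
 * v2 IRD/ORD control word stripped from what the ULP gets to see.
 */
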
/*
 * siw_qp_cm_drop()
 *
 * Drops established LLP connection if present and not already
 * scheduled for dropping. Called from user context, SQ workqueue
 * or receive IRQ. Caller signals if socket can be immediately
 * closed (basically, if not in IRQ).
 */
void siw_qp_cm_drop(struct siw_qp *qp, int schedule)
{
	struct siw_cep *cep = qp->cep;

	qp->rx_stream.rx_suspend = 1;
	qp->tx_ctx.tx_suspend = 1;

	if (!qp->cep)
		return;

	if (schedule) {
		siw_cm_queue_work(cep, SIW_CM_WORK_CLOSE_LLP);
	} else {
		siw_cep_set_inuse(cep);

		if (cep->state == SIW_EPSTATE_CLOSED) {
			siw_dbg_cep(cep, "already closed\n");
			goto out;
		}
		siw_dbg_cep(cep, "immediate close, state %d\n", cep->state);

		if (qp->term_info.valid)
			siw_send_terminate(qp);

		if (cep->cm_id) {
			switch (cep->state) {
			case SIW_EPSTATE_AWAIT_MPAREP:
				siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
					      -EINVAL);
				break;

			case SIW_EPSTATE_RDMA_MODE:
				siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
				break;

			case SIW_EPSTATE_IDLE:
			case SIW_EPSTATE_LISTENING:
			case SIW_EPSTATE_CONNECTING:
			case SIW_EPSTATE_AWAIT_MPAREQ:
			case SIW_EPSTATE_RECVD_MPAREQ:
			case SIW_EPSTATE_CLOSED:
			default:
				break;
			}
			cep->cm_id->rem_ref(cep->cm_id);
			cep->cm_id = NULL;
			siw_cep_put(cep);
		}
		cep->state = SIW_EPSTATE_CLOSED;

		if (cep->sock) {
			siw_socket_disassoc(cep->sock);
			/*
			 * Immediately close socket
			 */
			sock_release(cep->sock);
			cep->sock = NULL;
		}
		if (cep->qp) {
			cep->qp = NULL;
			siw_qp_put(qp);
		}
out:
		siw_cep_set_free(cep);
	}
}

void siw_cep_put(struct siw_cep *cep)
{
	WARN_ON(kref_read(&cep->ref) < 1);
	kref_put(&cep->ref, __siw_cep_dealloc);
}

void siw_cep_get(struct siw_cep *cep)
{
	kref_get(&cep->ref);
}

/*
 * Expects params->pd_len in host byte order
 */
static int siw_send_mpareqrep(struct siw_cep *cep, const void *pdata, u8 pd_len)
{
	struct socket *s = cep->sock;
	struct mpa_rr *rr = &cep->mpa.hdr;
	struct kvec iov[3];
	struct msghdr msg;
	int rv;
	int iovec_num = 0;
	int mpa_len;

	memset(&msg, 0, sizeof(msg));

	iov[iovec_num].iov_base = rr;
	iov[iovec_num].iov_len = sizeof(*rr);
	mpa_len = sizeof(*rr);

	if (cep->enhanced_rdma_conn_est) {
		iovec_num++;
		iov[iovec_num].iov_base = &cep->mpa.v2_ctrl;
		iov[iovec_num].iov_len = sizeof(cep->mpa.v2_ctrl);
		mpa_len += sizeof(cep->mpa.v2_ctrl);
	}
	if (pd_len) {
		iovec_num++;
		iov[iovec_num].iov_base = (char *)pdata;
		iov[iovec_num].iov_len = pd_len;
		mpa_len += pd_len;
	}
	if (cep->enhanced_rdma_conn_est)
		pd_len += sizeof(cep->mpa.v2_ctrl);

	rr->params.pd_len = cpu_to_be16(pd_len);

	rv = kernel_sendmsg(s, &msg, iov, iovec_num + 1, mpa_len);

	return rv < 0 ? rv : 0;
}

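/*
 * On the wire, the frame built above is the plain concatenation of the
 * iovec elements:
 *
 *   [struct mpa_rr][struct mpa_v2_data (enhanced mode only)][pdata]
 *
 * Note that pd_len as sent in the header covers the v2 control word
 * plus the ULP private data, which is why sizeof(cep->mpa.v2_ctrl) is
 * folded into pd_len only after the iovec has been assembled.
 */
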
/*
 * siw_recv_mpa_rr()
 *
 * Receive MPA Request/Reply header.
 *
 * Returns 0 if complete MPA Request/Reply header including
 * eventual private data was received. Returns -EAGAIN if
 * header was partially received or negative error code otherwise.
 *
 * Context: May be called in process context only
 */
static int siw_recv_mpa_rr(struct siw_cep *cep)
{
	struct mpa_rr *hdr = &cep->mpa.hdr;
	struct socket *s = cep->sock;
	u16 pd_len;
	int rcvd, to_rcv;

	if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr)) {
		rcvd = ksock_recv(s, (char *)hdr + cep->mpa.bytes_rcvd,
				  sizeof(struct mpa_rr) - cep->mpa.bytes_rcvd,
				  0);
		if (rcvd <= 0)
			return -ECONNABORTED;

		cep->mpa.bytes_rcvd += rcvd;

		if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr))
			return -EAGAIN;

		if (be16_to_cpu(hdr->params.pd_len) > MPA_MAX_PRIVDATA)
			return -EPROTO;
	}
	pd_len = be16_to_cpu(hdr->params.pd_len);

	/*
	 * At least the MPA Request/Reply header (frame not including
	 * private data) has been received.
	 * Receive (or continue receiving) any private data.
	 */
	to_rcv = pd_len - (cep->mpa.bytes_rcvd - sizeof(struct mpa_rr));

	if (!to_rcv) {
		/*
		 * We must have hdr->params.pd_len == 0 and thus received a
		 * complete MPA Request/Reply frame.
		 * Check against peer protocol violation.
		 */
		u32 word;

		rcvd = ksock_recv(s, (char *)&word, sizeof(word), MSG_DONTWAIT);
		if (rcvd == -EAGAIN)
			return 0;

		if (rcvd == 0) {
			siw_dbg_cep(cep, "peer EOF\n");
			return -EPIPE;
		}
		if (rcvd < 0) {
			siw_dbg_cep(cep, "error: %d\n", rcvd);
			return rcvd;
		}
		siw_dbg_cep(cep, "peer sent extra data: %d\n", rcvd);

		return -EPROTO;
	}

	/*
	 * At this point, we must have hdr->params.pd_len != 0.
	 * A private data buffer gets allocated if hdr->params.pd_len != 0.
	 */
	if (!cep->mpa.pdata) {
		cep->mpa.pdata = kmalloc(pd_len + 4, GFP_KERNEL);
		if (!cep->mpa.pdata)
			return -ENOMEM;
	}
	rcvd = ksock_recv(
		s, cep->mpa.pdata + cep->mpa.bytes_rcvd - sizeof(struct mpa_rr),
		to_rcv + 4, MSG_DONTWAIT);

	if (rcvd < 0)
		return rcvd;

	if (rcvd > to_rcv)
		return -EPROTO;

	cep->mpa.bytes_rcvd += rcvd;

	if (to_rcv == rcvd) {
		siw_dbg_cep(cep, "%d bytes private data received\n", pd_len);
		return 0;
	}
	return -EAGAIN;
}

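/*
 * The receive side is restartable: cep->mpa.bytes_rcvd records how
 * much of header plus private data has arrived so far, so the data
 * ready callback can invoke this routine repeatedly until it stops
 * returning -EAGAIN. The "+ 4" slack on the private data buffer and
 * receive length appears to exist so a protocol violation (the peer
 * sending more than pd_len bytes) is caught by the rcvd > to_rcv
 * check rather than silently truncated.
 */
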
/*
 * siw_proc_mpareq()
 *
 * Read MPA Request from socket and signal new connection to IWCM
 * if success. Caller must hold lock on corresponding listening CEP.
 */
static int siw_proc_mpareq(struct siw_cep *cep)
{
	struct mpa_rr *req;
	int version, rv;
	u16 pd_len;

	rv = siw_recv_mpa_rr(cep);
	if (rv)
		return rv;

	req = &cep->mpa.hdr;

	version = __mpa_rr_revision(req->params.bits);
	pd_len = be16_to_cpu(req->params.pd_len);

	if (version > MPA_REVISION_2)
		/* allow for 0, 1, and 2 only */
		return -EPROTO;

	if (memcmp(req->key, MPA_KEY_REQ, 16))
		return -EPROTO;

	/* Prepare for sending MPA reply */
	memcpy(req->key, MPA_KEY_REP, 16);

	if (version == MPA_REVISION_2 &&
	    (req->params.bits & MPA_RR_FLAG_ENHANCED)) {
		/*
		 * MPA version 2 must signal IRD/ORD values and P2P mode
		 * in private data if header flag MPA_RR_FLAG_ENHANCED
		 * is set.
		 */
		if (pd_len < sizeof(struct mpa_v2_data))
			goto reject_conn;

		cep->enhanced_rdma_conn_est = true;
	}

	/* MPA Markers: currently not supported. Marker TX to be added. */
	if (req->params.bits & MPA_RR_FLAG_MARKERS)
		goto reject_conn;

	if (req->params.bits & MPA_RR_FLAG_CRC) {
		/*
		 * RFC 5044, page 27: CRC MUST be used if peer requests it.
		 * siw specific: 'mpa_crc_strict' parameter to reject
		 * connection with CRC if local CRC off enforced by
		 * 'mpa_crc_strict' module parameter.
		 */
		if (!mpa_crc_required && mpa_crc_strict)
			goto reject_conn;

		/* Enable CRC if requested by module parameter */
		if (mpa_crc_required)
			req->params.bits |= MPA_RR_FLAG_CRC;
	}
	if (cep->enhanced_rdma_conn_est) {
		struct mpa_v2_data *v2 = (struct mpa_v2_data *)cep->mpa.pdata;

		/*
		 * Peer requested ORD becomes requested local IRD,
		 * peer requested IRD becomes requested local ORD.
		 * IRD and ORD get limited by global maximum values.
		 */
		cep->ord = ntohs(v2->ird) & MPA_IRD_ORD_MASK;
		cep->ord = min(cep->ord, SIW_MAX_ORD_QP);
		cep->ird = ntohs(v2->ord) & MPA_IRD_ORD_MASK;
		cep->ird = min(cep->ird, SIW_MAX_IRD_QP);

		/* May get overwritten by locally negotiated values */
		cep->mpa.v2_ctrl.ird = htons(cep->ird);
		cep->mpa.v2_ctrl.ord = htons(cep->ord);

		/*
		 * Support for peer sent zero length Write or Read to
		 * let local side enter RTS. Writes are preferred.
		 * Sends would require pre-posting a Receive and are
		 * not supported.
		 * Propose zero length Write if none of Read and Write
		 * is indicated.
		 */
		if (v2->ird & MPA_V2_PEER_TO_PEER) {
			cep->mpa.v2_ctrl.ird |= MPA_V2_PEER_TO_PEER;

			if (v2->ord & MPA_V2_RDMA_WRITE_RTR)
				cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_WRITE_RTR;
			else if (v2->ord & MPA_V2_RDMA_READ_RTR)
				cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_READ_RTR;
			else
				cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_WRITE_RTR;
		}
	}

	cep->state = SIW_EPSTATE_RECVD_MPAREQ;

	/* Keep reference until IWCM accepts/rejects */
	siw_cep_get(cep);
	rv = siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REQUEST, 0);
	if (rv)
		siw_cep_put(cep);

	return rv;

reject_conn:
	siw_dbg_cep(cep, "reject: crc %d:%d:%d, m %d:%d\n",
		    req->params.bits & MPA_RR_FLAG_CRC ? 1 : 0,
		    mpa_crc_required, mpa_crc_strict,
		    req->params.bits & MPA_RR_FLAG_MARKERS ? 1 : 0, 0);

	req->params.bits &= ~MPA_RR_FLAG_MARKERS;
	req->params.bits |= MPA_RR_FLAG_REJECT;

	if (!mpa_crc_required && mpa_crc_strict)
		req->params.bits &= ~MPA_RR_FLAG_CRC;

	if (pd_len)
		kfree(cep->mpa.pdata);

	cep->mpa.pdata = NULL;

	siw_send_mpareqrep(cep, NULL, 0);

	return -EOPNOTSUPP;
}

static int siw_proc_mpareply(struct siw_cep *cep)
{
	struct siw_qp_attrs qp_attrs;
	enum siw_qp_attr_mask qp_attr_mask;
	struct siw_qp *qp = cep->qp;
	struct mpa_rr *rep;
	int rv;
	u16 rep_ord;
	u16 rep_ird;
	bool ird_insufficient = false;
	enum mpa_v2_ctrl mpa_p2p_mode = MPA_V2_RDMA_NO_RTR;

	rv = siw_recv_mpa_rr(cep);
	if (rv)
		goto out_err;

	siw_cancel_mpatimer(cep);

	rep = &cep->mpa.hdr;

	if (__mpa_rr_revision(rep->params.bits) > MPA_REVISION_2) {
		/* allow for 0, 1, and 2 only */
		rv = -EPROTO;
		goto out_err;
	}
	if (memcmp(rep->key, MPA_KEY_REP, 16)) {
		siw_init_terminate(qp, TERM_ERROR_LAYER_LLP, LLP_ETYPE_MPA,
				   LLP_ECODE_INVALID_REQ_RESP, 0);
		siw_send_terminate(qp);
		rv = -EPROTO;
		goto out_err;
	}
	if (rep->params.bits & MPA_RR_FLAG_REJECT) {
		siw_dbg_cep(cep, "got mpa reject\n");
		siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNRESET);

		return -ECONNRESET;
	}
	if (try_gso && rep->params.bits & MPA_RR_FLAG_GSO_EXP) {
		siw_dbg_cep(cep, "peer allows GSO on TX\n");
		qp->tx_ctx.gso_seg_limit = 0;
	}
	if ((rep->params.bits & MPA_RR_FLAG_MARKERS) ||
	    (mpa_crc_required && !(rep->params.bits & MPA_RR_FLAG_CRC)) ||
	    (mpa_crc_strict && !mpa_crc_required &&
	     (rep->params.bits & MPA_RR_FLAG_CRC))) {
		siw_dbg_cep(cep, "reply unsupp: crc %d:%d:%d, m %d:%d\n",
			    rep->params.bits & MPA_RR_FLAG_CRC ? 1 : 0,
			    mpa_crc_required, mpa_crc_strict,
			    rep->params.bits & MPA_RR_FLAG_MARKERS ? 1 : 0, 0);

		siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);

		return -EINVAL;
	}
	if (cep->enhanced_rdma_conn_est) {
		struct mpa_v2_data *v2;

		if (__mpa_rr_revision(rep->params.bits) < MPA_REVISION_2 ||
		    !(rep->params.bits & MPA_RR_FLAG_ENHANCED)) {
			/*
			 * Protocol failure: The responder MUST reply with
			 * MPA version 2 and MUST set MPA_RR_FLAG_ENHANCED.
			 */
			siw_dbg_cep(cep, "mpa reply error: vers %d, enhcd %d\n",
				    __mpa_rr_revision(rep->params.bits),
				    rep->params.bits & MPA_RR_FLAG_ENHANCED ?
					    1 :
					    0);

			siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
				      -ECONNRESET);
			return -EINVAL;
		}
		v2 = (struct mpa_v2_data *)cep->mpa.pdata;
		rep_ird = ntohs(v2->ird) & MPA_IRD_ORD_MASK;
		rep_ord = ntohs(v2->ord) & MPA_IRD_ORD_MASK;

		if (cep->ird < rep_ord &&
		    (relaxed_ird_negotiation == false ||
		     rep_ord > cep->sdev->attrs.max_ird)) {
			siw_dbg_cep(cep, "ird %d, rep_ord %d, max_ord %d\n",
				    cep->ird, rep_ord,
				    cep->sdev->attrs.max_ord);
			ird_insufficient = true;
		}
		if (cep->ord > rep_ird && relaxed_ird_negotiation == false) {
			siw_dbg_cep(cep, "ord %d, rep_ird %d\n", cep->ord,
				    rep_ird);
			ird_insufficient = true;
		}
		/*
		 * Always report negotiated peer values to user,
		 * even if IRD/ORD negotiation failed
		 */
		cep->ird = rep_ord;
		cep->ord = rep_ird;

		if (ird_insufficient) {
			/*
			 * If the initiator IRD is insufficient for the
			 * responder ORD, send a TERM.
			 */
			siw_init_terminate(qp, TERM_ERROR_LAYER_LLP,
					   LLP_ETYPE_MPA,
					   LLP_ECODE_INSUFFICIENT_IRD, 0);
			siw_send_terminate(qp);
			rv = -ENOMEM;
			goto out_err;
		}
		if (cep->mpa.v2_ctrl_req.ird & MPA_V2_PEER_TO_PEER)
			mpa_p2p_mode =
				cep->mpa.v2_ctrl_req.ord &
				(MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR);

		/*
		 * Check if we requested P2P mode, and if peer agrees
		 */
		if (mpa_p2p_mode != MPA_V2_RDMA_NO_RTR) {
			if ((mpa_p2p_mode & v2->ord) == 0) {
				/*
				 * We requested RTR mode(s), but the peer
				 * did not pick any mode we support.
				 */
				siw_dbg_cep(cep,
					    "rtr mode: req %2x, got %2x\n",
					    mpa_p2p_mode,
					    v2->ord & (MPA_V2_RDMA_WRITE_RTR |
						       MPA_V2_RDMA_READ_RTR));

				siw_init_terminate(qp, TERM_ERROR_LAYER_LLP,
						   LLP_ETYPE_MPA,
						   LLP_ECODE_NO_MATCHING_RTR,
						   0);
				siw_send_terminate(qp);
				rv = -EPROTO;
				goto out_err;
			}
			mpa_p2p_mode = v2->ord & (MPA_V2_RDMA_WRITE_RTR |
						  MPA_V2_RDMA_READ_RTR);
		}
	}
	memset(&qp_attrs, 0, sizeof(qp_attrs));

	if (rep->params.bits & MPA_RR_FLAG_CRC)
		qp_attrs.flags = SIW_MPA_CRC;

	qp_attrs.irq_size = cep->ird;
	qp_attrs.orq_size = cep->ord;
	qp_attrs.sk = cep->sock;
	qp_attrs.state = SIW_QP_STATE_RTS;

	qp_attr_mask = SIW_QP_ATTR_STATE | SIW_QP_ATTR_LLP_HANDLE |
		       SIW_QP_ATTR_ORD | SIW_QP_ATTR_IRD | SIW_QP_ATTR_MPA;

	/* Move socket RX/TX under QP control */
	down_write(&qp->state_lock);
	if (qp->attrs.state > SIW_QP_STATE_RTR) {
		rv = -EINVAL;
		up_write(&qp->state_lock);
		goto out_err;
	}
	rv = siw_qp_modify(qp, &qp_attrs, qp_attr_mask);
	if (!rv)
		siw_qp_socket_assoc(cep, qp);

	up_write(&qp->state_lock);

	/* Send extra RDMA frame to trigger peer RTS if negotiated */
	if (mpa_p2p_mode != MPA_V2_RDMA_NO_RTR) {
		rv = siw_qp_mpa_rts(qp, mpa_p2p_mode);
		if (rv)
			goto out_err;
	}
	if (!rv) {
		rv = siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, 0);
		if (!rv)
			cep->state = SIW_EPSTATE_RDMA_MODE;

		return 0;
	}

out_err:
	if (rv != -EAGAIN)
		siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);

	return rv;
}

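/*
 * Outcome summary for the reply processing above: a clean reply moves
 * the QP to RTS and the CEP to RDMA_MODE; if a peer-to-peer RTR mode
 * was negotiated, siw_qp_mpa_rts() additionally posts the agreed zero
 * length Write or Read to unblock the passive side. Any failure other
 * than -EAGAIN (partially received header) is reported upward as a
 * failed CONNECT_REPLY.
 */
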
/*
 * siw_accept_newconn - accept an incoming pending connection
 *
 */
static void siw_accept_newconn(struct siw_cep *cep)
{
	struct socket *s = cep->sock;
	struct socket *new_s = NULL;
	struct siw_cep *new_cep = NULL;
	int rv = 0; /* debug only. should disappear */

	if (cep->state != SIW_EPSTATE_LISTENING)
		goto error;

	new_cep = siw_cep_alloc(cep->sdev);
	if (!new_cep)
		goto error;

	/*
	 * 4: Allocate a sufficient number of work elements
	 * to allow concurrent handling of local + peer close
	 * events, MPA header processing + MPA timeout.
	 */
	if (siw_cm_alloc_work(new_cep, 4) != 0)
		goto error;

	/*
	 * Copy saved socket callbacks from listening CEP
	 * and assign new socket with new CEP
	 */
	new_cep->sk_state_change = cep->sk_state_change;
	new_cep->sk_data_ready = cep->sk_data_ready;
	new_cep->sk_write_space = cep->sk_write_space;
	new_cep->sk_error_report = cep->sk_error_report;

	rv = kernel_accept(s, &new_s, O_NONBLOCK);
	if (rv != 0) {
		/*
		 * Connection already aborted by peer..?
		 */
		siw_dbg_cep(cep, "kernel_accept() error: %d\n", rv);
		goto error;
	}
	new_cep->sock = new_s;
	siw_cep_get(new_cep);
	new_s->sk->sk_user_data = new_cep;

	if (siw_tcp_nagle == false)
		tcp_sock_set_nodelay(new_s->sk);
	new_cep->state = SIW_EPSTATE_AWAIT_MPAREQ;

	rv = siw_cm_queue_work(new_cep, SIW_CM_WORK_MPATIMEOUT);
	if (rv)
		goto error;
	/*
	 * See siw_proc_mpareq() etc. for the use of new_cep->listen_cep.
	 */
	new_cep->listen_cep = cep;
	siw_cep_get(cep);

	if (atomic_read(&new_s->sk->sk_rmem_alloc)) {
		/*
		 * MPA REQ already queued
		 */
		siw_dbg_cep(cep, "immediate mpa request\n");

		siw_cep_set_inuse(new_cep);
		rv = siw_proc_mpareq(new_cep);
		if (rv != -EAGAIN) {
			siw_cep_put(cep);
			new_cep->listen_cep = NULL;
			if (rv) {
				siw_cep_set_free(new_cep);
				goto error;
			}
		}
		siw_cep_set_free(new_cep);
	}
	return;

error:
	if (new_cep)
		siw_cep_put(new_cep);

	if (new_s) {
		siw_socket_disassoc(new_s);
		sock_release(new_s);
		new_cep->sock = NULL;
	}
	siw_dbg_cep(cep, "error %d\n", rv);
}

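/*
 * Reference notes for the accept path: the new CEP holds a reference
 * on itself on behalf of its socket (sk_user_data) and, while the MPA
 * request is outstanding, a reference on the listening CEP via
 * new_cep->listen_cep. Both are dropped once the MPA request has been
 * consumed or the connection fails early.
 */
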
static void siw_cm_work_handler(struct work_struct *w)
{
	struct siw_cm_work *work;
	struct siw_cep *cep;
	int release_cep = 0, rv = 0;

	work = container_of(w, struct siw_cm_work, work.work);
	cep = work->cep;

	siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
		    cep->qp ? qp_id(cep->qp) : UINT_MAX,
		    work->type, cep->state);

	siw_cep_set_inuse(cep);

	switch (work->type) {
	case SIW_CM_WORK_ACCEPT:
		siw_accept_newconn(cep);
		break;

	case SIW_CM_WORK_READ_MPAHDR:
		if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
			if (cep->listen_cep) {
				siw_cep_set_inuse(cep->listen_cep);

				if (cep->listen_cep->state ==
				    SIW_EPSTATE_LISTENING)
					rv = siw_proc_mpareq(cep);
				else
					rv = -EFAULT;

				siw_cep_set_free(cep->listen_cep);

				if (rv != -EAGAIN) {
					siw_cep_put(cep->listen_cep);
					cep->listen_cep = NULL;
					if (rv)
						siw_cep_put(cep);
				}
			}
		} else if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
			rv = siw_proc_mpareply(cep);
		} else {
			/*
			 * CEP already moved out of MPA handshake.
			 * any connection management already done.
			 * silently ignore the mpa packet.
			 */
			if (cep->state == SIW_EPSTATE_RDMA_MODE) {
				cep->sock->sk->sk_data_ready(cep->sock->sk);
				siw_dbg_cep(cep, "already in RDMA mode");
			} else {
				siw_dbg_cep(cep, "out of state: %d\n",
					    cep->state);
			}
		}
		if (rv && rv != -EAGAIN)
			release_cep = 1;
		break;

	case SIW_CM_WORK_CLOSE_LLP:
		/*
		 * QP scheduled LLP close
		 */
		if (cep->qp && cep->qp->term_info.valid)
			siw_send_terminate(cep->qp);

		if (cep->cm_id)
			siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);

		release_cep = 1;
		break;

	case SIW_CM_WORK_PEER_CLOSE:
		if (cep->cm_id) {
			if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
				/*
				 * MPA reply not received, but connection drop
				 */
				siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
					      -ECONNRESET);
			} else if (cep->state == SIW_EPSTATE_RDMA_MODE) {
				/*
				 * NOTE: IW_CM_EVENT_DISCONNECT is given just
				 *       to transition IWCM into CLOSING.
				 */
				siw_cm_upcall(cep, IW_CM_EVENT_DISCONNECT, 0);
				siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
			}
			/*
			 * for other states there is no connection
			 * known to the IWCM.
			 */
		} else {
			if (cep->state == SIW_EPSTATE_RECVD_MPAREQ) {
				/*
				 * Wait for the ulp/CM to call accept/reject
				 */
				siw_dbg_cep(cep,
					    "mpa req recvd, wait for ULP\n");
			} else if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
				/*
				 * Socket close before MPA request received.
				 */
				siw_dbg_cep(cep, "no mpareq: drop listener\n");
				siw_cep_put(cep->listen_cep);
				cep->listen_cep = NULL;
			}
		}
		release_cep = 1;
		break;

	case SIW_CM_WORK_MPATIMEOUT:
		cep->mpa_timer = NULL;

		if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
			/*
			 * MPA request timed out:
			 * Hide any partially received private data and signal
			 * timeout
			 */
			cep->mpa.hdr.params.pd_len = 0;

			if (cep->cm_id)
				siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
					      -ETIMEDOUT);
			release_cep = 1;

		} else if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
			/*
			 * No MPA request received after peer TCP stream setup.
			 */
			if (cep->listen_cep) {
				siw_cep_put(cep->listen_cep);
				cep->listen_cep = NULL;
			}
			release_cep = 1;
		}
		break;

	default:
		WARN(1, "Undefined CM work type: %d\n", work->type);
	}
	if (release_cep) {
		siw_dbg_cep(cep,
			    "release: timer=%s, QP[%u]\n",
			    cep->mpa_timer ? "y" : "n",
			    cep->qp ? qp_id(cep->qp) : UINT_MAX);

		siw_cancel_mpatimer(cep);

		cep->state = SIW_EPSTATE_CLOSED;

		if (cep->qp) {
			struct siw_qp *qp = cep->qp;
			/*
			 * Serialize a potential race with application
			 * closing the QP and calling siw_qp_cm_drop()
			 */
			siw_qp_get(qp);
			siw_cep_set_free(cep);

			siw_qp_llp_close(qp);
			siw_qp_put(qp);

			siw_cep_set_inuse(cep);
			cep->qp = NULL;
			siw_qp_put(qp);
		}
		if (cep->sock) {
			siw_socket_disassoc(cep->sock);
			sock_release(cep->sock);
			cep->sock = NULL;
		}
		if (cep->cm_id) {
			cep->cm_id->rem_ref(cep->cm_id);
			cep->cm_id = NULL;
			siw_cep_put(cep);
		}
	}
	siw_cep_set_free(cep);
	siw_put_work(work);
	siw_cep_put(cep);
}

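/*
 * Teardown ordering in the release_cep path above is deliberate: the
 * QP is detached first, with the CEP temporarily released around
 * siw_qp_llp_close() to serialize against an application concurrently
 * destroying the QP via siw_qp_cm_drop(); only afterwards are the
 * socket and the cm_id reference dropped.
 */
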
static struct workqueue_struct *siw_cm_wq;

int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type)
{
	struct siw_cm_work *work = siw_get_work(cep);
	unsigned long delay = 0;

	if (!work) {
		siw_dbg_cep(cep, "failed with no work available\n");
		return -ENOMEM;
	}
	work->type = type;
	work->cep = cep;

	siw_cep_get(cep);

	INIT_DELAYED_WORK(&work->work, siw_cm_work_handler);

	if (type == SIW_CM_WORK_MPATIMEOUT) {
		cep->mpa_timer = work;

		if (cep->state == SIW_EPSTATE_AWAIT_MPAREP)
			delay = MPAREQ_TIMEOUT;
		else
			delay = MPAREP_TIMEOUT;
	}
	siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
		    cep->qp ? qp_id(cep->qp) : -1, type, delay);

	queue_delayed_work(siw_cm_wq, &work->work, delay);

	return 0;
}

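/*
 * All CM work, including MPA timeouts, funnels through the single
 * threaded siw_cm_wq (see siw_cm_init()), so work items are strictly
 * ordered and never execute concurrently with each other.
 */
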
static void siw_cm_llp_data_ready(struct sock *sk)
{
	struct siw_cep *cep;

	trace_sk_data_ready(sk);

	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (!cep)
		goto out;

	siw_dbg_cep(cep, "state: %d\n", cep->state);

	switch (cep->state) {
	case SIW_EPSTATE_RDMA_MODE:
	case SIW_EPSTATE_LISTENING:
		break;

	case SIW_EPSTATE_AWAIT_MPAREQ:
	case SIW_EPSTATE_AWAIT_MPAREP:
		siw_cm_queue_work(cep, SIW_CM_WORK_READ_MPAHDR);
		break;

	default:
		siw_dbg_cep(cep, "unexpected data, state %d\n", cep->state);
		break;
	}
out:
	read_unlock(&sk->sk_callback_lock);
}

static void siw_cm_llp_write_space(struct sock *sk)
{
	struct siw_cep *cep = sk_to_cep(sk);

	if (cep)
		siw_dbg_cep(cep, "state: %d\n", cep->state);
}

static void siw_cm_llp_error_report(struct sock *sk)
{
	struct siw_cep *cep = sk_to_cep(sk);

	if (cep) {
		siw_dbg_cep(cep, "error %d, socket state: %d, cep state: %d\n",
			    sk->sk_err, sk->sk_state, cep->state);
		cep->sk_error_report(sk);
	}
}

static void siw_cm_llp_state_change(struct sock *sk)
{
	struct siw_cep *cep;
	void (*orig_state_change)(struct sock *s);

	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (!cep) {
		/* endpoint already disassociated */
		read_unlock(&sk->sk_callback_lock);
		return;
	}
	orig_state_change = cep->sk_state_change;

	siw_dbg_cep(cep, "state: %d\n", cep->state);

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		/*
		 * handle accepting socket as special case where only
		 * new connection is possible
		 */
		siw_cm_queue_work(cep, SIW_CM_WORK_ACCEPT);
		break;

	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
		if (cep->qp)
			cep->qp->tx_ctx.tx_suspend = 1;
		siw_cm_queue_work(cep, SIW_CM_WORK_PEER_CLOSE);
		break;

	default:
		siw_dbg_cep(cep, "unexpected socket state %d\n", sk->sk_state);
	}
	read_unlock(&sk->sk_callback_lock);
	orig_state_change(sk);
}

static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
			      struct sockaddr *raddr, bool afonly)
{
	int rv, flags = 0;
	size_t size = laddr->sa_family == AF_INET ?
		sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);

	/*
	 * Make address available again asap.
	 */
	sock_set_reuseaddr(s->sk);

	if (afonly) {
		rv = ip6_sock_set_v6only(s->sk);
		if (rv)
			return rv;
	}

	rv = s->ops->bind(s, laddr, size);
	if (rv < 0)
		return rv;

	rv = s->ops->connect(s, raddr, size, flags);

	return rv < 0 ? rv : 0;
}

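/*
 * With afonly set, the socket is put into V6ONLY mode before binding,
 * preventing an IPv6 bind from also covering IPv4 mapped addresses.
 * Presumably the IWCM only sets afonly on AF_INET6 ids, since
 * ip6_sock_set_v6only() would fail for an IPv4 socket.
 */
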
int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
{
	struct siw_device *sdev = to_siw_dev(id->device);
	struct siw_qp *qp;
	struct siw_cep *cep = NULL;
	struct socket *s = NULL;
	struct sockaddr *laddr = (struct sockaddr *)&id->local_addr,
			*raddr = (struct sockaddr *)&id->remote_addr;
	bool p2p_mode = peer_to_peer, v4 = true;
	u16 pd_len = params->private_data_len;
	int version = mpa_version, rv;

	if (pd_len > MPA_MAX_PRIVDATA)
		return -EINVAL;

	if (params->ird > sdev->attrs.max_ird ||
	    params->ord > sdev->attrs.max_ord)
		return -ENOMEM;

	if (laddr->sa_family == AF_INET6)
		v4 = false;
	else if (laddr->sa_family != AF_INET)
		return -EAFNOSUPPORT;

	/*
	 * Respect any iwarp port mapping: Use mapped remote address
	 * if valid. Local address must not be mapped, since siw
	 * uses kernel TCP stack.
	 */
	if ((v4 && to_sockaddr_in(id->remote_addr).sin_port != 0) ||
	     to_sockaddr_in6(id->remote_addr).sin6_port != 0)
		raddr = (struct sockaddr *)&id->m_remote_addr;

	qp = siw_qp_id2obj(sdev, params->qpn);
	if (!qp) {
		WARN(1, "[QP %u] does not exist\n", params->qpn);
		rv = -EINVAL;
		goto error;
	}
	siw_dbg_qp(qp, "pd_len %d, laddr %pISp, raddr %pISp\n", pd_len, laddr,
		   raddr);

	rv = sock_create(v4 ? AF_INET : AF_INET6, SOCK_STREAM, IPPROTO_TCP, &s);
	if (rv < 0)
		goto error;

	/*
	 * NOTE: For simplification, connect() is called in blocking
	 * mode. Might be reconsidered for async connection setup at
	 * TCP level.
	 */
	rv = kernel_bindconnect(s, laddr, raddr, id->afonly);
	if (rv != 0) {
		siw_dbg_qp(qp, "kernel_bindconnect: error %d\n", rv);
		goto error;
	}
	if (siw_tcp_nagle == false)
		tcp_sock_set_nodelay(s->sk);
	cep = siw_cep_alloc(sdev);
	if (!cep) {
		rv = -ENOMEM;
		goto error;
	}
	siw_cep_set_inuse(cep);

	/* Associate QP with CEP */
	siw_cep_get(cep);
	qp->cep = cep;

	/* siw_qp_get(qp) already done by QP lookup */
	cep->qp = qp;

	id->add_ref(id);
	cep->cm_id = id;

	/*
	 * 4: Allocate a sufficient number of work elements
	 * to allow concurrent handling of local + peer close
	 * events, MPA header processing + MPA timeout.
	 */
	rv = siw_cm_alloc_work(cep, 4);
	if (rv != 0) {
		rv = -ENOMEM;
		goto error;
	}
	cep->ird = params->ird;
	cep->ord = params->ord;

	if (p2p_mode && cep->ord == 0)
		cep->ord = 1;

	cep->state = SIW_EPSTATE_CONNECTING;

	/*
	 * Associate CEP with socket
	 */
	siw_cep_socket_assoc(cep, s);

	cep->state = SIW_EPSTATE_AWAIT_MPAREP;

	/*
	 * Set MPA Request bits: CRC if required, no MPA Markers,
	 * MPA Rev. according to module parameter 'mpa_version', Key 'Request'.
	 */
	cep->mpa.hdr.params.bits = 0;
	if (version > MPA_REVISION_2) {
		pr_warn("Setting MPA version to %u\n", MPA_REVISION_2);
		version = MPA_REVISION_2;
		/* Adjust also module parameter */
		mpa_version = MPA_REVISION_2;
	}
	__mpa_rr_set_revision(&cep->mpa.hdr.params.bits, version);

	if (try_gso)
		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_GSO_EXP;

	if (mpa_crc_required)
		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_CRC;

	/*
	 * If MPA version == 2:
	 * o Include ORD and IRD.
	 * o Indicate peer-to-peer mode, if required by module
	 *   parameter 'peer_to_peer'.
	 */
	if (version == MPA_REVISION_2) {
		cep->enhanced_rdma_conn_est = true;
		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_ENHANCED;

		cep->mpa.v2_ctrl.ird = htons(cep->ird);
		cep->mpa.v2_ctrl.ord = htons(cep->ord);

		if (p2p_mode) {
			cep->mpa.v2_ctrl.ird |= MPA_V2_PEER_TO_PEER;
			cep->mpa.v2_ctrl.ord |= rtr_type;
		}
		/* Remember own P2P mode requested */
		cep->mpa.v2_ctrl_req.ird = cep->mpa.v2_ctrl.ird;
		cep->mpa.v2_ctrl_req.ord = cep->mpa.v2_ctrl.ord;
	}
	memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, 16);

	rv = siw_send_mpareqrep(cep, params->private_data, pd_len);
	/*
	 * Reset private data.
	 */
	cep->mpa.hdr.params.pd_len = 0;

	if (rv >= 0) {
		rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
		if (!rv) {
			siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
			siw_cep_set_free(cep);
			return 0;
		}
	}
error:
	siw_dbg(id->device, "failed: %d\n", rv);

	if (cep) {
		siw_socket_disassoc(s);
		sock_release(s);
		cep->sock = NULL;

		cep->qp = NULL;

		cep->cm_id = NULL;
		id->rem_ref(id);

		qp->cep = NULL;
		siw_cep_put(cep);

		cep->state = SIW_EPSTATE_CLOSED;

		siw_cep_set_free(cep);

		siw_cep_put(cep);

	} else if (s) {
		sock_release(s);
	}
	if (qp)
		siw_qp_put(qp);

	return rv;
}

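/*
 * Call-chain context: siw_connect() is not invoked directly by ULPs.
 * It is wired into the ib_device's iw_cm ops, so an rdma_connect() on
 * an iWARP device reaches it via the IWCM, roughly as
 * rdma_connect() -> iw_cm_connect() -> siw_connect(). The same holds
 * for siw_accept()/siw_reject() below, which are reached from
 * rdma_accept()/rdma_reject().
 */
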
/*
 * siw_accept - Let SoftiWARP accept an RDMA connection request
 *
 * @id:		New connection management id to be used for accepted
 *		connection request
 * @params:	Connection parameters provided by ULP for accepting connection
 *
 * Transition QP to RTS state, associate new CM id @id with accepted CEP
 * and get prepared for TCP input by installing socket callbacks.
 * Then send MPA Reply and generate the "connection established" event.
 * Socket callbacks must be installed before sending MPA Reply, because
 * the latter may cause a first RDMA message to arrive from the RDMA Initiator
 * side very quickly, at which time the socket callbacks must be ready.
 */
int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
{
	struct siw_device *sdev = to_siw_dev(id->device);
	struct siw_cep *cep = (struct siw_cep *)id->provider_data;
	struct siw_qp *qp;
	struct siw_qp_attrs qp_attrs;
	int rv, max_priv_data = MPA_MAX_PRIVDATA;
	bool wait_for_peer_rts = false;

	siw_cep_set_inuse(cep);
	siw_cep_put(cep);

	/* Free lingering inbound private data */
	if (cep->mpa.hdr.params.pd_len) {
		cep->mpa.hdr.params.pd_len = 0;
		kfree(cep->mpa.pdata);
		cep->mpa.pdata = NULL;
	}
	siw_cancel_mpatimer(cep);

	if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
		siw_dbg_cep(cep, "out of state\n");

		siw_cep_set_free(cep);
		siw_cep_put(cep);

		return -ECONNRESET;
	}
	qp = siw_qp_id2obj(sdev, params->qpn);
	if (!qp) {
		WARN(1, "[QP %d] does not exist\n", params->qpn);
		siw_cep_set_free(cep);
		siw_cep_put(cep);

		return -EINVAL;
	}
	down_write(&qp->state_lock);
	if (qp->attrs.state > SIW_QP_STATE_RTR) {
		rv = -EINVAL;
		up_write(&qp->state_lock);
		goto error;
	}
	siw_dbg_cep(cep, "[QP %d]\n", params->qpn);

	if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
		siw_dbg_cep(cep, "peer allows GSO on TX\n");
		qp->tx_ctx.gso_seg_limit = 0;
	}
	if (params->ord > sdev->attrs.max_ord ||
	    params->ird > sdev->attrs.max_ird) {
		siw_dbg_cep(
			cep,
			"[QP %u]: ord %d (max %d), ird %d (max %d)\n",
			qp_id(qp), params->ord, sdev->attrs.max_ord,
			params->ird, sdev->attrs.max_ird);
		rv = -EINVAL;
		up_write(&qp->state_lock);
		goto error;
	}
	if (cep->enhanced_rdma_conn_est)
		max_priv_data -= sizeof(struct mpa_v2_data);

	if (params->private_data_len > max_priv_data) {
		siw_dbg_cep(
			cep,
			"[QP %u]: private data length: %d (max %d)\n",
			qp_id(qp), params->private_data_len, max_priv_data);
		rv = -EINVAL;
		up_write(&qp->state_lock);
		goto error;
	}
	if (cep->enhanced_rdma_conn_est) {
		if (params->ord > cep->ord) {
			if (relaxed_ird_negotiation) {
				params->ord = cep->ord;
			} else {
				cep->ird = params->ird;
				cep->ord = params->ord;
				rv = -EINVAL;
				up_write(&qp->state_lock);
				goto error;
			}
		}
		if (params->ird < cep->ird) {
			if (relaxed_ird_negotiation &&
			    cep->ird <= sdev->attrs.max_ird)
				params->ird = cep->ird;
			else {
				rv = -ENOMEM;
				up_write(&qp->state_lock);
				goto error;
			}
		}
		if (cep->mpa.v2_ctrl.ord &
		    (MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR))
			wait_for_peer_rts = true;
		/*
		 * Signal back negotiated IRD and ORD values
		 */
		cep->mpa.v2_ctrl.ord =
			htons(params->ord & MPA_IRD_ORD_MASK) |
			(cep->mpa.v2_ctrl.ord & ~MPA_V2_MASK_IRD_ORD);
		cep->mpa.v2_ctrl.ird =
			htons(params->ird & MPA_IRD_ORD_MASK) |
			(cep->mpa.v2_ctrl.ird & ~MPA_V2_MASK_IRD_ORD);
	}
	cep->ird = params->ird;
	cep->ord = params->ord;

	cep->cm_id = id;
	id->add_ref(id);

	memset(&qp_attrs, 0, sizeof(qp_attrs));
	qp_attrs.orq_size = cep->ord;
	qp_attrs.irq_size = cep->ird;
	qp_attrs.sk = cep->sock;
	if (cep->mpa.hdr.params.bits & MPA_RR_FLAG_CRC)
		qp_attrs.flags = SIW_MPA_CRC;
	qp_attrs.state = SIW_QP_STATE_RTS;

	siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));

	/* Associate QP with CEP */
	siw_cep_get(cep);
	qp->cep = cep;

	/* siw_qp_get(qp) already done by QP lookup */
	cep->qp = qp;

	cep->state = SIW_EPSTATE_RDMA_MODE;

	/* Move socket RX/TX under QP control */
	rv = siw_qp_modify(qp, &qp_attrs,
			   SIW_QP_ATTR_STATE | SIW_QP_ATTR_LLP_HANDLE |
				   SIW_QP_ATTR_ORD | SIW_QP_ATTR_IRD |
				   SIW_QP_ATTR_MPA);
	up_write(&qp->state_lock);
	if (rv)
		goto error;

	siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
		    qp_id(qp), params->private_data_len);

	rv = siw_send_mpareqrep(cep, params->private_data,
				params->private_data_len);
	if (rv)
		goto error;

	if (wait_for_peer_rts) {
		siw_sk_assign_rtr_upcalls(cep);
	} else {
		siw_qp_socket_assoc(cep, qp);
		rv = siw_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
		if (rv)
			goto error;
	}
	siw_cep_set_free(cep);

	return 0;

error:
	siw_socket_disassoc(cep->sock);
	sock_release(cep->sock);
	cep->sock = NULL;

	cep->state = SIW_EPSTATE_CLOSED;

	if (cep->cm_id) {
		cep->cm_id->rem_ref(id);
		cep->cm_id = NULL;
	}
	if (qp->cep) {
		siw_cep_put(cep);
		qp->cep = NULL;
	}
	cep->qp = NULL;
	siw_qp_put(qp);

	siw_cep_set_free(cep);
	siw_cep_put(cep);

	return rv;
}

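/*
 * Note the two completion flavors above: without a pending RTR, the
 * ESTABLISHED event fires immediately after the MPA reply is sent.
 * When the peer owes a zero length RTR frame, establishment is
 * instead signalled from siw_rtr_data_ready() once that frame arrives.
 */
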
/*
 * siw_reject()
 *
 * Local connection reject case. Send private data back to peer,
 * close connection and dereference connection id.
 */
int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
{
	struct siw_cep *cep = (struct siw_cep *)id->provider_data;

	siw_cep_set_inuse(cep);
	siw_cep_put(cep);

	siw_cancel_mpatimer(cep);

	if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
		siw_dbg_cep(cep, "out of state\n");

		siw_cep_set_free(cep);
		siw_cep_put(cep); /* put last reference */

		return -ECONNRESET;
	}
	siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
		    pd_len);

	if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {
		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_REJECT; /* reject */
		siw_send_mpareqrep(cep, pdata, pd_len);
	}
	siw_socket_disassoc(cep->sock);
	sock_release(cep->sock);
	cep->sock = NULL;

	cep->state = SIW_EPSTATE_CLOSED;

	siw_cep_set_free(cep);
	siw_cep_put(cep);

	return 0;
}

/*
 * siw_create_listen - Create resources for a listener's IWCM ID @id
 *
 * Starts listen on the socket address id->local_addr.
 *
 */
int siw_create_listen(struct iw_cm_id *id, int backlog)
{
	struct socket *s;
	struct siw_cep *cep = NULL;
	struct siw_device *sdev = to_siw_dev(id->device);
	int addr_family = id->local_addr.ss_family;
	int rv = 0;

	if (addr_family != AF_INET && addr_family != AF_INET6)
		return -EAFNOSUPPORT;

	rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
	if (rv < 0)
		return rv;

	/*
	 * Allow binding local port when still in TIME_WAIT from last close.
	 */
	sock_set_reuseaddr(s->sk);

	if (addr_family == AF_INET) {
		struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);

		/* For wildcard addr, limit binding to current device only */
		if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;

		rv = s->ops->bind(s, (struct sockaddr *)laddr,
				  sizeof(struct sockaddr_in));
	} else {
		struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);

		if (id->afonly) {
			rv = ip6_sock_set_v6only(s->sk);
			if (rv) {
				siw_dbg(id->device,
					"ip6_sock_set_v6only error: %d\n", rv);
				goto error;
			}
		}

		/* For wildcard addr, limit binding to current device only */
		if (ipv6_addr_any(&laddr->sin6_addr))
			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;

		rv = s->ops->bind(s, (struct sockaddr *)laddr,
				  sizeof(struct sockaddr_in6));
	}
	if (rv) {
		siw_dbg(id->device, "socket bind error: %d\n", rv);
		goto error;
	}
	cep = siw_cep_alloc(sdev);
	if (!cep) {
		rv = -ENOMEM;
		goto error;
	}
	siw_cep_socket_assoc(cep, s);

	rv = siw_cm_alloc_work(cep, backlog);
	if (rv) {
		siw_dbg(id->device,
			"alloc_work error %d, backlog %d\n",
			rv, backlog);
		goto error;
	}
	rv = s->ops->listen(s, backlog);
	if (rv) {
		siw_dbg(id->device, "listen error %d\n", rv);
		goto error;
	}
	cep->cm_id = id;
	id->add_ref(id);

	/*
	 * In case of a wildcard rdma_listen on a multi-homed device,
	 * a listener's IWCM id is associated with more than one listening CEP.
	 *
	 * We currently use id->provider_data in three different ways:
	 *
	 * o For a listener's IWCM id, id->provider_data points to
	 *   the list_head of the list of listening CEPs.
	 *   Uses: siw_create_listen(), siw_destroy_listen()
	 *
	 * o For each accepted passive-side IWCM id, id->provider_data
	 *   points to the CEP itself. This is a consequence of
	 *   - siw_cm_upcall() setting event.provider_data = cep and
	 *   - the IWCM's cm_conn_req_handler() setting provider_data of the
	 *     new passive-side IWCM id equal to event.provider_data
	 *   Uses: siw_accept(), siw_reject()
	 *
	 * o For an active-side IWCM id, id->provider_data is not used at all.
	 *
	 */
	if (!id->provider_data) {
		id->provider_data =
			kmalloc(sizeof(struct list_head), GFP_KERNEL);
		if (!id->provider_data) {
			rv = -ENOMEM;
			goto error;
		}
		INIT_LIST_HEAD((struct list_head *)id->provider_data);
	}
	list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
	cep->state = SIW_EPSTATE_LISTENING;

	siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);

	return 0;

error:
	siw_dbg(id->device, "failed: %d\n", rv);

	if (cep) {
		siw_cep_set_inuse(cep);

		if (cep->cm_id) {
			cep->cm_id->rem_ref(cep->cm_id);
			cep->cm_id = NULL;
		}
		cep->sock = NULL;
		siw_socket_disassoc(s);
		cep->state = SIW_EPSTATE_CLOSED;

		siw_cep_set_free(cep);
		siw_cep_put(cep);
	}
	sock_release(s);

	return rv;
}

static void siw_drop_listeners(struct iw_cm_id *id)
{
	struct list_head *p, *tmp;

	/*
	 * In case of a wildcard rdma_listen on a multi-homed device,
	 * a listener's IWCM id is associated with more than one listening CEP.
	 */
	list_for_each_safe(p, tmp, (struct list_head *)id->provider_data) {
		struct siw_cep *cep = list_entry(p, struct siw_cep, listenq);

		list_del(p);

		siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);

		siw_cep_set_inuse(cep);

		if (cep->cm_id) {
			cep->cm_id->rem_ref(cep->cm_id);
			cep->cm_id = NULL;
		}
		if (cep->sock) {
			siw_socket_disassoc(cep->sock);
			sock_release(cep->sock);
			cep->sock = NULL;
		}
		cep->state = SIW_EPSTATE_CLOSED;
		siw_cep_set_free(cep);
		siw_cep_put(cep);
	}
}

int siw_destroy_listen(struct iw_cm_id *id)
{
	if (!id->provider_data) {
		siw_dbg(id->device, "no cep(s)\n");
		return 0;
	}
	siw_drop_listeners(id);
	kfree(id->provider_data);
	id->provider_data = NULL;

	return 0;
}

int siw_cm_init(void)
{
	/*
	 * create_single_workqueue for strict ordering
	 */
	siw_cm_wq = create_singlethread_workqueue("siw_cm_wq");
	if (!siw_cm_wq)
		return -ENOMEM;

	return 0;
}

void siw_cm_exit(void)
{
	if (siw_cm_wq)
		destroy_workqueue(siw_cm_wq);
}