// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017, Microsoft Corporation.
 * Copyright (C) 2018, LG Electronics.
 *
 * Author(s): Long Li <longli@microsoft.com>,
 *	      Hyunchul Lee <hyc.lee@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 */

#define SUBMOD_NAME	"smb_direct"

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include "glob.h"
#include "connection.h"
#include "smb_common.h"
#include "smbstatus.h"
#include "transport_rdma.h"

#define SMB_DIRECT_PORT	5445

#define SMB_DIRECT_VERSION_LE		cpu_to_le16(0x0100)

/* SMB_DIRECT negotiation timeout in seconds */
#define SMB_DIRECT_NEGOTIATE_TIMEOUT		120

#define SMB_DIRECT_MAX_SEND_SGES		8
#define SMB_DIRECT_MAX_RECV_SGES		1

/*
 * Default maximum number of outstanding RDMA read/write operations on this
 * connection. This value may be decreased during QP creation, based on
 * hardware limits.
 */
#define SMB_DIRECT_CM_INITIATOR_DEPTH		8

/* Maximum number of retries on data transfer operations */
#define SMB_DIRECT_CM_RETRY			6
/* No need to retry on Receiver Not Ready since SMB_DIRECT manages credits */
#define SMB_DIRECT_CM_RNR_RETRY		0

/*
 * User configurable initial values per SMB_DIRECT transport connection
 * as defined in [MS-SMBD] 3.1.1.1.
 * These may change after SMB_DIRECT negotiation.
 */
/* The local peer's maximum number of credits to grant to the peer */
static int smb_direct_receive_credit_max = 255;

/* The number of send credits requested from the remote peer */
static int smb_direct_send_credit_target = 255;

/* The maximum single-message size that can be sent to the remote peer */
static int smb_direct_max_send_size = 8192;

/* The maximum fragmented upper-layer payload receive size supported */
static int smb_direct_max_fragmented_recv_size = 1024 * 1024;

/* The maximum single-message size that can be received */
static int smb_direct_max_receive_size = 8192;

static int smb_direct_max_read_write_size = 1024 * 1024;

static int smb_direct_max_outstanding_rw_ops = 8;

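/*
 * Flow-control overview (informational): SMB_DIRECT is credit based per
 * [MS-SMBD]. Every receive buffer posted is one credit granted to the peer,
 * and a peer may only send while it holds credits. Credits requested and
 * granted travel in the header of every data transfer message, and
 * send_immediate_work below exists to grant fresh credits with an empty
 * message when no payload is flowing in the opposite direction.
 */
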
static struct smb_direct_listener {
	struct rdma_cm_id	*cm_id;
} smb_direct_listener;

static struct workqueue_struct *smb_direct_wq;

enum smb_direct_status {
	SMB_DIRECT_CS_NEW = 0,
	SMB_DIRECT_CS_CONNECTED,
	SMB_DIRECT_CS_DISCONNECTING,
	SMB_DIRECT_CS_DISCONNECTED,
};

struct smb_direct_transport {
	struct ksmbd_transport	transport;

	enum smb_direct_status	status;
	bool			full_packet_received;
	wait_queue_head_t	wait_status;

	struct rdma_cm_id	*cm_id;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_pd		*pd;
	struct ib_qp		*qp;

	int			max_send_size;
	int			max_recv_size;
	int			max_fragmented_send_size;
	int			max_fragmented_recv_size;
	int			max_rdma_rw_size;

	spinlock_t		reassembly_queue_lock;
	struct list_head	reassembly_queue;
	int			reassembly_data_length;
	int			reassembly_queue_length;
	int			first_entry_offset;
	wait_queue_head_t	wait_reassembly_queue;

	spinlock_t		receive_credit_lock;
	int			recv_credits;
	int			count_avail_recvmsg;
	int			recv_credit_max;
	int			recv_credit_target;

	spinlock_t		recvmsg_queue_lock;
	struct list_head	recvmsg_queue;

	spinlock_t		empty_recvmsg_queue_lock;
	struct list_head	empty_recvmsg_queue;

	int			send_credit_target;
	atomic_t		send_credits;
	spinlock_t		lock_new_recv_credits;
	int			new_recv_credits;
	atomic_t		rw_avail_ops;

	wait_queue_head_t	wait_send_credits;
	wait_queue_head_t	wait_rw_avail_ops;

	mempool_t		*sendmsg_mempool;
	struct kmem_cache	*sendmsg_cache;
	mempool_t		*recvmsg_mempool;
	struct kmem_cache	*recvmsg_cache;

	wait_queue_head_t	wait_send_payload_pending;
	atomic_t		send_payload_pending;
	wait_queue_head_t	wait_send_pending;
	atomic_t		send_pending;

	struct delayed_work	post_recv_credits_work;
	struct work_struct	send_immediate_work;
	struct work_struct	disconnect_work;

	bool			negotiation_requested;
};

#define KSMBD_TRANS(t)	((struct ksmbd_transport *)&((t)->transport))

enum {
	SMB_DIRECT_MSG_NEGOTIATE_REQ = 0,
	SMB_DIRECT_MSG_DATA_TRANSFER
};

static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops;

struct smb_direct_send_ctx {
	struct list_head	msg_list;
	int			wr_cnt;
	bool			need_invalidate_rkey;
	unsigned int		remote_key;
};

struct smb_direct_sendmsg {
	struct smb_direct_transport	*transport;
	struct ib_send_wr	wr;
	struct list_head	list;
	int			num_sge;
	struct ib_sge		sge[SMB_DIRECT_MAX_SEND_SGES];
	struct ib_cqe		cqe;
	u8			packet[];
};

struct smb_direct_recvmsg {
	struct smb_direct_transport	*transport;
	struct list_head	list;
	int			type;
	struct ib_sge		sge;
	struct ib_cqe		cqe;
	bool			first_segment;
	u8			packet[];
};

struct smb_direct_rdma_rw_msg {
	struct smb_direct_transport	*t;
	struct ib_cqe		cqe;
	struct completion	*completion;
	struct rdma_rw_ctx	rw_ctx;
	struct sg_table		sgt;
	struct scatterlist	sg_list[];
};

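/*
 * Number of pages a buffer spans, counting partial first and last pages.
 * For example, with 4 KiB pages, a buffer at page offset 0xff0 with size
 * 0x20 spans two pages: DIV_ROUND_UP(0xff0 + 0x20, 0x1000) - 0xff0 / 0x1000
 * = 2 - 0 = 2.
 */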
static inline int get_buf_page_count(void *buf, int size)
{
	return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
		(uintptr_t)buf / PAGE_SIZE;
}

static void smb_direct_destroy_pools(struct smb_direct_transport *transport);
static void smb_direct_post_recv_credits(struct work_struct *work);
static int smb_direct_post_send_data(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length);

static inline struct smb_direct_transport *
smb_trans_direct_transfort(struct ksmbd_transport *t)
{
	return container_of(t, struct smb_direct_transport, transport);
}

static inline void
*smb_direct_recvmsg_payload(struct smb_direct_recvmsg *recvmsg)
{
	return (void *)recvmsg->packet;
}

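/*
 * Post more receive buffers only when credits run low and enough consumed
 * buffers have piled up to make the repost worthwhile. With the default
 * smb_direct_receive_credit_max of 255, this triggers once the peer's
 * remaining credits drop to 31 (255 >> 3) or fewer and the number of
 * available buffers is at least a quarter of those remaining credits.
 */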
static inline bool is_receive_credit_post_required(int receive_credits,
						   int avail_recvmsg_count)
{
	return receive_credits <= (smb_direct_receive_credit_max >> 3) &&
		avail_recvmsg_count >= (receive_credits >> 2);
}

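/*
 * Receive buffers live on two free lists: recvmsg_queue holds buffers that
 * are ready to be posted, while empty_recvmsg_queue collects buffers whose
 * contents have been consumed (or were never valid) and that wait for
 * smb_direct_post_recv_credits() to repost them.
 */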
static struct
smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg = NULL;

	spin_lock(&t->recvmsg_queue_lock);
	if (!list_empty(&t->recvmsg_queue)) {
		recvmsg = list_first_entry(&t->recvmsg_queue,
					   struct smb_direct_recvmsg,
					   list);
		list_del(&recvmsg->list);
	}
	spin_unlock(&t->recvmsg_queue_lock);
	return recvmsg;
}

static void put_recvmsg(struct smb_direct_transport *t,
			struct smb_direct_recvmsg *recvmsg)
{
	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
			    recvmsg->sge.length, DMA_FROM_DEVICE);

	spin_lock(&t->recvmsg_queue_lock);
	list_add(&recvmsg->list, &t->recvmsg_queue);
	spin_unlock(&t->recvmsg_queue_lock);
}

static struct
smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg = NULL;

	spin_lock(&t->empty_recvmsg_queue_lock);
	if (!list_empty(&t->empty_recvmsg_queue)) {
		recvmsg = list_first_entry(&t->empty_recvmsg_queue,
					   struct smb_direct_recvmsg, list);
		list_del(&recvmsg->list);
	}
	spin_unlock(&t->empty_recvmsg_queue_lock);
	return recvmsg;
}

static void put_empty_recvmsg(struct smb_direct_transport *t,
			      struct smb_direct_recvmsg *recvmsg)
{
	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
			    recvmsg->sge.length, DMA_FROM_DEVICE);

	spin_lock(&t->empty_recvmsg_queue_lock);
	list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
	spin_unlock(&t->empty_recvmsg_queue_lock);
}

static void enqueue_reassembly(struct smb_direct_transport *t,
			       struct smb_direct_recvmsg *recvmsg,
			       int data_length)
{
	spin_lock(&t->reassembly_queue_lock);
	list_add_tail(&recvmsg->list, &t->reassembly_queue);
	t->reassembly_queue_length++;
	/*
	 * Make sure reassembly_data_length is updated after list and
	 * reassembly_queue_length are updated. On the dequeue side
	 * reassembly_data_length is checked without a lock to determine
	 * if reassembly_queue_length and the list are up to date.
	 */
	virt_wmb();
	t->reassembly_data_length += data_length;
	spin_unlock(&t->reassembly_queue_lock);
}

static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t)
{
	if (!list_empty(&t->reassembly_queue))
		return list_first_entry(&t->reassembly_queue,
				struct smb_direct_recvmsg, list);
	else
		return NULL;
}

static void smb_direct_disconnect_rdma_work(struct work_struct *work)
{
	struct smb_direct_transport *t =
		container_of(work, struct smb_direct_transport,
			     disconnect_work);

	if (t->status == SMB_DIRECT_CS_CONNECTED) {
		t->status = SMB_DIRECT_CS_DISCONNECTING;
		rdma_disconnect(t->cm_id);
	}
}

static void
smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t)
{
	if (t->status == SMB_DIRECT_CS_CONNECTED)
		queue_work(smb_direct_wq, &t->disconnect_work);
}

static void smb_direct_send_immediate_work(struct work_struct *work)
{
	struct smb_direct_transport *t = container_of(work,
			struct smb_direct_transport, send_immediate_work);

	if (t->status != SMB_DIRECT_CS_CONNECTED)
		return;

	smb_direct_post_send_data(t, NULL, NULL, 0, 0);
}

static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
{
	struct smb_direct_transport *t;
	struct ksmbd_conn *conn;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->cm_id = cm_id;
	cm_id->context = t;

	t->status = SMB_DIRECT_CS_NEW;
	init_waitqueue_head(&t->wait_status);

	spin_lock_init(&t->reassembly_queue_lock);
	INIT_LIST_HEAD(&t->reassembly_queue);
	t->reassembly_data_length = 0;
	t->reassembly_queue_length = 0;
	init_waitqueue_head(&t->wait_reassembly_queue);
	init_waitqueue_head(&t->wait_send_credits);
	init_waitqueue_head(&t->wait_rw_avail_ops);

	spin_lock_init(&t->receive_credit_lock);
	spin_lock_init(&t->recvmsg_queue_lock);
	INIT_LIST_HEAD(&t->recvmsg_queue);

	spin_lock_init(&t->empty_recvmsg_queue_lock);
	INIT_LIST_HEAD(&t->empty_recvmsg_queue);

	init_waitqueue_head(&t->wait_send_payload_pending);
	atomic_set(&t->send_payload_pending, 0);
	init_waitqueue_head(&t->wait_send_pending);
	atomic_set(&t->send_pending, 0);

	spin_lock_init(&t->lock_new_recv_credits);

	INIT_DELAYED_WORK(&t->post_recv_credits_work,
			  smb_direct_post_recv_credits);
	INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work);
	INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work);

	conn = ksmbd_conn_alloc();
	if (!conn)
		goto err;
	conn->transport = KSMBD_TRANS(t);
	KSMBD_TRANS(t)->conn = conn;
	KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
	return t;
err:
	kfree(t);
	return NULL;
}

static void free_transport(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg;

	wake_up_interruptible(&t->wait_send_credits);

	ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n");
	wait_event(t->wait_send_payload_pending,
		   atomic_read(&t->send_payload_pending) == 0);
	wait_event(t->wait_send_pending,
		   atomic_read(&t->send_pending) == 0);

	cancel_work_sync(&t->disconnect_work);
	cancel_delayed_work_sync(&t->post_recv_credits_work);
	cancel_work_sync(&t->send_immediate_work);

	if (t->qp) {
		ib_drain_qp(t->qp);
		ib_destroy_qp(t->qp);
	}

	ksmbd_debug(RDMA, "drain the reassembly queue\n");
	do {
		spin_lock(&t->reassembly_queue_lock);
		recvmsg = get_first_reassembly(t);
		if (recvmsg) {
			list_del(&recvmsg->list);
			spin_unlock(&t->reassembly_queue_lock);
			put_recvmsg(t, recvmsg);
		} else {
			spin_unlock(&t->reassembly_queue_lock);
		}
	} while (recvmsg);
	t->reassembly_data_length = 0;

	if (t->send_cq)
		ib_free_cq(t->send_cq);
	if (t->recv_cq)
		ib_free_cq(t->recv_cq);
	if (t->pd)
		ib_dealloc_pd(t->pd);
	if (t->cm_id)
		rdma_destroy_id(t->cm_id);

	smb_direct_destroy_pools(t);
	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
	kfree(t);
}

static struct smb_direct_sendmsg
*smb_direct_alloc_sendmsg(struct smb_direct_transport *t)
{
	struct smb_direct_sendmsg *msg;

	msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);
	msg->transport = t;
	INIT_LIST_HEAD(&msg->list);
	msg->num_sge = 0;
	return msg;
}

static void smb_direct_free_sendmsg(struct smb_direct_transport *t,
				    struct smb_direct_sendmsg *msg)
{
	int i;

	if (msg->num_sge > 0) {
		ib_dma_unmap_single(t->cm_id->device,
				    msg->sge[0].addr, msg->sge[0].length,
				    DMA_TO_DEVICE);
		for (i = 1; i < msg->num_sge; i++)
			ib_dma_unmap_page(t->cm_id->device,
					  msg->sge[i].addr, msg->sge[i].length,
					  DMA_TO_DEVICE);
	}
	mempool_free(msg, t->sendmsg_mempool);
}

static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg)
{
	switch (recvmsg->type) {
	case SMB_DIRECT_MSG_DATA_TRANSFER: {
		struct smb_direct_data_transfer *req =
			(struct smb_direct_data_transfer *)recvmsg->packet;
		struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet
				+ le32_to_cpu(req->data_offset) - 4);
		ksmbd_debug(RDMA,
			    "CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n",
			    le16_to_cpu(req->credits_granted),
			    le16_to_cpu(req->credits_requested),
			    req->data_length, req->remaining_data_length,
			    hdr->ProtocolId, hdr->Command);
		break;
	}
	case SMB_DIRECT_MSG_NEGOTIATE_REQ: {
		struct smb_direct_negotiate_req *req =
			(struct smb_direct_negotiate_req *)recvmsg->packet;
		ksmbd_debug(RDMA,
			    "MinVersion: %u, MaxVersion: %u, CreditRequested: %u, MaxSendSize: %u, MaxRecvSize: %u, MaxFragmentedSize: %u\n",
			    le16_to_cpu(req->min_version),
			    le16_to_cpu(req->max_version),
			    le16_to_cpu(req->credits_requested),
			    le32_to_cpu(req->preferred_send_size),
			    le32_to_cpu(req->max_receive_size),
			    le32_to_cpu(req->max_fragmented_size));
		if (le16_to_cpu(req->min_version) > 0x0100 ||
		    le16_to_cpu(req->max_version) < 0x0100)
			return -EOPNOTSUPP;
		if (le16_to_cpu(req->credits_requested) <= 0 ||
		    le32_to_cpu(req->max_receive_size) <= 128 ||
		    le32_to_cpu(req->max_fragmented_size) <=
		    128 * 1024)
			return -ECONNABORTED;

		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_transport *t;

	recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
	t = recvmsg->transport;

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("Recv error. status='%s (%d)' opcode=%d\n",
			       ib_wc_status_msg(wc->status), wc->status,
			       wc->opcode);
			smb_direct_disconnect_rdma_connection(t);
		}
		put_empty_recvmsg(t, recvmsg);
		return;
	}

	ksmbd_debug(RDMA, "Recv completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr,
				   recvmsg->sge.length, DMA_FROM_DEVICE);

	switch (recvmsg->type) {
	case SMB_DIRECT_MSG_NEGOTIATE_REQ:
		if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
			put_empty_recvmsg(t, recvmsg);
			return;
		}
		t->negotiation_requested = true;
		t->full_packet_received = true;
		wake_up_interruptible(&t->wait_status);
		break;
	case SMB_DIRECT_MSG_DATA_TRANSFER: {
		struct smb_direct_data_transfer *data_transfer =
			(struct smb_direct_data_transfer *)recvmsg->packet;
		unsigned int data_length;
		int avail_recvmsg_count, receive_credits;

		if (wc->byte_len <
		    offsetof(struct smb_direct_data_transfer, padding)) {
			put_empty_recvmsg(t, recvmsg);
			return;
		}

		data_length = le32_to_cpu(data_transfer->data_length);
		if (data_length) {
			if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
			    (u64)data_length) {
				put_empty_recvmsg(t, recvmsg);
				return;
			}

			if (t->full_packet_received)
				recvmsg->first_segment = true;

			if (le32_to_cpu(data_transfer->remaining_data_length))
				t->full_packet_received = false;
			else
				t->full_packet_received = true;

			enqueue_reassembly(t, recvmsg, (int)data_length);
			wake_up_interruptible(&t->wait_reassembly_queue);

			spin_lock(&t->receive_credit_lock);
			receive_credits = --(t->recv_credits);
			avail_recvmsg_count = t->count_avail_recvmsg;
			spin_unlock(&t->receive_credit_lock);
		} else {
			put_empty_recvmsg(t, recvmsg);

			spin_lock(&t->receive_credit_lock);
			receive_credits = --(t->recv_credits);
			avail_recvmsg_count = ++(t->count_avail_recvmsg);
			spin_unlock(&t->receive_credit_lock);
		}

		t->recv_credit_target =
				le16_to_cpu(data_transfer->credits_requested);
		atomic_add(le16_to_cpu(data_transfer->credits_granted),
			   &t->send_credits);

		if (le16_to_cpu(data_transfer->flags) &
		    SMB_DIRECT_RESPONSE_REQUESTED)
			queue_work(smb_direct_wq, &t->send_immediate_work);

		if (atomic_read(&t->send_credits) > 0)
			wake_up_interruptible(&t->wait_send_credits);

		if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
			mod_delayed_work(smb_direct_wq,
					 &t->post_recv_credits_work, 0);
		return;
	}
	default:
		break;
	}
}

static int smb_direct_post_recv(struct smb_direct_transport *t,
				struct smb_direct_recvmsg *recvmsg)
{
	struct ib_recv_wr wr;
	int ret;

	recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device,
					      recvmsg->packet, t->max_recv_size,
					      DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr);
	if (ret)
		return ret;
	recvmsg->sge.length = t->max_recv_size;
	recvmsg->sge.lkey = t->pd->local_dma_lkey;
	recvmsg->cqe.done = recv_done;

	wr.wr_cqe = &recvmsg->cqe;
	wr.next = NULL;
	wr.sg_list = &recvmsg->sge;
	wr.num_sge = 1;

	ret = ib_post_recv(t->qp, &wr, NULL);
	if (ret) {
		pr_err("Can't post recv: %d\n", ret);
		ib_dma_unmap_single(t->cm_id->device,
				    recvmsg->sge.addr, recvmsg->sge.length,
				    DMA_FROM_DEVICE);
		smb_direct_disconnect_rdma_connection(t);
		return ret;
	}
	return ret;
}

static int smb_direct_read(struct ksmbd_transport *t, char *buf,
			   unsigned int size)
{
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_data_transfer *data_transfer;
	int to_copy, to_read, data_read, offset;
	u32 data_length, remaining_data_length, data_offset;
	int rc;
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);

again:
	if (st->status != SMB_DIRECT_CS_CONNECTED) {
		pr_err("disconnected\n");
		return -ENOTCONN;
	}

	/*
	 * No need to hold the reassembly queue lock all the time as we are
	 * the only one reading from the front of the queue. The transport
	 * may add more entries to the back of the queue at the same time.
	 */
	if (st->reassembly_data_length >= size) {
		int queue_length;
		int queue_removed = 0;

		/*
		 * Need to make sure reassembly_data_length is read before
		 * reading reassembly_queue_length and calling
		 * get_first_reassembly. This call is lock-free as we never
		 * read the end of the queue, which is updated in SOFTIRQ
		 * context as more data is received.
		 */
		virt_rmb();
		queue_length = st->reassembly_queue_length;
		data_read = 0;
		to_read = size;
		offset = st->first_entry_offset;
		while (data_read < size) {
			recvmsg = get_first_reassembly(st);
			data_transfer = smb_direct_recvmsg_payload(recvmsg);
			data_length = le32_to_cpu(data_transfer->data_length);
			remaining_data_length =
				le32_to_cpu(data_transfer->remaining_data_length);
			data_offset = le32_to_cpu(data_transfer->data_offset);

			/*
			 * The upper layer expects an RFC1002 length at the
			 * beginning of the payload. Return it to indicate
			 * the total length of the packet. This minimizes
			 * changes to the upper-layer packet processing logic
			 * and will eventually be removed when an intermediate
			 * transport layer is added.
			 */
			if (recvmsg->first_segment && size == 4) {
				unsigned int rfc1002_len =
					data_length + remaining_data_length;
				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
				data_read = 4;
				recvmsg->first_segment = false;
				ksmbd_debug(RDMA,
					    "returning rfc1002 length %d\n",
					    rfc1002_len);
				goto read_rfc1002_done;
			}

			to_copy = min_t(int, data_length - offset, to_read);
			memcpy(buf + data_read, (char *)data_transfer + data_offset + offset,
			       to_copy);

			/* move on to the next buffer? */
			if (to_copy == data_length - offset) {
				queue_length--;
				/*
				 * No need to lock if we are not at the
				 * end of the queue
				 */
				if (queue_length) {
					list_del(&recvmsg->list);
				} else {
					spin_lock_irq(&st->reassembly_queue_lock);
					list_del(&recvmsg->list);
					spin_unlock_irq(&st->reassembly_queue_lock);
				}
				queue_removed++;
				put_recvmsg(st, recvmsg);
				offset = 0;
			} else {
				offset += to_copy;
			}

			to_read -= to_copy;
			data_read += to_copy;
		}

		spin_lock_irq(&st->reassembly_queue_lock);
		st->reassembly_data_length -= data_read;
		st->reassembly_queue_length -= queue_removed;
		spin_unlock_irq(&st->reassembly_queue_lock);

		spin_lock(&st->receive_credit_lock);
		st->count_avail_recvmsg += queue_removed;
		if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) {
			spin_unlock(&st->receive_credit_lock);
			mod_delayed_work(smb_direct_wq,
					 &st->post_recv_credits_work, 0);
		} else {
			spin_unlock(&st->receive_credit_lock);
		}

		st->first_entry_offset = offset;
		ksmbd_debug(RDMA,
			    "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
			    data_read, st->reassembly_data_length,
			    st->first_entry_offset);
read_rfc1002_done:
		return data_read;
	}

	ksmbd_debug(RDMA, "wait_event on more data\n");
	rc = wait_event_interruptible(st->wait_reassembly_queue,
				      st->reassembly_data_length >= size ||
				      st->status != SMB_DIRECT_CS_CONNECTED);
	if (rc)
		return -EINTR;

	goto again;
}

static void smb_direct_post_recv_credits(struct work_struct *work)
{
	struct smb_direct_transport *t = container_of(work,
		struct smb_direct_transport, post_recv_credits_work.work);
	struct smb_direct_recvmsg *recvmsg;
	int receive_credits, credits = 0;
	int ret;
	int use_free = 1;

	spin_lock(&t->receive_credit_lock);
	receive_credits = t->recv_credits;
	spin_unlock(&t->receive_credit_lock);

	if (receive_credits < t->recv_credit_target) {
		while (true) {
			if (use_free)
				recvmsg = get_free_recvmsg(t);
			else
				recvmsg = get_empty_recvmsg(t);
			if (!recvmsg) {
				if (use_free) {
					use_free = 0;
					continue;
				} else {
					break;
				}
			}

			recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
			recvmsg->first_segment = false;

			ret = smb_direct_post_recv(t, recvmsg);
			if (ret) {
				pr_err("Can't post recv: %d\n", ret);
				put_recvmsg(t, recvmsg);
				break;
			}
			credits++;
		}
	}

	spin_lock(&t->receive_credit_lock);
	t->recv_credits += credits;
	t->count_avail_recvmsg -= credits;
	spin_unlock(&t->receive_credit_lock);

	spin_lock(&t->lock_new_recv_credits);
	t->new_recv_credits += credits;
	spin_unlock(&t->lock_new_recv_credits);

	if (credits)
		queue_work(smb_direct_wq, &t->send_immediate_work);
}

static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smb_direct_sendmsg *sendmsg, *sibling;
	struct smb_direct_transport *t;
	struct list_head *pos, *prev, *end;

	sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe);
	t = sendmsg->transport;

	ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
		pr_err("Send error. status='%s (%d)', opcode=%d\n",
		       ib_wc_status_msg(wc->status), wc->status,
		       wc->opcode);
		smb_direct_disconnect_rdma_connection(t);
	}

	if (sendmsg->num_sge > 1) {
		if (atomic_dec_and_test(&t->send_payload_pending))
			wake_up(&t->wait_send_payload_pending);
	} else {
		if (atomic_dec_and_test(&t->send_pending))
			wake_up(&t->wait_send_pending);
	}

	/* iterate and free the list of messages in reverse. the list's head
	 * is invalid.
	 */
	for (pos = &sendmsg->list, prev = pos->prev, end = sendmsg->list.next;
	     prev != end; pos = prev, prev = prev->prev) {
		sibling = container_of(pos, struct smb_direct_sendmsg, list);
		smb_direct_free_sendmsg(t, sibling);
	}

	sibling = container_of(pos, struct smb_direct_sendmsg, list);
	smb_direct_free_sendmsg(t, sibling);
}

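/*
 * Credits granted to the peer are batched: each receive buffer posted by
 * post_recv_credits_work bumps new_recv_credits, and this helper drains
 * that counter so the next outgoing header can advertise the whole batch
 * in its credits_granted field.
 */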
static int manage_credits_prior_sending(struct smb_direct_transport *t)
{
	int new_credits;

	spin_lock(&t->lock_new_recv_credits);
	new_credits = t->new_recv_credits;
	t->new_recv_credits = 0;
	spin_unlock(&t->lock_new_recv_credits);

	return new_credits;
}

static int smb_direct_post_send(struct smb_direct_transport *t,
				struct ib_send_wr *wr)
{
	int ret;

	if (wr->num_sge > 1)
		atomic_inc(&t->send_payload_pending);
	else
		atomic_inc(&t->send_pending);

	ret = ib_post_send(t->qp, wr, NULL);
	if (ret) {
		pr_err("failed to post send: %d\n", ret);
		if (wr->num_sge > 1) {
			if (atomic_dec_and_test(&t->send_payload_pending))
				wake_up(&t->wait_send_payload_pending);
		} else {
			if (atomic_dec_and_test(&t->send_pending))
				wake_up(&t->wait_send_pending);
		}
		smb_direct_disconnect_rdma_connection(t);
	}
	return ret;
}

static void smb_direct_send_ctx_init(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     bool need_invalidate_rkey,
				     unsigned int remote_key)
{
	INIT_LIST_HEAD(&send_ctx->msg_list);
	send_ctx->wr_cnt = 0;
	send_ctx->need_invalidate_rkey = need_invalidate_rkey;
	send_ctx->remote_key = remote_key;
}

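/*
 * Sends staged in a send_ctx are chained through wr.next and posted with a
 * single ib_post_send(); only the last WR in the chain is signaled (and
 * carries the optional remote-key invalidation), so one completion in
 * send_done() frees the whole batch.
 */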
static int smb_direct_flush_send_list(struct smb_direct_transport *t,
				      struct smb_direct_send_ctx *send_ctx,
				      bool is_last)
{
	struct smb_direct_sendmsg *first, *last;
	int ret;

	if (list_empty(&send_ctx->msg_list))
		return 0;

	first = list_first_entry(&send_ctx->msg_list,
				 struct smb_direct_sendmsg,
				 list);
	last = list_last_entry(&send_ctx->msg_list,
			       struct smb_direct_sendmsg,
			       list);

	last->wr.send_flags = IB_SEND_SIGNALED;
	last->wr.wr_cqe = &last->cqe;
	if (is_last && send_ctx->need_invalidate_rkey) {
		last->wr.opcode = IB_WR_SEND_WITH_INV;
		last->wr.ex.invalidate_rkey = send_ctx->remote_key;
	}

	ret = smb_direct_post_send(t, &first->wr);
	if (!ret) {
		smb_direct_send_ctx_init(t, send_ctx,
					 send_ctx->need_invalidate_rkey,
					 send_ctx->remote_key);
	} else {
		atomic_add(send_ctx->wr_cnt, &t->send_credits);
		wake_up(&t->wait_send_credits);
		list_for_each_entry_safe(first, last, &send_ctx->msg_list,
					 list) {
			smb_direct_free_sendmsg(t, first);
		}
	}
	return ret;
}

static int wait_for_credits(struct smb_direct_transport *t,
			    wait_queue_head_t *waitq, atomic_t *credits)
{
	int ret;

	do {
		if (atomic_dec_return(credits) >= 0)
			return 0;

		atomic_inc(credits);
		ret = wait_event_interruptible(*waitq,
					       atomic_read(credits) > 0 ||
					       t->status != SMB_DIRECT_CS_CONNECTED);

		if (t->status != SMB_DIRECT_CS_CONNECTED)
			return -ENOTCONN;
		else if (ret < 0)
			return ret;
	} while (true);
}

static int wait_for_send_credits(struct smb_direct_transport *t,
				 struct smb_direct_send_ctx *send_ctx)
{
	int ret;

	if (send_ctx &&
	    (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) {
		ret = smb_direct_flush_send_list(t, send_ctx, false);
		if (ret)
			return ret;
	}

	return wait_for_credits(t, &t->wait_send_credits, &t->send_credits);
}

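/*
 * The SMB_DIRECT data transfer header is 24 bytes ([MS-SMBD] 2.2.3), which
 * is why data_offset is set to 24 when a payload follows. For header-only
 * messages the trailing padding is not sent, so the wire length shrinks to
 * offsetof(struct smb_direct_data_transfer, padding).
 */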
static int smb_direct_create_header(struct smb_direct_transport *t,
				    int size, int remaining_data_length,
				    struct smb_direct_sendmsg **sendmsg_out)
{
	struct smb_direct_sendmsg *sendmsg;
	struct smb_direct_data_transfer *packet;
	int header_length;
	int ret;

	sendmsg = smb_direct_alloc_sendmsg(t);
	if (IS_ERR(sendmsg))
		return PTR_ERR(sendmsg);

	/* Fill in the packet header */
	packet = (struct smb_direct_data_transfer *)sendmsg->packet;
	packet->credits_requested = cpu_to_le16(t->send_credit_target);
	packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));

	packet->flags = 0;
	packet->reserved = 0;
	if (!size)
		packet->data_offset = 0;
	else
		packet->data_offset = cpu_to_le32(24);
	packet->data_length = cpu_to_le32(size);
	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
	packet->padding = 0;

	ksmbd_debug(RDMA,
		    "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
		    le16_to_cpu(packet->credits_requested),
		    le16_to_cpu(packet->credits_granted),
		    le32_to_cpu(packet->data_offset),
		    le32_to_cpu(packet->data_length),
		    le32_to_cpu(packet->remaining_data_length));

	/* Map the packet to DMA */
	header_length = sizeof(struct smb_direct_data_transfer);
	/* If this is a packet without payload, don't send padding */
	if (!size)
		header_length =
			offsetof(struct smb_direct_data_transfer, padding);

	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
						 (void *)packet,
						 header_length,
						 DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	sendmsg->num_sge = 1;
	sendmsg->sge[0].length = header_length;
	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;

	*sendmsg_out = sendmsg;
	return 0;
}

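/*
 * Translate a kernel buffer (kmalloc'd or vmalloc'd) into a scatterlist,
 * one entry per page touched. Returns the number of entries used, or
 * -EINVAL if nentries is too small for the buffer.
 */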
static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nentries)
{
	bool high = is_vmalloc_addr(buf);
	struct page *page;
	int offset, len;
	int i = 0;

	if (nentries < get_buf_page_count(buf, size))
		return -EINVAL;

	offset = offset_in_page(buf);
	buf -= offset;
	while (size > 0) {
		len = min_t(int, PAGE_SIZE - offset, size);
		if (high)
			page = vmalloc_to_page(buf);
		else
			page = kmap_to_page(buf);

		if (!sg_list)
			return -EINVAL;
		sg_set_page(sg_list, page, len, offset);
		sg_list = sg_next(sg_list);

		buf += PAGE_SIZE;
		size -= len;
		offset = 0;
		i++;
	}
	return i;
}

static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
			      struct scatterlist *sg_list, int nentries,
			      enum dma_data_direction dir)
{
	int npages;

	npages = get_sg_list(buf, size, sg_list, nentries);
	if (npages <= 0)
		return -EINVAL;
	return ib_dma_map_sg(device, sg_list, npages, dir);
}

static int post_sendmsg(struct smb_direct_transport *t,
			struct smb_direct_send_ctx *send_ctx,
			struct smb_direct_sendmsg *msg)
{
	int i;

	for (i = 0; i < msg->num_sge; i++)
		ib_dma_sync_single_for_device(t->cm_id->device,
					      msg->sge[i].addr, msg->sge[i].length,
					      DMA_TO_DEVICE);

	msg->cqe.done = send_done;
	msg->wr.opcode = IB_WR_SEND;
	msg->wr.sg_list = &msg->sge[0];
	msg->wr.num_sge = msg->num_sge;
	msg->wr.next = NULL;

	if (send_ctx) {
		msg->wr.wr_cqe = NULL;
		msg->wr.send_flags = 0;
		if (!list_empty(&send_ctx->msg_list)) {
			struct smb_direct_sendmsg *last;

			last = list_last_entry(&send_ctx->msg_list,
					       struct smb_direct_sendmsg,
					       list);
			last->wr.next = &msg->wr;
		}
		list_add_tail(&msg->list, &send_ctx->msg_list);
		send_ctx->wr_cnt++;
		return 0;
	}

	msg->wr.wr_cqe = &msg->cqe;
	msg->wr.send_flags = IB_SEND_SIGNALED;
	return smb_direct_post_send(t, &msg->wr);
}

static int smb_direct_post_send_data(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length)
{
	int i, j, ret;
	struct smb_direct_sendmsg *msg;
	int data_length;
	struct scatterlist sg[SMB_DIRECT_MAX_SEND_SGES - 1];

	ret = wait_for_send_credits(t, send_ctx);
	if (ret)
		return ret;

	data_length = 0;
	for (i = 0; i < niov; i++)
		data_length += iov[i].iov_len;

	ret = smb_direct_create_header(t, data_length, remaining_data_length,
				       &msg);
	if (ret) {
		atomic_inc(&t->send_credits);
		return ret;
	}

	for (i = 0; i < niov; i++) {
		struct ib_sge *sge;
		int sg_cnt;

		sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1);
		sg_cnt = get_mapped_sg_list(t->cm_id->device,
					    iov[i].iov_base, iov[i].iov_len,
					    sg, SMB_DIRECT_MAX_SEND_SGES - 1,
					    DMA_TO_DEVICE);
		if (sg_cnt <= 0) {
			pr_err("failed to map buffer\n");
			ret = -ENOMEM;
			goto err;
		} else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
			pr_err("buffer does not fit into sges\n");
			ret = -E2BIG;
			ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
					DMA_TO_DEVICE);
			goto err;
		}

		for (j = 0; j < sg_cnt; j++) {
			sge = &msg->sge[msg->num_sge];
			sge->addr = sg_dma_address(&sg[j]);
			sge->length = sg_dma_len(&sg[j]);
			sge->lkey = t->pd->local_dma_lkey;
			msg->num_sge++;
		}
	}

	ret = post_sendmsg(t, send_ctx, msg);
	if (ret)
		goto err;
	return 0;
err:
	smb_direct_free_sendmsg(t, msg);
	atomic_inc(&t->send_credits);
	return ret;
}

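/*
 * Fragmentation sketch: with the default max_send_size of 8192, each data
 * transfer message carries at most 8192 - 24 = 8168 payload bytes, so a
 * larger upper-layer PDU is split across several messages whose
 * remaining_data_length fields count down to zero; the receiver stitches
 * them back together in the reassembly queue.
 */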
static int smb_direct_writev(struct ksmbd_transport *t,
			     struct kvec *iov, int niovs, int buflen,
			     bool need_invalidate, unsigned int remote_key)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
	int remaining_data_length;
	int start, i, j;
	int max_iov_size = st->max_send_size -
			sizeof(struct smb_direct_data_transfer);
	int ret;
	struct kvec vec;
	struct smb_direct_send_ctx send_ctx;

	if (st->status != SMB_DIRECT_CS_CONNECTED)
		return -ENOTCONN;

	//FIXME: skip RFC1002 header..
	buflen -= 4;
	iov[0].iov_base += 4;
	iov[0].iov_len -= 4;

	remaining_data_length = buflen;
	ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);

	smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
	start = i = 0;
	buflen = 0;
	while (true) {
		buflen += iov[i].iov_len;
		if (buflen > max_iov_size) {
			if (i > start) {
				remaining_data_length -=
					(buflen - iov[i].iov_len);
				ret = smb_direct_post_send_data(st, &send_ctx,
								&iov[start], i - start,
								remaining_data_length);
				if (ret)
					goto done;
			} else {
				/* iov[start] is too big, break it */
				int nvec = (buflen + max_iov_size - 1) /
						max_iov_size;

				for (j = 0; j < nvec; j++) {
					vec.iov_base =
						(char *)iov[start].iov_base +
						j * max_iov_size;
					vec.iov_len =
						min_t(int, max_iov_size,
						      buflen - max_iov_size * j);
					remaining_data_length -= vec.iov_len;
					ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1,
									remaining_data_length);
					if (ret)
						goto done;
				}
				i++;
				if (i == niovs)
					break;
			}
			start = i;
			buflen = 0;
		} else {
			i++;
			if (i == niovs) {
				/* send out all remaining vecs */
				remaining_data_length -= buflen;
				ret = smb_direct_post_send_data(st, &send_ctx,
								&iov[start], i - start,
								remaining_data_length);
				if (ret)
					goto done;
				break;
			}
		}
	}

done:
	ret = smb_direct_flush_send_list(st, &send_ctx, true);

	/*
	 * As an optimization, we don't wait for individual I/Os to finish
	 * before sending the next one. Send them all and wait for the
	 * pending send count to reach 0, which means all the I/Os have been
	 * issued and we are good to return.
	 */

	wait_event(st->wait_send_payload_pending,
		   atomic_read(&st->send_payload_pending) == 0);
	return ret;
}

static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
			    enum dma_data_direction dir)
{
	struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe,
							  struct smb_direct_rdma_rw_msg, cqe);
	struct smb_direct_transport *t = msg->t;

	if (wc->status != IB_WC_SUCCESS) {
		pr_err("read/write error. opcode = %d, status = %s(%d)\n",
		       wc->opcode, ib_wc_status_msg(wc->status), wc->status);
		smb_direct_disconnect_rdma_connection(t);
	}

	if (atomic_inc_return(&t->rw_avail_ops) > 0)
		wake_up(&t->wait_rw_avail_ops);

	rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
			    msg->sg_list, msg->sgt.nents, dir);
	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
	complete(msg->completion);
	kfree(msg);
}

static void read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	read_write_done(cq, wc, DMA_FROM_DEVICE);
}

static void write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	read_write_done(cq, wc, DMA_TO_DEVICE);
}

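/*
 * RDMA read/write path: each operation consumes one rw_avail_ops slot,
 * maps the local buffer into a (possibly chained) scatterlist, and lets
 * the rdma_rw API build and post the work-request chain against the
 * client-advertised remote key. The caller blocks until the completion
 * handler above fires.
 */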
static int smb_direct_rdma_xmit(struct smb_direct_transport *t, void *buf,
				int buf_len, u32 remote_key, u64 remote_offset,
				u32 remote_len, bool is_read)
{
	struct smb_direct_rdma_rw_msg *msg;
	int ret;
	DECLARE_COMPLETION_ONSTACK(completion);
	struct ib_send_wr *first_wr = NULL;

	ret = wait_for_credits(t, &t->wait_rw_avail_ops, &t->rw_avail_ops);
	if (ret < 0)
		return ret;

	/* TODO: mempool */
	msg = kmalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
		      sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
	if (!msg) {
		atomic_inc(&t->rw_avail_ops);
		return -ENOMEM;
	}

	msg->sgt.sgl = &msg->sg_list[0];
	ret = sg_alloc_table_chained(&msg->sgt,
				     get_buf_page_count(buf, buf_len),
				     msg->sg_list, SG_CHUNK_SIZE);
	if (ret) {
		atomic_inc(&t->rw_avail_ops);
		kfree(msg);
		return -ENOMEM;
	}

	ret = get_sg_list(buf, buf_len, msg->sgt.sgl, msg->sgt.orig_nents);
	if (ret <= 0) {
		pr_err("failed to get pages\n");
		goto err;
	}

	ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
			       msg->sg_list, get_buf_page_count(buf, buf_len),
			       0, remote_offset, remote_key,
			       is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	if (ret < 0) {
		pr_err("failed to init rdma_rw_ctx: %d\n", ret);
		goto err;
	}

	msg->t = t;
	msg->cqe.done = is_read ? read_done : write_done;
	msg->completion = &completion;
	first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
				   &msg->cqe, NULL);

	ret = ib_post_send(t->qp, first_wr, NULL);
	if (ret) {
		pr_err("failed to post send wr: %d\n", ret);
		goto err;
	}

	wait_for_completion(&completion);
	return 0;

err:
	atomic_inc(&t->rw_avail_ops);
	if (first_wr)
		rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
				    msg->sg_list, msg->sgt.nents,
				    is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
	kfree(msg);
	return ret;
}

static int smb_direct_rdma_write(struct ksmbd_transport *t, void *buf,
				 unsigned int buflen, u32 remote_key,
				 u64 remote_offset, u32 remote_len)
{
	return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
				    remote_key, remote_offset,
				    remote_len, false);
}

static int smb_direct_rdma_read(struct ksmbd_transport *t, void *buf,
				unsigned int buflen, u32 remote_key,
				u64 remote_offset, u32 remote_len)
{
	return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
				    remote_key, remote_offset,
				    remote_len, true);
}

static void smb_direct_disconnect(struct ksmbd_transport *t)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);

	ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", st->cm_id);

	smb_direct_disconnect_rdma_work(&st->disconnect_work);
	wait_event_interruptible(st->wait_status,
				 st->status == SMB_DIRECT_CS_DISCONNECTED);
	free_transport(st);
}

static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event)
{
	struct smb_direct_transport *t = cm_id->context;

	ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n",
		    cm_id, rdma_event_msg(event->event), event->event);

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED: {
		t->status = SMB_DIRECT_CS_CONNECTED;
		wake_up_interruptible(&t->wait_status);
		break;
	}
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_DISCONNECTED: {
		t->status = SMB_DIRECT_CS_DISCONNECTED;
		wake_up_interruptible(&t->wait_status);
		wake_up_interruptible(&t->wait_reassembly_queue);
		wake_up(&t->wait_send_credits);
		break;
	}
	case RDMA_CM_EVENT_CONNECT_ERROR: {
		t->status = SMB_DIRECT_CS_DISCONNECTED;
		wake_up_interruptible(&t->wait_status);
		break;
	}
	default:
		pr_err("Unexpected RDMA CM event. cm_id=%p, event=%s (%d)\n",
		       cm_id, rdma_event_msg(event->event),
		       event->event);
		break;
	}
	return 0;
}

static void smb_direct_qpair_handler(struct ib_event *event, void *context)
{
	struct smb_direct_transport *t = context;

	ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n",
		    t->cm_id, ib_event_msg(event->event), event->event);

	switch (event->event) {
	case IB_EVENT_CQ_ERR:
	case IB_EVENT_QP_FATAL:
		smb_direct_disconnect_rdma_connection(t);
		break;
	default:
		break;
	}
}

static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
					      int failed)
{
	struct smb_direct_sendmsg *sendmsg;
	struct smb_direct_negotiate_resp *resp;
	int ret;

	sendmsg = smb_direct_alloc_sendmsg(t);
	if (IS_ERR(sendmsg))
		return -ENOMEM;

	resp = (struct smb_direct_negotiate_resp *)sendmsg->packet;
	if (failed) {
		memset(resp, 0, sizeof(*resp));
		resp->min_version = cpu_to_le16(0x0100);
		resp->max_version = cpu_to_le16(0x0100);
		resp->status = STATUS_NOT_SUPPORTED;
	} else {
		resp->status = STATUS_SUCCESS;
		resp->min_version = SMB_DIRECT_VERSION_LE;
		resp->max_version = SMB_DIRECT_VERSION_LE;
		resp->negotiated_version = SMB_DIRECT_VERSION_LE;
		resp->reserved = 0;
		resp->credits_requested =
				cpu_to_le16(t->send_credit_target);
		resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
		resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size);
		resp->preferred_send_size = cpu_to_le32(t->max_send_size);
		resp->max_receive_size = cpu_to_le32(t->max_recv_size);
		resp->max_fragmented_size =
				cpu_to_le32(t->max_fragmented_recv_size);
	}

	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
						 (void *)resp, sizeof(*resp),
						 DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	sendmsg->num_sge = 1;
	sendmsg->sge[0].length = sizeof(*resp);
	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;

	ret = post_sendmsg(t, NULL, sendmsg);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	wait_event(t->wait_send_pending,
		   atomic_read(&t->send_pending) == 0);
	return 0;
}

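/*
 * iWARP peers exchange IRD/ORD hints as connection private data (two u32
 * values: the responder resources we offer and an initiator depth of 1),
 * which is what the code below builds in ird_ord_hdr; InfiniBand and RoCE
 * peers need no private data here.
 */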
static int smb_direct_accept_client(struct smb_direct_transport *t)
{
	struct rdma_conn_param conn_param;
	struct ib_port_immutable port_immutable;
	u32 ird_ord_hdr[2];
	int ret;

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom,
					   SMB_DIRECT_CM_INITIATOR_DEPTH);
	conn_param.responder_resources = 0;

	t->cm_id->device->ops.get_port_immutable(t->cm_id->device,
						 t->cm_id->port_num,
						 &port_immutable);
	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
		ird_ord_hdr[0] = conn_param.responder_resources;
		ird_ord_hdr[1] = 1;
		conn_param.private_data = ird_ord_hdr;
		conn_param.private_data_len = sizeof(ird_ord_hdr);
	} else {
		conn_param.private_data = NULL;
		conn_param.private_data_len = 0;
	}
	conn_param.retry_count = SMB_DIRECT_CM_RETRY;
	conn_param.rnr_retry_count = SMB_DIRECT_CM_RNR_RETRY;
	conn_param.flow_control = 0;

	ret = rdma_accept(t->cm_id, &conn_param);
	if (ret) {
		pr_err("error at rdma_accept: %d\n", ret);
		return ret;
	}

	wait_event_interruptible(t->wait_status,
				 t->status != SMB_DIRECT_CS_NEW);
	if (t->status != SMB_DIRECT_CS_CONNECTED)
		return -ENOTCONN;
	return 0;
}

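/*
 * Negotiation sequence: pre-post one receive for the client's negotiate
 * request, accept the RDMA connection, then wait up to
 * SMB_DIRECT_NEGOTIATE_TIMEOUT seconds for the request to arrive. The
 * validated parameters clamp our send/receive sizes before the response
 * (success or failure status) is sent back.
 */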
static int smb_direct_negotiate(struct smb_direct_transport *t)
{
	int ret;
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_negotiate_req *req;

	recvmsg = get_free_recvmsg(t);
	if (!recvmsg)
		return -ENOMEM;
	recvmsg->type = SMB_DIRECT_MSG_NEGOTIATE_REQ;

	ret = smb_direct_post_recv(t, recvmsg);
	if (ret) {
		pr_err("Can't post recv: %d\n", ret);
		goto out;
	}

	t->negotiation_requested = false;
	ret = smb_direct_accept_client(t);
	if (ret) {
		pr_err("Can't accept client\n");
		goto out;
	}

	smb_direct_post_recv_credits(&t->post_recv_credits_work.work);

	ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
	ret = wait_event_interruptible_timeout(t->wait_status,
					       t->negotiation_requested ||
					       t->status == SMB_DIRECT_CS_DISCONNECTED,
					       SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
	if (ret <= 0 || t->status == SMB_DIRECT_CS_DISCONNECTED) {
		ret = ret < 0 ? ret : -ETIMEDOUT;
		goto out;
	}

	ret = smb_direct_check_recvmsg(recvmsg);
	if (ret == -ECONNABORTED)
		goto out;

	req = (struct smb_direct_negotiate_req *)recvmsg->packet;
	t->max_recv_size = min_t(int, t->max_recv_size,
				 le32_to_cpu(req->preferred_send_size));
	t->max_send_size = min_t(int, t->max_send_size,
				 le32_to_cpu(req->max_receive_size));
	t->max_fragmented_send_size =
			le32_to_cpu(req->max_fragmented_size);

	ret = smb_direct_send_negotiate_response(t, ret);
out:
	if (recvmsg)
		put_recvmsg(t, recvmsg);
	return ret;
}

static int smb_direct_init_params(struct smb_direct_transport *t,
				  struct ib_qp_cap *cap)
{
	struct ib_device *device = t->cm_id->device;
	int max_send_sges, max_pages, max_rw_wrs, max_send_wrs;

	/* We need two more SGEs because the SMB_DIRECT header will be
	 * mapped, and the send buffer may not be page aligned.
	 */
	t->max_send_size = smb_direct_max_send_size;
	max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 2;
	if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
		pr_err("max_send_size %d is too large\n", t->max_send_size);
		return -EINVAL;
	}

	/*
	 * Allow smb_direct_max_outstanding_rw_ops in-flight RDMA
	 * read/writes. The HCA guarantees at least max_send_sge SGEs for
	 * an RDMA read/write work request, and if memory registration is
	 * used, we need reg_mr and local_inv WRs for each read/write.
	 */
	t->max_rdma_rw_size = smb_direct_max_read_write_size;
	max_pages = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
	max_rw_wrs = DIV_ROUND_UP(max_pages, SMB_DIRECT_MAX_SEND_SGES);
	max_rw_wrs += rdma_rw_mr_factor(device, t->cm_id->port_num,
			max_pages) * 2;
	max_rw_wrs *= smb_direct_max_outstanding_rw_ops;
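	/*
	 * Worked example with the defaults and 4 KiB pages: a 1 MiB
	 * max_rdma_rw_size gives max_pages = 256 + 1 = 257, so max_rw_wrs
	 * starts at DIV_ROUND_UP(257, 8) = 33 plus a device-dependent
	 * memory-registration term, all multiplied by the 8 allowed
	 * outstanding read/write operations.
	 */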

	max_send_wrs = smb_direct_send_credit_target + max_rw_wrs;
	if (max_send_wrs > device->attrs.max_cqe ||
	    max_send_wrs > device->attrs.max_qp_wr) {
		pr_err("consider lowering send_credit_target = %d, or max_outstanding_rw_ops = %d\n",
		       smb_direct_send_credit_target,
		       smb_direct_max_outstanding_rw_ops);
		pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
		       device->attrs.max_cqe, device->attrs.max_qp_wr);
		return -EINVAL;
	}

	if (smb_direct_receive_credit_max > device->attrs.max_cqe ||
	    smb_direct_receive_credit_max > device->attrs.max_qp_wr) {
		pr_err("consider lowering receive_credit_max = %d\n",
		       smb_direct_receive_credit_max);
		pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
		       device->attrs.max_cqe, device->attrs.max_qp_wr);
		return -EINVAL;
	}

	if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) {
		pr_err("warning: device max_send_sge = %d too small\n",
		       device->attrs.max_send_sge);
		return -EINVAL;
	}
	if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) {
		pr_err("warning: device max_recv_sge = %d too small\n",
		       device->attrs.max_recv_sge);
		return -EINVAL;
	}

	t->recv_credits = 0;
	t->count_avail_recvmsg = 0;

	t->recv_credit_max = smb_direct_receive_credit_max;
	t->recv_credit_target = 10;
	t->new_recv_credits = 0;

	t->send_credit_target = smb_direct_send_credit_target;
	atomic_set(&t->send_credits, 0);
	atomic_set(&t->rw_avail_ops, smb_direct_max_outstanding_rw_ops);

	t->max_send_size = smb_direct_max_send_size;
	t->max_recv_size = smb_direct_max_receive_size;
	t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;

	cap->max_send_wr = max_send_wrs;
	cap->max_recv_wr = t->recv_credit_max;
	cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
	cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
	cap->max_inline_data = 0;
	cap->max_rdma_ctxs = 0;
	return 0;
}

static void smb_direct_destroy_pools(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg;

	while ((recvmsg = get_free_recvmsg(t)))
		mempool_free(recvmsg, t->recvmsg_mempool);
	while ((recvmsg = get_empty_recvmsg(t)))
		mempool_free(recvmsg, t->recvmsg_mempool);

	mempool_destroy(t->recvmsg_mempool);
	t->recvmsg_mempool = NULL;

	kmem_cache_destroy(t->recvmsg_cache);
	t->recvmsg_cache = NULL;

	mempool_destroy(t->sendmsg_mempool);
	t->sendmsg_mempool = NULL;

	kmem_cache_destroy(t->sendmsg_cache);
	t->sendmsg_cache = NULL;
}

static int smb_direct_create_pools(struct smb_direct_transport *t)
{
	char name[80];
	int i;
	struct smb_direct_recvmsg *recvmsg;

	snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t);
	t->sendmsg_cache = kmem_cache_create(name,
					     sizeof(struct smb_direct_sendmsg) +
					     sizeof(struct smb_direct_negotiate_resp),
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!t->sendmsg_cache)
		return -ENOMEM;

	t->sendmsg_mempool = mempool_create(t->send_credit_target,
					    mempool_alloc_slab, mempool_free_slab,
					    t->sendmsg_cache);
	if (!t->sendmsg_mempool)
		goto err;

	snprintf(name, sizeof(name), "smb_direct_resp_%p", t);
	t->recvmsg_cache = kmem_cache_create(name,
					     sizeof(struct smb_direct_recvmsg) +
					     t->max_recv_size,
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!t->recvmsg_cache)
		goto err;

	t->recvmsg_mempool =
		mempool_create(t->recv_credit_max, mempool_alloc_slab,
			       mempool_free_slab, t->recvmsg_cache);
	if (!t->recvmsg_mempool)
		goto err;

	INIT_LIST_HEAD(&t->recvmsg_queue);

	for (i = 0; i < t->recv_credit_max; i++) {
		recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL);
		if (!recvmsg)
			goto err;
		recvmsg->transport = t;
		list_add(&recvmsg->list, &t->recvmsg_queue);
	}
	t->count_avail_recvmsg = t->recv_credit_max;

	return 0;
err:
	smb_direct_destroy_pools(t);
	return -ENOMEM;
}

static int smb_direct_create_qpair(struct smb_direct_transport *t,
				   struct ib_qp_cap *cap)
{
	int ret;
	struct ib_qp_init_attr qp_attr;

	t->pd = ib_alloc_pd(t->cm_id->device, 0);
	if (IS_ERR(t->pd)) {
		pr_err("Can't create RDMA PD\n");
		ret = PTR_ERR(t->pd);
		t->pd = NULL;
		return ret;
	}

	t->send_cq = ib_alloc_cq(t->cm_id->device, t,
				 t->send_credit_target, 0, IB_POLL_WORKQUEUE);
	if (IS_ERR(t->send_cq)) {
		pr_err("Can't create RDMA send CQ\n");
		ret = PTR_ERR(t->send_cq);
		t->send_cq = NULL;
		goto err;
	}

	t->recv_cq = ib_alloc_cq(t->cm_id->device, t,
				 cap->max_send_wr + cap->max_rdma_ctxs,
				 0, IB_POLL_WORKQUEUE);
	if (IS_ERR(t->recv_cq)) {
		pr_err("Can't create RDMA recv CQ\n");
		ret = PTR_ERR(t->recv_cq);
		t->recv_cq = NULL;
		goto err;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.event_handler = smb_direct_qpair_handler;
	qp_attr.qp_context = t;
	qp_attr.cap = *cap;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = t->send_cq;
	qp_attr.recv_cq = t->recv_cq;
	qp_attr.port_num = ~0;

	ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr);
	if (ret) {
		pr_err("Can't create RDMA QP: %d\n", ret);
		goto err;
	}

	t->qp = t->cm_id->qp;
	t->cm_id->event_handler = smb_direct_cm_handler;

	return 0;
err:
	if (t->qp) {
		ib_destroy_qp(t->qp);
		t->qp = NULL;
	}
	if (t->recv_cq) {
		ib_destroy_cq(t->recv_cq);
		t->recv_cq = NULL;
	}
	if (t->send_cq) {
		ib_destroy_cq(t->send_cq);
		t->send_cq = NULL;
	}
	if (t->pd) {
		ib_dealloc_pd(t->pd);
		t->pd = NULL;
	}
	return ret;
}

static int smb_direct_prepare(struct ksmbd_transport *t)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
	int ret;
	struct ib_qp_cap qp_cap;

	ret = smb_direct_init_params(st, &qp_cap);
	if (ret) {
		pr_err("Can't configure RDMA parameters\n");
		return ret;
	}

	ret = smb_direct_create_pools(st);
	if (ret) {
		pr_err("Can't init RDMA pool: %d\n", ret);
		return ret;
	}

	ret = smb_direct_create_qpair(st, &qp_cap);
	if (ret) {
		pr_err("Can't accept RDMA client: %d\n", ret);
		return ret;
	}

	ret = smb_direct_negotiate(st);
	if (ret) {
		pr_err("Can't negotiate: %d\n", ret);
		return ret;
	}

	st->status = SMB_DIRECT_CS_CONNECTED;
	return 0;
}

static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
{
	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		return false;
	if (attrs->max_fast_reg_page_list_len == 0)
		return false;
	return true;
}

static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
{
	struct smb_direct_transport *t;

	if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
		ksmbd_debug(RDMA,
			    "Fast Registration Work Requests are not supported. device capabilities=%llx\n",
			    new_cm_id->device->attrs.device_cap_flags);
		return -EPROTONOSUPPORT;
	}

	t = alloc_transport(new_cm_id);
	if (!t)
		return -ENOMEM;

	KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
					      KSMBD_TRANS(t)->conn, "ksmbd:r%u",
					      SMB_DIRECT_PORT);
	if (IS_ERR(KSMBD_TRANS(t)->handler)) {
		int ret = PTR_ERR(KSMBD_TRANS(t)->handler);

		pr_err("Can't start thread\n");
		free_transport(t);
		return ret;
	}

	return 0;
}

static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
				     struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST: {
		int ret = smb_direct_handle_connect_request(cm_id);

		if (ret) {
			pr_err("Can't create transport: %d\n", ret);
			return ret;
		}

		ksmbd_debug(RDMA, "Received connection request. cm_id=%p\n",
			    cm_id);
		break;
	}
	default:
		pr_err("Unexpected listen event. cm_id=%p, event=%s (%d)\n",
		       cm_id, rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}

static int smb_direct_listen(int port)
{
	int ret;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};

	cm_id = rdma_create_id(&init_net, smb_direct_listen_handler,
			       &smb_direct_listener, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id));
		return PTR_ERR(cm_id);
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
	if (ret) {
		pr_err("Can't bind: %d\n", ret);
		goto err;
	}

	smb_direct_listener.cm_id = cm_id;

	ret = rdma_listen(cm_id, 10);
	if (ret) {
		pr_err("Can't listen: %d\n", ret);
		goto err;
	}
	return 0;
err:
	smb_direct_listener.cm_id = NULL;
	rdma_destroy_id(cm_id);
	return ret;
}

int ksmbd_rdma_init(void)
{
	int ret;

	smb_direct_listener.cm_id = NULL;

	/* When a client is running out of send credits, the credits are
	 * granted by the server's sending a packet using this queue.
	 * This avoids the situation that a client cannot send packets
	 * for lack of credits.
	 */
	smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq",
					WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!smb_direct_wq)
		return -ENOMEM;

	ret = smb_direct_listen(SMB_DIRECT_PORT);
	if (ret) {
		destroy_workqueue(smb_direct_wq);
		smb_direct_wq = NULL;
		pr_err("Can't listen: %d\n", ret);
		return ret;
	}

	ksmbd_debug(RDMA, "init RDMA listener. cm_id=%p\n",
		    smb_direct_listener.cm_id);
	return 0;
}

int ksmbd_rdma_destroy(void)
{
	if (smb_direct_listener.cm_id)
		rdma_destroy_id(smb_direct_listener.cm_id);
	smb_direct_listener.cm_id = NULL;

	if (smb_direct_wq) {
		flush_workqueue(smb_direct_wq);
		destroy_workqueue(smb_direct_wq);
		smb_direct_wq = NULL;
	}
	return 0;
}

bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
{
	struct ib_device *ibdev;
	bool rdma_capable = false;

	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
	if (ibdev) {
		if (rdma_frwr_is_supported(&ibdev->attrs))
			rdma_capable = true;
		ib_device_put(ibdev);
	}
	return rdma_capable;
}

static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
	.prepare	= smb_direct_prepare,
	.disconnect	= smb_direct_disconnect,
	.writev		= smb_direct_writev,
	.read		= smb_direct_read,
	.rdma_read	= smb_direct_rdma_read,
	.rdma_write	= smb_direct_rdma_write,
};