// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017, Microsoft Corporation.
 * Copyright (C) 2018, LG Electronics.
 *
 * Author(s): Long Li <longli@microsoft.com>,
 *		Hyunchul Lee <hyc.lee@gmail.com>
 */

#define SUBMOD_NAME	"smb_direct"

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include "glob.h"
#include "connection.h"
#include "smb_common.h"
#include "smbstatus.h"
#include "transport_rdma.h"

#define SMB_DIRECT_PORT_IWARP		5445
#define SMB_DIRECT_PORT_INFINIBAND	445

#define SMB_DIRECT_VERSION_LE		cpu_to_le16(0x0100)

/* SMB_DIRECT negotiation timeout in seconds */
#define SMB_DIRECT_NEGOTIATE_TIMEOUT		120

#define SMB_DIRECT_MAX_SEND_SGES		6
#define SMB_DIRECT_MAX_RECV_SGES		1

/*
 * Default maximum number of RDMA read/write operations outstanding on this
 * connection. This value may be decreased during QP creation, based on
 * hardware limits.
 */
#define SMB_DIRECT_CM_INITIATOR_DEPTH		8

/* Maximum number of retries on data transfer operations */
#define SMB_DIRECT_CM_RETRY			6
/* No need to retry on Receiver Not Ready since SMB_DIRECT manages credits */
#define SMB_DIRECT_CM_RNR_RETRY		0

/*
 * User-configurable initial values per SMB_DIRECT transport connection,
 * as defined in [MS-SMBD] 3.1.1.1.
 * These may change after SMB_DIRECT negotiation.
 */

/* Use port 445 (the InfiniBand port) for SMB Direct by default */
static int smb_direct_port = SMB_DIRECT_PORT_INFINIBAND;

/* The local peer's maximum number of credits to grant to the peer */
static int smb_direct_receive_credit_max = 255;

/* The number of credits the local peer requests from the remote peer */
static int smb_direct_send_credit_target = 255;

/* The maximum size of a single message that can be sent to the remote peer */
static int smb_direct_max_send_size = 1364;

/* The maximum fragmented upper-layer payload receive size supported */
static int smb_direct_max_fragmented_recv_size = 1024 * 1024;

/* The maximum size of a single message that can be received */
static int smb_direct_max_receive_size = 1364;

static int smb_direct_max_read_write_size = SMBD_DEFAULT_IOSIZE;

static LIST_HEAD(smb_direct_device_list);
static DEFINE_RWLOCK(smb_direct_device_lock);

struct smb_direct_device {
	struct ib_device	*ib_dev;
	struct list_head	list;
};

static struct smb_direct_listener {
	struct rdma_cm_id	*cm_id;
} smb_direct_listener;

static struct workqueue_struct *smb_direct_wq;

enum smb_direct_status {
	SMB_DIRECT_CS_NEW = 0,
	SMB_DIRECT_CS_CONNECTED,
	SMB_DIRECT_CS_DISCONNECTING,
	SMB_DIRECT_CS_DISCONNECTED,
};

struct smb_direct_transport {
	struct ksmbd_transport	transport;

	enum smb_direct_status	status;
	bool			full_packet_received;
	wait_queue_head_t	wait_status;

	struct rdma_cm_id	*cm_id;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_pd		*pd;
	struct ib_qp		*qp;

	int			max_send_size;
	int			max_recv_size;
	int			max_fragmented_send_size;
	int			max_fragmented_recv_size;
	int			max_rdma_rw_size;

	spinlock_t		reassembly_queue_lock;
	struct list_head	reassembly_queue;
	int			reassembly_data_length;
	int			reassembly_queue_length;
	int			first_entry_offset;
	wait_queue_head_t	wait_reassembly_queue;

	spinlock_t		receive_credit_lock;
	int			recv_credits;
	int			count_avail_recvmsg;
	int			recv_credit_max;
	int			recv_credit_target;

	spinlock_t		recvmsg_queue_lock;
	struct list_head	recvmsg_queue;

	spinlock_t		empty_recvmsg_queue_lock;
	struct list_head	empty_recvmsg_queue;

	int			send_credit_target;
	atomic_t		send_credits;
	spinlock_t		lock_new_recv_credits;
	int			new_recv_credits;
	int			max_rw_credits;
	int			pages_per_rw_credit;
	atomic_t		rw_credits;

	wait_queue_head_t	wait_send_credits;
	wait_queue_head_t	wait_rw_credits;

	mempool_t		*sendmsg_mempool;
	struct kmem_cache	*sendmsg_cache;
	mempool_t		*recvmsg_mempool;
	struct kmem_cache	*recvmsg_cache;

	wait_queue_head_t	wait_send_pending;
	atomic_t		send_pending;

	struct delayed_work	post_recv_credits_work;
	struct work_struct	send_immediate_work;
	struct work_struct	disconnect_work;

	bool			negotiation_requested;
};

#define KSMBD_TRANS(t)	((struct ksmbd_transport *)&((t)->transport))

enum {
	SMB_DIRECT_MSG_NEGOTIATE_REQ = 0,
	SMB_DIRECT_MSG_DATA_TRANSFER
};

static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops;

struct smb_direct_send_ctx {
	struct list_head	msg_list;
	int			wr_cnt;
	bool			need_invalidate_rkey;
	unsigned int		remote_key;
};

struct smb_direct_sendmsg {
	struct smb_direct_transport	*transport;
	struct ib_send_wr	wr;
	struct list_head	list;
	int			num_sge;
	struct ib_sge		sge[SMB_DIRECT_MAX_SEND_SGES];
	struct ib_cqe		cqe;
	u8			packet[];
};

struct smb_direct_recvmsg {
	struct smb_direct_transport	*transport;
	struct list_head	list;
	int			type;
	struct ib_sge		sge;
	struct ib_cqe		cqe;
	bool			first_segment;
	u8			packet[];
};

struct smb_direct_rdma_rw_msg {
	struct smb_direct_transport	*t;
	struct ib_cqe		cqe;
	int			status;
	struct completion	*completion;
	struct list_head	list;
	struct rdma_rw_ctx	rw_ctx;
	struct sg_table		sgt;
	struct scatterlist	sg_list[];
};

void init_smbd_max_io_size(unsigned int sz)
{
	sz = clamp_val(sz, SMBD_MIN_IOSIZE, SMBD_MAX_IOSIZE);
	smb_direct_max_read_write_size = sz;
}

unsigned int get_smbd_max_read_write_size(void)
{
	return smb_direct_max_read_write_size;
}

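/*
 * Number of pages spanned by the buffer [buf, buf + size): the index of the
 * page past the last byte, rounded up, minus the index of the page holding
 * the first byte.
 */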
static inline int get_buf_page_count(void *buf, int size)
{
	return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
		(uintptr_t)buf / PAGE_SIZE;
}

static void smb_direct_destroy_pools(struct smb_direct_transport *transport);
static void smb_direct_post_recv_credits(struct work_struct *work);
static int smb_direct_post_send_data(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length);

static inline struct smb_direct_transport *
smb_trans_direct_transfort(struct ksmbd_transport *t)
{
	return container_of(t, struct smb_direct_transport, transport);
}

static inline void
*smb_direct_recvmsg_payload(struct smb_direct_recvmsg *recvmsg)
{
	return (void *)recvmsg->packet;
}

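/*
 * Replenish receive buffers only when the remaining receive credits have
 * dropped to an eighth of the maximum or less, and at least a quarter of
 * them can be refilled from the available receive messages; this batches
 * receive posting instead of reposting after every message.
 */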
static inline bool is_receive_credit_post_required(int receive_credits,
						   int avail_recvmsg_count)
{
	return receive_credits <= (smb_direct_receive_credit_max >> 3) &&
		avail_recvmsg_count >= (receive_credits >> 2);
}

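/*
 * Receive buffers are kept on two queues: recvmsg_queue holds free,
 * reusable buffers, while empty_recvmsg_queue collects buffers that were
 * returned without entering the reassembly queue (errors or zero-length
 * data transfers). Both queues are drawn from when receive credits are
 * replenished.
 */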
static struct
smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg = NULL;

	spin_lock(&t->recvmsg_queue_lock);
	if (!list_empty(&t->recvmsg_queue)) {
		recvmsg = list_first_entry(&t->recvmsg_queue,
					   struct smb_direct_recvmsg,
					   list);
		list_del(&recvmsg->list);
	}
	spin_unlock(&t->recvmsg_queue_lock);
	return recvmsg;
}

static void put_recvmsg(struct smb_direct_transport *t,
			struct smb_direct_recvmsg *recvmsg)
{
	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
			    recvmsg->sge.length, DMA_FROM_DEVICE);

	spin_lock(&t->recvmsg_queue_lock);
	list_add(&recvmsg->list, &t->recvmsg_queue);
	spin_unlock(&t->recvmsg_queue_lock);
}

static struct
smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg = NULL;

	spin_lock(&t->empty_recvmsg_queue_lock);
	if (!list_empty(&t->empty_recvmsg_queue)) {
		recvmsg = list_first_entry(&t->empty_recvmsg_queue,
					   struct smb_direct_recvmsg, list);
		list_del(&recvmsg->list);
	}
	spin_unlock(&t->empty_recvmsg_queue_lock);
	return recvmsg;
}

static void put_empty_recvmsg(struct smb_direct_transport *t,
			      struct smb_direct_recvmsg *recvmsg)
{
	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
			    recvmsg->sge.length, DMA_FROM_DEVICE);

	spin_lock(&t->empty_recvmsg_queue_lock);
	list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
	spin_unlock(&t->empty_recvmsg_queue_lock);
}

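/*
 * The reassembly queue accumulates received data-transfer messages until
 * the upper layer reads them; enqueueing happens in receive-completion
 * context while smb_direct_read() drains from the front.
 */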
static void enqueue_reassembly(struct smb_direct_transport *t,
			       struct smb_direct_recvmsg *recvmsg,
			       int data_length)
{
	spin_lock(&t->reassembly_queue_lock);
	list_add_tail(&recvmsg->list, &t->reassembly_queue);
	t->reassembly_queue_length++;
	/*
	 * Make sure reassembly_data_length is updated after the list and
	 * reassembly_queue_length are updated. On the dequeue side
	 * reassembly_data_length is checked without a lock to determine
	 * if reassembly_queue_length and the list are up to date.
	 */
	virt_wmb();
	t->reassembly_data_length += data_length;
	spin_unlock(&t->reassembly_queue_lock);
}

static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t)
{
	if (!list_empty(&t->reassembly_queue))
		return list_first_entry(&t->reassembly_queue,
				struct smb_direct_recvmsg, list);
	else
		return NULL;
}

static void smb_direct_disconnect_rdma_work(struct work_struct *work)
{
	struct smb_direct_transport *t =
		container_of(work, struct smb_direct_transport,
			     disconnect_work);

	if (t->status == SMB_DIRECT_CS_CONNECTED) {
		t->status = SMB_DIRECT_CS_DISCONNECTING;
		rdma_disconnect(t->cm_id);
	}
}

static void
smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t)
{
	if (t->status == SMB_DIRECT_CS_CONNECTED)
		queue_work(smb_direct_wq, &t->disconnect_work);
}

static void smb_direct_send_immediate_work(struct work_struct *work)
{
	struct smb_direct_transport *t = container_of(work,
			struct smb_direct_transport, send_immediate_work);

	if (t->status != SMB_DIRECT_CS_CONNECTED)
		return;

	smb_direct_post_send_data(t, NULL, NULL, 0, 0);
}

static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
{
	struct smb_direct_transport *t;
	struct ksmbd_conn *conn;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->cm_id = cm_id;
	cm_id->context = t;

	t->status = SMB_DIRECT_CS_NEW;
	init_waitqueue_head(&t->wait_status);

	spin_lock_init(&t->reassembly_queue_lock);
	INIT_LIST_HEAD(&t->reassembly_queue);
	t->reassembly_data_length = 0;
	t->reassembly_queue_length = 0;
	init_waitqueue_head(&t->wait_reassembly_queue);
	init_waitqueue_head(&t->wait_send_credits);
	init_waitqueue_head(&t->wait_rw_credits);

	spin_lock_init(&t->receive_credit_lock);
	spin_lock_init(&t->recvmsg_queue_lock);
	INIT_LIST_HEAD(&t->recvmsg_queue);

	spin_lock_init(&t->empty_recvmsg_queue_lock);
	INIT_LIST_HEAD(&t->empty_recvmsg_queue);

	init_waitqueue_head(&t->wait_send_pending);
	atomic_set(&t->send_pending, 0);

	spin_lock_init(&t->lock_new_recv_credits);

	INIT_DELAYED_WORK(&t->post_recv_credits_work,
			  smb_direct_post_recv_credits);
	INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work);
	INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work);

	conn = ksmbd_conn_alloc();
	if (!conn)
		goto err;
	conn->transport = KSMBD_TRANS(t);
	KSMBD_TRANS(t)->conn = conn;
	KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
	return t;
err:
	kfree(t);
	return NULL;
}

static void free_transport(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg;

	wake_up_interruptible(&t->wait_send_credits);

	ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n");
	wait_event(t->wait_send_pending,
		   atomic_read(&t->send_pending) == 0);

	cancel_work_sync(&t->disconnect_work);
	cancel_delayed_work_sync(&t->post_recv_credits_work);
	cancel_work_sync(&t->send_immediate_work);

	if (t->qp) {
		ib_drain_qp(t->qp);
		ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs);
		ib_destroy_qp(t->qp);
	}

	ksmbd_debug(RDMA, "drain the reassembly queue\n");
	do {
		spin_lock(&t->reassembly_queue_lock);
		recvmsg = get_first_reassembly(t);
		if (recvmsg) {
			list_del(&recvmsg->list);
			spin_unlock(&t->reassembly_queue_lock);
			put_recvmsg(t, recvmsg);
		} else {
			spin_unlock(&t->reassembly_queue_lock);
		}
	} while (recvmsg);
	t->reassembly_data_length = 0;

	if (t->send_cq)
		ib_free_cq(t->send_cq);
	if (t->recv_cq)
		ib_free_cq(t->recv_cq);
	if (t->pd)
		ib_dealloc_pd(t->pd);
	if (t->cm_id)
		rdma_destroy_id(t->cm_id);

	smb_direct_destroy_pools(t);
	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
	kfree(t);
}

static struct smb_direct_sendmsg
*smb_direct_alloc_sendmsg(struct smb_direct_transport *t)
{
	struct smb_direct_sendmsg *msg;

	msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);
	msg->transport = t;
	INIT_LIST_HEAD(&msg->list);
	msg->num_sge = 0;
	return msg;
}

static void smb_direct_free_sendmsg(struct smb_direct_transport *t,
				    struct smb_direct_sendmsg *msg)
{
	int i;

	if (msg->num_sge > 0) {
		ib_dma_unmap_single(t->cm_id->device,
				    msg->sge[0].addr, msg->sge[0].length,
				    DMA_TO_DEVICE);
		for (i = 1; i < msg->num_sge; i++)
			ib_dma_unmap_page(t->cm_id->device,
					  msg->sge[i].addr, msg->sge[i].length,
					  DMA_TO_DEVICE);
	}
	mempool_free(msg, t->sendmsg_mempool);
}

static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg)
{
	switch (recvmsg->type) {
	case SMB_DIRECT_MSG_DATA_TRANSFER: {
		struct smb_direct_data_transfer *req =
			(struct smb_direct_data_transfer *)recvmsg->packet;
		struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet
				+ le32_to_cpu(req->data_offset));
		ksmbd_debug(RDMA,
			    "CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n",
			    le16_to_cpu(req->credits_granted),
			    le16_to_cpu(req->credits_requested),
			    req->data_length, req->remaining_data_length,
			    hdr->ProtocolId, hdr->Command);
		break;
	}
	case SMB_DIRECT_MSG_NEGOTIATE_REQ: {
		struct smb_direct_negotiate_req *req =
			(struct smb_direct_negotiate_req *)recvmsg->packet;
		ksmbd_debug(RDMA,
			    "MinVersion: %u, MaxVersion: %u, CreditRequested: %u, MaxSendSize: %u, MaxRecvSize: %u, MaxFragmentedSize: %u\n",
			    le16_to_cpu(req->min_version),
			    le16_to_cpu(req->max_version),
			    le16_to_cpu(req->credits_requested),
			    le32_to_cpu(req->preferred_send_size),
			    le32_to_cpu(req->max_receive_size),
			    le32_to_cpu(req->max_fragmented_size));
		if (le16_to_cpu(req->min_version) > 0x0100 ||
		    le16_to_cpu(req->max_version) < 0x0100)
			return -EOPNOTSUPP;
		if (le16_to_cpu(req->credits_requested) <= 0 ||
		    le32_to_cpu(req->max_receive_size) <= 128 ||
		    le32_to_cpu(req->max_fragmented_size) <=
		    128 * 1024)
			return -ECONNABORTED;

		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

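/*
 * Receive completion handler: validates the message, pushes data-transfer
 * payloads onto the reassembly queue, and applies the credits granted and
 * requested by the peer. A receive credit is consumed for every message
 * that arrives, so this handler also decides when to schedule the work
 * that reposts receive buffers.
 */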
static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_transport *t;

	recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
	t = recvmsg->transport;

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("Recv error. status='%s (%d)' opcode=%d\n",
			       ib_wc_status_msg(wc->status), wc->status,
			       wc->opcode);
			smb_direct_disconnect_rdma_connection(t);
		}
		put_empty_recvmsg(t, recvmsg);
		return;
	}

	ksmbd_debug(RDMA, "Recv completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr,
				   recvmsg->sge.length, DMA_FROM_DEVICE);

	switch (recvmsg->type) {
	case SMB_DIRECT_MSG_NEGOTIATE_REQ:
		if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
			put_empty_recvmsg(t, recvmsg);
			return;
		}
		t->negotiation_requested = true;
		t->full_packet_received = true;
		t->status = SMB_DIRECT_CS_CONNECTED;
		enqueue_reassembly(t, recvmsg, 0);
		wake_up_interruptible(&t->wait_status);
		break;
	case SMB_DIRECT_MSG_DATA_TRANSFER: {
		struct smb_direct_data_transfer *data_transfer =
			(struct smb_direct_data_transfer *)recvmsg->packet;
		unsigned int data_length;
		int avail_recvmsg_count, receive_credits;

		if (wc->byte_len <
		    offsetof(struct smb_direct_data_transfer, padding)) {
			put_empty_recvmsg(t, recvmsg);
			return;
		}

		data_length = le32_to_cpu(data_transfer->data_length);
		if (data_length) {
			if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
			    (u64)data_length) {
				put_empty_recvmsg(t, recvmsg);
				return;
			}

			if (t->full_packet_received)
				recvmsg->first_segment = true;

			if (le32_to_cpu(data_transfer->remaining_data_length))
				t->full_packet_received = false;
			else
				t->full_packet_received = true;

			enqueue_reassembly(t, recvmsg, (int)data_length);
			wake_up_interruptible(&t->wait_reassembly_queue);

			spin_lock(&t->receive_credit_lock);
			receive_credits = --(t->recv_credits);
			avail_recvmsg_count = t->count_avail_recvmsg;
			spin_unlock(&t->receive_credit_lock);
		} else {
			put_empty_recvmsg(t, recvmsg);

			spin_lock(&t->receive_credit_lock);
			receive_credits = --(t->recv_credits);
			avail_recvmsg_count = ++(t->count_avail_recvmsg);
			spin_unlock(&t->receive_credit_lock);
		}

		t->recv_credit_target =
				le16_to_cpu(data_transfer->credits_requested);
		atomic_add(le16_to_cpu(data_transfer->credits_granted),
			   &t->send_credits);

		if (le16_to_cpu(data_transfer->flags) &
		    SMB_DIRECT_RESPONSE_REQUESTED)
			queue_work(smb_direct_wq, &t->send_immediate_work);

		if (atomic_read(&t->send_credits) > 0)
			wake_up_interruptible(&t->wait_send_credits);

		if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
			mod_delayed_work(smb_direct_wq,
					 &t->post_recv_credits_work, 0);
		break;
	}
	default:
		break;
	}
}

static int smb_direct_post_recv(struct smb_direct_transport *t,
				struct smb_direct_recvmsg *recvmsg)
{
	struct ib_recv_wr wr;
	int ret;

	recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device,
					      recvmsg->packet, t->max_recv_size,
					      DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr);
	if (ret)
		return ret;
	recvmsg->sge.length = t->max_recv_size;
	recvmsg->sge.lkey = t->pd->local_dma_lkey;
	recvmsg->cqe.done = recv_done;

	wr.wr_cqe = &recvmsg->cqe;
	wr.next = NULL;
	wr.sg_list = &recvmsg->sge;
	wr.num_sge = 1;

	ret = ib_post_recv(t->qp, &wr, NULL);
	if (ret) {
		pr_err("Can't post recv: %d\n", ret);
		ib_dma_unmap_single(t->cm_id->device,
				    recvmsg->sge.addr, recvmsg->sge.length,
				    DMA_FROM_DEVICE);
		smb_direct_disconnect_rdma_connection(t);
		return ret;
	}
	return ret;
}

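/*
 * Transport read path. The upper layer still expects a 4-byte RFC 1002
 * length prefix, so a read of size 4 on a message's first segment returns
 * the total SMB message length instead of consuming payload (see the
 * comment in the loop below). Other reads copy out of the reassembly
 * queue, which only this reader drains from the front.
 */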
static int smb_direct_read(struct ksmbd_transport *t, char *buf,
			   unsigned int size, int unused)
{
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_data_transfer *data_transfer;
	int to_copy, to_read, data_read, offset;
	u32 data_length, remaining_data_length, data_offset;
	int rc;
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);

again:
	if (st->status != SMB_DIRECT_CS_CONNECTED) {
		pr_err("disconnected\n");
		return -ENOTCONN;
	}

	/*
	 * No need to hold the reassembly queue lock all the time as we are
	 * the only one reading from the front of the queue. The transport
	 * may add more entries to the back of the queue at the same time.
	 */
	if (st->reassembly_data_length >= size) {
		int queue_length;
		int queue_removed = 0;

		/*
		 * Need to make sure reassembly_data_length is read before
		 * reading reassembly_queue_length and calling
		 * get_first_reassembly. This call is lock free as we never
		 * read at the end of the queue, which is being updated in
		 * SOFTIRQ context as more data is received.
		 */
		virt_rmb();
		queue_length = st->reassembly_queue_length;
		data_read = 0;
		to_read = size;
		offset = st->first_entry_offset;
		while (data_read < size) {
			recvmsg = get_first_reassembly(st);
			data_transfer = smb_direct_recvmsg_payload(recvmsg);
			data_length = le32_to_cpu(data_transfer->data_length);
			remaining_data_length =
				le32_to_cpu(data_transfer->remaining_data_length);
			data_offset = le32_to_cpu(data_transfer->data_offset);

			/*
			 * The upper layer expects the RFC 1002 length at the
			 * beginning of the payload. Return it to indicate
			 * the total length of the packet. This minimizes the
			 * changes to the upper-layer packet processing logic
			 * and will eventually be removed when an intermediate
			 * transport layer is added.
			 */
			if (recvmsg->first_segment && size == 4) {
				unsigned int rfc1002_len =
					data_length + remaining_data_length;
				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
				data_read = 4;
				recvmsg->first_segment = false;
				ksmbd_debug(RDMA,
					    "returning rfc1002 length %d\n",
					    rfc1002_len);
				goto read_rfc1002_done;
			}

			to_copy = min_t(int, data_length - offset, to_read);
			memcpy(buf + data_read, (char *)data_transfer + data_offset + offset,
			       to_copy);

			/* move on to the next buffer? */
			if (to_copy == data_length - offset) {
				queue_length--;
				/*
				 * No need to lock if we are not at the
				 * end of the queue
				 */
				if (queue_length) {
					list_del(&recvmsg->list);
				} else {
					spin_lock_irq(&st->reassembly_queue_lock);
					list_del(&recvmsg->list);
					spin_unlock_irq(&st->reassembly_queue_lock);
				}
				queue_removed++;
				put_recvmsg(st, recvmsg);
				offset = 0;
			} else {
				offset += to_copy;
			}

			to_read -= to_copy;
			data_read += to_copy;
		}

		spin_lock_irq(&st->reassembly_queue_lock);
		st->reassembly_data_length -= data_read;
		st->reassembly_queue_length -= queue_removed;
		spin_unlock_irq(&st->reassembly_queue_lock);

		spin_lock(&st->receive_credit_lock);
		st->count_avail_recvmsg += queue_removed;
		if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) {
			spin_unlock(&st->receive_credit_lock);
			mod_delayed_work(smb_direct_wq,
					 &st->post_recv_credits_work, 0);
		} else {
			spin_unlock(&st->receive_credit_lock);
		}

		st->first_entry_offset = offset;
		ksmbd_debug(RDMA,
			    "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
			    data_read, st->reassembly_data_length,
			    st->first_entry_offset);
read_rfc1002_done:
		return data_read;
	}

	ksmbd_debug(RDMA, "wait_event on more data\n");
	rc = wait_event_interruptible(st->wait_reassembly_queue,
				      st->reassembly_data_length >= size ||
				      st->status != SMB_DIRECT_CS_CONNECTED);
	if (rc)
		return -EINTR;

	goto again;
}

static void smb_direct_post_recv_credits(struct work_struct *work)
{
	struct smb_direct_transport *t = container_of(work,
		struct smb_direct_transport, post_recv_credits_work.work);
	struct smb_direct_recvmsg *recvmsg;
	int receive_credits, credits = 0;
	int ret;
	int use_free = 1;

	spin_lock(&t->receive_credit_lock);
	receive_credits = t->recv_credits;
	spin_unlock(&t->receive_credit_lock);

	if (receive_credits < t->recv_credit_target) {
		while (true) {
			if (use_free)
				recvmsg = get_free_recvmsg(t);
			else
				recvmsg = get_empty_recvmsg(t);
			if (!recvmsg) {
				if (use_free) {
					use_free = 0;
					continue;
				} else {
					break;
				}
			}

			recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
			recvmsg->first_segment = false;

			ret = smb_direct_post_recv(t, recvmsg);
			if (ret) {
				pr_err("Can't post recv: %d\n", ret);
				put_recvmsg(t, recvmsg);
				break;
			}
			credits++;
		}
	}

	spin_lock(&t->receive_credit_lock);
	t->recv_credits += credits;
	t->count_avail_recvmsg -= credits;
	spin_unlock(&t->receive_credit_lock);

	spin_lock(&t->lock_new_recv_credits);
	t->new_recv_credits += credits;
	spin_unlock(&t->lock_new_recv_credits);

	if (credits)
		queue_work(smb_direct_wq, &t->send_immediate_work);
}

static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smb_direct_sendmsg *sendmsg, *sibling;
	struct smb_direct_transport *t;
	struct list_head *pos, *prev, *end;

	sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe);
	t = sendmsg->transport;

	ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
		pr_err("Send error. status='%s (%d)', opcode=%d\n",
		       ib_wc_status_msg(wc->status), wc->status,
		       wc->opcode);
		smb_direct_disconnect_rdma_connection(t);
	}

	if (atomic_dec_and_test(&t->send_pending))
		wake_up(&t->wait_send_pending);

	/* iterate and free the list of messages in reverse. the list's head
	 * is invalid.
	 */
	for (pos = &sendmsg->list, prev = pos->prev, end = sendmsg->list.next;
	     prev != end; pos = prev, prev = prev->prev) {
		sibling = container_of(pos, struct smb_direct_sendmsg, list);
		smb_direct_free_sendmsg(t, sibling);
	}

	sibling = container_of(pos, struct smb_direct_sendmsg, list);
	smb_direct_free_sendmsg(t, sibling);
}

static int manage_credits_prior_sending(struct smb_direct_transport *t)
{
	int new_credits;

	spin_lock(&t->lock_new_recv_credits);
	new_credits = t->new_recv_credits;
	t->new_recv_credits = 0;
	spin_unlock(&t->lock_new_recv_credits);

	return new_credits;
}

static int smb_direct_post_send(struct smb_direct_transport *t,
				struct ib_send_wr *wr)
{
	int ret;

	atomic_inc(&t->send_pending);
	ret = ib_post_send(t->qp, wr, NULL);
	if (ret) {
		pr_err("failed to post send: %d\n", ret);
		if (atomic_dec_and_test(&t->send_pending))
			wake_up(&t->wait_send_pending);
		smb_direct_disconnect_rdma_connection(t);
	}
	return ret;
}

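/*
 * Sends can be batched through a smb_direct_send_ctx: messages accumulate
 * on msg_list with their work requests chained via wr.next, and only the
 * final WR of a flushed chain is signaled, so a single completion frees
 * the whole batch (see send_done()).
 */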
static void smb_direct_send_ctx_init(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     bool need_invalidate_rkey,
				     unsigned int remote_key)
{
	INIT_LIST_HEAD(&send_ctx->msg_list);
	send_ctx->wr_cnt = 0;
	send_ctx->need_invalidate_rkey = need_invalidate_rkey;
	send_ctx->remote_key = remote_key;
}

static int smb_direct_flush_send_list(struct smb_direct_transport *t,
				      struct smb_direct_send_ctx *send_ctx,
				      bool is_last)
{
	struct smb_direct_sendmsg *first, *last;
	int ret;

	if (list_empty(&send_ctx->msg_list))
		return 0;

	first = list_first_entry(&send_ctx->msg_list,
				 struct smb_direct_sendmsg,
				 list);
	last = list_last_entry(&send_ctx->msg_list,
			       struct smb_direct_sendmsg,
			       list);

	last->wr.send_flags = IB_SEND_SIGNALED;
	last->wr.wr_cqe = &last->cqe;
	if (is_last && send_ctx->need_invalidate_rkey) {
		last->wr.opcode = IB_WR_SEND_WITH_INV;
		last->wr.ex.invalidate_rkey = send_ctx->remote_key;
	}

	ret = smb_direct_post_send(t, &first->wr);
	if (!ret) {
		smb_direct_send_ctx_init(t, send_ctx,
					 send_ctx->need_invalidate_rkey,
					 send_ctx->remote_key);
	} else {
		atomic_add(send_ctx->wr_cnt, &t->send_credits);
		wake_up(&t->wait_send_credits);
		list_for_each_entry_safe(first, last, &send_ctx->msg_list,
					 list) {
			smb_direct_free_sendmsg(t, first);
		}
	}
	return ret;
}

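/*
 * Credit acquisition: optimistically subtract the needed credits and, if
 * the balance would go negative, give them back and sleep until the peer
 * grants more or the connection drops.
 */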
static int wait_for_credits(struct smb_direct_transport *t,
			    wait_queue_head_t *waitq, atomic_t *total_credits,
			    int needed)
{
	int ret;

	do {
		if (atomic_sub_return(needed, total_credits) >= 0)
			return 0;

		atomic_add(needed, total_credits);
		ret = wait_event_interruptible(*waitq,
					       atomic_read(total_credits) >= needed ||
					       t->status != SMB_DIRECT_CS_CONNECTED);

		if (t->status != SMB_DIRECT_CS_CONNECTED)
			return -ENOTCONN;
		else if (ret < 0)
			return ret;
	} while (true);
}

static int wait_for_send_credits(struct smb_direct_transport *t,
				 struct smb_direct_send_ctx *send_ctx)
{
	int ret;

	if (send_ctx &&
	    (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) {
		ret = smb_direct_flush_send_list(t, send_ctx, false);
		if (ret)
			return ret;
	}

	return wait_for_credits(t, &t->wait_send_credits, &t->send_credits, 1);
}

static int wait_for_rw_credits(struct smb_direct_transport *t, int credits)
{
	return wait_for_credits(t, &t->wait_rw_credits, &t->rw_credits, credits);
}

static int calc_rw_credits(struct smb_direct_transport *t,
			   char *buf, unsigned int len)
{
	return DIV_ROUND_UP(get_buf_page_count(buf, len),
			    t->pages_per_rw_credit);
}

static int smb_direct_create_header(struct smb_direct_transport *t,
				    int size, int remaining_data_length,
				    struct smb_direct_sendmsg **sendmsg_out)
{
	struct smb_direct_sendmsg *sendmsg;
	struct smb_direct_data_transfer *packet;
	int header_length;
	int ret;

	sendmsg = smb_direct_alloc_sendmsg(t);
	if (IS_ERR(sendmsg))
		return PTR_ERR(sendmsg);

	/* Fill in the packet header */
	packet = (struct smb_direct_data_transfer *)sendmsg->packet;
	packet->credits_requested = cpu_to_le16(t->send_credit_target);
	packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));

	packet->flags = 0;
	packet->reserved = 0;
	if (!size)
		packet->data_offset = 0;
	else
		packet->data_offset = cpu_to_le32(24);
	packet->data_length = cpu_to_le32(size);
	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
	packet->padding = 0;

	ksmbd_debug(RDMA,
		    "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
		    le16_to_cpu(packet->credits_requested),
		    le16_to_cpu(packet->credits_granted),
		    le32_to_cpu(packet->data_offset),
		    le32_to_cpu(packet->data_length),
		    le32_to_cpu(packet->remaining_data_length));

	/* Map the packet to DMA */
	header_length = sizeof(struct smb_direct_data_transfer);
	/* If this is a packet without payload, don't send padding */
	if (!size)
		header_length =
			offsetof(struct smb_direct_data_transfer, padding);

	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
						 (void *)packet,
						 header_length,
						 DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	sendmsg->num_sge = 1;
	sendmsg->sge[0].length = header_length;
	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;

	*sendmsg_out = sendmsg;
	return 0;
}

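/*
 * Build a scatterlist over a kernel buffer that may be either vmalloc'ed
 * or linearly mapped; pages are resolved one at a time, so the buffer
 * does not need to be physically contiguous.
 */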
static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nentries)
{
	bool high = is_vmalloc_addr(buf);
	struct page *page;
	int offset, len;
	int i = 0;

	if (size <= 0 || nentries < get_buf_page_count(buf, size))
		return -EINVAL;

	offset = offset_in_page(buf);
	buf -= offset;
	while (size > 0) {
		len = min_t(int, PAGE_SIZE - offset, size);
		if (high)
			page = vmalloc_to_page(buf);
		else
			page = kmap_to_page(buf);

		if (!sg_list)
			return -EINVAL;
		sg_set_page(sg_list, page, len, offset);
		sg_list = sg_next(sg_list);

		buf += PAGE_SIZE;
		size -= len;
		offset = 0;
		i++;
	}
	return i;
}

static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
			      struct scatterlist *sg_list, int nentries,
			      enum dma_data_direction dir)
{
	int npages;

	npages = get_sg_list(buf, size, sg_list, nentries);
	if (npages < 0)
		return -EINVAL;
	return ib_dma_map_sg(device, sg_list, npages, dir);
}

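/*
 * Post a single send message. With a send_ctx the message is only queued
 * and chained to the previous WR for a later flush; without one it is
 * posted immediately as a signaled WR.
 */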
static int post_sendmsg(struct smb_direct_transport *t,
			struct smb_direct_send_ctx *send_ctx,
			struct smb_direct_sendmsg *msg)
{
	int i;

	for (i = 0; i < msg->num_sge; i++)
		ib_dma_sync_single_for_device(t->cm_id->device,
					      msg->sge[i].addr, msg->sge[i].length,
					      DMA_TO_DEVICE);

	msg->cqe.done = send_done;
	msg->wr.opcode = IB_WR_SEND;
	msg->wr.sg_list = &msg->sge[0];
	msg->wr.num_sge = msg->num_sge;
	msg->wr.next = NULL;

	if (send_ctx) {
		msg->wr.wr_cqe = NULL;
		msg->wr.send_flags = 0;
		if (!list_empty(&send_ctx->msg_list)) {
			struct smb_direct_sendmsg *last;

			last = list_last_entry(&send_ctx->msg_list,
					       struct smb_direct_sendmsg,
					       list);
			last->wr.next = &msg->wr;
		}
		list_add_tail(&msg->list, &send_ctx->msg_list);
		send_ctx->wr_cnt++;
		return 0;
	}

	msg->wr.wr_cqe = &msg->cqe;
	msg->wr.send_flags = IB_SEND_SIGNALED;
	return smb_direct_post_send(t, &msg->wr);
}

static int smb_direct_post_send_data(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length)
{
	int i, j, ret;
	struct smb_direct_sendmsg *msg;
	int data_length;
	struct scatterlist sg[SMB_DIRECT_MAX_SEND_SGES - 1];

	ret = wait_for_send_credits(t, send_ctx);
	if (ret)
		return ret;

	data_length = 0;
	for (i = 0; i < niov; i++)
		data_length += iov[i].iov_len;

	ret = smb_direct_create_header(t, data_length, remaining_data_length,
				       &msg);
	if (ret) {
		atomic_inc(&t->send_credits);
		return ret;
	}

	for (i = 0; i < niov; i++) {
		struct ib_sge *sge;
		int sg_cnt;

		sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1);
		sg_cnt = get_mapped_sg_list(t->cm_id->device,
					    iov[i].iov_base, iov[i].iov_len,
					    sg, SMB_DIRECT_MAX_SEND_SGES - 1,
					    DMA_TO_DEVICE);
		if (sg_cnt <= 0) {
			pr_err("failed to map buffer\n");
			ret = -ENOMEM;
			goto err;
		} else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
			pr_err("buffer does not fit into sges\n");
			ret = -E2BIG;
			ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
					DMA_TO_DEVICE);
			goto err;
		}

		for (j = 0; j < sg_cnt; j++) {
			sge = &msg->sge[msg->num_sge];
			sge->addr = sg_dma_address(&sg[j]);
			sge->length = sg_dma_len(&sg[j]);
			sge->lkey = t->pd->local_dma_lkey;
			msg->num_sge++;
		}
	}

	ret = post_sendmsg(t, send_ctx, msg);
	if (ret)
		goto err;
	return 0;
err:
	smb_direct_free_sendmsg(t, msg);
	atomic_inc(&t->send_credits);
	return ret;
}

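/*
 * writev: pack as many iovecs as fit under max_iov_size into each
 * SMB_DIRECT data-transfer message, split any single iovec larger than
 * max_iov_size across multiple messages, and flush the whole batch at
 * the end.
 */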
static int smb_direct_writev(struct ksmbd_transport *t,
			     struct kvec *iov, int niovs, int buflen,
			     bool need_invalidate, unsigned int remote_key)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
	int remaining_data_length;
	int start, i, j;
	int max_iov_size = st->max_send_size -
			sizeof(struct smb_direct_data_transfer);
	int ret;
	struct kvec vec;
	struct smb_direct_send_ctx send_ctx;

	if (st->status != SMB_DIRECT_CS_CONNECTED)
		return -ENOTCONN;

	//FIXME: skip RFC1002 header..
	buflen -= 4;
	iov[0].iov_base += 4;
	iov[0].iov_len -= 4;

	remaining_data_length = buflen;
	ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);

	smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
	start = i = 0;
	buflen = 0;
	while (true) {
		buflen += iov[i].iov_len;
		if (buflen > max_iov_size) {
			if (i > start) {
				remaining_data_length -=
					(buflen - iov[i].iov_len);
				ret = smb_direct_post_send_data(st, &send_ctx,
								&iov[start], i - start,
								remaining_data_length);
				if (ret)
					goto done;
			} else {
				/* iov[start] is too big, break it */
				int nvec = (buflen + max_iov_size - 1) /
						max_iov_size;

				for (j = 0; j < nvec; j++) {
					vec.iov_base =
						(char *)iov[start].iov_base +
						j * max_iov_size;
					vec.iov_len =
						min_t(int, max_iov_size,
						      buflen - max_iov_size * j);
					remaining_data_length -= vec.iov_len;
					ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1,
									remaining_data_length);
					if (ret)
						goto done;
				}
				i++;
				if (i == niovs)
					break;
			}
			start = i;
			buflen = 0;
		} else {
			i++;
			if (i == niovs) {
				/* send out all remaining vecs */
				remaining_data_length -= buflen;
				ret = smb_direct_post_send_data(st, &send_ctx,
								&iov[start], i - start,
								remaining_data_length);
				if (ret)
					goto done;
				break;
			}
		}
	}

done:
	ret = smb_direct_flush_send_list(st, &send_ctx, true);

	/*
	 * As an optimization, we do not wait for each I/O to finish
	 * before sending the next one. Send them all, then wait for the
	 * pending send count to reach 0, which means all I/Os have been
	 * posted and we can return.
	 */

	wait_event(st->wait_send_pending,
		   atomic_read(&st->send_pending) == 0);
	return ret;
}

static void smb_direct_free_rdma_rw_msg(struct smb_direct_transport *t,
					struct smb_direct_rdma_rw_msg *msg,
					enum dma_data_direction dir)
{
	rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
			    msg->sgt.sgl, msg->sgt.nents, dir);
	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
	kfree(msg);
}

static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
			    enum dma_data_direction dir)
{
	struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe,
							  struct smb_direct_rdma_rw_msg, cqe);
	struct smb_direct_transport *t = msg->t;

	if (wc->status != IB_WC_SUCCESS) {
		msg->status = -EIO;
		pr_err("read/write error. opcode = %d, status = %s(%d)\n",
		       wc->opcode, ib_wc_status_msg(wc->status), wc->status);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			smb_direct_disconnect_rdma_connection(t);
	}

	complete(msg->completion);
}

static void read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	read_write_done(cq, wc, DMA_FROM_DEVICE);
}

static void write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	read_write_done(cq, wc, DMA_TO_DEVICE);
}

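/*
 * RDMA read/write of a remote buffer described by a list of
 * smb2_buffer_desc_v1 entries: one rdma_rw_ctx is initialized per
 * descriptor, their work requests are chained and posted with a single
 * ib_post_send(), and the last context's CQE signals the shared
 * completion.
 */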
static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
				void *buf, int buf_len,
				struct smb2_buffer_desc_v1 *desc,
				unsigned int desc_len,
				bool is_read)
{
	struct smb_direct_rdma_rw_msg *msg, *next_msg;
	int i, ret;
	DECLARE_COMPLETION_ONSTACK(completion);
	struct ib_send_wr *first_wr;
	LIST_HEAD(msg_list);
	char *desc_buf;
	int credits_needed;
	unsigned int desc_buf_len;
	size_t total_length = 0;

	if (t->status != SMB_DIRECT_CS_CONNECTED)
		return -ENOTCONN;

	/* calculate needed credits */
	credits_needed = 0;
	desc_buf = buf;
	for (i = 0; i < desc_len / sizeof(*desc); i++) {
		desc_buf_len = le32_to_cpu(desc[i].length);

		credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len);
		desc_buf += desc_buf_len;
		total_length += desc_buf_len;
		if (desc_buf_len == 0 || total_length > buf_len ||
		    total_length > t->max_rdma_rw_size)
			return -EINVAL;
	}

	ksmbd_debug(RDMA, "RDMA %s, len %#x, needed credits %#x\n",
		    is_read ? "read" : "write", buf_len, credits_needed);

	ret = wait_for_rw_credits(t, credits_needed);
	if (ret < 0)
		return ret;

	/* build rdma_rw_ctx for each descriptor */
	desc_buf = buf;
	for (i = 0; i < desc_len / sizeof(*desc); i++) {
		msg = kzalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
			      sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
		if (!msg) {
			ret = -ENOMEM;
			goto out;
		}

		desc_buf_len = le32_to_cpu(desc[i].length);

		msg->t = t;
		msg->cqe.done = is_read ? read_done : write_done;
		msg->completion = &completion;

		msg->sgt.sgl = &msg->sg_list[0];
		ret = sg_alloc_table_chained(&msg->sgt,
					     get_buf_page_count(desc_buf, desc_buf_len),
					     msg->sg_list, SG_CHUNK_SIZE);
		if (ret) {
			kfree(msg);
			ret = -ENOMEM;
			goto out;
		}

		ret = get_sg_list(desc_buf, desc_buf_len,
				  msg->sgt.sgl, msg->sgt.orig_nents);
		if (ret < 0) {
			sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
			kfree(msg);
			goto out;
		}

		ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
				       msg->sgt.sgl,
				       get_buf_page_count(desc_buf, desc_buf_len),
				       0,
				       le64_to_cpu(desc[i].offset),
				       le32_to_cpu(desc[i].token),
				       is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
		if (ret < 0) {
			pr_err("failed to init rdma_rw_ctx: %d\n", ret);
			sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
			kfree(msg);
			goto out;
		}

		list_add_tail(&msg->list, &msg_list);
		desc_buf += desc_buf_len;
	}

	/* concatenate work requests of rdma_rw_ctxs */
	first_wr = NULL;
	list_for_each_entry_reverse(msg, &msg_list, list) {
		first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
					   &msg->cqe, first_wr);
	}

	ret = ib_post_send(t->qp, first_wr, NULL);
	if (ret) {
		pr_err("failed to post send wr for RDMA R/W: %d\n", ret);
		goto out;
	}

	msg = list_last_entry(&msg_list, struct smb_direct_rdma_rw_msg, list);
	wait_for_completion(&completion);
	ret = msg->status;
out:
	list_for_each_entry_safe(msg, next_msg, &msg_list, list) {
		list_del(&msg->list);
		smb_direct_free_rdma_rw_msg(t, msg,
					    is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
	atomic_add(credits_needed, &t->rw_credits);
	wake_up(&t->wait_rw_credits);
	return ret;
}

static int smb_direct_rdma_write(struct ksmbd_transport *t,
				 void *buf, unsigned int buflen,
				 struct smb2_buffer_desc_v1 *desc,
				 unsigned int desc_len)
{
	return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
				    desc, desc_len, false);
}

static int smb_direct_rdma_read(struct ksmbd_transport *t,
				void *buf, unsigned int buflen,
				struct smb2_buffer_desc_v1 *desc,
				unsigned int desc_len)
{
	return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
				    desc, desc_len, true);
}

static void smb_direct_disconnect(struct ksmbd_transport *t)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);

	ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", st->cm_id);

	smb_direct_disconnect_rdma_work(&st->disconnect_work);
	wait_event_interruptible(st->wait_status,
				 st->status == SMB_DIRECT_CS_DISCONNECTED);
	free_transport(st);
}

static void smb_direct_shutdown(struct ksmbd_transport *t)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);

	ksmbd_debug(RDMA, "smb-direct shutdown cm_id=%p\n", st->cm_id);

	smb_direct_disconnect_rdma_work(&st->disconnect_work);
}

static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event)
{
	struct smb_direct_transport *t = cm_id->context;

	ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n",
		    cm_id, rdma_event_msg(event->event), event->event);

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED: {
		t->status = SMB_DIRECT_CS_CONNECTED;
		wake_up_interruptible(&t->wait_status);
		break;
	}
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_DISCONNECTED: {
		ib_drain_qp(t->qp);

		t->status = SMB_DIRECT_CS_DISCONNECTED;
		wake_up_interruptible(&t->wait_status);
		wake_up_interruptible(&t->wait_reassembly_queue);
		wake_up(&t->wait_send_credits);
		break;
	}
	case RDMA_CM_EVENT_CONNECT_ERROR: {
		t->status = SMB_DIRECT_CS_DISCONNECTED;
		wake_up_interruptible(&t->wait_status);
		break;
	}
	default:
		pr_err("Unexpected RDMA CM event. cm_id=%p, event=%s (%d)\n",
		       cm_id, rdma_event_msg(event->event),
		       event->event);
		break;
	}
	return 0;
}

static void smb_direct_qpair_handler(struct ib_event *event, void *context)
{
	struct smb_direct_transport *t = context;

	ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n",
		    t->cm_id, ib_event_msg(event->event), event->event);

	switch (event->event) {
	case IB_EVENT_CQ_ERR:
	case IB_EVENT_QP_FATAL:
		smb_direct_disconnect_rdma_connection(t);
		break;
	default:
		break;
	}
}

static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
					      int failed)
{
	struct smb_direct_sendmsg *sendmsg;
	struct smb_direct_negotiate_resp *resp;
	int ret;

	sendmsg = smb_direct_alloc_sendmsg(t);
	if (IS_ERR(sendmsg))
		return -ENOMEM;

	resp = (struct smb_direct_negotiate_resp *)sendmsg->packet;
	if (failed) {
		memset(resp, 0, sizeof(*resp));
		resp->min_version = cpu_to_le16(0x0100);
		resp->max_version = cpu_to_le16(0x0100);
		resp->status = STATUS_NOT_SUPPORTED;
	} else {
		resp->status = STATUS_SUCCESS;
		resp->min_version = SMB_DIRECT_VERSION_LE;
		resp->max_version = SMB_DIRECT_VERSION_LE;
		resp->negotiated_version = SMB_DIRECT_VERSION_LE;
		resp->reserved = 0;
		resp->credits_requested =
				cpu_to_le16(t->send_credit_target);
		resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
		resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size);
		resp->preferred_send_size = cpu_to_le32(t->max_send_size);
		resp->max_receive_size = cpu_to_le32(t->max_recv_size);
		resp->max_fragmented_size =
				cpu_to_le32(t->max_fragmented_recv_size);
	}

	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
						 (void *)resp, sizeof(*resp),
						 DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	sendmsg->num_sge = 1;
	sendmsg->sge[0].length = sizeof(*resp);
	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;

	ret = post_sendmsg(t, NULL, sendmsg);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	wait_event(t->wait_send_pending,
		   atomic_read(&t->send_pending) == 0);
	return 0;
}

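/*
 * Accept the client's RDMA connection. iWARP expects the responder to
 * exchange IRD/ORD values in the connection's private data, so a small
 * header is attached for iWARP ports; InfiniBand connections are accepted
 * without private data.
 */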
static int smb_direct_accept_client(struct smb_direct_transport *t)
{
	struct rdma_conn_param conn_param;
	struct ib_port_immutable port_immutable;
	u32 ird_ord_hdr[2];
	int ret;

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom,
					   SMB_DIRECT_CM_INITIATOR_DEPTH);
	conn_param.responder_resources = 0;

	t->cm_id->device->ops.get_port_immutable(t->cm_id->device,
						 t->cm_id->port_num,
						 &port_immutable);
	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
		ird_ord_hdr[0] = conn_param.responder_resources;
		ird_ord_hdr[1] = 1;
		conn_param.private_data = ird_ord_hdr;
		conn_param.private_data_len = sizeof(ird_ord_hdr);
	} else {
		conn_param.private_data = NULL;
		conn_param.private_data_len = 0;
	}
	conn_param.retry_count = SMB_DIRECT_CM_RETRY;
	conn_param.rnr_retry_count = SMB_DIRECT_CM_RNR_RETRY;
	conn_param.flow_control = 0;

	ret = rdma_accept(t->cm_id, &conn_param);
	if (ret) {
		pr_err("error at rdma_accept: %d\n", ret);
		return ret;
	}
	return 0;
}

static int smb_direct_prepare_negotiation(struct smb_direct_transport *t)
{
	int ret;
	struct smb_direct_recvmsg *recvmsg;

	recvmsg = get_free_recvmsg(t);
	if (!recvmsg)
		return -ENOMEM;
	recvmsg->type = SMB_DIRECT_MSG_NEGOTIATE_REQ;

	ret = smb_direct_post_recv(t, recvmsg);
	if (ret) {
		pr_err("Can't post recv: %d\n", ret);
		goto out_err;
	}

	t->negotiation_requested = false;
	ret = smb_direct_accept_client(t);
	if (ret) {
		pr_err("Can't accept client\n");
		goto out_err;
	}

	smb_direct_post_recv_credits(&t->post_recv_credits_work.work);
	return 0;
out_err:
	put_recvmsg(t, recvmsg);
	return ret;
}

static unsigned int smb_direct_get_max_fr_pages(struct smb_direct_transport *t)
{
	return min_t(unsigned int,
		     t->cm_id->device->attrs.max_fast_reg_page_list_len,
		     256);
}

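/*
 * Size the queue pair: every RDMA R/W credit must budget enough work
 * requests for MR registration, the R/W itself, and local and remote
 * invalidation, and the send queue must additionally cover the send
 * credit target, all within the device's CQE and QP WR limits.
 */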
static int smb_direct_init_params(struct smb_direct_transport *t,
				  struct ib_qp_cap *cap)
{
	struct ib_device *device = t->cm_id->device;
	int max_send_sges, max_rw_wrs, max_send_wrs;
	unsigned int max_sge_per_wr, wrs_per_credit;

	/* need 3 more SGEs because an SMB_DIRECT header, an SMB2 header,
	 * and an SMB2 response could each be mapped.
	 */
	t->max_send_size = smb_direct_max_send_size;
	max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 3;
	if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
		pr_err("max_send_size %d is too large\n", t->max_send_size);
		return -EINVAL;
	}

	/* Calculate the number of work requests for RDMA R/W.
	 * The maximum number of pages which can be registered
	 * with one Memory region can be transferred with one
	 * R/W credit. And at least 4 work requests for each credit
	 * are needed for MR registration, RDMA R/W, local & remote
	 * MR invalidation.
	 */
	t->max_rdma_rw_size = smb_direct_max_read_write_size;
	t->pages_per_rw_credit = smb_direct_get_max_fr_pages(t);
	t->max_rw_credits = DIV_ROUND_UP(t->max_rdma_rw_size,
					 (t->pages_per_rw_credit - 1) *
					 PAGE_SIZE);

	max_sge_per_wr = min_t(unsigned int, device->attrs.max_send_sge,
			       device->attrs.max_sge_rd);
	max_sge_per_wr = max_t(unsigned int, max_sge_per_wr,
			       max_send_sges);
	wrs_per_credit = max_t(unsigned int, 4,
			       DIV_ROUND_UP(t->pages_per_rw_credit,
					    max_sge_per_wr) + 1);
	max_rw_wrs = t->max_rw_credits * wrs_per_credit;

	max_send_wrs = smb_direct_send_credit_target + max_rw_wrs;
	if (max_send_wrs > device->attrs.max_cqe ||
	    max_send_wrs > device->attrs.max_qp_wr) {
		pr_err("consider lowering send_credit_target = %d\n",
		       smb_direct_send_credit_target);
		pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
		       device->attrs.max_cqe, device->attrs.max_qp_wr);
		return -EINVAL;
	}

	if (smb_direct_receive_credit_max > device->attrs.max_cqe ||
	    smb_direct_receive_credit_max > device->attrs.max_qp_wr) {
		pr_err("consider lowering receive_credit_max = %d\n",
		       smb_direct_receive_credit_max);
		pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
		       device->attrs.max_cqe, device->attrs.max_qp_wr);
		return -EINVAL;
	}

	if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) {
		pr_err("warning: device max_recv_sge = %d too small\n",
		       device->attrs.max_recv_sge);
		return -EINVAL;
	}

	t->recv_credits = 0;
	t->count_avail_recvmsg = 0;

	t->recv_credit_max = smb_direct_receive_credit_max;
	t->recv_credit_target = 10;
	t->new_recv_credits = 0;

	t->send_credit_target = smb_direct_send_credit_target;
	atomic_set(&t->send_credits, 0);
	atomic_set(&t->rw_credits, t->max_rw_credits);

	t->max_send_size = smb_direct_max_send_size;
	t->max_recv_size = smb_direct_max_receive_size;
	t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;

	cap->max_send_wr = max_send_wrs;
	cap->max_recv_wr = t->recv_credit_max;
	cap->max_send_sge = max_sge_per_wr;
	cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
	cap->max_inline_data = 0;
	cap->max_rdma_ctxs = t->max_rw_credits;
	return 0;
}

static void smb_direct_destroy_pools(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg;

	while ((recvmsg = get_free_recvmsg(t)))
		mempool_free(recvmsg, t->recvmsg_mempool);
	while ((recvmsg = get_empty_recvmsg(t)))
		mempool_free(recvmsg, t->recvmsg_mempool);

	mempool_destroy(t->recvmsg_mempool);
	t->recvmsg_mempool = NULL;

	kmem_cache_destroy(t->recvmsg_cache);
	t->recvmsg_cache = NULL;

	mempool_destroy(t->sendmsg_mempool);
	t->sendmsg_mempool = NULL;

	kmem_cache_destroy(t->sendmsg_cache);
	t->sendmsg_cache = NULL;
}

static int smb_direct_create_pools(struct smb_direct_transport *t)
{
	char name[80];
	int i;
	struct smb_direct_recvmsg *recvmsg;

	snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t);
	t->sendmsg_cache = kmem_cache_create(name,
					     sizeof(struct smb_direct_sendmsg) +
					      sizeof(struct smb_direct_negotiate_resp),
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!t->sendmsg_cache)
		return -ENOMEM;

	t->sendmsg_mempool = mempool_create(t->send_credit_target,
					    mempool_alloc_slab, mempool_free_slab,
					    t->sendmsg_cache);
	if (!t->sendmsg_mempool)
		goto err;

	snprintf(name, sizeof(name), "smb_direct_resp_%p", t);
	t->recvmsg_cache = kmem_cache_create(name,
					     sizeof(struct smb_direct_recvmsg) +
					      t->max_recv_size,
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!t->recvmsg_cache)
		goto err;

	t->recvmsg_mempool =
		mempool_create(t->recv_credit_max, mempool_alloc_slab,
			       mempool_free_slab, t->recvmsg_cache);
	if (!t->recvmsg_mempool)
		goto err;

	INIT_LIST_HEAD(&t->recvmsg_queue);

	for (i = 0; i < t->recv_credit_max; i++) {
		recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL);
		if (!recvmsg)
			goto err;
		recvmsg->transport = t;
		list_add(&recvmsg->list, &t->recvmsg_queue);
	}
	t->count_avail_recvmsg = t->recv_credit_max;

	return 0;
err:
	smb_direct_destroy_pools(t);
	return -ENOMEM;
}

static int smb_direct_create_qpair(struct smb_direct_transport *t,
				   struct ib_qp_cap *cap)
{
	int ret;
	struct ib_qp_init_attr qp_attr;
	int pages_per_rw;

	t->pd = ib_alloc_pd(t->cm_id->device, 0);
	if (IS_ERR(t->pd)) {
		pr_err("Can't create RDMA PD\n");
		ret = PTR_ERR(t->pd);
		t->pd = NULL;
		return ret;
	}

	t->send_cq = ib_alloc_cq(t->cm_id->device, t,
				 smb_direct_send_credit_target + cap->max_rdma_ctxs,
				 0, IB_POLL_WORKQUEUE);
	if (IS_ERR(t->send_cq)) {
		pr_err("Can't create RDMA send CQ\n");
		ret = PTR_ERR(t->send_cq);
		t->send_cq = NULL;
		goto err;
	}

	t->recv_cq = ib_alloc_cq(t->cm_id->device, t,
				 t->recv_credit_max, 0, IB_POLL_WORKQUEUE);
	if (IS_ERR(t->recv_cq)) {
		pr_err("Can't create RDMA recv CQ\n");
		ret = PTR_ERR(t->recv_cq);
		t->recv_cq = NULL;
		goto err;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.event_handler = smb_direct_qpair_handler;
	qp_attr.qp_context = t;
	qp_attr.cap = *cap;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = t->send_cq;
	qp_attr.recv_cq = t->recv_cq;
	qp_attr.port_num = ~0;

	ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr);
	if (ret) {
		pr_err("Can't create RDMA QP: %d\n", ret);
		goto err;
	}

	t->qp = t->cm_id->qp;
	t->cm_id->event_handler = smb_direct_cm_handler;

	pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
	if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) {
		ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs,
				      t->max_rw_credits, IB_MR_TYPE_MEM_REG,
				      t->pages_per_rw_credit, 0);
		if (ret) {
			pr_err("failed to init mr pool count %d pages %d\n",
			       t->max_rw_credits, t->pages_per_rw_credit);
			goto err;
		}
	}

	return 0;
err:
	if (t->qp) {
		ib_destroy_qp(t->qp);
		t->qp = NULL;
	}
	if (t->recv_cq) {
		ib_destroy_cq(t->recv_cq);
		t->recv_cq = NULL;
	}
	if (t->send_cq) {
		ib_destroy_cq(t->send_cq);
		t->send_cq = NULL;
	}
	if (t->pd) {
		ib_dealloc_pd(t->pd);
		t->pd = NULL;
	}
	return ret;
}

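/*
 * First step of the transport: wait for the client's SMB_DIRECT negotiate
 * request, clamp the connection parameters against what the peer
 * advertised, and send the negotiate response.
 */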
static int smb_direct_prepare(struct ksmbd_transport *t)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_negotiate_req *req;
	int ret;

	ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
	ret = wait_event_interruptible_timeout(st->wait_status,
					       st->negotiation_requested ||
					       st->status == SMB_DIRECT_CS_DISCONNECTED,
					       SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
	if (ret <= 0 || st->status == SMB_DIRECT_CS_DISCONNECTED)
		return ret < 0 ? ret : -ETIMEDOUT;

	recvmsg = get_first_reassembly(st);
	if (!recvmsg)
		return -ECONNABORTED;

	ret = smb_direct_check_recvmsg(recvmsg);
	if (ret == -ECONNABORTED)
		goto out;

	req = (struct smb_direct_negotiate_req *)recvmsg->packet;
	st->max_recv_size = min_t(int, st->max_recv_size,
				  le32_to_cpu(req->preferred_send_size));
	st->max_send_size = min_t(int, st->max_send_size,
				  le32_to_cpu(req->max_receive_size));
	st->max_fragmented_send_size =
		le32_to_cpu(req->max_fragmented_size);
	st->max_fragmented_recv_size =
		(st->recv_credit_max * st->max_recv_size) / 2;

	ret = smb_direct_send_negotiate_response(st, ret);
out:
	spin_lock_irq(&st->reassembly_queue_lock);
	st->reassembly_queue_length--;
	list_del(&recvmsg->list);
	spin_unlock_irq(&st->reassembly_queue_lock);
	put_recvmsg(st, recvmsg);

	return ret;
}

static int smb_direct_connect(struct smb_direct_transport *st)
{
	int ret;
	struct ib_qp_cap qp_cap;

	ret = smb_direct_init_params(st, &qp_cap);
	if (ret) {
		pr_err("Can't configure RDMA parameters\n");
		return ret;
	}

	ret = smb_direct_create_pools(st);
	if (ret) {
		pr_err("Can't init RDMA pool: %d\n", ret);
		return ret;
	}

	ret = smb_direct_create_qpair(st, &qp_cap);
	if (ret) {
		pr_err("Can't accept RDMA client: %d\n", ret);
		return ret;
	}

	ret = smb_direct_prepare_negotiation(st);
	if (ret) {
		pr_err("Can't negotiate: %d\n", ret);
		return ret;
	}
	return 0;
}

static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
{
	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		return false;
	if (attrs->max_fast_reg_page_list_len == 0)
		return false;
	return true;
}

static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
{
	struct smb_direct_transport *t;
	int ret;

	if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
		ksmbd_debug(RDMA,
			    "Fast Registration Work Requests are not supported. device capabilities=%llx\n",
			    new_cm_id->device->attrs.device_cap_flags);
		return -EPROTONOSUPPORT;
	}

	t = alloc_transport(new_cm_id);
	if (!t)
		return -ENOMEM;

	ret = smb_direct_connect(t);
	if (ret)
		goto out_err;

	KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
					      KSMBD_TRANS(t)->conn, "ksmbd:r%u",
					      smb_direct_port);
	if (IS_ERR(KSMBD_TRANS(t)->handler)) {
		ret = PTR_ERR(KSMBD_TRANS(t)->handler);
		pr_err("Can't start thread\n");
		goto out_err;
	}

	return 0;
out_err:
	free_transport(t);
	return ret;
}

static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
				     struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST: {
		int ret = smb_direct_handle_connect_request(cm_id);

		if (ret) {
			pr_err("Can't create transport: %d\n", ret);
			return ret;
		}

		ksmbd_debug(RDMA, "Received connection request. cm_id=%p\n",
			    cm_id);
		break;
	}
	default:
		pr_err("Unexpected listen event. cm_id=%p, event=%s (%d)\n",
		       cm_id, rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}

static int smb_direct_listen(int port)
{
	int ret;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};

	cm_id = rdma_create_id(&init_net, smb_direct_listen_handler,
			       &smb_direct_listener, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id));
		return PTR_ERR(cm_id);
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
	if (ret) {
		pr_err("Can't bind: %d\n", ret);
		goto err;
	}

	smb_direct_listener.cm_id = cm_id;

	ret = rdma_listen(cm_id, 10);
	if (ret) {
		pr_err("Can't listen: %d\n", ret);
		goto err;
	}
	return 0;
err:
	smb_direct_listener.cm_id = NULL;
	rdma_destroy_id(cm_id);
	return ret;
}

static int smb_direct_ib_client_add(struct ib_device *ib_dev)
{
	struct smb_direct_device *smb_dev;

	/* Use port 5445 if the device type is iWARP (not IB) */
	if (ib_dev->node_type != RDMA_NODE_IB_CA)
		smb_direct_port = SMB_DIRECT_PORT_IWARP;

	if (!ib_dev->ops.get_netdev ||
	    !rdma_frwr_is_supported(&ib_dev->attrs))
		return 0;

	smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
	if (!smb_dev)
		return -ENOMEM;
	smb_dev->ib_dev = ib_dev;

	write_lock(&smb_direct_device_lock);
	list_add(&smb_dev->list, &smb_direct_device_list);
	write_unlock(&smb_direct_device_lock);

	ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
	return 0;
}

static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
					void *client_data)
{
	struct smb_direct_device *smb_dev, *tmp;

	write_lock(&smb_direct_device_lock);
	list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) {
		if (smb_dev->ib_dev == ib_dev) {
			list_del(&smb_dev->list);
			kfree(smb_dev);
			break;
		}
	}
	write_unlock(&smb_direct_device_lock);
}

static struct ib_client smb_direct_ib_client = {
	.name	= "ksmbd_smb_direct_ib",
	.add	= smb_direct_ib_client_add,
	.remove	= smb_direct_ib_client_remove,
};

int ksmbd_rdma_init(void)
{
	int ret;

	smb_direct_listener.cm_id = NULL;

	ret = ib_register_client(&smb_direct_ib_client);
	if (ret) {
		pr_err("failed to ib_register_client\n");
		return ret;
	}

	/* When a client is running out of send credits, the credits are
	 * granted by the server's sending a packet using this queue.
	 * This avoids the situation where a client cannot send packets
	 * for lack of credits.
	 */
	smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq",
					WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!smb_direct_wq)
		return -ENOMEM;

	ret = smb_direct_listen(smb_direct_port);
	if (ret) {
		destroy_workqueue(smb_direct_wq);
		smb_direct_wq = NULL;
		pr_err("Can't listen: %d\n", ret);
		return ret;
	}

	ksmbd_debug(RDMA, "init RDMA listener. cm_id=%p\n",
		    smb_direct_listener.cm_id);
	return 0;
}

void ksmbd_rdma_destroy(void)
{
	if (!smb_direct_listener.cm_id)
		return;

	ib_unregister_client(&smb_direct_ib_client);
	rdma_destroy_id(smb_direct_listener.cm_id);

	smb_direct_listener.cm_id = NULL;

	if (smb_direct_wq) {
		destroy_workqueue(smb_direct_wq);
		smb_direct_wq = NULL;
	}
}

bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
{
	struct smb_direct_device *smb_dev;
	int i;
	bool rdma_capable = false;

	read_lock(&smb_direct_device_lock);
	list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
		for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
			struct net_device *ndev;

			ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
							       i + 1);
			if (!ndev)
				continue;

			if (ndev == netdev) {
				dev_put(ndev);
				rdma_capable = true;
				goto out;
			}
			dev_put(ndev);
		}
	}
out:
	read_unlock(&smb_direct_device_lock);

	if (rdma_capable == false) {
		struct ib_device *ibdev;

		ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
		if (ibdev) {
			if (rdma_frwr_is_supported(&ibdev->attrs))
				rdma_capable = true;
			ib_device_put(ibdev);
		}
	}
	return rdma_capable;
}

static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
	.prepare	= smb_direct_prepare,
	.disconnect	= smb_direct_disconnect,
	.shutdown	= smb_direct_shutdown,
	.writev		= smb_direct_writev,
	.read		= smb_direct_read,
	.rdma_read	= smb_direct_rdma_read,
	.rdma_write	= smb_direct_rdma_write,
};