2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2012 Intel Corporation. All rights reserved.
8 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
16 * Copyright(c) 2012 Intel Corporation. All rights reserved.
17 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions
23 * * Redistributions of source code must retain the above copyright
24 * notice, this list of conditions and the following disclaimer.
25 * * Redistributions in binary form must reproduce the above copyright
26 * notice, this list of conditions and the following disclaimer in
27 * the documentation and/or other materials provided with the
29 * * Neither the name of Intel Corporation nor the names of its
30 * contributors may be used to endorse or promote products derived
31 * from this software without specific prior written permission.
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
34 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
35 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
36 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
37 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
38 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
39 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
40 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
41 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
42 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
43 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 * PCIe NTB Transport Linux driver
47 * Contact Information:
48 * Jon Mason <jon.mason@intel.com>
50 #include <linux/debugfs.h>
51 #include <linux/delay.h>
52 #include <linux/dmaengine.h>
53 #include <linux/dma-mapping.h>
54 #include <linux/errno.h>
55 #include <linux/export.h>
56 #include <linux/interrupt.h>
57 #include <linux/module.h>
58 #include <linux/pci.h>
59 #include <linux/slab.h>
60 #include <linux/types.h>
61 #include <linux/uaccess.h>
62 #include "linux/ntb.h"
63 #include "linux/ntb_transport.h"
65 #define NTB_TRANSPORT_VERSION 4
66 #define NTB_TRANSPORT_VER "4"
67 #define NTB_TRANSPORT_NAME "ntb_transport"
68 #define NTB_TRANSPORT_DESC "Software Queue-Pair Transport over NTB"
69 #define NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2)
71 MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
72 MODULE_VERSION(NTB_TRANSPORT_VER);
73 MODULE_LICENSE("Dual BSD/GPL");
74 MODULE_AUTHOR("Intel Corporation");
76 static unsigned long max_mw_size;
77 module_param(max_mw_size, ulong, 0644);
78 MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
80 static unsigned int transport_mtu = 0x10000;
81 module_param(transport_mtu, uint, 0644);
82 MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
84 static unsigned char max_num_clients;
85 module_param(max_num_clients, byte, 0644);
86 MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
88 static unsigned int copy_bytes = 1024;
89 module_param(copy_bytes, uint, 0644);
90 MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
93 module_param(use_dma, bool, 0644);
94 MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
96 static struct dentry *nt_debugfs_dir;
98 /* Only two-port NTB devices are supported */
99 #define PIDX NTB_DEF_PEER_IDX
101 struct ntb_queue_entry {
102 /* ntb_queue list reference */
103 struct list_head entry;
104 /* pointers to data to be transferred */
111 unsigned int tx_index;
112 unsigned int rx_index;
114 struct ntb_transport_qp *qp;
116 struct ntb_payload_header __iomem *tx_hdr;
117 struct ntb_payload_header *rx_hdr;
125 struct ntb_transport_qp {
126 struct ntb_transport_ctx *transport;
127 struct ntb_dev *ndev;
129 struct dma_chan *tx_dma_chan;
130 struct dma_chan *rx_dma_chan;
136 u8 qp_num; /* Only 64 QP's are allowed. 0-63 */
139 struct ntb_rx_info __iomem *rx_info;
140 struct ntb_rx_info *remote_rx_info;
142 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
143 void *data, int len);
144 struct list_head tx_free_q;
145 spinlock_t ntb_tx_free_q_lock;
147 dma_addr_t tx_mw_phys;
148 unsigned int tx_index;
149 unsigned int tx_max_entry;
150 unsigned int tx_max_frame;
152 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
153 void *data, int len);
154 struct list_head rx_post_q;
155 struct list_head rx_pend_q;
156 struct list_head rx_free_q;
157 /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
158 spinlock_t ntb_rx_q_lock;
160 unsigned int rx_index;
161 unsigned int rx_max_entry;
162 unsigned int rx_max_frame;
163 unsigned int rx_alloc_entry;
164 dma_cookie_t last_cookie;
165 struct tasklet_struct rxc_db_work;
167 void (*event_handler)(void *data, int status);
168 struct delayed_work link_work;
169 struct work_struct link_cleanup;
171 struct dentry *debugfs_dir;
172 struct dentry *debugfs_stats;
191 struct ntb_transport_mw {
192 phys_addr_t phys_addr;
193 resource_size_t phys_size;
194 resource_size_t xlat_align;
195 resource_size_t xlat_align_size;
203 struct ntb_transport_client_dev {
204 struct list_head entry;
205 struct ntb_transport_ctx *nt;
209 struct ntb_transport_ctx {
210 struct list_head entry;
211 struct list_head client_devs;
213 struct ntb_dev *ndev;
215 struct ntb_transport_mw *mw_vec;
216 struct ntb_transport_qp *qp_vec;
217 unsigned int mw_count;
218 unsigned int qp_count;
223 struct delayed_work link_work;
224 struct work_struct link_cleanup;
226 struct dentry *debugfs_node_dir;
230 DESC_DONE_FLAG = BIT(0),
231 LINK_DOWN_FLAG = BIT(1),
234 struct ntb_payload_header {
249 #define dev_client_dev(__dev) \
250 container_of((__dev), struct ntb_transport_client_dev, dev)
252 #define drv_client(__drv) \
253 container_of((__drv), struct ntb_transport_client, driver)
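/* Queue pairs are distributed round-robin across the available memory windows */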
255 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
256 #define NTB_QP_DEF_NUM_ENTRIES 100
257 #define NTB_LINK_DOWN_TIMEOUT 10
259 static void ntb_transport_rxc_db(unsigned long data);
260 static const struct ntb_ctx_ops ntb_transport_ops;
261 static struct ntb_client ntb_transport_client;
262 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
263 struct ntb_queue_entry *entry);
264 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
265 static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
266 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
269 static int ntb_transport_bus_match(struct device *dev,
270 struct device_driver *drv)
272 return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
275 static int ntb_transport_bus_probe(struct device *dev)
277 const struct ntb_transport_client *client;
282 client = drv_client(dev->driver);
283 rc = client->probe(dev);
290 static int ntb_transport_bus_remove(struct device *dev)
292 const struct ntb_transport_client *client;
294 client = drv_client(dev->driver);
302 static struct bus_type ntb_transport_bus = {
303 .name = "ntb_transport",
304 .match = ntb_transport_bus_match,
305 .probe = ntb_transport_bus_probe,
306 .remove = ntb_transport_bus_remove,
309 static LIST_HEAD(ntb_transport_list);
311 static int ntb_bus_init(struct ntb_transport_ctx *nt)
313 list_add_tail(&nt->entry, &ntb_transport_list);
317 static void ntb_bus_remove(struct ntb_transport_ctx *nt)
319 struct ntb_transport_client_dev *client_dev, *cd;
321 list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
322 dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
323 dev_name(&client_dev->dev));
324 list_del(&client_dev->entry);
325 device_unregister(&client_dev->dev);
328 list_del(&nt->entry);
331 static void ntb_transport_client_release(struct device *dev)
333 struct ntb_transport_client_dev *client_dev;
335 client_dev = dev_client_dev(dev);
340 * ntb_transport_unregister_client_dev - Unregister NTB client device
341 * @device_name: Name of NTB client device
343 * Unregister an NTB client device from the NTB transport layer
345 void ntb_transport_unregister_client_dev(char *device_name)
347 struct ntb_transport_client_dev *client, *cd;
348 struct ntb_transport_ctx *nt;
350 list_for_each_entry(nt, &ntb_transport_list, entry)
351 list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
352 if (!strncmp(dev_name(&client->dev), device_name,
353 strlen(device_name))) {
354 list_del(&client->entry);
355 device_unregister(&client->dev);
358 EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
361 * ntb_transport_register_client_dev - Register NTB client device
362 * @device_name: Name of NTB client device
364 * Register an NTB client device with the NTB transport layer
366 int ntb_transport_register_client_dev(char *device_name)
368 struct ntb_transport_client_dev *client_dev;
369 struct ntb_transport_ctx *nt;
373 if (list_empty(&ntb_transport_list))
376 list_for_each_entry(nt, &ntb_transport_list, entry) {
379 node = dev_to_node(&nt->ndev->dev);
381 client_dev = kzalloc_node(sizeof(*client_dev),
388 dev = &client_dev->dev;
390 /* setup and register client devices */
391 dev_set_name(dev, "%s%d", device_name, i);
392 dev->bus = &ntb_transport_bus;
393 dev->release = ntb_transport_client_release;
394 dev->parent = &nt->ndev->dev;
396 rc = device_register(dev);
402 list_add_tail(&client_dev->entry, &nt->client_devs);
409 ntb_transport_unregister_client_dev(device_name);
413 EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);
416 * ntb_transport_register_client - Register NTB client driver
417 * @drv: NTB client driver to be registered
419 * Register an NTB client driver with the NTB transport layer
421 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
423 int ntb_transport_register_client(struct ntb_transport_client *drv)
425 drv->driver.bus = &ntb_transport_bus;
427 if (list_empty(&ntb_transport_list))
430 return driver_register(&drv->driver);
432 EXPORT_SYMBOL_GPL(ntb_transport_register_client);
435 * ntb_transport_unregister_client - Unregister NTB client driver
436 * @drv: NTB client driver to be unregistered
438 * Unregister an NTB client driver from the NTB transport layer
442 void ntb_transport_unregister_client(struct ntb_transport_client *drv)
444 driver_unregister(&drv->driver);
446 EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
448 static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
451 struct ntb_transport_qp *qp;
453 ssize_t ret, out_offset, out_count;
455 qp = filp->private_data;
457 if (!qp || !qp->link_is_up)
462 buf = kmalloc(out_count, GFP_KERNEL);
467 out_offset += snprintf(buf + out_offset, out_count - out_offset,
468 "\nNTB QP stats:\n\n");
469 out_offset += snprintf(buf + out_offset, out_count - out_offset,
470 "rx_bytes - \t%llu\n", qp->rx_bytes);
471 out_offset += snprintf(buf + out_offset, out_count - out_offset,
472 "rx_pkts - \t%llu\n", qp->rx_pkts);
473 out_offset += snprintf(buf + out_offset, out_count - out_offset,
474 "rx_memcpy - \t%llu\n", qp->rx_memcpy);
475 out_offset += snprintf(buf + out_offset, out_count - out_offset,
476 "rx_async - \t%llu\n", qp->rx_async);
477 out_offset += snprintf(buf + out_offset, out_count - out_offset,
478 "rx_ring_empty - %llu\n", qp->rx_ring_empty);
479 out_offset += snprintf(buf + out_offset, out_count - out_offset,
480 "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
481 out_offset += snprintf(buf + out_offset, out_count - out_offset,
482 "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
483 out_offset += snprintf(buf + out_offset, out_count - out_offset,
484 "rx_err_ver - \t%llu\n", qp->rx_err_ver);
485 out_offset += snprintf(buf + out_offset, out_count - out_offset,
486 "rx_buff - \t0x%p\n", qp->rx_buff);
487 out_offset += snprintf(buf + out_offset, out_count - out_offset,
488 "rx_index - \t%u\n", qp->rx_index);
489 out_offset += snprintf(buf + out_offset, out_count - out_offset,
490 "rx_max_entry - \t%u\n", qp->rx_max_entry);
491 out_offset += snprintf(buf + out_offset, out_count - out_offset,
492 "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);
494 out_offset += snprintf(buf + out_offset, out_count - out_offset,
495 "tx_bytes - \t%llu\n", qp->tx_bytes);
496 out_offset += snprintf(buf + out_offset, out_count - out_offset,
497 "tx_pkts - \t%llu\n", qp->tx_pkts);
498 out_offset += snprintf(buf + out_offset, out_count - out_offset,
499 "tx_memcpy - \t%llu\n", qp->tx_memcpy);
500 out_offset += snprintf(buf + out_offset, out_count - out_offset,
501 "tx_async - \t%llu\n", qp->tx_async);
502 out_offset += snprintf(buf + out_offset, out_count - out_offset,
503 "tx_ring_full - \t%llu\n", qp->tx_ring_full);
504 out_offset += snprintf(buf + out_offset, out_count - out_offset,
505 "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
506 out_offset += snprintf(buf + out_offset, out_count - out_offset,
507 "tx_mw - \t0x%p\n", qp->tx_mw);
508 out_offset += snprintf(buf + out_offset, out_count - out_offset,
509 "tx_index (H) - \t%u\n", qp->tx_index);
510 out_offset += snprintf(buf + out_offset, out_count - out_offset,
512 qp->remote_rx_info->entry);
513 out_offset += snprintf(buf + out_offset, out_count - out_offset,
514 "tx_max_entry - \t%u\n", qp->tx_max_entry);
515 out_offset += snprintf(buf + out_offset, out_count - out_offset,
517 ntb_transport_tx_free_entry(qp));
519 out_offset += snprintf(buf + out_offset, out_count - out_offset,
521 out_offset += snprintf(buf + out_offset, out_count - out_offset,
522 "Using TX DMA - \t%s\n",
523 qp->tx_dma_chan ? "Yes" : "No");
524 out_offset += snprintf(buf + out_offset, out_count - out_offset,
525 "Using RX DMA - \t%s\n",
526 qp->rx_dma_chan ? "Yes" : "No");
527 out_offset += snprintf(buf + out_offset, out_count - out_offset,
529 qp->link_is_up ? "Up" : "Down");
530 out_offset += snprintf(buf + out_offset, out_count - out_offset,
533 if (out_offset > out_count)
534 out_offset = out_count;
536 ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
541 static const struct file_operations ntb_qp_debugfs_stats = {
542 .owner = THIS_MODULE,
544 .read = debugfs_read,
547 static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
548 struct list_head *list)
552 spin_lock_irqsave(lock, flags);
553 list_add_tail(entry, list);
554 spin_unlock_irqrestore(lock, flags);
557 static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
558 struct list_head *list)
560 struct ntb_queue_entry *entry;
563 spin_lock_irqsave(lock, flags);
564 if (list_empty(list)) {
568 entry = list_first_entry(list, struct ntb_queue_entry, entry);
569 list_del(&entry->entry);
572 spin_unlock_irqrestore(lock, flags);
577 static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
578 struct list_head *list,
579 struct list_head *to_list)
581 struct ntb_queue_entry *entry;
584 spin_lock_irqsave(lock, flags);
586 if (list_empty(list)) {
589 entry = list_first_entry(list, struct ntb_queue_entry, entry);
590 list_move_tail(&entry->entry, to_list);
593 spin_unlock_irqrestore(lock, flags);
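/* Carve this QP's slice out of the shared inbound memory window and set up its RX ring */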
598 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
601 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
602 struct ntb_transport_mw *mw;
603 struct ntb_dev *ndev = nt->ndev;
604 struct ntb_queue_entry *entry;
605 unsigned int rx_size, num_qps_mw;
606 unsigned int mw_num, mw_count, qp_count;
610 mw_count = nt->mw_count;
611 qp_count = nt->qp_count;
613 mw_num = QP_TO_MW(nt, qp_num);
614 mw = &nt->mw_vec[mw_num];
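/* When the QPs do not divide evenly, the first (qp_count % mw_count) MWs each host one extra QP */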
619 if (mw_num < qp_count % mw_count)
620 num_qps_mw = qp_count / mw_count + 1;
622 num_qps_mw = qp_count / mw_count;
624 rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
625 qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
626 rx_size -= sizeof(struct ntb_rx_info);
628 qp->remote_rx_info = qp->rx_buff + rx_size;
630 /* Due to housekeeping, there must be at least 2 buffers */
631 qp->rx_max_frame = min(transport_mtu, rx_size / 2);
632 qp->rx_max_entry = rx_size / qp->rx_max_frame;
636 * Check whether we need more entries than the default; if so, allocate
637 * additional entries so that the entry count stays in sync with the
638 * number of transport frames.
640 node = dev_to_node(&ndev->dev);
641 for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
642 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
647 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
649 qp->rx_alloc_entry++;
652 qp->remote_rx_info->entry = qp->rx_max_entry - 1;
654 /* set up the hdr offsets with 0's */
655 for (i = 0; i < qp->rx_max_entry; i++) {
656 void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
657 sizeof(struct ntb_payload_header));
658 memset(offset, 0, sizeof(struct ntb_payload_header));
668 static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
670 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
671 struct pci_dev *pdev = nt->ndev->pdev;
676 ntb_mw_clear_trans(nt->ndev, PIDX, num_mw);
677 dma_free_coherent(&pdev->dev, mw->buff_size,
678 mw->virt_addr, mw->dma_addr);
681 mw->virt_addr = NULL;
684 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
685 resource_size_t size)
687 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
688 struct pci_dev *pdev = nt->ndev->pdev;
689 size_t xlat_size, buff_size;
695 xlat_size = round_up(size, mw->xlat_align_size);
696 buff_size = round_up(size, mw->xlat_align);
698 /* No need to re-setup */
699 if (mw->xlat_size == xlat_size)
703 ntb_free_mw(nt, num_mw);
705 /* Alloc memory for receiving data. Must be aligned */
706 mw->xlat_size = xlat_size;
707 mw->buff_size = buff_size;
709 mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
710 &mw->dma_addr, GFP_KERNEL);
711 if (!mw->virt_addr) {
714 dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
720 * we must ensure that the allocated memory address is BAR-size
721 * aligned in order for the XLAT register to take the value. This
722 * is a requirement of the hardware. It is recommended to set up CMA
723 * for BAR sizes equal to or greater than 4MB.
725 if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
726 dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
728 ntb_free_mw(nt, num_mw);
732 /* Notify the HW of the memory location of the receive buffer */
733 rc = ntb_mw_set_trans(nt->ndev, PIDX, num_mw, mw->dma_addr,
736 dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
737 ntb_free_mw(nt, num_mw);
744 static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
746 qp->link_is_up = false;
753 qp->rx_ring_empty = 0;
754 qp->rx_err_no_buf = 0;
755 qp->rx_err_oflow = 0;
761 qp->tx_ring_full = 0;
762 qp->tx_err_no_buf = 0;
767 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
769 struct ntb_transport_ctx *nt = qp->transport;
770 struct pci_dev *pdev = nt->ndev->pdev;
772 dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);
774 cancel_delayed_work_sync(&qp->link_work);
775 ntb_qp_link_down_reset(qp);
777 if (qp->event_handler)
778 qp->event_handler(qp->cb_data, qp->link_is_up);
781 static void ntb_qp_link_cleanup_work(struct work_struct *work)
783 struct ntb_transport_qp *qp = container_of(work,
784 struct ntb_transport_qp,
786 struct ntb_transport_ctx *nt = qp->transport;
788 ntb_qp_link_cleanup(qp);
791 schedule_delayed_work(&qp->link_work,
792 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
795 static void ntb_qp_link_down(struct ntb_transport_qp *qp)
797 schedule_work(&qp->link_cleanup);
800 static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
802 struct ntb_transport_qp *qp;
804 unsigned int i, count;
806 qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
808 /* Pass along the info to any clients */
809 for (i = 0; i < nt->qp_count; i++)
810 if (qp_bitmap_alloc & BIT_ULL(i)) {
812 ntb_qp_link_cleanup(qp);
813 cancel_work_sync(&qp->link_cleanup);
814 cancel_delayed_work_sync(&qp->link_work);
818 cancel_delayed_work_sync(&nt->link_work);
820 /* The scratchpad registers keep their values if the remote side
821 * goes down; clear them now so they hold a sane value the next
822 * time they are accessed
824 count = ntb_spad_count(nt->ndev);
825 for (i = 0; i < count; i++)
826 ntb_spad_write(nt->ndev, i, 0);
829 static void ntb_transport_link_cleanup_work(struct work_struct *work)
831 struct ntb_transport_ctx *nt =
832 container_of(work, struct ntb_transport_ctx, link_cleanup);
834 ntb_transport_link_cleanup(nt);
837 static void ntb_transport_event_callback(void *data)
839 struct ntb_transport_ctx *nt = data;
841 if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
842 schedule_delayed_work(&nt->link_work, 0);
844 schedule_work(&nt->link_cleanup);
847 static void ntb_transport_link_work(struct work_struct *work)
849 struct ntb_transport_ctx *nt =
850 container_of(work, struct ntb_transport_ctx, link_work.work);
851 struct ntb_dev *ndev = nt->ndev;
852 struct pci_dev *pdev = ndev->pdev;
853 resource_size_t size;
857 /* send the local info, in the opposite order of the way we read it */
858 for (i = 0; i < nt->mw_count; i++) {
859 size = nt->mw_vec[i].phys_size;
861 if (max_mw_size && size > max_mw_size)
864 spad = MW0_SZ_HIGH + (i * 2);
865 ntb_peer_spad_write(ndev, PIDX, spad, upper_32_bits(size));
867 spad = MW0_SZ_LOW + (i * 2);
868 ntb_peer_spad_write(ndev, PIDX, spad, lower_32_bits(size));
871 ntb_peer_spad_write(ndev, PIDX, NUM_MWS, nt->mw_count);
873 ntb_peer_spad_write(ndev, PIDX, NUM_QPS, nt->qp_count);
875 ntb_peer_spad_write(ndev, PIDX, VERSION, NTB_TRANSPORT_VERSION);
877 /* Query the remote side for its info */
878 val = ntb_spad_read(ndev, VERSION);
879 dev_dbg(&pdev->dev, "Remote version = %d\n", val);
880 if (val != NTB_TRANSPORT_VERSION)
883 val = ntb_spad_read(ndev, NUM_QPS);
884 dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
885 if (val != nt->qp_count)
888 val = ntb_spad_read(ndev, NUM_MWS);
889 dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
890 if (val != nt->mw_count)
893 for (i = 0; i < nt->mw_count; i++) {
896 val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
897 val64 = (u64)val << 32;
899 val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
902 dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);
904 rc = ntb_set_mw(nt, i, val64);
909 nt->link_is_up = true;
911 for (i = 0; i < nt->qp_count; i++) {
912 struct ntb_transport_qp *qp = &nt->qp_vec[i];
914 ntb_transport_setup_qp_mw(nt, i);
916 if (qp->client_ready)
917 schedule_delayed_work(&qp->link_work, 0);
923 for (i = 0; i < nt->mw_count; i++)
926 /* if there's an actual failure, we should just bail */
931 if (ntb_link_is_up(ndev, NULL, NULL) == 1)
932 schedule_delayed_work(&nt->link_work,
933 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
936 static void ntb_qp_link_work(struct work_struct *work)
938 struct ntb_transport_qp *qp = container_of(work,
939 struct ntb_transport_qp,
941 struct pci_dev *pdev = qp->ndev->pdev;
942 struct ntb_transport_ctx *nt = qp->transport;
945 WARN_ON(!nt->link_is_up);
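/* Advertise this QP as ready by setting its bit in the peer's QP_LINKS scratchpad */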
947 val = ntb_spad_read(nt->ndev, QP_LINKS);
949 ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num));
951 /* query remote spad for qp ready bits */
952 dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);
954 /* See if the remote side is up */
955 if (val & BIT(qp->qp_num)) {
956 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
957 qp->link_is_up = true;
960 if (qp->event_handler)
961 qp->event_handler(qp->cb_data, qp->link_is_up);
964 tasklet_schedule(&qp->rxc_db_work);
965 } else if (nt->link_is_up)
966 schedule_delayed_work(&qp->link_work,
967 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
970 static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
973 struct ntb_transport_qp *qp;
975 resource_size_t mw_size;
976 unsigned int num_qps_mw, tx_size;
977 unsigned int mw_num, mw_count, qp_count;
980 mw_count = nt->mw_count;
981 qp_count = nt->qp_count;
983 mw_num = QP_TO_MW(nt, qp_num);
985 qp = &nt->qp_vec[qp_num];
989 qp->client_ready = false;
990 qp->event_handler = NULL;
991 ntb_qp_link_down_reset(qp);
993 if (mw_num < qp_count % mw_count)
994 num_qps_mw = qp_count / mw_count + 1;
996 num_qps_mw = qp_count / mw_count;
998 mw_base = nt->mw_vec[mw_num].phys_addr;
999 mw_size = nt->mw_vec[mw_num].phys_size;
1001 tx_size = (unsigned int)mw_size / num_qps_mw;
1002 qp_offset = tx_size * (qp_num / mw_count);
1004 qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
1008 qp->tx_mw_phys = mw_base + qp_offset;
1009 if (!qp->tx_mw_phys)
1012 tx_size -= sizeof(struct ntb_rx_info);
1013 qp->rx_info = qp->tx_mw + tx_size;
1015 /* Due to housekeeping, there must be at least 2 buffers */
1016 qp->tx_max_frame = min(transport_mtu, tx_size / 2);
1017 qp->tx_max_entry = tx_size / qp->tx_max_frame;
1019 if (nt->debugfs_node_dir) {
1020 char debugfs_name[4];
1022 snprintf(debugfs_name, 4, "qp%d", qp_num);
1023 qp->debugfs_dir = debugfs_create_dir(debugfs_name,
1024 nt->debugfs_node_dir);
1026 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
1027 qp->debugfs_dir, qp,
1028 &ntb_qp_debugfs_stats);
1030 qp->debugfs_dir = NULL;
1031 qp->debugfs_stats = NULL;
1034 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
1035 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
1037 spin_lock_init(&qp->ntb_rx_q_lock);
1038 spin_lock_init(&qp->ntb_tx_free_q_lock);
1040 INIT_LIST_HEAD(&qp->rx_post_q);
1041 INIT_LIST_HEAD(&qp->rx_pend_q);
1042 INIT_LIST_HEAD(&qp->rx_free_q);
1043 INIT_LIST_HEAD(&qp->tx_free_q);
1045 tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
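/*
 * Probe: validate the NTB device capabilities, map the peer memory windows,
 * size the QP pool from the doorbell mask, and register the transport bus.
 */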
1051 static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
1053 struct ntb_transport_ctx *nt;
1054 struct ntb_transport_mw *mw;
1055 unsigned int mw_count, qp_count, spad_count, max_mw_count_for_spads;
1060 mw_count = ntb_peer_mw_count(ndev);
1062 if (!ndev->ops->mw_set_trans) {
1063 dev_err(&ndev->dev, "Inbound MW based NTB API is required\n");
1067 if (ntb_db_is_unsafe(ndev))
1069 "doorbell is unsafe, proceed anyway...\n");
1070 if (ntb_spad_is_unsafe(ndev))
1072 "scratchpad is unsafe, proceed anyway...\n");
1074 if (ntb_peer_port_count(ndev) != NTB_DEF_PEER_CNT)
1075 dev_warn(&ndev->dev, "Multi-port NTB devices unsupported\n");
1077 node = dev_to_node(&ndev->dev);
1079 nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
1084 spad_count = ntb_spad_count(ndev);
1086 /* Limit the MW's based on the availability of scratchpads */
1088 if (spad_count < NTB_TRANSPORT_MIN_SPADS) {
1094 max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2;
1095 nt->mw_count = min(mw_count, max_mw_count_for_spads);
1097 nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
1104 for (i = 0; i < mw_count; i++) {
1105 mw = &nt->mw_vec[i];
1107 rc = ntb_mw_get_align(ndev, PIDX, i, &mw->xlat_align,
1108 &mw->xlat_align_size, NULL);
1112 rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr,
1117 mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
1125 mw->virt_addr = NULL;
1129 qp_bitmap = ntb_db_valid_mask(ndev);
1131 qp_count = ilog2(qp_bitmap);
1132 if (max_num_clients && max_num_clients < qp_count)
1133 qp_count = max_num_clients;
1134 else if (nt->mw_count < qp_count)
1135 qp_count = nt->mw_count;
1137 qp_bitmap &= BIT_ULL(qp_count) - 1;
1139 nt->qp_count = qp_count;
1140 nt->qp_bitmap = qp_bitmap;
1141 nt->qp_bitmap_free = qp_bitmap;
1143 nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
1150 if (nt_debugfs_dir) {
1151 nt->debugfs_node_dir =
1152 debugfs_create_dir(pci_name(ndev->pdev),
1156 for (i = 0; i < qp_count; i++) {
1157 rc = ntb_transport_init_queue(nt, i);
1162 INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
1163 INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
1165 rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
1169 INIT_LIST_HEAD(&nt->client_devs);
1170 rc = ntb_bus_init(nt);
1174 nt->link_is_up = false;
1175 ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
1176 ntb_link_event(ndev);
1181 ntb_clear_ctx(ndev);
1186 mw = &nt->mw_vec[i];
1195 static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
1197 struct ntb_transport_ctx *nt = ndev->ctx;
1198 struct ntb_transport_qp *qp;
1199 u64 qp_bitmap_alloc;
1202 ntb_transport_link_cleanup(nt);
1203 cancel_work_sync(&nt->link_cleanup);
1204 cancel_delayed_work_sync(&nt->link_work);
1206 qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
1208 /* verify that all the qp's are freed */
1209 for (i = 0; i < nt->qp_count; i++) {
1210 qp = &nt->qp_vec[i];
1211 if (qp_bitmap_alloc & BIT_ULL(i))
1212 ntb_transport_free_queue(qp);
1213 debugfs_remove_recursive(qp->debugfs_dir);
1216 ntb_link_disable(ndev);
1217 ntb_clear_ctx(ndev);
1221 for (i = nt->mw_count; i--; ) {
1223 iounmap(nt->mw_vec[i].vbase);
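/* Complete received entries in order: report the consumed index to the peer,
 * hand finished buffers to the client callback, and recycle the entries.
 */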
1231 static void ntb_complete_rxc(struct ntb_transport_qp *qp)
1233 struct ntb_queue_entry *entry;
1236 unsigned long irqflags;
1238 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1240 while (!list_empty(&qp->rx_post_q)) {
1241 entry = list_first_entry(&qp->rx_post_q,
1242 struct ntb_queue_entry, entry);
1243 if (!(entry->flags & DESC_DONE_FLAG))
1246 entry->rx_hdr->flags = 0;
1247 iowrite32(entry->rx_index, &qp->rx_info->entry);
1249 cb_data = entry->cb_data;
1252 list_move_tail(&entry->entry, &qp->rx_free_q);
1254 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1256 if (qp->rx_handler && qp->client_ready)
1257 qp->rx_handler(qp, qp->cb_data, cb_data, len);
1259 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1262 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1265 static void ntb_rx_copy_callback(void *data,
1266 const struct dmaengine_result *res)
1268 struct ntb_queue_entry *entry = data;
1270 /* we need to check DMA results if we are using DMA */
1272 enum dmaengine_tx_result dma_err = res->result;
1275 case DMA_TRANS_READ_FAILED:
1276 case DMA_TRANS_WRITE_FAILED:
1278 case DMA_TRANS_ABORTED:
1280 struct ntb_transport_qp *qp = entry->qp;
1281 void *offset = qp->rx_buff + qp->rx_max_frame *
1284 ntb_memcpy_rx(entry, offset);
1289 case DMA_TRANS_NOERROR:
1295 entry->flags |= DESC_DONE_FLAG;
1297 ntb_complete_rxc(entry->qp);
1300 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
1302 void *buf = entry->buf;
1303 size_t len = entry->len;
1305 memcpy(buf, offset, len);
1307 /* Ensure that the data is fully copied out before clearing the flag */
1310 ntb_rx_copy_callback(entry, NULL);
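/*
 * Offload the RX copy to the DMA engine: map the MW payload and the client
 * buffer, prep a memcpy descriptor, and submit it with ntb_rx_copy_callback
 * as the completion handler.
 */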
1313 static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
1315 struct dma_async_tx_descriptor *txd;
1316 struct ntb_transport_qp *qp = entry->qp;
1317 struct dma_chan *chan = qp->rx_dma_chan;
1318 struct dma_device *device;
1319 size_t pay_off, buff_off, len;
1320 struct dmaengine_unmap_data *unmap;
1321 dma_cookie_t cookie;
1322 void *buf = entry->buf;
1325 device = chan->device;
1326 pay_off = (size_t)offset & ~PAGE_MASK;
1327 buff_off = (size_t)buf & ~PAGE_MASK;
1329 if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
1332 unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
1337 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
1338 pay_off, len, DMA_TO_DEVICE);
1339 if (dma_mapping_error(device->dev, unmap->addr[0]))
1344 unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
1345 buff_off, len, DMA_FROM_DEVICE);
1346 if (dma_mapping_error(device->dev, unmap->addr[1]))
1349 unmap->from_cnt = 1;
1351 txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
1352 unmap->addr[0], len,
1353 DMA_PREP_INTERRUPT);
1357 txd->callback_result = ntb_rx_copy_callback;
1358 txd->callback_param = entry;
1359 dma_set_unmap(txd, unmap);
1361 cookie = dmaengine_submit(txd);
1362 if (dma_submit_error(cookie))
1365 dmaengine_unmap_put(unmap);
1367 qp->last_cookie = cookie;
1374 dmaengine_unmap_put(unmap);
1376 dmaengine_unmap_put(unmap);
1381 static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
1383 struct ntb_transport_qp *qp = entry->qp;
1384 struct dma_chan *chan = qp->rx_dma_chan;
1390 if (entry->len < copy_bytes)
1393 res = ntb_async_rx_submit(entry, offset);
1397 if (!entry->retries)
1403 ntb_memcpy_rx(entry, offset);
1407 static int ntb_process_rxc(struct ntb_transport_qp *qp)
1409 struct ntb_payload_header *hdr;
1410 struct ntb_queue_entry *entry;
1413 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
1414 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
1416 dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
1417 qp->qp_num, hdr->ver, hdr->len, hdr->flags);
1419 if (!(hdr->flags & DESC_DONE_FLAG)) {
1420 dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
1421 qp->rx_ring_empty++;
1425 if (hdr->flags & LINK_DOWN_FLAG) {
1426 dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
1427 ntb_qp_link_down(qp);
1432 if (hdr->ver != (u32)qp->rx_pkts) {
1433 dev_dbg(&qp->ndev->pdev->dev,
1434 "version mismatch, expected %llu - got %u\n",
1435 qp->rx_pkts, hdr->ver);
1440 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
1442 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
1443 qp->rx_err_no_buf++;
1447 entry->rx_hdr = hdr;
1448 entry->rx_index = qp->rx_index;
1450 if (hdr->len > entry->len) {
1451 dev_dbg(&qp->ndev->pdev->dev,
1452 "receive buffer overflow! Wanted %d got %d\n",
1453 hdr->len, entry->len);
1457 entry->flags |= DESC_DONE_FLAG;
1459 ntb_complete_rxc(qp);
1461 dev_dbg(&qp->ndev->pdev->dev,
1462 "RX OK index %u ver %u size %d into buf size %d\n",
1463 qp->rx_index, hdr->ver, hdr->len, entry->len);
1465 qp->rx_bytes += hdr->len;
1468 entry->len = hdr->len;
1470 ntb_async_rx(entry, offset);
1474 qp->rx_index %= qp->rx_max_entry;
1479 static void ntb_transport_rxc_db(unsigned long data)
1481 struct ntb_transport_qp *qp = (void *)data;
1484 dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
1485 __func__, qp->qp_num);
1487 /* Limit the number of packets processed in a single interrupt to
1488 * provide fairness to others
1490 for (i = 0; i < qp->rx_max_entry; i++) {
1491 rc = ntb_process_rxc(qp);
1496 if (i && qp->rx_dma_chan)
1497 dma_async_issue_pending(qp->rx_dma_chan);
1499 if (i == qp->rx_max_entry) {
1500 /* there is more work to do */
1502 tasklet_schedule(&qp->rxc_db_work);
1503 } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
1504 /* the doorbell bit is set: clear it */
1505 ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
1506 /* ntb_db_read ensures ntb_db_clear write is committed */
1507 ntb_db_read(qp->ndev);
1509 /* an interrupt may have arrived between finishing
1510 * ntb_process_rxc and clearing the doorbell bit:
1511 * there might be some more work to do.
1514 tasklet_schedule(&qp->rxc_db_work);
1518 static void ntb_tx_copy_callback(void *data,
1519 const struct dmaengine_result *res)
1521 struct ntb_queue_entry *entry = data;
1522 struct ntb_transport_qp *qp = entry->qp;
1523 struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
1525 /* we need to check DMA results if we are using DMA */
1527 enum dmaengine_tx_result dma_err = res->result;
1530 case DMA_TRANS_READ_FAILED:
1531 case DMA_TRANS_WRITE_FAILED:
1533 case DMA_TRANS_ABORTED:
1535 void __iomem *offset =
1536 qp->tx_mw + qp->tx_max_frame *
1539 /* resubmit via CPU */
1540 ntb_memcpy_tx(entry, offset);
1545 case DMA_TRANS_NOERROR:
1551 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
1553 ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
1555 /* The entry length can only be zero if the packet is intended to be a
1556 * "link down" or similar. Since no payload is being sent in these
1557 * cases, there is nothing to add to the completion queue.
1559 if (entry->len > 0) {
1560 qp->tx_bytes += entry->len;
1563 qp->tx_handler(qp, qp->cb_data, entry->cb_data,
1567 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
1570 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
1572 #ifdef ARCH_HAS_NOCACHE_UACCESS
1574 * Using non-temporal mov to improve performance on non-cached
1575 * writes, even though we aren't actually copying from user space.
1577 __copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
1579 memcpy_toio(offset, entry->buf, entry->len);
1582 /* Ensure that the data is fully copied out before setting the flags */
1585 ntb_tx_copy_callback(entry, NULL);
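/*
 * Offload the TX copy to the DMA engine: map the client buffer and prep a
 * memcpy into the peer's memory window, completing via ntb_tx_copy_callback.
 */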
1588 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
1589 struct ntb_queue_entry *entry)
1591 struct dma_async_tx_descriptor *txd;
1592 struct dma_chan *chan = qp->tx_dma_chan;
1593 struct dma_device *device;
1594 size_t len = entry->len;
1595 void *buf = entry->buf;
1596 size_t dest_off, buff_off;
1597 struct dmaengine_unmap_data *unmap;
1599 dma_cookie_t cookie;
1601 device = chan->device;
1602 dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
1603 buff_off = (size_t)buf & ~PAGE_MASK;
1604 dest_off = (size_t)dest & ~PAGE_MASK;
1606 if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
1609 unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
1614 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
1615 buff_off, len, DMA_TO_DEVICE);
1616 if (dma_mapping_error(device->dev, unmap->addr[0]))
1621 txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
1622 DMA_PREP_INTERRUPT);
1626 txd->callback_result = ntb_tx_copy_callback;
1627 txd->callback_param = entry;
1628 dma_set_unmap(txd, unmap);
1630 cookie = dmaengine_submit(txd);
1631 if (dma_submit_error(cookie))
1634 dmaengine_unmap_put(unmap);
1636 dma_async_issue_pending(chan);
1640 dmaengine_unmap_put(unmap);
1642 dmaengine_unmap_put(unmap);
1647 static void ntb_async_tx(struct ntb_transport_qp *qp,
1648 struct ntb_queue_entry *entry)
1650 struct ntb_payload_header __iomem *hdr;
1651 struct dma_chan *chan = qp->tx_dma_chan;
1652 void __iomem *offset;
1655 entry->tx_index = qp->tx_index;
1656 offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
1657 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1658 entry->tx_hdr = hdr;
1660 iowrite32(entry->len, &hdr->len);
1661 iowrite32((u32)qp->tx_pkts, &hdr->ver);
1666 if (entry->len < copy_bytes)
1669 res = ntb_async_tx_submit(qp, entry);
1673 if (!entry->retries)
1679 ntb_memcpy_tx(entry, offset);
1683 static int ntb_process_tx(struct ntb_transport_qp *qp,
1684 struct ntb_queue_entry *entry)
1686 if (qp->tx_index == qp->remote_rx_info->entry) {
1691 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
1693 qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
1695 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1700 ntb_async_tx(qp, entry);
1703 qp->tx_index %= qp->tx_max_entry;
1710 static void ntb_send_link_down(struct ntb_transport_qp *qp)
1712 struct pci_dev *pdev = qp->ndev->pdev;
1713 struct ntb_queue_entry *entry;
1716 if (!qp->link_is_up)
1719 dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);
1721 for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
1722 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1731 entry->cb_data = NULL;
1734 entry->flags = LINK_DOWN_FLAG;
1736 rc = ntb_process_tx(qp, entry);
1738 dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
1741 ntb_qp_link_down_reset(qp);
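/* Prefer DMA channels that reside on the same NUMA node as the NTB device */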
1744 static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
1746 return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
1750 * ntb_transport_create_queue - Create a new NTB transport layer queue
1751 * @rx_handler: receive callback function
1752 * @tx_handler: transmit callback function
1753 * @event_handler: event callback function
1755 * Create a new NTB transport layer queue and provide the queue with a callback
1756 * routine for both transmit and receive. The receive callback routine will be
1757 * used to pass up data when the transport has received it on the queue. The
1758 * transmit callback routine will be called when the transport has completed the
1759 * transmission of the data on the queue and the data is ready to be freed.
1761 * RETURNS: pointer to newly created ntb_queue, NULL on error.
1763 struct ntb_transport_qp *
1764 ntb_transport_create_queue(void *data, struct device *client_dev,
1765 const struct ntb_queue_handlers *handlers)
1767 struct ntb_dev *ndev;
1768 struct pci_dev *pdev;
1769 struct ntb_transport_ctx *nt;
1770 struct ntb_queue_entry *entry;
1771 struct ntb_transport_qp *qp;
1773 unsigned int free_queue;
1774 dma_cap_mask_t dma_mask;
1778 ndev = dev_ntb(client_dev->parent);
1782 node = dev_to_node(&ndev->dev);
1784 free_queue = ffs(nt->qp_bitmap_free);
1788 /* decrement free_queue to make it zero based */
1791 qp = &nt->qp_vec[free_queue];
1792 qp_bit = BIT_ULL(qp->qp_num);
1794 nt->qp_bitmap_free &= ~qp_bit;
1797 qp->rx_handler = handlers->rx_handler;
1798 qp->tx_handler = handlers->tx_handler;
1799 qp->event_handler = handlers->event_handler;
1801 dma_cap_zero(dma_mask);
1802 dma_cap_set(DMA_MEMCPY, dma_mask);
1806 dma_request_channel(dma_mask, ntb_dma_filter_fn,
1807 (void *)(unsigned long)node);
1808 if (!qp->tx_dma_chan)
1809 dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");
1812 dma_request_channel(dma_mask, ntb_dma_filter_fn,
1813 (void *)(unsigned long)node);
1814 if (!qp->rx_dma_chan)
1815 dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
1817 qp->tx_dma_chan = NULL;
1818 qp->rx_dma_chan = NULL;
1821 dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
1822 qp->tx_dma_chan ? "DMA" : "CPU");
1824 dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
1825 qp->rx_dma_chan ? "DMA" : "CPU");
1827 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1828 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
1833 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
1836 qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;
1838 for (i = 0; i < qp->tx_max_entry; i++) {
1839 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
1844 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1848 ntb_db_clear(qp->ndev, qp_bit);
1849 ntb_db_clear_mask(qp->ndev, qp_bit);
1851 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
1856 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1859 qp->rx_alloc_entry = 0;
1860 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
1862 if (qp->tx_dma_chan)
1863 dma_release_channel(qp->tx_dma_chan);
1864 if (qp->rx_dma_chan)
1865 dma_release_channel(qp->rx_dma_chan);
1866 nt->qp_bitmap_free |= qp_bit;
1870 EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
1873 * ntb_transport_free_queue - Frees NTB transport queue
1874 * @qp: NTB queue to be freed
1876 * Frees NTB transport queue
1878 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1880 struct pci_dev *pdev;
1881 struct ntb_queue_entry *entry;
1887 pdev = qp->ndev->pdev;
1891 if (qp->tx_dma_chan) {
1892 struct dma_chan *chan = qp->tx_dma_chan;
1893 /* Setting the dma_chan to NULL will force any new traffic to be
1894 * processed by the CPU instead of the DMA engine
1896 qp->tx_dma_chan = NULL;
1898 /* Try to be nice and wait for any queued DMA engine
1899 * transactions to process before smashing it with a rock
1901 dma_sync_wait(chan, qp->last_cookie);
1902 dmaengine_terminate_all(chan);
1903 dma_release_channel(chan);
1906 if (qp->rx_dma_chan) {
1907 struct dma_chan *chan = qp->rx_dma_chan;
1908 /* Setting the dma_chan to NULL will force any new traffic to be
1909 * processed by the CPU instead of the DMA engine
1911 qp->rx_dma_chan = NULL;
1913 /* Try to be nice and wait for any queued DMA engine
1914 * transactions to process before smashing it with a rock
1916 dma_sync_wait(chan, qp->last_cookie);
1917 dmaengine_terminate_all(chan);
1918 dma_release_channel(chan);
1921 qp_bit = BIT_ULL(qp->qp_num);
1923 ntb_db_set_mask(qp->ndev, qp_bit);
1924 tasklet_kill(&qp->rxc_db_work);
1926 cancel_delayed_work_sync(&qp->link_work);
1929 qp->rx_handler = NULL;
1930 qp->tx_handler = NULL;
1931 qp->event_handler = NULL;
1933 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
1936 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
1937 dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
1941 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
1942 dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
1946 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1949 qp->transport->qp_bitmap_free |= qp_bit;
1951 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
1953 EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
1956 * ntb_transport_rx_remove - Dequeues enqueued rx packet
1957 * @qp: NTB queue to be freed
1958 * @len: pointer to variable to write enqueued buffers length
1960 * Dequeues unused buffers from receive queue. Should only be used during
1963 * RETURNS: NULL error value on error, or void* for success.
1965 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
1967 struct ntb_queue_entry *entry;
1970 if (!qp || qp->client_ready)
1973 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
1977 buf = entry->cb_data;
1980 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
1984 EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
1987 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
1988 * @qp: NTB transport layer queue the entry is to be enqueued on
1989 * @cb: per buffer pointer for callback function to use
1990 * @data: pointer to data buffer that incoming packets will be copied into
1991 * @len: length of the data buffer
1993 * Enqueue a new receive buffer onto the transport queue, into which an NTB
1994 * payload can be received.
1996 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1998 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
2001 struct ntb_queue_entry *entry;
2006 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
2010 entry->cb_data = cb;
2016 entry->rx_index = 0;
2018 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
2021 tasklet_schedule(&qp->rxc_db_work);
2025 EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
2028 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
2029 * @qp: NTB transport layer queue the entry is to be enqueued on
2030 * @cb: per buffer pointer for callback function to use
2031 * @data: pointer to data buffer that will be sent
2032 * @len: length of the data buffer
2034 * Enqueue a new transmit buffer onto the transport queue from which an NTB
2035 * payload will be transmitted. This assumes that a lock is being held to
2036 * serialize access to the qp.
2038 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2040 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
2043 struct ntb_queue_entry *entry;
2046 if (!qp || !qp->link_is_up || !len)
2049 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
2051 qp->tx_err_no_buf++;
2055 entry->cb_data = cb;
2061 entry->tx_index = 0;
2063 rc = ntb_process_tx(qp, entry);
2065 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
2070 EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
2073 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
2074 * @qp: NTB transport layer queue to be enabled
2076 * Notify NTB transport layer of client readiness to use queue
2078 void ntb_transport_link_up(struct ntb_transport_qp *qp)
2083 qp->client_ready = true;
2085 if (qp->transport->link_is_up)
2086 schedule_delayed_work(&qp->link_work, 0);
2088 EXPORT_SYMBOL_GPL(ntb_transport_link_up);
2091 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
2092 * @qp: NTB transport layer queue to be disabled
2094 * Notify the NTB transport layer of the client's desire to no longer receive
2095 * data on the specified transport queue. It is the client's responsibility to
2096 * ensure all entries on the queue are purged or otherwise handled appropriately.
2098 void ntb_transport_link_down(struct ntb_transport_qp *qp)
2105 qp->client_ready = false;
2107 val = ntb_spad_read(qp->ndev, QP_LINKS);
2109 ntb_peer_spad_write(qp->ndev, PIDX, QP_LINKS, val & ~BIT(qp->qp_num));
2112 ntb_send_link_down(qp);
2114 cancel_delayed_work_sync(&qp->link_work);
2116 EXPORT_SYMBOL_GPL(ntb_transport_link_down);
2119 * ntb_transport_link_query - Query transport link state
2120 * @qp: NTB transport layer queue to be queried
2122 * Query connectivity to the remote system of the NTB transport queue
2124 * RETURNS: true for link up or false for link down
2126 bool ntb_transport_link_query(struct ntb_transport_qp *qp)
2131 return qp->link_is_up;
2133 EXPORT_SYMBOL_GPL(ntb_transport_link_query);
2136 * ntb_transport_qp_num - Query the qp number
2137 * @qp: NTB transport layer queue to be queried
2139 * Query qp number of the NTB transport queue
2141 * RETURNS: a zero based number specifying the qp number
2143 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
2150 EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
2153 * ntb_transport_max_size - Query the max payload size of a qp
2154 * @qp: NTB transport layer queue to be queried
2156 * Query the maximum payload size permissible on the given qp
2158 * RETURNS: the max payload size of a qp
2160 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
2162 unsigned int max_size;
2163 unsigned int copy_align;
2164 struct dma_chan *rx_chan, *tx_chan;
2169 rx_chan = qp->rx_dma_chan;
2170 tx_chan = qp->tx_dma_chan;
2172 copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
2173 tx_chan ? tx_chan->device->copy_align : 0);
2175 /* If DMA engine usage is possible, try to find the max size for that */
2176 max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
2177 max_size = round_down(max_size, 1 << copy_align);
2181 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
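/* Number of free TX ring entries, based on the local producer index and the
 * consumer index last reported by the peer.
 */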
2183 unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
2185 unsigned int head = qp->tx_index;
2186 unsigned int tail = qp->remote_rx_info->entry;
2188 return tail > head ? tail - head : qp->tx_max_entry + tail - head;
2190 EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
2192 static void ntb_transport_doorbell_callback(void *data, int vector)
2194 struct ntb_transport_ctx *nt = data;
2195 struct ntb_transport_qp *qp;
2197 unsigned int qp_num;
2199 db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
2200 ntb_db_vector_mask(nt->ndev, vector));
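/* Schedule RX processing for every QP whose doorbell bit is set on this vector */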
2203 qp_num = __ffs(db_bits);
2204 qp = &nt->qp_vec[qp_num];
2207 tasklet_schedule(&qp->rxc_db_work);
2209 db_bits &= ~BIT_ULL(qp_num);
2213 static const struct ntb_ctx_ops ntb_transport_ops = {
2214 .link_event = ntb_transport_event_callback,
2215 .db_event = ntb_transport_doorbell_callback,
2218 static struct ntb_client ntb_transport_client = {
2220 .probe = ntb_transport_probe,
2221 .remove = ntb_transport_free,
2225 static int __init ntb_transport_init(void)
2229 pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);
2231 if (debugfs_initialized())
2232 nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
2234 rc = bus_register(&ntb_transport_bus);
2238 rc = ntb_register_client(&ntb_transport_client);
2245 bus_unregister(&ntb_transport_bus);
2247 debugfs_remove_recursive(nt_debugfs_dir);
2250 module_init(ntb_transport_init);
2252 static void __exit ntb_transport_exit(void)
2254 ntb_unregister_client(&ntb_transport_client);
2255 bus_unregister(&ntb_transport_bus);
2256 debugfs_remove_recursive(nt_debugfs_dir);
2258 module_exit(ntb_transport_exit);