NTB: namespacecheck cleanups
drivers/ntb/ntb_transport.c
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ntb.h>
#include "ntb_hw.h"

#define NTB_TRANSPORT_VERSION   1

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients = 2;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

struct ntb_queue_entry {
        /* ntb_queue list reference */
        struct list_head entry;
        /* pointers to data to be transferred */
        void *cb_data;
        void *buf;
        unsigned int len;
        unsigned int flags;
};

struct ntb_transport_qp {
        struct ntb_transport *transport;
        struct ntb_device *ndev;
        void *cb_data;

        bool client_ready;
        bool qp_link;
        u8 qp_num;      /* Only 64 QPs are allowed.  0-63 */

        void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
                            void *data, int len);
        struct list_head tx_free_q;
        spinlock_t ntb_tx_free_q_lock;
        void *tx_mw_begin;
        void *tx_mw_end;
        void *tx_offset;
        unsigned int tx_max_frame;

        void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
                            void *data, int len);
        struct tasklet_struct rx_work;
        struct list_head rx_pend_q;
        struct list_head rx_free_q;
        spinlock_t ntb_rx_pend_q_lock;
        spinlock_t ntb_rx_free_q_lock;
        void *rx_buff_begin;
        void *rx_buff_end;
        void *rx_offset;
        unsigned int rx_max_frame;

        void (*event_handler) (void *data, int status);
        struct delayed_work link_work;
        struct work_struct link_cleanup;

        struct dentry *debugfs_dir;
        struct dentry *debugfs_stats;

        /* Stats */
        u64 rx_bytes;
        u64 rx_pkts;
        u64 rx_ring_empty;
        u64 rx_err_no_buf;
        u64 rx_err_oflow;
        u64 rx_err_ver;
        u64 tx_bytes;
        u64 tx_pkts;
        u64 tx_ring_full;
};

struct ntb_transport_mw {
        size_t size;
        void *virt_addr;
        dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
        struct list_head entry;
        struct device dev;
};

struct ntb_transport {
        struct list_head entry;
        struct list_head client_devs;

        struct ntb_device *ndev;
        struct ntb_transport_mw mw[NTB_NUM_MW];
        struct ntb_transport_qp *qps;
        unsigned int max_qps;
        unsigned long qp_bitmap;
        bool transport_link;
        struct delayed_work link_work;
        struct work_struct link_cleanup;
        struct dentry *debugfs_dir;
};

enum {
        DESC_DONE_FLAG = 1 << 0,
        LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
        u64 ver;
        unsigned int len;
        unsigned int flags;
};

enum {
        VERSION = 0,
        MW0_SZ,
        MW1_SZ,
        NUM_QPS,
        QP_LINKS,
        MAX_SPAD,
};

#define QP_TO_MW(qp)            ((qp) % NTB_NUM_MW)
#define NTB_QP_DEF_NUM_ENTRIES  100
#define NTB_LINK_DOWN_TIMEOUT   10
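
/*
 * QPs are striped across the memory windows: QP_TO_MW() maps a qp number to
 * a window by simple modulo, so with the two windows advertised via the
 * MW0_SZ/MW1_SZ scratchpads above, even-numbered QPs share MW0 and
 * odd-numbered QPs share MW1.  NTB_QP_DEF_NUM_ENTRIES is the number of
 * rx/tx queue entries preallocated per QP, and NTB_LINK_DOWN_TIMEOUT is
 * the link-retry delay in milliseconds (it also doubles as the retry count
 * in ntb_send_link_down()).
 */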

static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
        return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_client_probe(struct device *dev)
{
        const struct ntb_client *drv = container_of(dev->driver,
                                                    struct ntb_client, driver);
        struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
        int rc = -EINVAL;

        get_device(dev);
        if (drv && drv->probe)
                rc = drv->probe(pdev);
        if (rc)
                put_device(dev);

        return rc;
}

static int ntb_client_remove(struct device *dev)
{
        const struct ntb_client *drv = container_of(dev->driver,
                                                    struct ntb_client, driver);
        struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

        if (drv && drv->remove)
                drv->remove(pdev);

        put_device(dev);

        return 0;
}

static struct bus_type ntb_bus_type = {
        .name = "ntb_bus",
        .match = ntb_match_bus,
        .probe = ntb_client_probe,
        .remove = ntb_client_remove,
};

static LIST_HEAD(ntb_transport_list);

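/*
 * The ntb_bus bus type above is registered lazily: the first transport to
 * come up registers it, and the last transport torn down in
 * ntb_bus_remove() unregisters it again.
 */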
static int ntb_bus_init(struct ntb_transport *nt)
{
        if (list_empty(&ntb_transport_list)) {
                int rc = bus_register(&ntb_bus_type);
                if (rc)
                        return rc;
        }

        list_add(&nt->entry, &ntb_transport_list);

        return 0;
}

static void ntb_bus_remove(struct ntb_transport *nt)
{
        struct ntb_transport_client_dev *client_dev, *cd;

        list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
                dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
                        dev_name(&client_dev->dev));
                list_del(&client_dev->entry);
                device_unregister(&client_dev->dev);
        }

        list_del(&nt->entry);

        if (list_empty(&ntb_transport_list))
                bus_unregister(&ntb_bus_type);
}

static void ntb_client_release(struct device *dev)
{
        struct ntb_transport_client_dev *client_dev;
        client_dev = container_of(dev, struct ntb_transport_client_dev, dev);

        kfree(client_dev);
}

/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device from the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
        struct ntb_transport_client_dev *client, *cd;
        struct ntb_transport *nt;

        list_for_each_entry(nt, &ntb_transport_list, entry)
                list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
                        if (!strncmp(dev_name(&client->dev), device_name,
                                     strlen(device_name))) {
                                list_del(&client->entry);
                                device_unregister(&client->dev);
                        }
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);

/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
        struct ntb_transport_client_dev *client_dev;
        struct ntb_transport *nt;
        int rc;

        if (list_empty(&ntb_transport_list))
                return -ENODEV;

        list_for_each_entry(nt, &ntb_transport_list, entry) {
                struct device *dev;

                client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
                                     GFP_KERNEL);
                if (!client_dev) {
                        rc = -ENOMEM;
                        goto err;
                }

                dev = &client_dev->dev;

                /* setup and register client devices */
                dev_set_name(dev, "%s", device_name);
                dev->bus = &ntb_bus_type;
                dev->release = ntb_client_release;
                dev->parent = &ntb_query_pdev(nt->ndev)->dev;

                rc = device_register(dev);
                if (rc) {
                        kfree(client_dev);
                        goto err;
                }

                list_add_tail(&client_dev->entry, &nt->client_devs);
        }

        return 0;

err:
        ntb_unregister_client_dev(device_name);

        return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);

/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
        drv->driver.bus = &ntb_bus_type;

        if (list_empty(&ntb_transport_list))
                return -ENODEV;

        return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);

/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver from the NTB transport layer
 */
void ntb_unregister_client(struct ntb_client *drv)
{
        driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);

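/*
 * Illustrative client skeleton (a sketch, not part of this driver; the
 * callback names are made up and error handling is elided).  A client names
 * its embedded device_driver after the device string it registers, since
 * ntb_match_bus() matches on the name prefix:
 *
 *	static int example_probe(struct pci_dev *pdev)
 *	{
 *		return 0;
 *	}
 *
 *	static void example_remove(struct pci_dev *pdev)
 *	{
 *	}
 *
 *	static struct ntb_client example_client = {
 *		.driver.name = KBUILD_MODNAME,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	rc = ntb_register_client_dev(KBUILD_MODNAME);
 *	rc = ntb_register_client(&example_client);
 */
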
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
                            loff_t *offp)
{
        struct ntb_transport_qp *qp;
        char buf[1024];
        ssize_t ret, out_offset, out_count;

        out_count = sizeof(buf);

        qp = filp->private_data;
        out_offset = 0;
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "NTB QP stats\n");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_bytes - \t%llu\n", qp->rx_bytes);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_pkts - \t%llu\n", qp->rx_pkts);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_ring_empty - %llu\n", qp->rx_ring_empty);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_ver - \t%llu\n", qp->rx_err_ver);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_buff_begin - %p\n", qp->rx_buff_begin);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_offset - \t%p\n", qp->rx_offset);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_buff_end - \t%p\n", qp->rx_buff_end);

        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_bytes - \t%llu\n", qp->tx_bytes);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_pkts - \t%llu\n", qp->tx_pkts);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_ring_full - \t%llu\n", qp->tx_ring_full);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_mw_begin - \t%p\n", qp->tx_mw_begin);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_offset - \t%p\n", qp->tx_offset);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_mw_end - \t%p\n", qp->tx_mw_end);

        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "QP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
                               "Up" : "Down");

        ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
        return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = debugfs_read,
};

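/*
 * The rx/tx entry queues are plain lists guarded by spinlocks; these two
 * helpers append to the tail and pop from the head with IRQs disabled,
 * since they are called from both process context and the rx tasklet.
 */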
static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
                         struct list_head *list)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_add_tail(entry, list);
        spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
                                                struct list_head *list)
{
        struct ntb_queue_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        if (list_empty(list)) {
                entry = NULL;
                goto out;
        }
        entry = list_first_entry(list, struct ntb_queue_entry, entry);
        list_del(&entry->entry);
out:
        spin_unlock_irqrestore(lock, flags);

        return entry;
}

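/*
 * Carve a QP's receive buffer out of its memory window.  Each rx frame is
 * rx_max_frame bytes with a struct ntb_payload_header at the *end* of the
 * frame; the remote side writes the payload first and sets the header's
 * DESC_DONE_FLAG last, so the flag only appears once the data is in place.
 */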
static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
                                      unsigned int qp_num)
{
        struct ntb_transport_qp *qp = &nt->qps[qp_num];
        unsigned int rx_size, num_qps_mw;
        u8 mw_num = QP_TO_MW(qp_num);
        void *offset;

        WARN_ON(!nt->mw[mw_num].virt_addr);

        if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
                num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
        else
                num_qps_mw = nt->max_qps / NTB_NUM_MW;

        rx_size = nt->mw[mw_num].size / num_qps_mw;
        qp->rx_buff_begin = nt->mw[mw_num].virt_addr +
                            (qp_num / NTB_NUM_MW * rx_size);
        qp->rx_buff_end = qp->rx_buff_begin + rx_size;
        qp->rx_offset = qp->rx_buff_begin;
        qp->rx_max_frame = min(transport_mtu, rx_size);

        /* setup the hdr offsets with 0's */
        for (offset = qp->rx_buff_begin + qp->rx_max_frame -
                      sizeof(struct ntb_payload_header);
             offset < qp->rx_buff_end; offset += qp->rx_max_frame)
                memset(offset, 0, sizeof(struct ntb_payload_header));

        qp->rx_pkts = 0;
        qp->tx_pkts = 0;
}

static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
        struct ntb_transport_mw *mw = &nt->mw[num_mw];
        struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

        /* Alloc memory for receiving data.  Must be 4k aligned */
        mw->size = ALIGN(size, 4096);

        mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
                                           GFP_KERNEL);
        if (!mw->virt_addr) {
                dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
                       (int) mw->size);
                return -ENOMEM;
        }

        /* Notify HW the memory location of the receive buffer */
        ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

        return 0;
}

static void ntb_qp_link_cleanup(struct work_struct *work)
{
        struct ntb_transport_qp *qp = container_of(work,
                                                   struct ntb_transport_qp,
                                                   link_cleanup);
        struct ntb_transport *nt = qp->transport;
        struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

        if (qp->qp_link == NTB_LINK_DOWN) {
                cancel_delayed_work_sync(&qp->link_work);
                return;
        }

        if (qp->event_handler)
                qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

        dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
        qp->qp_link = NTB_LINK_DOWN;

        if (nt->transport_link == NTB_LINK_UP)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
        schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct work_struct *work)
{
        struct ntb_transport *nt = container_of(work, struct ntb_transport,
                                                link_cleanup);
        int i;

        if (nt->transport_link == NTB_LINK_DOWN)
                cancel_delayed_work_sync(&nt->link_work);
        else
                nt->transport_link = NTB_LINK_DOWN;

        /* Pass along the info to any clients */
        for (i = 0; i < nt->max_qps; i++)
                if (!test_bit(i, &nt->qp_bitmap))
                        ntb_qp_link_down(&nt->qps[i]);

        /* The scratchpad registers keep the values if the remote side
         * goes down, blast them now to give them a sane value the next
         * time they are accessed
         */
        for (i = 0; i < MAX_SPAD; i++)
                ntb_write_local_spad(nt->ndev, i, 0);
}

static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
        struct ntb_transport *nt = data;

        switch (event) {
        case NTB_EVENT_HW_LINK_UP:
                schedule_delayed_work(&nt->link_work, 0);
                break;
        case NTB_EVENT_HW_LINK_DOWN:
                schedule_work(&nt->link_cleanup);
                break;
        default:
                BUG();
        }
}

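/*
 * Scratchpad handshake: each side publishes its transport version, memory
 * window sizes, and QP count to the peer's scratchpads, then polls the
 * remote values.  Only when both sides agree is the transport link marked
 * up and the per-QP link work kicked off; on any mismatch or read/write
 * failure the work is rescheduled while the hardware link stays up.
 */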
static void ntb_transport_link_work(struct work_struct *work)
{
        struct ntb_transport *nt = container_of(work, struct ntb_transport,
                                                link_work.work);
        struct ntb_device *ndev = nt->ndev;
        struct pci_dev *pdev = ntb_query_pdev(ndev);
        u32 val;
        int rc, i;

        /* send the local info */
        rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
        if (rc) {
                dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
                        NTB_TRANSPORT_VERSION, VERSION);
                goto out;
        }

        rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
        if (rc) {
                dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
                        (u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
                goto out;
        }

        rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
        if (rc) {
                dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
                        (u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
                goto out;
        }

        rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
        if (rc) {
                dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
                        nt->max_qps, NUM_QPS);
                goto out;
        }

        rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
        if (rc) {
                dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
                goto out;
        }

        rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
        if (rc) {
                dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
                        val, QP_LINKS);
                goto out;
        }

        /* Query the remote side for its info */
        rc = ntb_read_remote_spad(ndev, VERSION, &val);
        if (rc) {
                dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
                goto out;
        }

        if (val != NTB_TRANSPORT_VERSION)
                goto out;
        dev_dbg(&pdev->dev, "Remote version = %d\n", val);

        rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
        if (rc) {
                dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
                goto out;
        }

        if (val != nt->max_qps)
                goto out;
        dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

        rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
        if (rc) {
                dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
                goto out;
        }

        if (!val)
                goto out;
        dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);

        rc = ntb_set_mw(nt, 0, val);
        if (rc)
                goto out;

        rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
        if (rc) {
                dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
                goto out;
        }

        if (!val)
                goto out;
        dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);

        rc = ntb_set_mw(nt, 1, val);
        if (rc)
                goto out;

        nt->transport_link = NTB_LINK_UP;

        for (i = 0; i < nt->max_qps; i++) {
                struct ntb_transport_qp *qp = &nt->qps[i];

                ntb_transport_setup_qp_mw(nt, i);

                if (qp->client_ready == NTB_LINK_UP)
                        schedule_delayed_work(&qp->link_work, 0);
        }

        return;

out:
        if (ntb_hw_link_status(ndev))
                schedule_delayed_work(&nt->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

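/*
 * Per-QP link negotiation: set this QP's ready bit in the peer's QP_LINKS
 * scratchpad, then check whether the peer has set the matching bit for us;
 * if not, retry while the transport link remains up.
 */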
static void ntb_qp_link_work(struct work_struct *work)
{
        struct ntb_transport_qp *qp = container_of(work,
                                                   struct ntb_transport_qp,
                                                   link_work.work);
        struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
        struct ntb_transport *nt = qp->transport;
        int rc, val;

        WARN_ON(nt->transport_link != NTB_LINK_UP);

        rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
        if (rc) {
                dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
                return;
        }

        rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
        if (rc)
                dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
                        val | 1 << qp->qp_num, QP_LINKS);

        /* query remote spad for qp ready bits */
        rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
        if (rc)
                dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

        dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

        /* See if the remote side is up */
        if (1 << qp->qp_num & val) {
                qp->qp_link = NTB_LINK_UP;

                dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
                if (qp->event_handler)
                        qp->event_handler(qp->cb_data, NTB_LINK_UP);
        } else if (nt->transport_link == NTB_LINK_UP)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_transport_init_queue(struct ntb_transport *nt,
                                     unsigned int qp_num)
{
        struct ntb_transport_qp *qp;
        unsigned int num_qps_mw, tx_size;
        u8 mw_num = QP_TO_MW(qp_num);

        qp = &nt->qps[qp_num];
        qp->qp_num = qp_num;
        qp->transport = nt;
        qp->ndev = nt->ndev;
        qp->qp_link = NTB_LINK_DOWN;
        qp->client_ready = NTB_LINK_DOWN;
        qp->event_handler = NULL;

        if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
                num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
        else
                num_qps_mw = nt->max_qps / NTB_NUM_MW;

        tx_size = ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
        qp->tx_mw_begin = ntb_get_mw_vbase(nt->ndev, mw_num) +
                          (qp_num / NTB_NUM_MW * tx_size);
        qp->tx_mw_end = qp->tx_mw_begin + tx_size;
        qp->tx_offset = qp->tx_mw_begin;
        qp->tx_max_frame = min(transport_mtu, tx_size);

        if (nt->debugfs_dir) {
                char debugfs_name[8];

                snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
                qp->debugfs_dir = debugfs_create_dir(debugfs_name,
                                                     nt->debugfs_dir);

                qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
                                                        qp->debugfs_dir, qp,
                                                        &ntb_qp_debugfs_stats);
        }

        INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
        INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);

        spin_lock_init(&qp->ntb_rx_pend_q_lock);
        spin_lock_init(&qp->ntb_rx_free_q_lock);
        spin_lock_init(&qp->ntb_tx_free_q_lock);

        INIT_LIST_HEAD(&qp->rx_pend_q);
        INIT_LIST_HEAD(&qp->rx_free_q);
        INIT_LIST_HEAD(&qp->tx_free_q);
}

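/**
 * ntb_transport_init - Initialize the transport for an NTB device
 * @pdev: PCI device the NTB hardware is attached to
 *
 * Allocates the transport and its QPs, creates the debugfs directory,
 * registers for hardware link events, and joins the client bus.
 *
 * RETURNS: Zero for success, or an appropriate -ERRNO value on error.
 */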
int ntb_transport_init(struct pci_dev *pdev)
{
        struct ntb_transport *nt;
        int rc, i;

        nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
        if (!nt)
                return -ENOMEM;

        if (debugfs_initialized())
                nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
        else
                nt->debugfs_dir = NULL;

        nt->ndev = ntb_register_transport(pdev, nt);
        if (!nt->ndev) {
                rc = -EIO;
                goto err;
        }

        nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);

        nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
                          GFP_KERNEL);
        if (!nt->qps) {
                rc = -ENOMEM;
                goto err1;
        }

        nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;

        for (i = 0; i < nt->max_qps; i++)
                ntb_transport_init_queue(nt, i);

        INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
        INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);

        rc = ntb_register_event_callback(nt->ndev,
                                         ntb_transport_event_callback);
        if (rc)
                goto err2;

        INIT_LIST_HEAD(&nt->client_devs);
        rc = ntb_bus_init(nt);
        if (rc)
                goto err3;

        if (ntb_hw_link_status(nt->ndev))
                schedule_delayed_work(&nt->link_work, 0);

        return 0;

err3:
        ntb_unregister_event_callback(nt->ndev);
err2:
        kfree(nt->qps);
err1:
        ntb_unregister_transport(nt->ndev);
err:
        debugfs_remove_recursive(nt->debugfs_dir);
        kfree(nt);
        return rc;
}

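/**
 * ntb_transport_free - Tear down the transport for an NTB device
 * @transport: NTB transport context set up by ntb_transport_init()
 *
 * Frees any remaining QPs, removes the client bus and debugfs entries, and
 * releases the memory window buffers.
 */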
void ntb_transport_free(void *transport)
{
        struct ntb_transport *nt = transport;
        struct pci_dev *pdev;
        int i;

        nt->transport_link = NTB_LINK_DOWN;

        /* free any qps that the clients did not already free */
        for (i = 0; i < nt->max_qps; i++)
                if (!test_bit(i, &nt->qp_bitmap))
                        ntb_transport_free_queue(&nt->qps[i]);

        ntb_bus_remove(nt);

        cancel_delayed_work_sync(&nt->link_work);

        debugfs_remove_recursive(nt->debugfs_dir);

        ntb_unregister_event_callback(nt->ndev);

        pdev = ntb_query_pdev(nt->ndev);

        for (i = 0; i < NTB_NUM_MW; i++)
                if (nt->mw[i].virt_addr)
                        dma_free_coherent(&pdev->dev, nt->mw[i].size,
                                          nt->mw[i].virt_addr,
                                          nt->mw[i].dma_addr);

        kfree(nt->qps);
        ntb_unregister_transport(nt->ndev);
        kfree(nt);
}

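/*
 * Copy a received frame out of the memory window into the client's buffer,
 * clear the frame's done flag so the remote side can reuse it, and hand the
 * payload up via the rx callback.
 */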
static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
                             struct ntb_queue_entry *entry, void *offset)
{
        struct ntb_payload_header *hdr;

        BUG_ON(offset < qp->rx_buff_begin ||
               offset + qp->rx_max_frame >= qp->rx_buff_end);

        hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
        entry->len = hdr->len;

        memcpy(entry->buf, offset, entry->len);

        /* Ensure that the data is fully copied out before clearing the flag */
        wmb();
        hdr->flags = 0;

        if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
                qp->rx_handler(qp, qp->cb_data, entry->cb_data, entry->len);

        ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
        struct ntb_payload_header *hdr;
        struct ntb_queue_entry *entry;
        void *offset;

        offset = qp->rx_offset;
        hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

        entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
        if (!entry) {
                dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
                        "no buffer - HDR ver %llu, len %d, flags %x\n",
                        hdr->ver, hdr->len, hdr->flags);
                qp->rx_err_no_buf++;
                return -ENOMEM;
        }

        if (!(hdr->flags & DESC_DONE_FLAG)) {
                ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
                                  &qp->rx_pend_q);
                qp->rx_ring_empty++;
                return -EAGAIN;
        }

        if (hdr->ver != qp->rx_pkts) {
                dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
                        "qp %d: version mismatch, expected %llu - got %llu\n",
                        qp->qp_num, qp->rx_pkts, hdr->ver);
                ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
                                  &qp->rx_pend_q);
                qp->rx_err_ver++;
                return -EIO;
        }

        if (hdr->flags & LINK_DOWN_FLAG) {
                ntb_qp_link_down(qp);

                ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
                                  &qp->rx_pend_q);

                /* Ensure that the data is fully copied out before clearing the
                 * done flag
                 */
                wmb();
                hdr->flags = 0;
                goto out;
        }

        dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
                "rx offset %p, ver %llu - %d payload received, buf size %d\n",
                qp->rx_offset, hdr->ver, hdr->len, entry->len);

        if (hdr->len <= entry->len)
                ntb_rx_copy_task(qp, entry, offset);
        else {
                ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
                                  &qp->rx_pend_q);

                /* Ensure that the data is fully copied out before clearing the
                 * done flag
                 */
                wmb();
                hdr->flags = 0;
                qp->rx_err_oflow++;
                dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
                        "RX overflow! Wanted %d got %d\n",
                        hdr->len, entry->len);
        }

        qp->rx_bytes += hdr->len;
        qp->rx_pkts++;

out:
        qp->rx_offset += qp->rx_max_frame;
        if (qp->rx_offset + qp->rx_max_frame >= qp->rx_buff_end)
                qp->rx_offset = qp->rx_buff_begin;

        return 0;
}

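/*
 * Doorbell-driven receive path: the doorbell callback only schedules the
 * tasklet, and the tasklet drains completed rx frames until the ring is
 * empty (-EAGAIN) or no client buffers remain (-ENOMEM).
 */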
static void ntb_transport_rx(unsigned long data)
{
        struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
        int rc;

        do {
                rc = ntb_process_rxc(qp);
        } while (!rc);
}

static void ntb_transport_rxc_db(void *data, int db_num)
{
        struct ntb_transport_qp *qp = data;

        dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
                __func__, db_num);

        tasklet_schedule(&qp->rx_work);
}

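/*
 * Copy one tx frame into the peer's memory window.  The payload is written
 * with memcpy_toio(), the header (at the end of the frame) is filled in,
 * and only after a write barrier is DESC_DONE_FLAG set and the peer's
 * doorbell rung.
 */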
static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
                             struct ntb_queue_entry *entry,
                             void *offset)
{
        struct ntb_payload_header *hdr;

        BUG_ON(offset < qp->tx_mw_begin ||
               offset + qp->tx_max_frame >= qp->tx_mw_end);

        memcpy_toio(offset, entry->buf, entry->len);

        hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
        hdr->len = entry->len;
        hdr->ver = qp->tx_pkts;

        /* Ensure that the data is fully copied out before setting the flag */
        wmb();
        hdr->flags = entry->flags | DESC_DONE_FLAG;

        ntb_ring_sdb(qp->ndev, qp->qp_num);

        /* The entry length can only be zero if the packet is intended to be a
         * "link down" or similar.  Since no payload is being sent in these
         * cases, there is nothing to add to the completion queue.
         */
        if (entry->len > 0) {
                qp->tx_bytes += entry->len;

                if (qp->tx_handler)
                        qp->tx_handler(qp, qp->cb_data, entry->cb_data,
                                       entry->len);
        }

        ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
                          struct ntb_queue_entry *entry)
{
        struct ntb_payload_header *hdr;
        void *offset;

        offset = qp->tx_offset;
        hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);

        dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %p, entry len %d flags %x buff %p\n",
                 qp->tx_pkts, offset, qp->tx_offset, entry->len, entry->flags,
                 entry->buf);
        if (hdr->flags) {
                qp->tx_ring_full++;
                return -EAGAIN;
        }

        if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
                if (qp->tx_handler)
                        qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

                ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
                             &qp->tx_free_q);
                return 0;
        }

        ntb_tx_copy_task(qp, entry, offset);

        qp->tx_offset += qp->tx_max_frame;
        if (qp->tx_offset + qp->tx_max_frame >= qp->tx_mw_end)
                qp->tx_offset = qp->tx_mw_begin;

        qp->tx_pkts++;

        return 0;
}

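/*
 * Send a zero-length frame flagged LINK_DOWN_FLAG so the peer's rx path
 * takes its QP link down too; see the LINK_DOWN_FLAG handling in
 * ntb_process_rxc().
 */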
static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
        struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
        struct ntb_queue_entry *entry;
        int i, rc;

        if (qp->qp_link == NTB_LINK_DOWN)
                return;

        qp->qp_link = NTB_LINK_DOWN;
        dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);

        for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
                entry = ntb_list_rm(&qp->ntb_tx_free_q_lock,
                                         &qp->tx_free_q);
                if (entry)
                        break;
                msleep(100);
        }

        if (!entry)
                return;

        entry->cb_data = NULL;
        entry->buf = NULL;
        entry->len = 0;
        entry->flags = LINK_DOWN_FLAG;

        rc = ntb_process_tx(qp, entry);
        if (rc)
                dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
                        qp->qp_num);
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer handed back to the callback handlers as their qp_data
 * @pdev: PCI device on which the NTB transport was initialized
 * @handlers: receive, transmit, and event callback functions
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct pci_dev *pdev,
                           const struct ntb_queue_handlers *handlers)
{
        struct ntb_queue_entry *entry;
        struct ntb_transport_qp *qp;
        struct ntb_transport *nt;
        unsigned int free_queue;
        int rc, i;

        nt = ntb_find_transport(pdev);
        if (!nt)
                goto err;

        free_queue = ffs(nt->qp_bitmap);
        if (!free_queue)
                goto err;

        /* decrement free_queue to make it zero based */
        free_queue--;

        clear_bit(free_queue, &nt->qp_bitmap);

        qp = &nt->qps[free_queue];
        qp->cb_data = data;
        qp->rx_handler = handlers->rx_handler;
        qp->tx_handler = handlers->tx_handler;
        qp->event_handler = handlers->event_handler;

        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
                entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
                if (!entry)
                        goto err1;

                ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
                                  &qp->rx_free_q);
        }

        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
                entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
                if (!entry)
                        goto err2;

                ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
                                  &qp->tx_free_q);
        }

        tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);

        rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
                                      ntb_transport_rxc_db);
        if (rc)
                goto err3;

        dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

        return qp;

err3:
        tasklet_disable(&qp->rx_work);
err2:
        while ((entry =
                ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);
err1:
        while ((entry =
                ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
                kfree(entry);
        set_bit(free_queue, &nt->qp_bitmap);
err:
        return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
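
/*
 * Typical client flow (an illustrative sketch; the variable names are made
 * up and error handling is elided).  Up to NTB_QP_DEF_NUM_ENTRIES receive
 * buffers may be outstanding at once:
 *
 *	qp = ntb_transport_create_queue(priv, pdev, &my_handlers);
 *	for (i = 0; i < num_bufs; i++)
 *		ntb_transport_rx_enqueue(qp, buf[i], buf[i], buf_len);
 *	ntb_transport_link_up(qp);
 *	...
 *	ntb_transport_tx_enqueue(qp, skb, skb->data, skb->len);
 */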

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
        struct pci_dev *pdev;
        struct ntb_queue_entry *entry;

        if (!qp)
                return;

        pdev = ntb_query_pdev(qp->ndev);

        cancel_delayed_work_sync(&qp->link_work);

        ntb_unregister_db_callback(qp->ndev, qp->qp_num);
        tasklet_disable(&qp->rx_work);

        while ((entry =
                ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
                kfree(entry);

        while ((entry =
                ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
                dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
                kfree(entry);
        }

        while ((entry =
                ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);

        set_bit(qp->qp_num, &qp->transport->qp_bitmap);

        dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB transport layer queue to dequeue the buffer from
 * @len: pointer to variable written with the dequeued buffer's length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL on error, or a pointer to the dequeued buffer on success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
        struct ntb_queue_entry *entry;
        void *buf;

        if (!qp || qp->client_ready == NTB_LINK_UP)
                return NULL;

        entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
        if (!entry)
                return NULL;

        buf = entry->cb_data;
        *len = entry->len;

        ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
                          &qp->rx_free_q);

        return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
                             unsigned int len)
{
        struct ntb_queue_entry *entry;

        if (!qp)
                return -EINVAL;

        entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
        if (!entry)
                return -ENOMEM;

        entry->cb_data = cb;
        entry->buf = data;
        entry->len = len;

        ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
                          &qp->rx_pend_q);

        return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
                             unsigned int len)
{
        struct ntb_queue_entry *entry;
        int rc;

        if (!qp || qp->qp_link != NTB_LINK_UP || !len)
                return -EINVAL;

        entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
        if (!entry)
                return -ENOMEM;

        entry->cb_data = cb;
        entry->buf = data;
        entry->len = len;
        entry->flags = 0;

        rc = ntb_process_tx(qp, entry);
        if (rc)
                ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
                             &qp->tx_free_q);

        return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
        if (!qp)
                return;

        qp->client_ready = NTB_LINK_UP;

        if (qp->transport->transport_link == NTB_LINK_UP)
                schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
        struct pci_dev *pdev;
        int rc, val;

        if (!qp)
                return;

        pdev = ntb_query_pdev(qp->ndev);
        qp->client_ready = NTB_LINK_DOWN;

        rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
        if (rc) {
                dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
                return;
        }

        rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
                                   val & ~(1 << qp->qp_num));
        if (rc)
                dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
                        val & ~(1 << qp->qp_num), QP_LINKS);

        if (qp->qp_link == NTB_LINK_UP)
                ntb_send_link_down(qp);
        else
                cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
        return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
        return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
        return qp->tx_max_frame - sizeof(struct ntb_payload_header);
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);