// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bridge driver for Alibaba ENI (Elastic Network Interface)
 *
 * Copyright (c) 2021, Alibaba Inc. All rights reserved.
 * Author: Wu Zongyong <wuzongyong@linux.alibaba.com>
 *
 */

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_legacy.h>
#include <uapi/linux/virtio_net.h>

#define ENI_MSIX_NAME_SIZE 256

#define ENI_ERR(pdev, fmt, ...)	\
	dev_err(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
#define ENI_DBG(pdev, fmt, ...)	\
	dev_dbg(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
#define ENI_INFO(pdev, fmt, ...) \
	dev_info(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)

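/* Per-virtqueue state kept by the bridge: the legacy notify address to
 * kick, the MSI-X name used when requesting the interrupt, the upper
 * layer's callback and the irq assigned to the queue. struct eni_vdpa
 * holds the device-wide counterparts (config interrupt, queue and
 * vector counts).
 */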
struct eni_vring {
	void __iomem *notify;
	char msix_name[ENI_MSIX_NAME_SIZE];
	struct vdpa_callback cb;
	int irq;
};

struct eni_vdpa {
	struct vdpa_device vdpa;
	struct virtio_pci_legacy_device ldev;
	struct eni_vring *vring;
	struct vdpa_callback config_cb;
	char msix_name[ENI_MSIX_NAME_SIZE];
	int config_irq;
	int queues;
	int vectors;
};

static struct eni_vdpa *vdpa_to_eni(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct eni_vdpa, vdpa);
}

static struct virtio_pci_legacy_device *vdpa_to_ldev(struct vdpa_device *vdpa)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	return &eni_vdpa->ldev;
}

static u64 eni_vdpa_get_device_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	u64 features = vp_legacy_get_features(ldev);

	features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
	features |= BIT_ULL(VIRTIO_F_ORDER_PLATFORM);

	return features;
}

static int eni_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	if (!(features & BIT_ULL(VIRTIO_NET_F_MRG_RXBUF)) && features) {
		ENI_ERR(ldev->pci_dev,
			"VIRTIO_NET_F_MRG_RXBUF is not negotiated\n");
		return -EINVAL;
	}

	vp_legacy_set_features(ldev, (u32)features);

	return 0;
}

static u64 eni_vdpa_get_driver_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_driver_features(ldev);
}

static u8 eni_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_status(ldev);
}

static int eni_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	int irq = eni_vdpa->vring[idx].irq;

	if (irq == VIRTIO_MSI_NO_VECTOR)
		return -EINVAL;

	return irq;
}

static void eni_vdpa_free_irq(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	struct pci_dev *pdev = ldev->pci_dev;
	int i;

	for (i = 0; i < eni_vdpa->queues; i++) {
		if (eni_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			vp_legacy_queue_vector(ldev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, eni_vdpa->vring[i].irq,
				      &eni_vdpa->vring[i]);
			eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (eni_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_legacy_config_vector(ldev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, eni_vdpa->config_irq, eni_vdpa);
		eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (eni_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		eni_vdpa->vectors = 0;
	}
}

static irqreturn_t eni_vdpa_vq_handler(int irq, void *arg)
{
	struct eni_vring *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t eni_vdpa_config_handler(int irq, void *arg)
{
	struct eni_vdpa *eni_vdpa = arg;

	if (eni_vdpa->config_cb.callback)
		return eni_vdpa->config_cb.callback(eni_vdpa->config_cb.private);

	return IRQ_HANDLED;
}

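/* Allocate one MSI-X vector per virtqueue plus one extra vector for
 * config change notifications, then wire each vector to the matching
 * handler and program the device's queue/config vector registers.
 */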
static int eni_vdpa_request_irq(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	struct pci_dev *pdev = ldev->pci_dev;
	int i, ret, irq;
	int queues = eni_vdpa->queues;
	int vectors = queues + 1;

	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		ENI_ERR(pdev,
			"failed to allocate irq vectors want %d but %d\n",
			vectors, ret);
		return ret;
	}

	eni_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		snprintf(eni_vdpa->vring[i].msix_name, ENI_MSIX_NAME_SIZE,
			 "eni-vdpa[%s]-%d\n", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, i);
		ret = devm_request_irq(&pdev->dev, irq,
				       eni_vdpa_vq_handler,
				       0, eni_vdpa->vring[i].msix_name,
				       &eni_vdpa->vring[i]);
		if (ret) {
			ENI_ERR(pdev, "failed to request irq for vq %d\n", i);
			goto err;
		}
		vp_legacy_queue_vector(ldev, i, i);
		eni_vdpa->vring[i].irq = irq;
	}

	snprintf(eni_vdpa->msix_name, ENI_MSIX_NAME_SIZE, "eni-vdpa[%s]-config\n",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, queues);
	ret = devm_request_irq(&pdev->dev, irq, eni_vdpa_config_handler, 0,
			       eni_vdpa->msix_name, eni_vdpa);
	if (ret) {
		ENI_ERR(pdev, "failed to request irq for config vq %d\n", i);
		goto err;
	}
	vp_legacy_config_vector(ldev, queues);
	eni_vdpa->config_irq = irq;

	return 0;
err:
	eni_vdpa_free_irq(eni_vdpa);
	return ret;
}

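/* Interrupts are requested lazily when the driver sets DRIVER_OK and
 * released again when DRIVER_OK is cleared, so MSI-X vectors are only
 * held while the device is live.
 */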
static void eni_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u8 s = eni_vdpa_get_status(vdpa);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		eni_vdpa_request_irq(eni_vdpa);
	}

	vp_legacy_set_status(ldev, status);

	if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    (s & VIRTIO_CONFIG_S_DRIVER_OK))
		eni_vdpa_free_irq(eni_vdpa);
}

static int eni_vdpa_reset(struct vdpa_device *vdpa)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u8 s = eni_vdpa_get_status(vdpa);

	vp_legacy_set_status(ldev, 0);

	if (s & VIRTIO_CONFIG_S_DRIVER_OK)
		eni_vdpa_free_irq(eni_vdpa);

	return 0;
}

static u16 eni_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_size(ldev, 0);
}

static u16 eni_vdpa_get_vq_num_min(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_size(ldev, 0);
}

static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				 struct vdpa_vq_state *state)
{
	return -EOPNOTSUPP;
}

static int eni_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				 const struct vdpa_vq_state *state)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	const struct vdpa_vq_state_split *split = &state->split;

	/* ENI is built upon the virtio-pci specification, which does not
	 * support setting the state of a virtqueue. But if the requested
	 * state happens to equal the device's initial state, we can let
	 * it go.
	 */
	if (!vp_legacy_get_queue_enable(ldev, qid)
	    && split->avail_index == 0)
		return 0;

	return -EOPNOTSUPP;
}

static void eni_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
			       struct vdpa_callback *cb)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	eni_vdpa->vring[qid].cb = *cb;
}

static void eni_vdpa_set_vq_ready(struct vdpa_device *vdpa, u16 qid,
				  bool ready)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	/* ENI is a legacy virtio-pci device, and toggling virtqueue
	 * readiness is not supported by the specification. But we can
	 * disable a virtqueue by setting its address to 0.
	 */
	if (!ready)
		vp_legacy_set_queue_address(ldev, qid, 0);
}

static bool eni_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_enable(ldev, qid);
}

static void eni_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
				u32 num)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	struct pci_dev *pdev = ldev->pci_dev;
	u16 n = vp_legacy_get_queue_size(ldev, qid);

	/* ENI is a legacy virtio-pci device that does not allow the
	 * virtqueue size to be changed. Just report an error if someone
	 * tries to change it.
	 */
	if (num != n)
		ENI_ERR(pdev,
			"not support to set vq %u fixed num %u to %u\n",
			qid, n, num);
}

static int eni_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
				   u64 desc_area, u64 driver_area,
				   u64 device_area)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	u32 pfn = desc_area >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

	vp_legacy_set_queue_address(ldev, qid, pfn);

	return 0;
}

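/* Legacy virtio-pci notification: writing the queue index to the
 * device's VIRTIO_PCI_QUEUE_NOTIFY register kicks the queue.
 */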
static void eni_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	iowrite16(qid, eni_vdpa->vring[qid].notify);
}

static u32 eni_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return ldev->id.device;
}

static u32 eni_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return ldev->id.vendor;
}

static u32 eni_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return VIRTIO_PCI_VRING_ALIGN;
}

static size_t eni_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	return sizeof(struct virtio_net_config);
}

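/* The device config space of a legacy device sits in the I/O port
 * region after the common header; VIRTIO_PCI_CONFIG_OFF() accounts for
 * the extra MSI-X vector fields when MSI-X is enabled.
 */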
static void eni_vdpa_get_config(struct vdpa_device *vdpa,
				unsigned int offset,
				void *buf, unsigned int len)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	void __iomem *ioaddr = ldev->ioaddr +
			VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
			offset;
	u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		*p++ = ioread8(ioaddr + i);
}

static void eni_vdpa_set_config(struct vdpa_device *vdpa,
				unsigned int offset, const void *buf,
				unsigned int len)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	void __iomem *ioaddr = ldev->ioaddr +
			VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
			offset;
	const u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		iowrite8(*p++, ioaddr + i);
}

static void eni_vdpa_set_config_cb(struct vdpa_device *vdpa,
				   struct vdpa_callback *cb)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	eni_vdpa->config_cb = *cb;
}

static const struct vdpa_config_ops eni_vdpa_ops = {
	.get_device_features = eni_vdpa_get_device_features,
	.set_driver_features = eni_vdpa_set_driver_features,
	.get_driver_features = eni_vdpa_get_driver_features,
	.get_status = eni_vdpa_get_status,
	.set_status = eni_vdpa_set_status,
	.reset = eni_vdpa_reset,
	.get_vq_num_max = eni_vdpa_get_vq_num_max,
	.get_vq_num_min = eni_vdpa_get_vq_num_min,
	.get_vq_state = eni_vdpa_get_vq_state,
	.set_vq_state = eni_vdpa_set_vq_state,
	.set_vq_cb = eni_vdpa_set_vq_cb,
	.set_vq_ready = eni_vdpa_set_vq_ready,
	.get_vq_ready = eni_vdpa_get_vq_ready,
	.set_vq_num = eni_vdpa_set_vq_num,
	.set_vq_address = eni_vdpa_set_vq_address,
	.kick_vq = eni_vdpa_kick_vq,
	.get_device_id = eni_vdpa_get_device_id,
	.get_vendor_id = eni_vdpa_get_vendor_id,
	.get_vq_align = eni_vdpa_get_vq_align,
	.get_config_size = eni_vdpa_get_config_size,
	.get_config = eni_vdpa_get_config,
	.set_config = eni_vdpa_set_config,
	.set_config_cb = eni_vdpa_set_config_cb,
	.get_vq_irq = eni_vdpa_get_vq_irq,
};

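/* A virtio-net device exposes one RX and one TX queue per queue pair
 * (scaled by max_virtqueue_pairs when VIRTIO_NET_F_MQ is offered) plus
 * an optional control virtqueue when VIRTIO_NET_F_CTRL_VQ is offered.
 */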
static u16 eni_vdpa_get_num_queues(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u32 features = vp_legacy_get_features(ldev);
	u16 num = 2;

	if (features & BIT_ULL(VIRTIO_NET_F_MQ)) {
		__virtio16 max_virtqueue_pairs;

		eni_vdpa_get_config(&eni_vdpa->vdpa,
			offsetof(struct virtio_net_config, max_virtqueue_pairs),
			&max_virtqueue_pairs,
			sizeof(max_virtqueue_pairs));
		num = 2 * __virtio16_to_cpu(virtio_legacy_is_little_endian(),
				max_virtqueue_pairs);
	}

	if (features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		num += 1;

	return num;
}

static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct eni_vdpa *eni_vdpa;
	struct virtio_pci_legacy_device *ldev;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
				     dev, &eni_vdpa_ops, 1, 1, NULL, false);
	if (IS_ERR(eni_vdpa)) {
		ENI_ERR(pdev, "failed to allocate vDPA structure\n");
		return PTR_ERR(eni_vdpa);
	}

	ldev = &eni_vdpa->ldev;
	ldev->pci_dev = pdev;

	ret = vp_legacy_probe(ldev);
	if (ret) {
		ENI_ERR(pdev, "failed to probe legacy PCI device\n");
		goto err;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, eni_vdpa);

	eni_vdpa->vdpa.dma_dev = &pdev->dev;
	eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);

	eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
				       sizeof(*eni_vdpa->vring),
				       GFP_KERNEL);
	if (!eni_vdpa->vring) {
		ret = -ENOMEM;
		ENI_ERR(pdev, "failed to allocate virtqueues\n");
		goto err;
	}

	for (i = 0; i < eni_vdpa->queues; i++) {
		eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		eni_vdpa->vring[i].notify = ldev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
	}
	eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	ret = vdpa_register_device(&eni_vdpa->vdpa, eni_vdpa->queues);
	if (ret) {
		ENI_ERR(pdev, "failed to register to vdpa bus\n");
		goto err;
	}

	return 0;
err:
	put_device(&eni_vdpa->vdpa.dev);
	return ret;
}

static void eni_vdpa_remove(struct pci_dev *pdev)
{
	struct eni_vdpa *eni_vdpa = pci_get_drvdata(pdev);

	vdpa_unregister_device(&eni_vdpa->vdpa);
	vp_legacy_remove(&eni_vdpa->ldev);
}

static struct pci_device_id eni_pci_ids[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_NET,
			 PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_ID_NET) },
	{ },
};

static struct pci_driver eni_vdpa_driver = {
	.name = "alibaba-eni-vdpa",
	.id_table = eni_pci_ids,
	.probe = eni_vdpa_probe,
	.remove = eni_vdpa_remove,
};

module_pci_driver(eni_vdpa_driver);

MODULE_AUTHOR("Wu Zongyong <wuzongyong@linux.alibaba.com>");
MODULE_DESCRIPTION("Alibaba ENI vDPA driver");
MODULE_LICENSE("GPL v2");