// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation
 * Author: Johannes Berg <johannes@sipsolutions.net>
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/logic_iomem.h>
#include <linux/irqdomain.h>
#include <linux/virtio_pcidev.h>
#include <linux/virtio-uml.h>
#include <linux/delay.h>
#include <linux/msi.h>
#include <asm/unaligned.h>

#define MAX_MSI_VECTORS 32
#define CFG_SPACE_SIZE 4096

/* for MSI-X we have a 32-bit payload */
#define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32))
#define NUM_IRQ_MSGS	10

#define HANDLE_NO_FREE(ptr) ((void *)((unsigned long)(ptr) | 1))
#define HANDLE_IS_NO_FREE(ptr) ((unsigned long)(ptr) & 1)

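/*
 * Buffers queued on the cmd virtqueue are normally heap allocations that
 * are freed once the device has consumed them.  Buffers the driver must
 * not free (e.g. the caller-owned per-CPU message buffer) are tagged by
 * setting the low bit of the virtqueue token; the completion paths check
 * HANDLE_IS_NO_FREE() before freeing anything.
 */
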
struct um_pci_device {
	struct virtio_device *vdev;

	/* for now just standard BARs */
	u8 resptr[PCI_STD_NUM_BARS];

	struct virtqueue *cmd_vq, *irq_vq;

#define UM_PCI_STAT_WAITING	0
	unsigned long status;

	int irq;
};

struct um_pci_device_reg {
	struct um_pci_device *dev;
	void __iomem *iomem;
};

static struct pci_host_bridge *bridge;
static DEFINE_MUTEX(um_pci_mtx);
static struct um_pci_device_reg um_pci_devices[MAX_DEVICES];
static struct fwnode_handle *um_pci_fwnode;
static struct irq_domain *um_pci_inner_domain;
static struct irq_domain *um_pci_msi_domain;
static unsigned long um_pci_msi_used[BITS_TO_LONGS(MAX_MSI_VECTORS)];

#define UM_VIRT_PCI_MAXDELAY 40000

struct um_pci_message_buffer {
	struct virtio_pcidev_msg hdr;
	u8 data[8];
};

static struct um_pci_message_buffer __percpu *um_pci_msg_bufs;

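/*
 * Send one request to the device on the cmd virtqueue.  Config space and
 * MMIO writes are posted, like real PCI writes, and return without
 * waiting; everything else busy-polls the virtqueue (bounded by
 * UM_VIRT_PCI_MAXDELAY iterations) until the device hands the buffer
 * back.  The per-CPU message buffer above keeps the common case free of
 * allocations.
 */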
static int um_pci_send_cmd(struct um_pci_device *dev,
			   struct virtio_pcidev_msg *cmd,
			   unsigned int cmd_size,
			   const void *extra, unsigned int extra_size,
			   void *out, unsigned int out_size)
{
	struct scatterlist out_sg, extra_sg, in_sg;
	struct scatterlist *sgs_list[] = {
		[0] = &out_sg,
		[1] = extra ? &extra_sg : &in_sg,
		[2] = extra ? &in_sg : NULL,
	};
	struct um_pci_message_buffer *buf;
	int delay_count = 0;
	unsigned int len;
	bool posted;
	int ret;

	if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf)))
		return -EINVAL;

	switch (cmd->op) {
	case VIRTIO_PCIDEV_OP_CFG_WRITE:
	case VIRTIO_PCIDEV_OP_MMIO_WRITE:
	case VIRTIO_PCIDEV_OP_MMIO_MEMSET:
		/* in PCI, writes are posted, so don't wait */

	buf = get_cpu_var(um_pci_msg_bufs);
	memcpy(buf, cmd, cmd_size);

		u8 *ncmd = kmalloc(cmd_size + extra_size, GFP_ATOMIC);

			memcpy(ncmd, cmd, cmd_size);
			memcpy(ncmd + cmd_size, extra, extra_size);
			cmd_size += extra_size;

			/* try without allocating memory */

	sg_init_one(&out_sg, cmd, cmd_size);
		sg_init_one(&extra_sg, extra, extra_size);
		sg_init_one(&in_sg, out, out_size);

	/* add to internal virtio queue */
	ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list,
				posted ? cmd : HANDLE_NO_FREE(cmd),
		virtqueue_kick(dev->cmd_vq);

	/* kick and poll for getting a response on the queue */
	set_bit(UM_PCI_STAT_WAITING, &dev->status);
	virtqueue_kick(dev->cmd_vq);

		void *completed = virtqueue_get_buf(dev->cmd_vq, &len);

		if (completed == HANDLE_NO_FREE(cmd))
			break;

		if (completed && !HANDLE_IS_NO_FREE(completed))
			kfree(completed);

		if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) ||
			      ++delay_count > UM_VIRT_PCI_MAXDELAY,
			      "um virt-pci delay: %d", delay_count)) {

	clear_bit(UM_PCI_STAT_WAITING, &dev->status);

	put_cpu_var(um_pci_msg_bufs);

static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
					  int size)
{
	struct um_pci_device_reg *reg = priv;
	struct um_pci_device *dev = reg->dev;
	struct virtio_pcidev_msg hdr = {
		.op = VIRTIO_PCIDEV_OP_CFG_READ,
	/* buf->data is maximum size - we may only use parts of it */
	struct um_pci_message_buffer *buf;
	unsigned long ret = ULONG_MAX;

	buf = get_cpu_var(um_pci_msg_bufs);

	memset(buf->data, 0xff, sizeof(buf->data));

		WARN(1, "invalid config space read size %d\n", size);

	if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, 8))

		ret = le16_to_cpup((void *)data);
		ret = le32_to_cpup((void *)data);
		ret = le64_to_cpup((void *)data);

	put_cpu_var(um_pci_msg_bufs);

static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size,
				  unsigned long val)
{
	struct um_pci_device_reg *reg = priv;
	struct um_pci_device *dev = reg->dev;
	struct {
		struct virtio_pcidev_msg hdr;
		/* maximum size - we may only use parts of it */
		u8 data[8];
	} msg = {
		.hdr = {
			.op = VIRTIO_PCIDEV_OP_CFG_WRITE,

	msg.data[0] = (u8)val;
		put_unaligned_le16(val, (void *)msg.data);
		put_unaligned_le32(val, (void *)msg.data);
		put_unaligned_le64(val, (void *)msg.data);

		WARN(1, "invalid config space write size %d\n", size);

	WARN_ON(um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0));

static const struct logic_iomem_ops um_pci_device_cfgspace_ops = {
	.read = um_pci_cfgspace_read,
	.write = um_pci_cfgspace_write,
};

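/*
 * BAR accessors receive a pointer to one byte of dev->resptr[] as their
 * priv cookie.  That byte holds its own index within resptr[], so
 * subtracting it from the pointer yields the start of the array and
 * container_of() then recovers the um_pci_device; the value also selects
 * which BAR the request targets.
 */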
static void um_pci_bar_copy_from(void *priv, void *buffer,
				 unsigned int offset, int size)
{
	u8 *resptr = priv;
	struct um_pci_device *dev = container_of(resptr - *resptr,
						 struct um_pci_device,
						 resptr[0]);
	struct virtio_pcidev_msg hdr = {
		.op = VIRTIO_PCIDEV_OP_MMIO_READ,

	memset(buffer, 0xff, size);

	um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size);

static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
				     int size)
{
	/* buf->data is maximum size - we may only use parts of it */
	struct um_pci_message_buffer *buf;
	unsigned long ret = ULONG_MAX;

	buf = get_cpu_var(um_pci_msg_bufs);

		WARN(1, "invalid config space read size %d\n", size);

	um_pci_bar_copy_from(priv, data, offset, size);

		ret = le16_to_cpup((void *)data);
		ret = le32_to_cpup((void *)data);
		ret = le64_to_cpup((void *)data);

	put_cpu_var(um_pci_msg_bufs);

static void um_pci_bar_copy_to(void *priv, unsigned int offset,
			       const void *buffer, int size)
{
	u8 *resptr = priv;
	struct um_pci_device *dev = container_of(resptr - *resptr,
						 struct um_pci_device,
						 resptr[0]);
	struct virtio_pcidev_msg hdr = {
		.op = VIRTIO_PCIDEV_OP_MMIO_WRITE,

	um_pci_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0);

static void um_pci_bar_write(void *priv, unsigned int offset, int size,
			     unsigned long val)
{
	/* maximum size - we may only use parts of it */
	u8 data[8];

		put_unaligned_le16(val, (void *)data);
		put_unaligned_le32(val, (void *)data);
		put_unaligned_le64(val, (void *)data);

		WARN(1, "invalid config space write size %d\n", size);

	um_pci_bar_copy_to(priv, offset, data, size);

static void um_pci_bar_set(void *priv, unsigned int offset, u8 value, int size)
{
	u8 *resptr = priv;
	struct um_pci_device *dev = container_of(resptr - *resptr,
						 struct um_pci_device,
						 resptr[0]);
	struct virtio_pcidev_msg hdr;

		.op = VIRTIO_PCIDEV_OP_CFG_WRITE,

	um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0);

static const struct logic_iomem_ops um_pci_device_bar_ops = {
	.read = um_pci_bar_read,
	.write = um_pci_bar_write,
	.set = um_pci_bar_set,
	.copy_from = um_pci_bar_copy_from,
	.copy_to = um_pci_bar_copy_to,
};

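/*
 * Config space accesses: every device owns one CFG_SPACE_SIZE slot of the
 * logic_iomem config space window (ioremapped in um_pci_init()), and
 * map_bus() returns that slot plus the register offset.  Only function 0
 * per slot is handled for now.
 */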
static void __iomem *um_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
				    int where)
{
	struct um_pci_device_reg *dev;
	unsigned int busn = bus->number;

	/* not allowing functions for now ... */

	if (devfn / 8 >= ARRAY_SIZE(um_pci_devices))
		return NULL;

	dev = &um_pci_devices[devfn / 8];

	return (void __iomem *)((unsigned long)dev->iomem + where);
}

static struct pci_ops um_pci_ops = {
	.map_bus = um_pci_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static void um_pci_rescan(void)
{
	pci_lock_rescan_remove();
	pci_rescan_bus(bridge->bus);
	pci_unlock_rescan_remove();
}

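/*
 * Interrupt delivery: NUM_IRQ_MSGS receive buffers of MAX_IRQ_MSG_SIZE
 * bytes are kept posted on the irq virtqueue.  The device sends INT,
 * MSI/MSI-X and PME messages through it; um_pci_irq_vq_cb() dispatches
 * each message and immediately re-posts its buffer.
 */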
static void um_pci_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick)
{
	struct scatterlist sg[1];

	sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE);
	if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC))
		kfree(buf);
	else if (kick)
		virtqueue_kick(vq);
}

static void um_pci_handle_irq_message(struct virtqueue *vq,
				      struct virtio_pcidev_msg *msg)
{
	struct virtio_device *vdev = vq->vdev;
	struct um_pci_device *dev = vdev->priv;

	/* we should properly chain interrupts, but on ARCH=um we don't care */

	switch (msg->op) {
	case VIRTIO_PCIDEV_OP_INT:
		generic_handle_irq(dev->irq);
		break;
	case VIRTIO_PCIDEV_OP_MSI:
		/* our MSI message is just the interrupt number */
		if (msg->size == sizeof(u32))
			generic_handle_irq(le32_to_cpup((void *)msg->data));
		else
			generic_handle_irq(le16_to_cpup((void *)msg->data));
		break;
	case VIRTIO_PCIDEV_OP_PME:
		/* nothing to do - we already woke up due to the message */
		break;
	default:
		dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op);
		break;
	}
}

static void um_pci_cmd_vq_cb(struct virtqueue *vq)
{
	struct virtio_device *vdev = vq->vdev;
	struct um_pci_device *dev = vdev->priv;
	unsigned int len;
	void *cmd;

	if (test_bit(UM_PCI_STAT_WAITING, &dev->status))
		return;

	while ((cmd = virtqueue_get_buf(vq, &len))) {
		if (WARN_ON(HANDLE_IS_NO_FREE(cmd)))
			continue;
		kfree(cmd);
	}
}

static void um_pci_irq_vq_cb(struct virtqueue *vq)
{
	struct virtio_pcidev_msg *msg;
	unsigned int len;

	while ((msg = virtqueue_get_buf(vq, &len))) {
		if (len >= sizeof(*msg))
			um_pci_handle_irq_message(vq, msg);

		/* recycle the message buffer */
		um_pci_irq_vq_addbuf(vq, msg, true);
	}
}

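/*
 * Set up the two virtqueues ("cmd" and "irq") and pre-fill the irq queue
 * with receive buffers before kicking it.
 */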
static int um_pci_init_vqs(struct um_pci_device *dev)
{
	struct virtqueue *vqs[2];
	static const char *const names[2] = { "cmd", "irq" };
	vq_callback_t *cbs[2] = { um_pci_cmd_vq_cb, um_pci_irq_vq_cb };
	int err, i;

	err = virtio_find_vqs(dev->vdev, 2, vqs, cbs, names, NULL);
	if (err)
		return err;

	dev->cmd_vq = vqs[0];
	dev->irq_vq = vqs[1];

	virtio_device_ready(dev->vdev);

	for (i = 0; i < NUM_IRQ_MSGS; i++) {
		void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);

		if (msg)
			um_pci_irq_vq_addbuf(dev->irq_vq, msg, false);
	}

	virtqueue_kick(dev->irq_vq);

	return 0;
}

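/*
 * Probe: claim a free slot in um_pci_devices[], bring up the virtqueues,
 * allocate a dedicated IRQ descriptor for INTx delivery and mark the
 * device wakeup-capable.  Virtqueue suspension is disabled so the device
 * keeps working across suspend/resume.
 */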
static int um_pci_virtio_probe(struct virtio_device *vdev)
{
	struct um_pci_device *dev;
	int err, free, i;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	mutex_lock(&um_pci_mtx);
	for (i = 0; i < MAX_DEVICES; i++) {
		if (um_pci_devices[i].dev)
			continue;

	err = um_pci_init_vqs(dev);

	dev->irq = irq_alloc_desc(numa_node_id());

	um_pci_devices[free].dev = dev;

	mutex_unlock(&um_pci_mtx);

	device_set_wakeup_enable(&vdev->dev, true);

	/*
	 * In order to do suspend-resume properly, don't allow VQs
	 * to be suspended.
	 */
	virtio_uml_set_no_vq_suspend(vdev, true);

	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);

	mutex_unlock(&um_pci_mtx);

static void um_pci_virtio_remove(struct virtio_device *vdev)
{
	struct um_pci_device *dev = vdev->priv;
	int i;

	/* Stop all virtqueues */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);

	device_set_wakeup_enable(&vdev->dev, false);

	mutex_lock(&um_pci_mtx);
	for (i = 0; i < MAX_DEVICES; i++) {
		if (um_pci_devices[i].dev != dev)
			continue;

		um_pci_devices[i].dev = NULL;
		irq_free_desc(dev->irq);
	}
	mutex_unlock(&um_pci_mtx);

static struct virtio_device_id id_table[] = {
	{ CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static struct virtio_driver um_pci_virtio_driver = {
	.driver.name = "virtio-pci",
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = um_pci_virtio_probe,
	.remove = um_pci_virtio_remove,
};

static struct resource virt_cfgspace_resource = {
	.name = "PCI config space",
	.start = 0xf0000000 - MAX_DEVICES * CFG_SPACE_SIZE,
	.end = 0xf0000000 - 1,
	.flags = IORESOURCE_MEM,
};

static long um_pci_map_cfgspace(unsigned long offset, size_t size,
				const struct logic_iomem_ops **ops,
				void **priv)
{
	if (WARN_ON(size > CFG_SPACE_SIZE || offset % CFG_SPACE_SIZE))

	if (offset / CFG_SPACE_SIZE < MAX_DEVICES) {
		*ops = &um_pci_device_cfgspace_ops;
		*priv = &um_pci_devices[offset / CFG_SPACE_SIZE];
		return 0;
	}

	WARN(1, "cannot map offset 0x%lx/0x%zx\n", offset, size);

static const struct logic_iomem_region_ops um_pci_cfgspace_ops = {
	.map = um_pci_map_cfgspace,
};

static struct resource virt_iomem_resource = {
	.flags = IORESOURCE_MEM,
};

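/*
 * MMIO mappings: when part of the iomem window is ioremap()ed,
 * um_pci_map_iomem() walks the PCI bus and matches the address against
 * every device's BAR resources; the matching BAR's accessor ops and priv
 * cookie are handed back to logic_iomem.
 */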
struct um_pci_map_iomem_data {
	unsigned long offset;
	size_t size;
	const struct logic_iomem_ops **ops;
	void **priv;
	long ret;
};

static int um_pci_map_iomem_walk(struct pci_dev *pdev, void *_data)
{
	struct um_pci_map_iomem_data *data = _data;
	struct um_pci_device_reg *reg = &um_pci_devices[pdev->devfn / 8];
	struct um_pci_device *dev;
	int i;

	dev = reg->dev;

	for (i = 0; i < ARRAY_SIZE(dev->resptr); i++) {
		struct resource *r = &pdev->resource[i];

		if ((r->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM)
			continue;

		/*
		 * must be the whole or part of the resource,
		 * not allowed to only overlap
		 */
		if (data->offset < r->start || data->offset > r->end)
			continue;
		if (data->offset + data->size - 1 > r->end)
			continue;

		dev->resptr[i] = i;
		*data->ops = &um_pci_device_bar_ops;
		*data->priv = &dev->resptr[i];
		data->ret = data->offset - r->start;

		/* no need to continue */
		return 1;
	}

	return 0;
}

static long um_pci_map_iomem(unsigned long offset, size_t size,
			     const struct logic_iomem_ops **ops,
			     void **priv)
{
	struct um_pci_map_iomem_data data = {
		/* we want the full address here */
		.offset = offset + virt_iomem_resource.start,
		.size = size,
		.ops = ops,
		.priv = priv,
	};

	pci_walk_bus(bridge->bus, um_pci_map_iomem_walk, &data);

	return data.ret;
}

static const struct logic_iomem_region_ops um_pci_iomem_ops = {
	.map = um_pci_map_iomem,
};

static void um_pci_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	/*
	 * This is a very low address and not actually valid 'physical' memory
	 * in UML, so we can simply map MSI(-X) vectors to there, it cannot be
	 * legitimately written to by the device in any other way.
	 * We use the (virtual) IRQ number here as the message to simplify the
	 * code that receives the message, where for now we simply trust the
	 * device to send the correct message.
	 */
	msg->address_hi = 0;
	msg->address_lo = 0xa0000;
	msg->data = data->irq;
}

static struct irq_chip um_pci_msi_bottom_irq_chip = {
	.name = "UM virtio MSI",
	.irq_compose_msi_msg = um_pci_compose_msi_msg,
};

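/*
 * MSI handling uses two stacked IRQ domains: the inner domain hands out
 * hwirqs from the um_pci_msi_used bitmap (at most MAX_MSI_VECTORS), and
 * the PCI MSI domain created in um_pci_init() is stacked on top of it.
 */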
static int um_pci_inner_domain_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *args)
{
	unsigned long bit;

	WARN_ON(nr_irqs != 1);

	mutex_lock(&um_pci_mtx);
	bit = find_first_zero_bit(um_pci_msi_used, MAX_MSI_VECTORS);
	if (bit >= MAX_MSI_VECTORS) {
		mutex_unlock(&um_pci_mtx);
		return -ENOSPC;
	}

	set_bit(bit, um_pci_msi_used);
	mutex_unlock(&um_pci_mtx);

	irq_domain_set_info(domain, virq, bit, &um_pci_msi_bottom_irq_chip,
			    domain->host_data, handle_simple_irq,
			    NULL, NULL);

	return 0;
}

static void um_pci_inner_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&um_pci_mtx);

	if (!test_bit(d->hwirq, um_pci_msi_used))
		pr_err("trying to free unused MSI#%lu\n", d->hwirq);
	else
		__clear_bit(d->hwirq, um_pci_msi_used);

	mutex_unlock(&um_pci_mtx);
}

static const struct irq_domain_ops um_pci_inner_domain_ops = {
	.alloc = um_pci_inner_domain_alloc,
	.free = um_pci_inner_domain_free,
};

static struct irq_chip um_pci_msi_irq_chip = {
	.name = "UM virtio PCIe MSI",
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info um_pci_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS |
		 MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_PCI_MSIX,
	.chip = &um_pci_msi_irq_chip,
};

static struct resource busn_resource = {
	.flags = IORESOURCE_BUS,
};

static int um_pci_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct um_pci_device_reg *reg = &um_pci_devices[pdev->devfn / 8];

	if (WARN_ON(!reg->dev))
		return -EINVAL;

	/* Yes, we map all pins to the same IRQ ... doesn't matter for now. */
	return reg->dev->irq;
}

void *pci_root_bus_fwnode(struct pci_bus *bus)
{
	return um_pci_fwnode;
}

static int um_pci_init(void)
{
	int err, i;

	WARN_ON(logic_iomem_add_region(&virt_cfgspace_resource,
				       &um_pci_cfgspace_ops));
	WARN_ON(logic_iomem_add_region(&virt_iomem_resource,
				       &um_pci_iomem_ops));

	if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0,
		 "No virtio device ID configured for PCI - no PCI support\n"))
		return 0;

	um_pci_msg_bufs = alloc_percpu(struct um_pci_message_buffer);
	if (!um_pci_msg_bufs)
		return -ENOMEM;

	bridge = pci_alloc_host_bridge(0);

	um_pci_fwnode = irq_domain_alloc_named_fwnode("um-pci");
	if (!um_pci_fwnode) {

	um_pci_inner_domain = __irq_domain_add(um_pci_fwnode, MAX_MSI_VECTORS,
					       &um_pci_inner_domain_ops, NULL);
	if (!um_pci_inner_domain) {

	um_pci_msi_domain = pci_msi_create_irq_domain(um_pci_fwnode,
						      &um_pci_msi_domain_info,
						      um_pci_inner_domain);
	if (!um_pci_msi_domain) {

	pci_add_resource(&bridge->windows, &virt_iomem_resource);
	pci_add_resource(&bridge->windows, &busn_resource);
	bridge->ops = &um_pci_ops;
	bridge->map_irq = um_pci_map_irq;

	for (i = 0; i < MAX_DEVICES; i++) {
		resource_size_t start;

		start = virt_cfgspace_resource.start + i * CFG_SPACE_SIZE;
		um_pci_devices[i].iomem = ioremap(start, CFG_SPACE_SIZE);
		if (WARN(!um_pci_devices[i].iomem, "failed to map %d\n", i)) {

	err = pci_host_probe(bridge);
	if (err)

	err = register_virtio_driver(&um_pci_virtio_driver);
	if (err)

	if (um_pci_inner_domain)
		irq_domain_remove(um_pci_inner_domain);
	irq_domain_free_fwnode(um_pci_fwnode);
	pci_free_resource_list(&bridge->windows);
	pci_free_host_bridge(bridge);
	free_percpu(um_pci_msg_bufs);

module_init(um_pci_init);

static void um_pci_exit(void)
{
	unregister_virtio_driver(&um_pci_virtio_driver);
	irq_domain_remove(um_pci_msi_domain);
	irq_domain_remove(um_pci_inner_domain);
	pci_free_resource_list(&bridge->windows);
	pci_free_host_bridge(bridge);
	free_percpu(um_pci_msg_bufs);
}
module_exit(um_pci_exit);