1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) Microsoft Corporation.
6 * Jake Oshins <jakeo@microsoft.com>
8 * This driver acts as a paravirtual front-end for PCI Express root buses.
9 * When a PCI Express function (either an entire device or an SR-IOV
10 * Virtual Function) is being passed through to the VM, this driver exposes
11 * a new bus to the guest VM. This is modeled as a root PCI bus because
12 * no bridges are being exposed to the VM. In fact, with a "Generation 2"
13 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
14 * until a device has been exposed using this driver.
16 * Each root PCI bus has its own PCI domain, which is called "Segment" in
17 * the PCI Firmware Specifications. Thus while each device passed through
18 * to the VM using this front-end will appear at "device 0", the domain will
19 * be unique. Typically, each bus will have one PCI function on it, though
20 * this driver does support more than one.
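 *
 * For example (hypothetical numbers), one passed-through Virtual Function
 * might appear in the guest as b1c3:00:00.0 and a second one as
 * 27ae:00:00.0: each gets its own domain (segment) while the bus, device
 * and function numbers stay at or near zero.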
22 * In order to map the interrupts from the device through to the guest VM,
23 * this driver also implements an IRQ Domain, which handles interrupts (either
24 * MSI or MSI-X) associated with the functions on the bus. As interrupts are
25 * set up, torn down, or reaffined, this driver communicates with the
26 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
27 * interrupt will be delivered to the correct virtual processor at the right
28 * vector. This driver does not support level-triggered (line-based)
29 * interrupts, and will report that the Interrupt Line register in the
30 * function's configuration space is zero.
32 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
33 * facilities. For instance, the configuration space of a function exposed
34 * by Hyper-V is mapped into a single page of memory space, and the
35 * read and write handlers for config space must be aware of this mechanism.
36 * Similarly, device setup and teardown involves messages sent to and from
37 * the PCI back-end driver in Hyper-V.
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/pci.h>
43 #include <linux/delay.h>
44 #include <linux/semaphore.h>
45 #include <linux/irqdomain.h>
46 #include <asm/irqdomain.h>
48 #include <linux/irq.h>
49 #include <linux/msi.h>
50 #include <linux/hyperv.h>
51 #include <linux/refcount.h>
52 #include <asm/mshyperv.h>
55 * Protocol versions. The low word is the minor version, the high word the major version.
59 #define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
60 #define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
61 #define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)
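
/*
 * For example, PCI_MAKE_VERSION(1, 3) is 0x00010003, for which
 * PCI_MAJOR_VERSION() returns 1 and PCI_MINOR_VERSION() returns 3.
 */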
63 enum pci_protocol_version_t {
64 PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */
65 PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */
66 PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3), /* Vibranium */
69 #define CPU_AFFINITY_ALL -1ULL
72 * Supported protocol versions in the order of probing - highest go first.
75 static enum pci_protocol_version_t pci_protocol_versions[] = {
76 PCI_PROTOCOL_VERSION_1_3,
77 PCI_PROTOCOL_VERSION_1_2,
78 PCI_PROTOCOL_VERSION_1_1,
81 #define PCI_CONFIG_MMIO_LENGTH 0x2000
82 #define CFG_PAGE_OFFSET 0x1000
83 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
85 #define MAX_SUPPORTED_MSI_MESSAGES 0x400
87 #define STATUS_REVISION_MISMATCH 0xC0000059
89 /* space for 32bit serial number as string */
90 #define SLOT_NAME_SIZE 11
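/* (10 decimal digits for a u32 plus the terminating NUL) */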
96 enum pci_message_type {
100 PCI_MESSAGE_BASE = 0x42490000,
101 PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0,
102 PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1,
103 PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4,
104 PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
105 PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6,
106 PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7,
107 PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8,
108 PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9,
109 PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA,
110 PCI_EJECT = PCI_MESSAGE_BASE + 0xB,
111 PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC,
112 PCI_REENABLE = PCI_MESSAGE_BASE + 0xD,
113 PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE,
114 PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF,
115 PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10,
116 PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11,
117 PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12,
118 PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13,
119 PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14,
120 PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15,
121 PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16,
122 PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17,
123 PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */
124 PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19,
129 * Structures defining the virtual PCI Express protocol.
141 * Function numbers are 8-bits wide on Express, as interpreted through ARI,
142 * which is all this driver does. This representation is the one used in
143 * Windows, which is what is expected when sending this back and forth with
144 * the Hyper-V parent partition.
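 *
 * The slot value packs the device number into the low bits and the
 * function number just above it (5- and 3-bit fields in the Windows
 * encoding), which is what devfn_to_wslot() and wslot_to_devfn() below
 * translate to and from Linux devfn values.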
146 union win_slot_encoding {
156 * Pretty much as defined in the PCI Specifications.
158 struct pci_function_description {
159 u16 v_id; /* vendor ID */
160 u16 d_id; /* device ID */
166 union win_slot_encoding win_slot;
167 u32 ser; /* serial number */
170 enum pci_device_description_flags {
171 HV_PCI_DEVICE_FLAG_NONE = 0x0,
172 HV_PCI_DEVICE_FLAG_NUMA_AFFINITY = 0x1,
175 struct pci_function_description2 {
176 u16 v_id; /* vendor ID */
177 u16 d_id; /* device ID */
183 union win_slot_encoding win_slot;
184 u32 ser; /* serial number */
186 u16 virtual_numa_node;
193 * @delivery_mode: As defined in Intel's Programmer's
194 * Reference Manual, Volume 3, Chapter 8.
195 * @vector_count: Number of contiguous entries in the
196 * Interrupt Descriptor Table that are
197 * occupied by this Message-Signaled
198 * Interrupt. For "MSI", as first defined
199 * in PCI 2.2, this can be between 1 and
200 * 32. For "MSI-X," as first defined in PCI
201 * 3.0, this must be 1, as each MSI-X table
202 * entry would have its own descriptor.
203 * @reserved: Empty space
204 * @cpu_mask: All the target virtual processors.
215 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
217 * @delivery_mode: As defined in Intel's Programmer's
218 * Reference Manual, Volume 3, Chapter 8.
219 * @vector_count: Number of contiguous entries in the
220 * Interrupt Descriptor Table that are
221 * occupied by this Message-Signaled
222 * Interrupt. For "MSI", as first defined
223 * in PCI 2.2, this can be between 1 and
224 * 32. For "MSI-X," as first defined in PCI
225 * 3.0, this must be 1, as each MSI-X table
226 * entry would have its own descriptor.
227 * @processor_count: number of bits enabled in array.
228 * @processor_array: All the target virtual processors.
230 struct hv_msi_desc2 {
235 u16 processor_array[32];
239 * struct tran_int_desc
240 * @reserved: unused, padding
241 * @vector_count: same as in hv_msi_desc
242 * @data: This is the "data payload" value that is
243 * written by the device when it generates
244 * a message-signaled interrupt, either MSI or MSI-X.
246 * @address: This is the address to which the data
247 * payload is written on interrupt generation.
250 struct tran_int_desc {
258 * A generic message format for virtual PCI.
259 * Specific message formats are defined later in the file.
266 struct pci_child_message {
267 struct pci_message message_type;
268 union win_slot_encoding wslot;
271 struct pci_incoming_message {
272 struct vmpacket_descriptor hdr;
273 struct pci_message message_type;
276 struct pci_response {
277 struct vmpacket_descriptor hdr;
278 s32 status; /* negative values are failures */
282 void (*completion_func)(void *context, struct pci_response *resp,
283 int resp_packet_size);
286 struct pci_message message[0];
290 * Specific message types supporting the PCI protocol.
294 * Version negotiation message. Sent from the guest to the host.
295 * The guest is free to try different versions until the host
296 * accepts the version.
298 * pci_version: The protocol version requested.
299 * is_last_attempt: If TRUE, this is the last version guest will request.
300 * reservedz: Reserved field, set to zero.
303 struct pci_version_request {
304 struct pci_message message_type;
305 u32 protocol_version;
309 * Bus D0 Entry. This is sent from the guest to the host when the virtual
310 * bus (PCI Express port) is ready for action.
313 struct pci_bus_d0_entry {
314 struct pci_message message_type;
319 struct pci_bus_relations {
320 struct pci_incoming_message incoming;
322 struct pci_function_description func[0];
325 struct pci_bus_relations2 {
326 struct pci_incoming_message incoming;
328 struct pci_function_description2 func[0];
331 struct pci_q_res_req_response {
332 struct vmpacket_descriptor hdr;
333 s32 status; /* negative values are failures */
334 u32 probed_bar[PCI_STD_NUM_BARS];
337 struct pci_set_power {
338 struct pci_message message_type;
339 union win_slot_encoding wslot;
340 u32 power_state; /* In Windows terms */
344 struct pci_set_power_response {
345 struct vmpacket_descriptor hdr;
346 s32 status; /* negative values are failures */
347 union win_slot_encoding wslot;
348 u32 resultant_state; /* In Windows terms */
352 struct pci_resources_assigned {
353 struct pci_message message_type;
354 union win_slot_encoding wslot;
355 u8 memory_range[0x14][6]; /* not used here */
360 struct pci_resources_assigned2 {
361 struct pci_message message_type;
362 union win_slot_encoding wslot;
363 u8 memory_range[0x14][6]; /* not used here */
364 u32 msi_descriptor_count;
368 struct pci_create_interrupt {
369 struct pci_message message_type;
370 union win_slot_encoding wslot;
371 struct hv_msi_desc int_desc;
374 struct pci_create_int_response {
375 struct pci_response response;
377 struct tran_int_desc int_desc;
380 struct pci_create_interrupt2 {
381 struct pci_message message_type;
382 union win_slot_encoding wslot;
383 struct hv_msi_desc2 int_desc;
386 struct pci_delete_interrupt {
387 struct pci_message message_type;
388 union win_slot_encoding wslot;
389 struct tran_int_desc int_desc;
393 * Note: the VM must pass a valid block id, wslot and bytes_requested.
395 struct pci_read_block {
396 struct pci_message message_type;
398 union win_slot_encoding wslot;
402 struct pci_read_block_response {
403 struct vmpacket_descriptor hdr;
405 u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
409 * Note: the VM must pass a valid block id, wslot and byte_count.
411 struct pci_write_block {
412 struct pci_message message_type;
414 union win_slot_encoding wslot;
416 u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
419 struct pci_dev_inval_block {
420 struct pci_incoming_message incoming;
421 union win_slot_encoding wslot;
425 struct pci_dev_incoming {
426 struct pci_incoming_message incoming;
427 union win_slot_encoding wslot;
430 struct pci_eject_response {
431 struct pci_message message_type;
432 union win_slot_encoding wslot;
436 static int pci_ring_size = (4 * PAGE_SIZE);
439 * Definitions for the interrupt steering hypercall.
441 #define HV_PARTITION_ID_SELF ((u64)-1)
442 #define HVCALL_RETARGET_INTERRUPT 0x7e
444 struct hv_interrupt_entry {
445 u32 source; /* 1 for MSI(-X) */
452 * flags for hv_device_interrupt_target.flags
454 #define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
455 #define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
457 struct hv_device_interrupt_target {
462 struct hv_vpset vp_set;
466 struct retarget_msi_interrupt {
467 u64 partition_id; /* use "self" */
469 struct hv_interrupt_entry int_entry;
471 struct hv_device_interrupt_target int_target;
472 } __packed __aligned(8);
475 * Driver specific state.
478 enum hv_pcibus_state {
487 struct hv_pcibus_device {
488 struct pci_sysdata sysdata;
489 /* Protocol version negotiated with the host */
490 enum pci_protocol_version_t protocol_version;
491 enum hv_pcibus_state state;
492 refcount_t remove_lock;
493 struct hv_device *hdev;
494 resource_size_t low_mmio_space;
495 resource_size_t high_mmio_space;
496 struct resource *mem_config;
497 struct resource *low_mmio_res;
498 struct resource *high_mmio_res;
499 struct completion *survey_event;
500 struct completion remove_event;
501 struct pci_bus *pci_bus;
502 spinlock_t config_lock; /* Avoid two threads writing index page */
503 spinlock_t device_list_lock; /* Protect lists below */
504 void __iomem *cfg_addr;
506 struct list_head resources_for_children;
508 struct list_head children;
509 struct list_head dr_list;
511 struct msi_domain_info msi_info;
512 struct msi_controller msi_chip;
513 struct irq_domain *irq_domain;
515 spinlock_t retarget_msi_interrupt_lock;
517 struct workqueue_struct *wq;
519 /* hypercall arg, must not cross page boundary */
520 struct retarget_msi_interrupt retarget_msi_interrupt_params;
523 * Don't put anything here: retarget_msi_interrupt_params must be last
528 * Tracks "Device Relations" messages from the host, which must be both
529 * processed in order and deferred so that they don't run in the context
530 * of the incoming packet callback.
533 struct work_struct wrk;
534 struct hv_pcibus_device *bus;
537 struct hv_pcidev_description {
538 u16 v_id; /* vendor ID */
539 u16 d_id; /* device ID */
545 union win_slot_encoding win_slot;
546 u32 ser; /* serial number */
548 u16 virtual_numa_node;
552 struct list_head list_entry;
554 struct hv_pcidev_description func[0];
557 enum hv_pcichild_state {
558 hv_pcichild_init = 0,
559 hv_pcichild_requirements,
560 hv_pcichild_resourced,
561 hv_pcichild_ejecting,
566 /* List protected by pci_rescan_remove_lock */
567 struct list_head list_entry;
569 enum hv_pcichild_state state;
570 struct pci_slot *pci_slot;
571 struct hv_pcidev_description desc;
572 bool reported_missing;
573 struct hv_pcibus_device *hbus;
574 struct work_struct wrk;
576 void (*block_invalidate)(void *context, u64 block_mask);
577 void *invalidate_context;
580 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
581 * read it back, for each of the BAR offsets within config space.
583 u32 probed_bar[PCI_STD_NUM_BARS];
586 struct hv_pci_compl {
587 struct completion host_event;
588 s32 completion_status;
591 static void hv_pci_onchannelcallback(void *context);
594 * hv_pci_generic_compl() - Invoked for a completion packet
595 * @context: Set up by the sender of the packet.
596 * @resp: The response packet
597 * @resp_packet_size: Size in bytes of the packet
599 * This function is used to trigger an event and report status
600 * for any message for which the completion packet contains a
601 * status and nothing else.
603 static void hv_pci_generic_compl(void *context, struct pci_response *resp,
604 int resp_packet_size)
606 struct hv_pci_compl *comp_pkt = context;
608 if (resp_packet_size >= offsetofend(struct pci_response, status))
609 comp_pkt->completion_status = resp->status;
611 comp_pkt->completion_status = -1;
613 complete(&comp_pkt->host_event);
616 static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
619 static void get_pcichild(struct hv_pci_dev *hpdev)
621 refcount_inc(&hpdev->refs);
624 static void put_pcichild(struct hv_pci_dev *hpdev)
626 if (refcount_dec_and_test(&hpdev->refs))
630 static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
631 static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
634 * There is no good way to get notified from vmbus_onoffer_rescind(),
635 * so let's use polling here, since this is not a hot path.
637 static int wait_for_response(struct hv_device *hdev,
638 struct completion *comp)
641 if (hdev->channel->rescind) {
642 dev_warn_once(&hdev->device, "The device is gone.\n");
646 if (wait_for_completion_timeout(comp, HZ / 10))
654 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
655 * @devfn: The Linux representation of PCI slot
657 * Windows uses a slightly different representation of PCI slot.
659 * Return: The Windows representation
661 static u32 devfn_to_wslot(int devfn)
663 union win_slot_encoding wslot;
666 wslot.bits.dev = PCI_SLOT(devfn);
667 wslot.bits.func = PCI_FUNC(devfn);
673 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
674 * @wslot: The Windows representation of PCI slot
676 * Windows uses a slightly different representation of PCI slot.
678 * Return: The Linux representation
680 static int wslot_to_devfn(u32 wslot)
682 union win_slot_encoding slot_no;
684 slot_no.slot = wslot;
685 return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
689 * PCI Configuration Space for these root PCI buses is implemented as a pair
690 * of pages in memory-mapped I/O space. Writing to the first page chooses
691 * the PCI function being written or read. Once the first page has been
692 * written to, the following page maps in the entire configuration space of the function.
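 *
 * In sketch form, an access therefore looks like:
 *
 *	writel(win_slot, hbus->cfg_addr);		choose the function
 *	mb();						order the choice first
 *	val = readl(hbus->cfg_addr + CFG_PAGE_OFFSET + where);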
697 * _hv_pcifront_read_config() - Internal PCI config read
698 * @hpdev: The PCI driver's representation of the device
699 * @where: Offset within config space
700 * @size: Size of the transfer
701 * @val: Pointer to the buffer receiving the data
703 static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
707 void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
710 * If the attempt is to read the IDs or the ROM BAR, simulate that.
712 if (where + size <= PCI_COMMAND) {
713 memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
714 } else if (where >= PCI_CLASS_REVISION && where + size <=
715 PCI_CACHE_LINE_SIZE) {
716 memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
717 PCI_CLASS_REVISION, size);
718 } else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
720 memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
721 PCI_SUBSYSTEM_VENDOR_ID, size);
722 } else if (where >= PCI_ROM_ADDRESS && where + size <=
723 PCI_CAPABILITY_LIST) {
724 /* ROM BARs are unimplemented */
726 } else if (where >= PCI_INTERRUPT_LINE && where + size <=
729 * Interrupt Line and Interrupt PIN are hard-wired to zero
730 * because this front-end only supports message-signaled interrupts.
734 } else if (where + size <= CFG_PAGE_SIZE) {
735 spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
736 /* Choose the function to be read. (See comment above) */
737 writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
738 /* Make sure the function was chosen before we start reading. */
740 /* Read from that function's config space. */
753 * Make sure the read was done before we release the spinlock
754 * allowing consecutive reads/writes.
757 spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
759 dev_err(&hpdev->hbus->hdev->device,
760 "Attempt to read beyond a function's config space.\n");
764 static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
768 void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
771 spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
773 /* Choose the function to be read. (See comment above) */
774 writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
775 /* Make sure the function was chosen before we start reading. */
777 /* Read from that function's config space. */
780 * mb() is not required here, because the spin_unlock_irqrestore() is a barrier.
784 spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
790 * _hv_pcifront_write_config() - Internal PCI config write
791 * @hpdev: The PCI driver's representation of the device
792 * @where: Offset within config space
793 * @size: Size of the transfer
794 * @val: The data being transferred
796 static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
800 void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
802 if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
803 where + size <= PCI_CAPABILITY_LIST) {
804 /* SSIDs and ROM BARs are read-only */
805 } else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
806 spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
807 /* Choose the function to be written. (See comment above) */
808 writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
809 /* Make sure the function was chosen before we start writing. */
811 /* Write to that function's config space. */
824 * Make sure the write was done before we release the spinlock
825 * allowing consecutive reads/writes.
828 spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
830 dev_err(&hpdev->hbus->hdev->device,
831 "Attempt to write beyond a function's config space.\n");
836 * hv_pcifront_read_config() - Read configuration space
837 * @bus: PCI Bus structure
838 * @devfn: Device/function
839 * @where: Offset from base
840 * @size: Byte/word/dword
841 * @val: Value to be read
843 * Return: PCIBIOS_SUCCESSFUL on success
844 * PCIBIOS_DEVICE_NOT_FOUND on failure
846 static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
847 int where, int size, u32 *val)
849 struct hv_pcibus_device *hbus =
850 container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
851 struct hv_pci_dev *hpdev;
853 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
855 return PCIBIOS_DEVICE_NOT_FOUND;
857 _hv_pcifront_read_config(hpdev, where, size, val);
860 return PCIBIOS_SUCCESSFUL;
864 * hv_pcifront_write_config() - Write configuration space
865 * @bus: PCI Bus structure
866 * @devfn: Device/function
867 * @where: Offset from base
868 * @size: Byte/word/dword
869 * @val: Value to be written to device
871 * Return: PCIBIOS_SUCCESSFUL on success
872 * PCIBIOS_DEVICE_NOT_FOUND on failure
874 static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
875 int where, int size, u32 val)
877 struct hv_pcibus_device *hbus =
878 container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
879 struct hv_pci_dev *hpdev;
881 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
883 return PCIBIOS_DEVICE_NOT_FOUND;
885 _hv_pcifront_write_config(hpdev, where, size, val);
888 return PCIBIOS_SUCCESSFUL;
891 /* PCIe operations */
892 static struct pci_ops hv_pcifront_ops = {
893 .read = hv_pcifront_read_config,
894 .write = hv_pcifront_write_config,
898 * Paravirtual backchannel
900 * Hyper-V SR-IOV provides a backchannel mechanism in software for
901 * communication between a VF driver and a PF driver. These
902 * "configuration blocks" are similar in concept to PCI configuration space,
903 * but instead of doing reads and writes in 32-bit chunks through a very slow
904 * path, packets of up to 128 bytes can be sent or received asynchronously.
906 * Nearly every SR-IOV device contains just such a communications channel in
907 * hardware, so using this one in software is usually optional. Using the
908 * software channel, however, allows driver implementers to leverage software
909 * tools that fuzz the communications channel looking for vulnerabilities.
911 * The usage model for these packets puts the responsibility for reading or
912 * writing on the VF driver. The VF driver sends a read or a write packet,
913 * indicating which "block" is being referred to by number.
915 * If the PF driver wishes to initiate communication, it can "invalidate" one or
916 * more of the first 64 blocks. This invalidation is delivered via a callback
917 * supplied by the VF driver to this driver.
919 * No protocol is implied, except that supplied by the PF and VF drivers.
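 *
 * Illustrative VF-driver usage (hypothetical block IDs and callback,
 * error handling omitted):
 *
 *	hv_register_block_invalidate(pdev, ctx, my_invalidate_cb);
 *	hv_write_config_block(pdev, req, req_len, REQ_BLOCK_ID);
 *	hv_read_config_block(pdev, resp, sizeof(resp), RESP_BLOCK_ID,
 *			     &bytes_returned);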
922 struct hv_read_config_compl {
923 struct hv_pci_compl comp_pkt;
926 unsigned int bytes_returned;
930 * hv_pci_read_config_compl() - Invoked when a response packet
931 * for a read config block operation arrives.
932 * @context: Identifies the read config operation
933 * @resp: The response packet itself
934 * @resp_packet_size: Size in bytes of the response packet
936 static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
937 int resp_packet_size)
939 struct hv_read_config_compl *comp = context;
940 struct pci_read_block_response *read_resp =
941 (struct pci_read_block_response *)resp;
942 unsigned int data_len, hdr_len;
944 hdr_len = offsetof(struct pci_read_block_response, bytes);
945 if (resp_packet_size < hdr_len) {
946 comp->comp_pkt.completion_status = -1;
950 data_len = resp_packet_size - hdr_len;
951 if (data_len > 0 && read_resp->status == 0) {
952 comp->bytes_returned = min(comp->len, data_len);
953 memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
955 comp->bytes_returned = 0;
958 comp->comp_pkt.completion_status = read_resp->status;
960 complete(&comp->comp_pkt.host_event);
964 * hv_read_config_block() - Sends a read config block request to
965 * the back-end driver running in the Hyper-V parent partition.
966 * @pdev: The PCI driver's representation for this device.
967 * @buf: Buffer into which the config block will be copied.
968 * @len: Size in bytes of buf.
969 * @block_id: Identifies the config block which has been requested.
970 * @bytes_returned: Size which came back from the back-end driver.
972 * Return: 0 on success, -errno on failure
974 int hv_read_config_block(struct pci_dev *pdev, void *buf, unsigned int len,
975 unsigned int block_id, unsigned int *bytes_returned)
977 struct hv_pcibus_device *hbus =
978 container_of(pdev->bus->sysdata, struct hv_pcibus_device,
981 struct pci_packet pkt;
982 char buf[sizeof(struct pci_read_block)];
984 struct hv_read_config_compl comp_pkt;
985 struct pci_read_block *read_blk;
988 if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
991 init_completion(&comp_pkt.comp_pkt.host_event);
995 memset(&pkt, 0, sizeof(pkt));
996 pkt.pkt.completion_func = hv_pci_read_config_compl;
997 pkt.pkt.compl_ctxt = &comp_pkt;
998 read_blk = (struct pci_read_block *)&pkt.pkt.message;
999 read_blk->message_type.type = PCI_READ_BLOCK;
1000 read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
1001 read_blk->block_id = block_id;
1002 read_blk->bytes_requested = len;
1004 ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
1005 sizeof(*read_blk), (unsigned long)&pkt.pkt,
1007 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1011 ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
1015 if (comp_pkt.comp_pkt.completion_status != 0 ||
1016 comp_pkt.bytes_returned == 0) {
1017 dev_err(&hbus->hdev->device,
1018 "Read Config Block failed: 0x%x, bytes_returned=%d\n",
1019 comp_pkt.comp_pkt.completion_status,
1020 comp_pkt.bytes_returned);
1024 *bytes_returned = comp_pkt.bytes_returned;
1029 * hv_pci_write_config_compl() - Invoked when a response packet for a write
1030 * config block operation arrives.
1031 * @context: Identifies the write config operation
1032 * @resp: The response packet itself
1033 * @resp_packet_size: Size in bytes of the response packet
1035 static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
1036 int resp_packet_size)
1038 struct hv_pci_compl *comp_pkt = context;
1040 comp_pkt->completion_status = resp->status;
1041 complete(&comp_pkt->host_event);
1045 * hv_write_config_block() - Sends a write config block request to the
1046 * back-end driver running in the Hyper-V parent partition.
1047 * @pdev: The PCI driver's representation for this device.
1048 * @buf: Buffer from which the config block will be copied.
1049 * @len: Size in bytes of buf.
1050 * @block_id: Identifies the config block which is being written.
1052 * Return: 0 on success, -errno on failure
1054 int hv_write_config_block(struct pci_dev *pdev, void *buf, unsigned int len,
1055 unsigned int block_id)
1057 struct hv_pcibus_device *hbus =
1058 container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1061 struct pci_packet pkt;
1062 char buf[sizeof(struct pci_write_block)];
1065 struct hv_pci_compl comp_pkt;
1066 struct pci_write_block *write_blk;
1070 if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
1073 init_completion(&comp_pkt.host_event);
1075 memset(&pkt, 0, sizeof(pkt));
1076 pkt.pkt.completion_func = hv_pci_write_config_compl;
1077 pkt.pkt.compl_ctxt = &comp_pkt;
1078 write_blk = (struct pci_write_block *)&pkt.pkt.message;
1079 write_blk->message_type.type = PCI_WRITE_BLOCK;
1080 write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
1081 write_blk->block_id = block_id;
1082 write_blk->byte_count = len;
1083 memcpy(write_blk->bytes, buf, len);
1084 pkt_size = offsetof(struct pci_write_block, bytes) + len;
1086 * This quirk is required on some hosts shipped around 2018, because
1087 * these hosts don't check the pkt_size correctly (new hosts have been
1088 * fixed since early 2019). The quirk is also safe on very old hosts
1089 * and new hosts, because, on them, what really matters is the length
1090 * specified in write_blk->byte_count.
1092 pkt_size += sizeof(pkt.reserved);
1094 ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
1095 (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
1096 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1100 ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
1104 if (comp_pkt.completion_status != 0) {
1105 dev_err(&hbus->hdev->device,
1106 "Write Config Block failed: 0x%x\n",
1107 comp_pkt.completion_status);
1115 * hv_register_block_invalidate() - Invoked when a config block invalidation
1116 * arrives from the back-end driver.
1117 * @pdev: The PCI driver's representation for this device.
1118 * @context: Identifies the device.
1119 * @block_invalidate: Identifies all of the blocks being invalidated.
1121 * Return: 0 on success, -errno on failure
1123 int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
1124 void (*block_invalidate)(void *context,
1127 struct hv_pcibus_device *hbus =
1128 container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1130 struct hv_pci_dev *hpdev;
1132 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1136 hpdev->block_invalidate = block_invalidate;
1137 hpdev->invalidate_context = context;
1139 put_pcichild(hpdev);
1144 /* Interrupt management hooks */
1145 static void hv_int_desc_free(struct hv_pci_dev *hpdev,
1146 struct tran_int_desc *int_desc)
1148 struct pci_delete_interrupt *int_pkt;
1150 struct pci_packet pkt;
1151 u8 buffer[sizeof(struct pci_delete_interrupt)];
1154 memset(&ctxt, 0, sizeof(ctxt));
1155 int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
1156 int_pkt->message_type.type =
1157 PCI_DELETE_INTERRUPT_MESSAGE;
1158 int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
1159 int_pkt->int_desc = *int_desc;
1160 vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
1161 (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0);
1166 * hv_msi_free() - Free the MSI.
1167 * @domain: The interrupt domain pointer
1168 * @info: Extra MSI-related context
1169 * @irq: Identifies the IRQ.
1171 * The Hyper-V parent partition and hypervisor are tracking the
1172 * messages that are in use, keeping the interrupt redirection
1173 * table up to date. This callback sends a message that frees
1174 * the IRT entry and related tracking nonsense.
1176 static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
1179 struct hv_pcibus_device *hbus;
1180 struct hv_pci_dev *hpdev;
1181 struct pci_dev *pdev;
1182 struct tran_int_desc *int_desc;
1183 struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
1184 struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
1186 pdev = msi_desc_to_pci_dev(msi);
1188 int_desc = irq_data_get_irq_chip_data(irq_data);
1192 irq_data->chip_data = NULL;
1193 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1199 hv_int_desc_free(hpdev, int_desc);
1200 put_pcichild(hpdev);
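
/*
 * Affinity changes are simply forwarded to the parent (x86 vector) domain
 * here; the hypervisor's interrupt redirection entry is brought up to date
 * later, in hv_irq_unmask(), via HVCALL_RETARGET_INTERRUPT.
 */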
1203 static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
1206 struct irq_data *parent = data->parent_data;
1208 return parent->chip->irq_set_affinity(parent, dest, force);
1211 static void hv_irq_mask(struct irq_data *data)
1213 pci_msi_mask_irq(data);
1217 * hv_irq_unmask() - "Unmask" the IRQ by setting its current affinity.
1219 * @data: Describes the IRQ
1221 * Build a new destination for the MSI and make a hypercall to
1222 * update the Interrupt Redirection Table. "Device Logical ID"
1223 * is built out of this PCI bus's instance GUID and the function
1224 * number of the device.
1226 static void hv_irq_unmask(struct irq_data *data)
1228 struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
1229 struct irq_cfg *cfg = irqd_cfg(data);
1230 struct retarget_msi_interrupt *params;
1231 struct hv_pcibus_device *hbus;
1232 struct cpumask *dest;
1234 struct pci_bus *pbus;
1235 struct pci_dev *pdev;
1236 unsigned long flags;
1241 dest = irq_data_get_effective_affinity_mask(data);
1242 pdev = msi_desc_to_pci_dev(msi_desc);
1244 hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
1246 spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
1248 params = &hbus->retarget_msi_interrupt_params;
1249 memset(params, 0, sizeof(*params));
1250 params->partition_id = HV_PARTITION_ID_SELF;
1251 params->int_entry.source = 1; /* MSI(-X) */
1252 params->int_entry.address = msi_desc->msg.address_lo;
1253 params->int_entry.data = msi_desc->msg.data;
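	/*
	 * The "Device Logical ID" is built from bytes of this bus's VMBus
	 * channel instance GUID plus the function number of the device
	 * (see the hv_irq_unmask() comment above).
	 */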
1254 params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
1255 (hbus->hdev->dev_instance.b[4] << 16) |
1256 (hbus->hdev->dev_instance.b[7] << 8) |
1257 (hbus->hdev->dev_instance.b[6] & 0xf8) |
1258 PCI_FUNC(pdev->devfn);
1259 params->int_target.vector = cfg->vector;
1262 * Honoring apic->irq_delivery_mode set to dest_Fixed by
1263 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
1264 * spurious interrupt storm. Not doing so does not seem to have a
1265 * negative effect (yet?).
1268 if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
1270 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
1271 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
1272 * with >64 VP support.
1273 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
1274 * is not sufficient for this hypercall.
1276 params->int_target.flags |=
1277 HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
1279 if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
1284 cpumask_and(tmp, dest, cpu_online_mask);
1285 nr_bank = cpumask_to_vpset(¶ms->int_target.vp_set, tmp);
1286 free_cpumask_var(tmp);
1294 * var-sized hypercall, var-size starts after vp_mask (thus
1295 * vp_set.format does not count, but vp_set.valid_bank_mask does).
1298 var_size = 1 + nr_bank;
1300 for_each_cpu_and(cpu, dest, cpu_online_mask) {
1301 params->int_target.vp_mask |=
1302 (1ULL << hv_cpu_number_to_vp_number(cpu));
1306 res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
1310 spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
1313 dev_err(&hbus->hdev->device,
1314 "%s() failed: %#llx", __func__, res);
1318 pci_msi_unmask_irq(data);
1321 struct compose_comp_ctxt {
1322 struct hv_pci_compl comp_pkt;
1323 struct tran_int_desc int_desc;
1326 static void hv_pci_compose_compl(void *context, struct pci_response *resp,
1327 int resp_packet_size)
1329 struct compose_comp_ctxt *comp_pkt = context;
1330 struct pci_create_int_response *int_resp =
1331 (struct pci_create_int_response *)resp;
1333 comp_pkt->comp_pkt.completion_status = resp->status;
1334 comp_pkt->int_desc = int_resp->int_desc;
1335 complete(&comp_pkt->comp_pkt.host_event);
1338 static u32 hv_compose_msi_req_v1(
1339 struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
1340 u32 slot, u8 vector)
1342 int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
1343 int_pkt->wslot.slot = slot;
1344 int_pkt->int_desc.vector = vector;
1345 int_pkt->int_desc.vector_count = 1;
1346 int_pkt->int_desc.delivery_mode = dest_Fixed;
1349 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in hv_irq_unmask().
1352 int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
1354 return sizeof(*int_pkt);
1357 static u32 hv_compose_msi_req_v2(
1358 struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
1359 u32 slot, u8 vector)
1363 int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
1364 int_pkt->wslot.slot = slot;
1365 int_pkt->int_desc.vector = vector;
1366 int_pkt->int_desc.vector_count = 1;
1367 int_pkt->int_desc.delivery_mode = dest_Fixed;
1370 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
1371 * by subsequent retarget in hv_irq_unmask().
1373 cpu = cpumask_first_and(affinity, cpu_online_mask);
1374 int_pkt->int_desc.processor_array[0] =
1375 hv_cpu_number_to_vp_number(cpu);
1376 int_pkt->int_desc.processor_count = 1;
1378 return sizeof(*int_pkt);
1382 * hv_compose_msi_msg() - Supplies a valid MSI address/data
1383 * @data: Everything about this MSI
1384 * @msg: Buffer that is filled in by this function
1386 * This function unpacks the IRQ looking for target CPU set, IDT
1387 * vector and mode and sends a message to the parent partition
1388 * asking for a mapping for that tuple in this partition. The
1389 * response supplies a data value and address to which that data
1390 * should be written to trigger that interrupt.
1392 static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1394 struct irq_cfg *cfg = irqd_cfg(data);
1395 struct hv_pcibus_device *hbus;
1396 struct hv_pci_dev *hpdev;
1397 struct pci_bus *pbus;
1398 struct pci_dev *pdev;
1399 struct cpumask *dest;
1400 unsigned long flags;
1401 struct compose_comp_ctxt comp;
1402 struct tran_int_desc *int_desc;
1404 struct pci_packet pci_pkt;
1406 struct pci_create_interrupt v1;
1407 struct pci_create_interrupt2 v2;
1414 pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
1415 dest = irq_data_get_effective_affinity_mask(data);
1417 hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
1418 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1420 goto return_null_message;
1422 /* Free any previous message that might have already been composed. */
1423 if (data->chip_data) {
1424 int_desc = data->chip_data;
1425 data->chip_data = NULL;
1426 hv_int_desc_free(hpdev, int_desc);
1429 int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
1431 goto drop_reference;
1433 memset(&ctxt, 0, sizeof(ctxt));
1434 init_completion(&comp.comp_pkt.host_event);
1435 ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
1436 ctxt.pci_pkt.compl_ctxt = ∁
1438 switch (hbus->protocol_version) {
1439 case PCI_PROTOCOL_VERSION_1_1:
1440 size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
1442 hpdev->desc.win_slot.slot,
1446 case PCI_PROTOCOL_VERSION_1_2:
1447 case PCI_PROTOCOL_VERSION_1_3:
1448 size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
1450 hpdev->desc.win_slot.slot,
1455 /* As we only negotiate protocol versions known to this driver,
1456 * this path should never hit. However, this is not a hot
1457 * path so we print a message to aid future updates.
1459 dev_err(&hbus->hdev->device,
1460 "Unexpected vPCI protocol, update driver.");
1464 ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
1465 size, (unsigned long)&ctxt.pci_pkt,
1467 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1469 dev_err(&hbus->hdev->device,
1470 "Sending request for interrupt failed: 0x%x",
1471 comp.comp_pkt.completion_status);
1476 * Since this function is called with IRQ locks held, can't
1477 * do normal wait for completion; instead poll.
1479 while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
1480 /* 0xFFFF means an invalid PCI VENDOR ID. */
1481 if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
1482 dev_err_once(&hbus->hdev->device,
1483 "the device has gone\n");
1488 * When the higher level interrupt code calls us with
1489 * interrupt disabled, we must poll the channel by calling
1490 * the channel callback directly when channel->target_cpu is
1491 * the current CPU. When the higher level interrupt code
1492 * calls us with interrupt enabled, let's add the
1493 * local_irq_save()/restore() to avoid race:
1494 * hv_pci_onchannelcallback() can also run in tasklet.
1496 local_irq_save(flags);
1498 if (hbus->hdev->channel->target_cpu == smp_processor_id())
1499 hv_pci_onchannelcallback(hbus);
1501 local_irq_restore(flags);
1503 if (hpdev->state == hv_pcichild_ejecting) {
1504 dev_err_once(&hbus->hdev->device,
1505 "the device is being ejected\n");
1512 if (comp.comp_pkt.completion_status < 0) {
1513 dev_err(&hbus->hdev->device,
1514 "Request for interrupt failed: 0x%x",
1515 comp.comp_pkt.completion_status);
1520 * Record the assignment so that this can be unwound later. Using
1521 * irq_set_chip_data() here would be appropriate, but the lock it takes is already held.
1524 *int_desc = comp.int_desc;
1525 data->chip_data = int_desc;
1527 /* Pass up the result. */
1528 msg->address_hi = comp.int_desc.address >> 32;
1529 msg->address_lo = comp.int_desc.address & 0xffffffff;
1530 msg->data = comp.int_desc.data;
1532 put_pcichild(hpdev);
1538 put_pcichild(hpdev);
1539 return_null_message:
1540 msg->address_hi = 0;
1541 msg->address_lo = 0;
1545 /* HW Interrupt Chip Descriptor */
1546 static struct irq_chip hv_msi_irq_chip = {
1547 .name = "Hyper-V PCIe MSI",
1548 .irq_compose_msi_msg = hv_compose_msi_msg,
1549 .irq_set_affinity = hv_set_affinity,
1550 .irq_ack = irq_chip_ack_parent,
1551 .irq_mask = hv_irq_mask,
1552 .irq_unmask = hv_irq_unmask,
1555 static irq_hw_number_t hv_msi_domain_ops_get_hwirq(struct msi_domain_info *info,
1556 msi_alloc_info_t *arg)
1558 return arg->msi_hwirq;
1561 static struct msi_domain_ops hv_msi_ops = {
1562 .get_hwirq = hv_msi_domain_ops_get_hwirq,
1563 .msi_prepare = pci_msi_prepare,
1564 .set_desc = pci_msi_set_desc,
1565 .msi_free = hv_msi_free,
1569 * hv_pcie_init_irq_domain() - Initialize IRQ domain
1570 * @hbus: The root PCI bus
1572 * This function creates an IRQ domain which will be used for
1573 * interrupts from devices that have been passed through. These
1574 * devices only support MSI and MSI-X, not line-based interrupts
1575 * or simulations of line-based interrupts through PCIe's
1576 * fabric-layer messages. Because interrupts are remapped, we
1577 * can support multi-message MSI here.
1579 * Return: '0' on success and error value on failure
1581 static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
1583 hbus->msi_info.chip = &hv_msi_irq_chip;
1584 hbus->msi_info.ops = &hv_msi_ops;
1585 hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
1586 MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
1588 hbus->msi_info.handler = handle_edge_irq;
1589 hbus->msi_info.handler_name = "edge";
1590 hbus->msi_info.data = hbus;
1591 hbus->irq_domain = pci_msi_create_irq_domain(hbus->sysdata.fwnode,
1594 if (!hbus->irq_domain) {
1595 dev_err(&hbus->hdev->device,
1596 "Failed to build an MSI IRQ domain\n");
1604 * get_bar_size() - Get the address space consumed by a BAR
1605 * @bar_val: Value that a BAR returned after -1 was written
1608 * This function returns the size of the BAR, rounded up to 1
1609 * page. It has to be rounded up because the hypervisor's page
1610 * table entry that maps the BAR into the VM can't specify an
1611 * offset within a page. The invariant is that the hypervisor
1612 * must place any BARs of smaller than page length at the
1613 * beginning of a page.
1615 * Return: Size in bytes of the consumed MMIO space.
1617 static u64 get_bar_size(u64 bar_val)
1619 return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
1624 * survey_child_resources() - Total all MMIO requirements
1625 * @hbus: Root PCI bus, as understood by this driver
1627 static void survey_child_resources(struct hv_pcibus_device *hbus)
1629 struct hv_pci_dev *hpdev;
1630 resource_size_t bar_size = 0;
1631 unsigned long flags;
1632 struct completion *event;
1636 /* If nobody is waiting on the answer, don't compute it. */
1637 event = xchg(&hbus->survey_event, NULL);
1641 /* If the answer has already been computed, go with it. */
1642 if (hbus->low_mmio_space || hbus->high_mmio_space) {
1647 spin_lock_irqsave(&hbus->device_list_lock, flags);
1650 * Due to an interesting quirk of the PCI spec, all memory regions
1651 * for a child device are a power of 2 in size and aligned in memory,
1652 * so it's sufficient to just add them up without tracking alignment.
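 * (For example, children with 4 KiB, 16 KiB and 64 KiB BARs simply
 * contribute 84 KiB to the total.)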
1654 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1655 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1656 if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
1657 dev_err(&hbus->hdev->device,
1658 "There's an I/O BAR in this list!\n");
1660 if (hpdev->probed_bar[i] != 0) {
1662 * A probed BAR has all the upper bits set that can be changed.
1666 bar_val = hpdev->probed_bar[i];
1667 if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
1669 ((u64)hpdev->probed_bar[++i] << 32);
1671 bar_val |= 0xffffffff00000000ULL;
1673 bar_size = get_bar_size(bar_val);
1675 if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
1676 hbus->high_mmio_space += bar_size;
1678 hbus->low_mmio_space += bar_size;
1683 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1688 * prepopulate_bars() - Fill in BARs with defaults
1689 * @hbus: Root PCI bus, as understood by this driver
1691 * The core PCI driver code seems much, much happier if the BARs
1692 * for a device have values upon first scan. So fill them in.
1693 * The algorithm below works down from large sizes to small,
1694 * attempting to pack the assignments optimally. The assumption,
1695 * enforced in other parts of the code, is that the beginning of
1696 * the memory-mapped I/O space will be aligned on the largest BAR size.
1699 static void prepopulate_bars(struct hv_pcibus_device *hbus)
1701 resource_size_t high_size = 0;
1702 resource_size_t low_size = 0;
1703 resource_size_t high_base = 0;
1704 resource_size_t low_base = 0;
1705 resource_size_t bar_size;
1706 struct hv_pci_dev *hpdev;
1707 unsigned long flags;
1713 if (hbus->low_mmio_space) {
1714 low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
1715 low_base = hbus->low_mmio_res->start;
1718 if (hbus->high_mmio_space) {
1720 (63 - __builtin_clzll(hbus->high_mmio_space));
1721 high_base = hbus->high_mmio_res->start;
1724 spin_lock_irqsave(&hbus->device_list_lock, flags);
1727 * Clear the memory enable bit, in case it's already set. This occurs
1728 * in the suspend path of hibernation, where the device is suspended,
1729 * resumed and suspended again: see hibernation_snapshot() and
1730 * hibernation_platform_enter().
1732 * If the memory enable bit is already set, Hyper-V silently ignores
1733 * the below BAR updates, and the related PCI device driver can not
1734 * work, because reading from the device register(s) always returns 0xFFFFFFFF.
1737 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1738 _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
1739 command &= ~PCI_COMMAND_MEMORY;
1740 _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
1743 /* Pick addresses for the BARs. */
1745 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1746 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1747 bar_val = hpdev->probed_bar[i];
1750 high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
1753 ((u64)hpdev->probed_bar[i + 1]
1756 bar_val |= 0xffffffffULL << 32;
1758 bar_size = get_bar_size(bar_val);
1760 if (high_size != bar_size) {
1764 _hv_pcifront_write_config(hpdev,
1765 PCI_BASE_ADDRESS_0 + (4 * i),
1767 (u32)(high_base & 0xffffff00));
1769 _hv_pcifront_write_config(hpdev,
1770 PCI_BASE_ADDRESS_0 + (4 * i),
1771 4, (u32)(high_base >> 32));
1772 high_base += bar_size;
1774 if (low_size != bar_size)
1776 _hv_pcifront_write_config(hpdev,
1777 PCI_BASE_ADDRESS_0 + (4 * i),
1779 (u32)(low_base & 0xffffff00));
1780 low_base += bar_size;
1783 if (high_size <= 1 && low_size <= 1) {
1784 /* Set the memory enable bit. */
1785 _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2,
1787 command |= PCI_COMMAND_MEMORY;
1788 _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2,
1796 } while (high_size || low_size);
1798 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1802 * Assign entries in sysfs pci slot directory.
1804 * Note that this function does not need to lock the children list
1805 * because it is called from pci_devices_present_work which
1806 * is serialized with hv_eject_device_work because they are on the
1807 * same ordered workqueue. Therefore hbus->children list will not change
1808 * even when pci_create_slot sleeps.
1810 static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
1812 struct hv_pci_dev *hpdev;
1813 char name[SLOT_NAME_SIZE];
1816 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1817 if (hpdev->pci_slot)
1820 slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
1821 snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
1822 hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
1824 if (IS_ERR(hpdev->pci_slot)) {
1825 pr_warn("pci_create_slot %s failed\n", name);
1826 hpdev->pci_slot = NULL;
1832 * Remove entries in sysfs pci slot directory.
1834 static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
1836 struct hv_pci_dev *hpdev;
1838 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1839 if (!hpdev->pci_slot)
1841 pci_destroy_slot(hpdev->pci_slot);
1842 hpdev->pci_slot = NULL;
1847 * Set NUMA node for the devices on the bus
1849 static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
1851 struct pci_dev *dev;
1852 struct pci_bus *bus = hbus->pci_bus;
1853 struct hv_pci_dev *hv_dev;
1855 list_for_each_entry(dev, &bus->devices, bus_list) {
1856 hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
1860 if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
1861 set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
1863 put_pcichild(hv_dev);
1868 * create_root_hv_pci_bus() - Expose a new root PCI bus
1869 * @hbus: Root PCI bus, as understood by this driver
1871 * Return: 0 on success, -errno on failure
1873 static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
1875 /* Register the device */
1876 hbus->pci_bus = pci_create_root_bus(&hbus->hdev->device,
1877 0, /* bus number is always zero */
1880 &hbus->resources_for_children);
1884 hbus->pci_bus->msi = &hbus->msi_chip;
1885 hbus->pci_bus->msi->dev = &hbus->hdev->device;
1887 pci_lock_rescan_remove();
1888 pci_scan_child_bus(hbus->pci_bus);
1889 hv_pci_assign_numa_node(hbus);
1890 pci_bus_assign_resources(hbus->pci_bus);
1891 hv_pci_assign_slots(hbus);
1892 pci_bus_add_devices(hbus->pci_bus);
1893 pci_unlock_rescan_remove();
1894 hbus->state = hv_pcibus_installed;
1898 struct q_res_req_compl {
1899 struct completion host_event;
1900 struct hv_pci_dev *hpdev;
1904 * q_resource_requirements() - Query Resource Requirements
1905 * @context: The completion context.
1906 * @resp: The response that came from the host.
1907 * @resp_packet_size: The size in bytes of resp.
1909 * This function is invoked on completion of a Query Resource
1910 * Requirements packet.
1912 static void q_resource_requirements(void *context, struct pci_response *resp,
1913 int resp_packet_size)
1915 struct q_res_req_compl *completion = context;
1916 struct pci_q_res_req_response *q_res_req =
1917 (struct pci_q_res_req_response *)resp;
1920 if (resp->status < 0) {
1921 dev_err(&completion->hpdev->hbus->hdev->device,
1922 "query resource requirements failed: %x\n",
1925 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1926 completion->hpdev->probed_bar[i] =
1927 q_res_req->probed_bar[i];
1931 complete(&completion->host_event);
1935 * new_pcichild_device() - Create a new child device
1936 * @hbus: The internal struct tracking this root PCI bus.
1937 * @desc: The information supplied so far from the host
1940 * This function creates the tracking structure for a new child
1941 * device and kicks off the process of figuring out what it is.
1943 * Return: Pointer to the new tracking struct
1945 static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
1946 struct hv_pcidev_description *desc)
1948 struct hv_pci_dev *hpdev;
1949 struct pci_child_message *res_req;
1950 struct q_res_req_compl comp_pkt;
1952 struct pci_packet init_packet;
1953 u8 buffer[sizeof(struct pci_child_message)];
1955 unsigned long flags;
1958 hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
1964 memset(&pkt, 0, sizeof(pkt));
1965 init_completion(&comp_pkt.host_event);
1966 comp_pkt.hpdev = hpdev;
1967 pkt.init_packet.compl_ctxt = &comp_pkt;
1968 pkt.init_packet.completion_func = q_resource_requirements;
1969 res_req = (struct pci_child_message *)&pkt.init_packet.message;
1970 res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
1971 res_req->wslot.slot = desc->win_slot.slot;
1973 ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
1974 sizeof(struct pci_child_message),
1975 (unsigned long)&pkt.init_packet,
1977 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1981 if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
1984 hpdev->desc = *desc;
1985 refcount_set(&hpdev->refs, 1);
1986 get_pcichild(hpdev);
1987 spin_lock_irqsave(&hbus->device_list_lock, flags);
1989 list_add_tail(&hpdev->list_entry, &hbus->children);
1990 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1999 * get_pcichild_wslot() - Find device from slot
2000 * @hbus: Root PCI bus, as understood by this driver
2001 * @wslot: Location on the bus
2003 * This function looks up a PCI device and returns the internal
2004 * representation of it. It acquires a reference on it, so that
2005 * the device won't be deleted while somebody is using it. The
2006 * caller is responsible for calling put_pcichild() to release this reference.
2009 * Return: Internal representation of a PCI device
2011 static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
2014 unsigned long flags;
2015 struct hv_pci_dev *iter, *hpdev = NULL;
2017 spin_lock_irqsave(&hbus->device_list_lock, flags);
2018 list_for_each_entry(iter, &hbus->children, list_entry) {
2019 if (iter->desc.win_slot.slot == wslot) {
2021 get_pcichild(hpdev);
2025 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2031 * pci_devices_present_work() - Handle new list of child devices
2032 * @work: Work struct embedded in struct hv_dr_work
2034 * "Bus Relations" is the Windows term for "children of this
2035 * bus." The terminology is preserved here for people trying to
2036 * debug the interaction between Hyper-V and Linux. This
2037 * function is called when the parent partition reports a list
2038 * of functions that should be observed under this PCI Express
2041 * This function updates the list, and must tolerate being
2042 * called multiple times with the same information. The typical
2043 * number of child devices is one, with very atypical cases
2044 * involving three or four, so the algorithms used here can be
2045 * simple and inefficient.
2047 * It must also treat the omission of a previously observed device as
2048 * notification that the device no longer exists.
2050 * Note that this function is serialized with hv_eject_device_work(),
2051 * because both are pushed to the ordered workqueue hbus->wq.
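 *
 * In outline: every known child is first marked "reported missing",
 * children named in the new relations list are either un-marked or
 * created, and anything still marked missing afterwards is removed.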
2053 static void pci_devices_present_work(struct work_struct *work)
2057 struct hv_pcidev_description *new_desc;
2058 struct hv_pci_dev *hpdev;
2059 struct hv_pcibus_device *hbus;
2060 struct list_head removed;
2061 struct hv_dr_work *dr_wrk;
2062 struct hv_dr_state *dr = NULL;
2063 unsigned long flags;
2065 dr_wrk = container_of(work, struct hv_dr_work, wrk);
2069 INIT_LIST_HEAD(&removed);
2071 /* Pull this off the queue and process it if it was the last one. */
2072 spin_lock_irqsave(&hbus->device_list_lock, flags);
2073 while (!list_empty(&hbus->dr_list)) {
2074 dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
2076 list_del(&dr->list_entry);
2078 /* Throw this away if the list still has stuff in it. */
2079 if (!list_empty(&hbus->dr_list)) {
2084 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2091 /* First, mark all existing children as reported missing. */
2092 spin_lock_irqsave(&hbus->device_list_lock, flags);
2093 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2094 hpdev->reported_missing = true;
2096 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2098 /* Next, add back any reported devices. */
2099 for (child_no = 0; child_no < dr->device_count; child_no++) {
2101 new_desc = &dr->func[child_no];
2103 spin_lock_irqsave(&hbus->device_list_lock, flags);
2104 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2105 if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
2106 (hpdev->desc.v_id == new_desc->v_id) &&
2107 (hpdev->desc.d_id == new_desc->d_id) &&
2108 (hpdev->desc.ser == new_desc->ser)) {
2109 hpdev->reported_missing = false;
2113 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2116 hpdev = new_pcichild_device(hbus, new_desc);
2118 dev_err(&hbus->hdev->device,
2119 "couldn't record a child device.\n");
2123 /* Move missing children to a list on the stack. */
2124 spin_lock_irqsave(&hbus->device_list_lock, flags);
2127 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2128 if (hpdev->reported_missing) {
2130 put_pcichild(hpdev);
2131 list_move_tail(&hpdev->list_entry, &removed);
2136 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2138 /* Delete everything that should no longer exist. */
2139 while (!list_empty(&removed)) {
2140 hpdev = list_first_entry(&removed, struct hv_pci_dev,
2142 list_del(&hpdev->list_entry);
2144 if (hpdev->pci_slot)
2145 pci_destroy_slot(hpdev->pci_slot);
2147 put_pcichild(hpdev);
2150 switch (hbus->state) {
2151 case hv_pcibus_installed:
2153 * Tell the core to rescan the bus
2154 * because there may have been changes.
2156 pci_lock_rescan_remove();
2157 pci_scan_child_bus(hbus->pci_bus);
2158 hv_pci_assign_numa_node(hbus);
2159 hv_pci_assign_slots(hbus);
2160 pci_unlock_rescan_remove();
2163 case hv_pcibus_init:
2164 case hv_pcibus_probed:
2165 survey_child_resources(hbus);
2177 * hv_pci_start_relations_work() - Queue work to start device discovery
2178 * @hbus: Root PCI bus, as understood by this driver
2179 * @dr: The list of children returned from host
2181 * Return: 0 on success, -errno on failure
2183 static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
2184 struct hv_dr_state *dr)
2186 struct hv_dr_work *dr_wrk;
2187 unsigned long flags;
2190 if (hbus->state == hv_pcibus_removing) {
2191 dev_info(&hbus->hdev->device,
2192 "PCI VMBus BUS_RELATIONS: ignored\n");
2196 dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
2200 INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
2203 spin_lock_irqsave(&hbus->device_list_lock, flags);
2205 * If pending_dr is true, we have already queued a work,
2206 * which will see the new dr. Otherwise, we need to queue a new work item.
2209 pending_dr = !list_empty(&hbus->dr_list);
2210 list_add_tail(&dr->list_entry, &hbus->dr_list);
2211 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2217 queue_work(hbus->wq, &dr_wrk->wrk);
2224 * hv_pci_devices_present() - Handle list of new children
2225 * @hbus: Root PCI bus, as understood by this driver
2226 * @relations: Packet from host listing children
2228 * Process a new list of devices on the bus. The list of devices is
2229 * discovered by the VSP and sent to us via the VSP message PCI_BUS_RELATIONS
2230 * whenever a new list of devices for this bus appears.
2232 static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
2233 struct pci_bus_relations *relations)
2235 struct hv_dr_state *dr;
2238 dr = kzalloc(offsetof(struct hv_dr_state, func) +
2239 (sizeof(struct hv_pcidev_description) *
2240 (relations->device_count)), GFP_NOWAIT);
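/*
 * Note: an equivalent, overflow-safe form of the allocation above would be
 * (a sketch, assuming the target kernel provides struct_size() from
 * <linux/overflow.h>):
 *
 *	dr = kzalloc(struct_size(dr, func, relations->device_count),
 *		     GFP_NOWAIT);
 */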
2245 dr->device_count = relations->device_count;
2246 for (i = 0; i < dr->device_count; i++) {
2247 dr->func[i].v_id = relations->func[i].v_id;
2248 dr->func[i].d_id = relations->func[i].d_id;
2249 dr->func[i].rev = relations->func[i].rev;
2250 dr->func[i].prog_intf = relations->func[i].prog_intf;
2251 dr->func[i].subclass = relations->func[i].subclass;
2252 dr->func[i].base_class = relations->func[i].base_class;
2253 dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2254 dr->func[i].win_slot = relations->func[i].win_slot;
2255 dr->func[i].ser = relations->func[i].ser;
2258 if (hv_pci_start_relations_work(hbus, dr))
2263 * hv_pci_devices_present2() - Handle list of new children
2264 * @hbus: Root PCI bus, as understood by this driver
2265 * @relations: Packet from host listing children
2267 * This function is the v2 version of hv_pci_devices_present()
2269 static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
2270 struct pci_bus_relations2 *relations)
2272 struct hv_dr_state *dr;
2275 dr = kzalloc(offsetof(struct hv_dr_state, func) +
2276 (sizeof(struct hv_pcidev_description) *
2277 (relations->device_count)), GFP_NOWAIT);
2282 dr->device_count = relations->device_count;
2283 for (i = 0; i < dr->device_count; i++) {
2284 dr->func[i].v_id = relations->func[i].v_id;
2285 dr->func[i].d_id = relations->func[i].d_id;
2286 dr->func[i].rev = relations->func[i].rev;
2287 dr->func[i].prog_intf = relations->func[i].prog_intf;
2288 dr->func[i].subclass = relations->func[i].subclass;
2289 dr->func[i].base_class = relations->func[i].base_class;
2290 dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2291 dr->func[i].win_slot = relations->func[i].win_slot;
2292 dr->func[i].ser = relations->func[i].ser;
2293 dr->func[i].flags = relations->func[i].flags;
2294 dr->func[i].virtual_numa_node =
2295 relations->func[i].virtual_numa_node;
2298 if (hv_pci_start_relations_work(hbus, dr))
2303 * hv_eject_device_work() - Asynchronously handles ejection
2304 * @work: Work struct embedded in internal device struct
2306 * This function handles ejecting a device. Windows will
2307 * attempt to gracefully eject a device, waiting 60 seconds to
2308 * hear back from the guest OS that this completed successfully.
2309 * If this timer expires, the device will be forcibly removed.
2311 static void hv_eject_device_work(struct work_struct *work)
2313 struct pci_eject_response *ejct_pkt;
2314 struct hv_pcibus_device *hbus;
2315 struct hv_pci_dev *hpdev;
2316 struct pci_dev *pdev;
2317 unsigned long flags;
2320 struct pci_packet pkt;
2321 u8 buffer[sizeof(struct pci_eject_response)];
2324 hpdev = container_of(work, struct hv_pci_dev, wrk);
2327 WARN_ON(hpdev->state != hv_pcichild_ejecting);
2330 * Ejection can come before or after the PCI bus has been set up, so
2331 * attempt to find it and tear down the bus state, if it exists. This
2332 * must be done without constructs like pci_domain_nr(hbus->pci_bus)
2333 * because hbus->pci_bus may not exist yet.
2335 wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
2336 pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
2338 pci_lock_rescan_remove();
2339 pci_stop_and_remove_bus_device(pdev);
2341 pci_unlock_rescan_remove();
2344 spin_lock_irqsave(&hbus->device_list_lock, flags);
2345 list_del(&hpdev->list_entry);
2346 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2348 if (hpdev->pci_slot)
2349 pci_destroy_slot(hpdev->pci_slot);
2351 memset(&ctxt, 0, sizeof(ctxt));
2352 ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
2353 ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
2354 ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
2355 vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
2356 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
2357 VM_PKT_DATA_INBAND, 0);
2359 /* For the get_pcichild() in hv_pci_eject_device() */
2360 put_pcichild(hpdev);
2361 /* For the two refs taken in new_pcichild_device() */
2362 put_pcichild(hpdev);
2363 put_pcichild(hpdev);
2364 /* hpdev has been freed. Do not use it any more. */
2370 * hv_pci_eject_device() - Handles device ejection
2371 * @hpdev: Internal device tracking struct
2373 * This function is invoked when an ejection packet arrives. It
2374 * just schedules work so that we don't re-enter the packet
2375 * delivery code handling the ejection.
2377 static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
2379 struct hv_pcibus_device *hbus = hpdev->hbus;
2380 struct hv_device *hdev = hbus->hdev;
2382 if (hbus->state == hv_pcibus_removing) {
2383 dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
2387 hpdev->state = hv_pcichild_ejecting;
2388 get_pcichild(hpdev);
2389 INIT_WORK(&hpdev->wrk, hv_eject_device_work);
2391 queue_work(hbus->wq, &hpdev->wrk);
2395 * hv_pci_onchannelcallback() - Handles incoming packets
2396 * @context: Internal bus tracking struct
2398 * This function is invoked whenever the host sends a packet to
2399 * this channel (which is private to this root PCI bus).
2401 static void hv_pci_onchannelcallback(void *context)
2403 const int packet_size = 0x100;
2405 struct hv_pcibus_device *hbus = context;
2408 struct vmpacket_descriptor *desc;
2409 unsigned char *buffer;
2410 int bufferlen = packet_size;
2411 struct pci_packet *comp_packet;
2412 struct pci_response *response;
2413 struct pci_incoming_message *new_message;
2414 struct pci_bus_relations *bus_rel;
2415 struct pci_bus_relations2 *bus_rel2;
2416 struct pci_dev_inval_block *inval;
2417 struct pci_dev_incoming *dev_message;
2418 struct hv_pci_dev *hpdev;
2420 buffer = kmalloc(bufferlen, GFP_ATOMIC);
2425 ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer,
2426 bufferlen, &bytes_recvd, &req_id);
2428 if (ret == -ENOBUFS) {
2430 /* Handle large packet */
2431 bufferlen = bytes_recvd;
2432 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
2438 /* Zero length indicates there are no more packets. */
2439 if (ret || !bytes_recvd)
2443 * All incoming packets must be at least as large as a response.
2446 if (bytes_recvd <= sizeof(struct pci_response))
2448 desc = (struct vmpacket_descriptor *)buffer;
2450 switch (desc->type) {
2454 * The host is trusted, and thus it's safe to interpret
2455 * this transaction ID as a pointer.
2457 comp_packet = (struct pci_packet *)req_id;
2458 response = (struct pci_response *)buffer;
2459 comp_packet->completion_func(comp_packet->compl_ctxt,
2464 case VM_PKT_DATA_INBAND:
2466 new_message = (struct pci_incoming_message *)buffer;
2467 switch (new_message->message_type.type) {
2468 case PCI_BUS_RELATIONS:
2470 bus_rel = (struct pci_bus_relations *)buffer;
2472 offsetof(struct pci_bus_relations, func) +
2473 (sizeof(struct pci_function_description) *
2474 (bus_rel->device_count))) {
2475 dev_err(&hbus->hdev->device,
2476 "bus relations too small\n");
2480 hv_pci_devices_present(hbus, bus_rel);
2483 case PCI_BUS_RELATIONS2:
2485 bus_rel2 = (struct pci_bus_relations2 *)buffer;
2487 offsetof(struct pci_bus_relations2, func) +
2488 (sizeof(struct pci_function_description2) *
2489 (bus_rel2->device_count))) {
2490 dev_err(&hbus->hdev->device,
2491 "bus relations v2 too small\n");
2495 hv_pci_devices_present2(hbus, bus_rel2);
2500 dev_message = (struct pci_dev_incoming *)buffer;
2501 hpdev = get_pcichild_wslot(hbus,
2502 dev_message->wslot.slot);
2504 hv_pci_eject_device(hpdev);
2505 put_pcichild(hpdev);
2509 case PCI_INVALIDATE_BLOCK:
2511 inval = (struct pci_dev_inval_block *)buffer;
2512 hpdev = get_pcichild_wslot(hbus,
2515 if (hpdev->block_invalidate) {
2516 hpdev->block_invalidate(
2517 hpdev->invalidate_context,
2520 put_pcichild(hpdev);
2525 dev_warn(&hbus->hdev->device,
2526 "Unimplemented protocol message %x\n",
2527 new_message->message_type.type);
2533 dev_err(&hbus->hdev->device,
2534 "unhandled packet type %d, tid %llx len %d\n",
2535 desc->type, req_id, bytes_recvd);
2544 * hv_pci_protocol_negotiation() - Set up protocol
2545 * @hdev: VMBus's tracking struct for this root PCI bus
2547 * This driver is intended to support running on Windows 10
2548 * (server) and later versions. It will not run on earlier
2549 * versions, as they assume that many of the operations which
2550 * Linux needs accomplished with a spinlock held were done via
2551 * asynchronous messaging over VMBus. Windows 10 increases the
2552 * surface area of PCI emulation so that these actions can take
2553 * place by suspending a virtual processor for their duration.
2555 * This function negotiates the channel protocol version,
2556 * failing if the host doesn't support the necessary protocol version.
2559 static int hv_pci_protocol_negotiation(struct hv_device *hdev,
2560 enum pci_protocol_version_t version[],
2563 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
2564 struct pci_version_request *version_req;
2565 struct hv_pci_compl comp_pkt;
2566 struct pci_packet *pkt;
2571 * Initiate the handshake with the host and negotiate
2572 * a version that the host can support. We start with the
2573 * highest version number and go down if the host cannot
2576 pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
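/*
 * The request body is built in the packet's trailing message area (see the
 * cast below), which is why the packet and the version request are
 * allocated together.
 */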
2580 init_completion(&comp_pkt.host_event);
2581 pkt->completion_func = hv_pci_generic_compl;
2582 pkt->compl_ctxt = &comp_pkt;
2583 version_req = (struct pci_version_request *)&pkt->message;
2584 version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
2586 for (i = 0; i < num_version; i++) {
2587 version_req->protocol_version = version[i];
2588 ret = vmbus_sendpacket(hdev->channel, version_req,
2589 sizeof(struct pci_version_request),
2590 (unsigned long)pkt, VM_PKT_DATA_INBAND,
2591 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2593 ret = wait_for_response(hdev, &comp_pkt.host_event);
2596 dev_err(&hdev->device,
2597 "PCI Pass-through VSP failed to request version: %d",
2602 if (comp_pkt.completion_status >= 0) {
2603 hbus->protocol_version = version[i];
2604 dev_info(&hdev->device,
2605 "PCI VMBus probing: Using version %#x\n",
2606 hbus->protocol_version);
2610 if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
2611 dev_err(&hdev->device,
2612 "PCI Pass-through VSP failed version request: %#x",
2613 comp_pkt.completion_status);
2618 reinit_completion(&comp_pkt.host_event);
2621 dev_err(&hdev->device,
2622 "PCI pass-through VSP failed to find supported version");
2631 * hv_pci_free_bridge_windows() - Release memory regions for the bus
2633 * @hbus: Root PCI bus, as understood by this driver
2635 static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
2638 * Set the resources back to the way they looked when they
2639 * were allocated by setting IORESOURCE_BUSY again.
2642 if (hbus->low_mmio_space && hbus->low_mmio_res) {
2643 hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
2644 vmbus_free_mmio(hbus->low_mmio_res->start,
2645 resource_size(hbus->low_mmio_res));
2648 if (hbus->high_mmio_space && hbus->high_mmio_res) {
2649 hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
2650 vmbus_free_mmio(hbus->high_mmio_res->start,
2651 resource_size(hbus->high_mmio_res));
2656 * hv_pci_allocate_bridge_windows() - Allocate memory regions for the bus
2658 * @hbus: Root PCI bus, as understood by this driver
2660 * This function calls vmbus_allocate_mmio(), which is itself a
2661 * bit of a compromise. Ideally, we might change the pnp layer
2662 * in the kernel such that it comprehends either PCI devices
2663 * which are "grandchildren of ACPI," with some intermediate bus
2664 * node (in this case, VMBus) or change it such that it
2665 * understands VMBus. The pnp layer, however, has been declared
2666 * deprecated, and not subject to change.
2668 * The workaround, implemented here, is to ask VMBus to allocate
2669 * MMIO space for this bus. VMBus itself knows which ranges are
2670 * appropriate by looking at its own ACPI objects. Then, after
2671 * these ranges are claimed, they're modified to look like they
2672 * would have looked if the ACPI and pnp code had allocated
2673 * bridge windows. These descriptors have to exist in this form
2674 * in order to satisfy the code which will get invoked when the
2675 * endpoint PCI function driver calls request_mem_region() or
2676 * request_mem_region_exclusive().
2678 * Return: 0 on success, -errno on failure
2680 static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
2682 resource_size_t align;
2685 if (hbus->low_mmio_space) {
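/*
 * Use the largest power of two that fits in the requested size as the
 * alignment: 63 - __builtin_clzll(x) is the bit index of x's most
 * significant set bit, so shifting 1ULL by it rounds x down to a power
 * of two.
 */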
2686 align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
2687 ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
2688 (u64)(u32)0xffffffff,
2689 hbus->low_mmio_space,
2692 dev_err(&hbus->hdev->device,
2693 "Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
2694 hbus->low_mmio_space);
2698 /* Modify this resource to become a bridge window. */
2699 hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
2700 hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
2701 pci_add_resource(&hbus->resources_for_children,
2702 hbus->low_mmio_res);
2705 if (hbus->high_mmio_space) {
2706 align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
2707 ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
2709 hbus->high_mmio_space, align,
2712 dev_err(&hbus->hdev->device,
2713 "Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
2714 hbus->high_mmio_space);
2715 goto release_low_mmio;
2718 /* Modify this resource to become a bridge window. */
2719 hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
2720 hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
2721 pci_add_resource(&hbus->resources_for_children,
2722 hbus->high_mmio_res);
2728 if (hbus->low_mmio_res) {
2729 vmbus_free_mmio(hbus->low_mmio_res->start,
2730 resource_size(hbus->low_mmio_res));
2737 * hv_allocate_config_window() - Find MMIO space for PCI Config
2738 * @hbus: Root PCI bus, as understood by this driver
2740 * This function claims memory-mapped I/O space for accessing
2741 * configuration space for the functions on this bus.
2743 * Return: 0 on success, -errno on failure
2745 static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
2750 * Set up a region of MMIO space to use for accessing configuration space.
2753 ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
2754 PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
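/*
 * The 0x1000-byte alignment keeps the window page-aligned; config space
 * accesses then go through the page at CFG_PAGE_OFFSET within this
 * PCI_CONFIG_MMIO_LENGTH-sized region.
 */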
2759 * vmbus_allocate_mmio() gets used for allocating both device endpoint
2760 * resource claims (those which cannot be overlapped) and the ranges
2761 * which are valid for the children of this bus, which are intended
2762 * to be overlapped by those children. Set the flag on this claim
2763 * meaning that this region can't be overlapped.
2766 hbus->mem_config->flags |= IORESOURCE_BUSY;
2771 static void hv_free_config_window(struct hv_pcibus_device *hbus)
2773 vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
2777 * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
2778 * @hdev: VMBus's tracking struct for this root PCI bus
2780 * Return: 0 on success, -errno on failure
2782 static int hv_pci_enter_d0(struct hv_device *hdev)
2784 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
2785 struct pci_bus_d0_entry *d0_entry;
2786 struct hv_pci_compl comp_pkt;
2787 struct pci_packet *pkt;
2791 * Tell the host that the bus is ready to use, and moved into the
2792 * powered-on state. This includes telling the host which region
2793 * of memory-mapped I/O space has been chosen for configuration space access.
2796 pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
2800 init_completion(&comp_pkt.host_event);
2801 pkt->completion_func = hv_pci_generic_compl;
2802 pkt->compl_ctxt = &comp_pkt;
2803 d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
2804 d0_entry->message_type.type = PCI_BUS_D0ENTRY;
2805 d0_entry->mmio_base = hbus->mem_config->start;
2807 ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
2808 (unsigned long)pkt, VM_PKT_DATA_INBAND,
2809 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2811 ret = wait_for_response(hdev, &comp_pkt.host_event);
2816 if (comp_pkt.completion_status < 0) {
2817 dev_err(&hdev->device,
2818 "PCI Pass-through VSP failed D0 Entry with status %x\n",
2819 comp_pkt.completion_status);
2832 * hv_pci_query_relations() - Ask host to send list of child devices
2834 * @hdev: VMBus's tracking struct for this root PCI bus
2836 * Return: 0 on success, -errno on failure
2838 static int hv_pci_query_relations(struct hv_device *hdev)
2840 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
2841 struct pci_message message;
2842 struct completion comp;
2845 /* Ask the host to send along the list of child devices */
2846 init_completion(&comp);
2847 if (cmpxchg(&hbus->survey_event, NULL, &comp))
2850 memset(&message, 0, sizeof(message));
2851 message.type = PCI_QUERY_BUS_RELATIONS;
2853 ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
2854 0, VM_PKT_DATA_INBAND, 0);
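/*
 * The host does not answer this packet directly; the child list arrives
 * later as a PCI_BUS_RELATIONS (or PCI_BUS_RELATIONS2) message, and the
 * completion registered in hbus->survey_event above is signaled once
 * survey_child_resources() has processed that list.
 */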
2856 ret = wait_for_response(hdev, &comp);
2862 * hv_send_resources_allocated() - Report local resource choices
2863 * @hdev: VMBus's tracking struct for this root PCI bus
2865 * The host OS is expecting to be sent a request as a message
2866 * which contains all the resources that the device will use.
2867 * The response contains those same resources, "translated",
2868 * which is to say, the values which should be used by the
2869 * hardware when it delivers an interrupt. (MMIO resources are
2870 * used in local terms.) This is nice for Windows, and lines up
2871 * with the FDO/PDO split, which doesn't exist in Linux. Linux
2872 * is deeply expecting to scan an emulated PCI configuration
2873 * space. So this message is sent here only to drive the state
2874 * machine on the host forward.
2876 * Return: 0 on success, -errno on failure
2878 static int hv_send_resources_allocated(struct hv_device *hdev)
2880 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
2881 struct pci_resources_assigned *res_assigned;
2882 struct pci_resources_assigned2 *res_assigned2;
2883 struct hv_pci_compl comp_pkt;
2884 struct hv_pci_dev *hpdev;
2885 struct pci_packet *pkt;
2890 size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
2891 ? sizeof(*res_assigned) : sizeof(*res_assigned2);
2893 pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
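/*
 * Walk every possible Windows slot number: wslot is an 8-bit encoding
 * of the PCI device and function numbers, so 256 values cover the bus.
 */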
2899 for (wslot = 0; wslot < 256; wslot++) {
2900 hpdev = get_pcichild_wslot(hbus, wslot);
2904 memset(pkt, 0, sizeof(*pkt) + size_res);
2905 init_completion(&comp_pkt.host_event);
2906 pkt->completion_func = hv_pci_generic_compl;
2907 pkt->compl_ctxt = &comp_pkt;
2909 if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
2911 (struct pci_resources_assigned *)&pkt->message;
2912 res_assigned->message_type.type =
2913 PCI_RESOURCES_ASSIGNED;
2914 res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
2917 (struct pci_resources_assigned2 *)&pkt->message;
2918 res_assigned2->message_type.type =
2919 PCI_RESOURCES_ASSIGNED2;
2920 res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
2922 put_pcichild(hpdev);
2924 ret = vmbus_sendpacket(hdev->channel, &pkt->message,
2925 size_res, (unsigned long)pkt,
2927 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2929 ret = wait_for_response(hdev, &comp_pkt.host_event);
2933 if (comp_pkt.completion_status < 0) {
2935 dev_err(&hdev->device,
2936 "resource allocated returned 0x%x",
2937 comp_pkt.completion_status);
2947 * hv_send_resources_released() - Report local resources released
2949 * @hdev: VMBus's tracking struct for this root PCI bus
2951 * Return: 0 on success, -errno on failure
2953 static int hv_send_resources_released(struct hv_device *hdev)
2955 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
2956 struct pci_child_message pkt;
2957 struct hv_pci_dev *hpdev;
2961 for (wslot = 0; wslot < 256; wslot++) {
2962 hpdev = get_pcichild_wslot(hbus, wslot);
2966 memset(&pkt, 0, sizeof(pkt));
2967 pkt.message_type.type = PCI_RESOURCES_RELEASED;
2968 pkt.wslot.slot = hpdev->desc.win_slot.slot;
2970 put_pcichild(hpdev);
2972 ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
2973 VM_PKT_DATA_INBAND, 0);
2981 static void get_hvpcibus(struct hv_pcibus_device *hbus)
2983 refcount_inc(&hbus->remove_lock);
2986 static void put_hvpcibus(struct hv_pcibus_device *hbus)
2988 if (refcount_dec_and_test(&hbus->remove_lock))
2989 complete(&hbus->remove_event);
2992 #define HVPCI_DOM_MAP_SIZE (64 * 1024)
2993 static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);
2996 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
2997 * as invalid for passthrough PCI devices of this driver.
2999 #define HVPCI_DOM_INVALID 0
3002 * hv_get_dom_num() - Get a valid PCI domain number
3003 * Check if the PCI domain number is in use, and return another number if it is.
3006 * @dom: Requested domain number
3008 * return: domain number on success, HVPCI_DOM_INVALID on failure
3010 static u16 hv_get_dom_num(u16 dom)
3014 if (test_and_set_bit(dom, hvpci_dom_map) == 0)
3017 for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
3018 if (test_and_set_bit(i, hvpci_dom_map) == 0)
3022 return HVPCI_DOM_INVALID;
3026 * hv_put_dom_num() - Mark the PCI domain number as free
3027 * @dom: Domain number to be freed
3029 static void hv_put_dom_num(u16 dom)
3031 clear_bit(dom, hvpci_dom_map);
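/*
 * Illustrative use of the domain-number helpers (a sketch only; the real
 * call sites are hv_pci_probe() and the remove/error paths below):
 *
 *	u16 dom = hv_get_dom_num(dom_req);
 *
 *	if (dom == HVPCI_DOM_INVALID)
 *		goto err;		(no usable domain number)
 *	...
 *	hv_put_dom_num(dom);		(release the number on teardown)
 */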
3035 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
3036 * @hdev: VMBus's tracking struct for this root PCI bus
3037 * @dev_id: Identifies the device itself
3039 * Return: 0 on success, -errno on failure
3041 static int hv_pci_probe(struct hv_device *hdev,
3042 const struct hv_vmbus_device_id *dev_id)
3044 struct hv_pcibus_device *hbus;
3050 * hv_pcibus_device contains the hypercall arguments for retargeting in
3051 * hv_irq_unmask(). Those must not cross a page boundary.
3053 BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);
3056 * With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural
3057 * alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate
3058 * a 4KB buffer that is guaranteed to be 4KB-aligned. Here the size and
3059 * alignment of hbus is important because hbus's field
3060 * retarget_msi_interrupt_params must not cross a 4KB page boundary.
3062 * Here we prefer kzalloc to get_zeroed_page(), because a buffer
3063 * allocated by the latter is not tracked and scanned by kmemleak, and
3064 * hence kmemleak reports the pointer contained in the hbus buffer
3065 * (i.e. the hpdev struct, which is created in new_pcichild_device() and
3066 * is tracked by hbus->children) as a memory leak (a false positive).
3068 * If the kernel doesn't have 59bb47985c1d, get_zeroed_page() *must* be
3069 * used to allocate the hbus buffer and we can avoid the kmemleak false
3070 * positive by using kmemleak_alloc() and kmemleak_free() to ask
3071 * kmemleak to track and scan the hbus buffer.
3073 hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
3076 hbus->state = hv_pcibus_init;
3079 * The PCI bus "domain" is what is called "segment" in ACPI and other
3080 * specs. Pull it from the instance ID, to get something usually
3081 * unique. In rare cases of collision, we will fall back to another number that is not in use.
3084 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
3085 * together with this guest driver can guarantee that (1) The only
3086 * domain used by Gen1 VMs for something that looks like a physical
3087 * PCI bus (which is actually emulated by the hypervisor) is domain 0.
3088 * (2) There will be no overlap between domains (after fixing possible
3089 * collisions) in the same VM.
3091 dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
3092 dom = hv_get_dom_num(dom_req);
3094 if (dom == HVPCI_DOM_INVALID) {
3095 dev_err(&hdev->device,
3096 "Unable to use dom# 0x%hx or other numbers", dom_req);
3102 dev_info(&hdev->device,
3103 "PCI dom# 0x%hx has collision, using 0x%hx",
3106 hbus->sysdata.domain = dom;
3109 refcount_set(&hbus->remove_lock, 1);
3110 INIT_LIST_HEAD(&hbus->children);
3111 INIT_LIST_HEAD(&hbus->dr_list);
3112 INIT_LIST_HEAD(&hbus->resources_for_children);
3113 spin_lock_init(&hbus->config_lock);
3114 spin_lock_init(&hbus->device_list_lock);
3115 spin_lock_init(&hbus->retarget_msi_interrupt_lock);
3116 init_completion(&hbus->remove_event);
3117 hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
3118 hbus->sysdata.domain);
3124 ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
3125 hv_pci_onchannelcallback, hbus);
3129 hv_set_drvdata(hdev, hbus);
3131 ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
3132 ARRAY_SIZE(pci_protocol_versions));
3136 ret = hv_allocate_config_window(hbus);
3140 hbus->cfg_addr = ioremap(hbus->mem_config->start,
3141 PCI_CONFIG_MMIO_LENGTH);
3142 if (!hbus->cfg_addr) {
3143 dev_err(&hdev->device,
3144 "Unable to map a virtual address for config space\n");
3149 name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
3155 hbus->sysdata.fwnode = irq_domain_alloc_named_fwnode(name);
3157 if (!hbus->sysdata.fwnode) {
3162 ret = hv_pcie_init_irq_domain(hbus);
3166 ret = hv_pci_query_relations(hdev);
3168 goto free_irq_domain;
3170 ret = hv_pci_enter_d0(hdev);
3172 goto free_irq_domain;
3174 ret = hv_pci_allocate_bridge_windows(hbus);
3176 goto free_irq_domain;
3178 ret = hv_send_resources_allocated(hdev);
3182 prepopulate_bars(hbus);
3184 hbus->state = hv_pcibus_probed;
3186 ret = create_root_hv_pci_bus(hbus);
3193 hv_pci_free_bridge_windows(hbus);
3195 irq_domain_remove(hbus->irq_domain);
3197 irq_domain_free_fwnode(hbus->sysdata.fwnode);
3199 iounmap(hbus->cfg_addr);
3201 hv_free_config_window(hbus);
3203 vmbus_close(hdev->channel);
3205 destroy_workqueue(hbus->wq);
3207 hv_put_dom_num(hbus->sysdata.domain);
3213 static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating)
3215 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3217 struct pci_packet teardown_packet;
3218 u8 buffer[sizeof(struct pci_message)];
3220 struct hv_dr_state *dr;
3221 struct hv_pci_compl comp_pkt;
3225 * After the host sends the RESCIND_CHANNEL message, it doesn't
3226 * access the per-channel ringbuffer any longer.
3228 if (hdev->channel->rescind)
3232 /* Delete any children which might still exist. */
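/*
 * The zero-length hv_dr_state queued here makes pci_devices_present_work()
 * treat every existing child as missing and tear it down.
 */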
3233 dr = kzalloc(sizeof(*dr), GFP_KERNEL);
3234 if (dr && hv_pci_start_relations_work(hbus, dr))
3238 ret = hv_send_resources_released(hdev);
3240 dev_err(&hdev->device,
3241 "Couldn't send resources released packet(s)\n");
3245 memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
3246 init_completion(&comp_pkt.host_event);
3247 pkt.teardown_packet.completion_func = hv_pci_generic_compl;
3248 pkt.teardown_packet.compl_ctxt = &comp_pkt;
3249 pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
3251 ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
3252 sizeof(struct pci_message),
3253 (unsigned long)&pkt.teardown_packet,
3255 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3259 if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0)
3266 * hv_pci_remove() - Remove routine for this VMBus channel
3267 * @hdev: VMBus's tracking struct for this root PCI bus
3269 * Return: 0 on success, -errno on failure
3271 static int hv_pci_remove(struct hv_device *hdev)
3273 struct hv_pcibus_device *hbus;
3276 hbus = hv_get_drvdata(hdev);
3277 if (hbus->state == hv_pcibus_installed) {
3278 /* Remove the bus from PCI's point of view. */
3279 pci_lock_rescan_remove();
3280 pci_stop_root_bus(hbus->pci_bus);
3281 hv_pci_remove_slots(hbus);
3282 pci_remove_root_bus(hbus->pci_bus);
3283 pci_unlock_rescan_remove();
3284 hbus->state = hv_pcibus_removed;
3287 ret = hv_pci_bus_exit(hdev, false);
3289 vmbus_close(hdev->channel);
3291 iounmap(hbus->cfg_addr);
3292 hv_free_config_window(hbus);
3293 pci_free_resource_list(&hbus->resources_for_children);
3294 hv_pci_free_bridge_windows(hbus);
3295 irq_domain_remove(hbus->irq_domain);
3296 irq_domain_free_fwnode(hbus->sysdata.fwnode);
3298 wait_for_completion(&hbus->remove_event);
3299 destroy_workqueue(hbus->wq);
3301 hv_put_dom_num(hbus->sysdata.domain);
3307 static int hv_pci_suspend(struct hv_device *hdev)
3309 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3310 enum hv_pcibus_state old_state;
3314 * hv_pci_suspend() must make sure there are no pending work items
3315 * before calling vmbus_close(), since it runs in a process context
3316 * as a callback in dpm_suspend(). When it starts to run, the channel
3317 * callback hv_pci_onchannelcallback(), which runs in a tasklet
3318 * context, can still be running concurrently and scheduling new work
3319 * items onto hbus->wq in hv_pci_devices_present() and
3320 * hv_pci_eject_device(), and the work item handlers can access the
3321 * vmbus channel, which can be being closed by hv_pci_suspend(), e.g.
3322 * the work item handler pci_devices_present_work() ->
3323 * new_pcichild_device() writes to the vmbus channel.
3325 * To eliminate the race, hv_pci_suspend() disables the channel
3326 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
3327 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
3328 * it knows that no new work item can be scheduled, and then it flushes
3329 * hbus->wq and safely closes the vmbus channel.
3331 tasklet_disable(&hdev->channel->callback_event);
3333 /* Change the hbus state to prevent new work items. */
3334 old_state = hbus->state;
3335 if (hbus->state == hv_pcibus_installed)
3336 hbus->state = hv_pcibus_removing;
3338 tasklet_enable(&hdev->channel->callback_event);
3340 if (old_state != hv_pcibus_installed)
3343 flush_workqueue(hbus->wq);
3345 ret = hv_pci_bus_exit(hdev, true);
3349 vmbus_close(hdev->channel);
3354 static int hv_pci_resume(struct hv_device *hdev)
3356 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3357 enum pci_protocol_version_t version[1];
3360 hbus->state = hv_pcibus_init;
3362 ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
3363 hv_pci_onchannelcallback, hbus);
3367 /* Only use the version that was in use before hibernation. */
3368 version[0] = hbus->protocol_version;
3369 ret = hv_pci_protocol_negotiation(hdev, version, 1);
3373 ret = hv_pci_query_relations(hdev);
3377 ret = hv_pci_enter_d0(hdev);
3381 ret = hv_send_resources_allocated(hdev);
3385 prepopulate_bars(hbus);
3387 hbus->state = hv_pcibus_installed;
3390 vmbus_close(hdev->channel);
3394 static const struct hv_vmbus_device_id hv_pci_id_table[] = {
3395 /* PCI Pass-through Class ID */
3396 /* 44C4F61D-4444-4400-9D52-802E27EDE19F */
3401 MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);
3403 static struct hv_driver hv_pci_drv = {
3405 .id_table = hv_pci_id_table,
3406 .probe = hv_pci_probe,
3407 .remove = hv_pci_remove,
3408 .suspend = hv_pci_suspend,
3409 .resume = hv_pci_resume,
3412 static void __exit exit_hv_pci_drv(void)
3414 vmbus_driver_unregister(&hv_pci_drv);
3416 hvpci_block_ops.read_block = NULL;
3417 hvpci_block_ops.write_block = NULL;
3418 hvpci_block_ops.reg_blk_invalidate = NULL;
3421 static int __init init_hv_pci_drv(void)
3423 /* Set the invalid domain number's bit, so it will not be used */
3424 set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
3426 /* Initialize PCI block r/w interface */
3427 hvpci_block_ops.read_block = hv_read_config_block;
3428 hvpci_block_ops.write_block = hv_write_config_block;
3429 hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;
3431 return vmbus_driver_register(&hv_pci_drv);
3434 module_init(init_hv_pci_drv);
3435 module_exit(exit_hv_pci_drv);
3437 MODULE_DESCRIPTION("Hyper-V PCI");
3438 MODULE_LICENSE("GPL v2");