// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */
6 #include <linux/acpi.h>
7 #include <linux/bitfield.h>
8 #include <linux/module.h>
9 #include <linux/pm_runtime.h>
10 #include <net/rtnetlink.h>
12 #include "iosm_ipc_imem.h"
13 #include "iosm_ipc_pcie.h"
14 #include "iosm_ipc_protocol.h"
16 MODULE_DESCRIPTION("IOSM Driver");
17 MODULE_LICENSE("GPL v2");
/* GUID of the ACPI _DSM method that reports the BIOS WWAN RTD3 policy;
 * evaluated by ipc_pcie_read_bios_cfg() via acpi_evaluate_dsm().
 */
static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92,
0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d);
23 static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie)
25 /* Free the MSI resources. */
26 ipc_release_irq(ipc_pcie);
28 /* Free mapped doorbell scratchpad bus memory into CPU space. */
29 iounmap(ipc_pcie->scratchpad);
31 /* Free mapped IPC_REGS bus memory into CPU space. */
32 iounmap(ipc_pcie->ipc_regs);
34 /* Releases all PCI I/O and memory resources previously reserved by a
35 * successful call to pci_request_regions. Call this function only
36 * after all use of the PCI regions has ceased.
38 pci_release_regions(ipc_pcie->pci);
41 static void ipc_pcie_cleanup(struct iosm_pcie *ipc_pcie)
43 /* Free the shared memory resources. */
44 ipc_imem_cleanup(ipc_pcie->imem);
46 ipc_pcie_resources_release(ipc_pcie);
48 /* Signal to the system that the PCI device is not in use. */
49 pci_disable_device(ipc_pcie->pci);
52 static void ipc_pcie_deinit(struct iosm_pcie *ipc_pcie)
54 kfree(ipc_pcie->imem);
/* PCI remove callback: undo everything ipc_pcie_probe() set up. */
static void ipc_pcie_remove(struct pci_dev *pci)
{
	struct iosm_pcie *ipc_pcie = pci_get_drvdata(pci);

	ipc_pcie_cleanup(ipc_pcie);

	ipc_pcie_deinit(ipc_pcie);
}
67 static int ipc_pcie_resources_request(struct iosm_pcie *ipc_pcie)
69 struct pci_dev *pci = ipc_pcie->pci;
73 /* Reserved PCI I/O and memory resources.
74 * Mark all PCI regions associated with PCI device pci as
75 * being reserved by owner IOSM_IPC.
77 ret = pci_request_regions(pci, "IOSM_IPC");
79 dev_err(ipc_pcie->dev, "failed pci request regions");
80 goto pci_request_region_fail;
83 /* Reserve the doorbell IPC REGS memory resources.
84 * Remap the memory into CPU space. Arrange for the physical address
85 * (BAR) to be visible from this driver.
86 * pci_ioremap_bar() ensures that the memory is marked uncachable.
88 ipc_pcie->ipc_regs = pci_ioremap_bar(pci, ipc_pcie->ipc_regs_bar_nr);
90 if (!ipc_pcie->ipc_regs) {
91 dev_err(ipc_pcie->dev, "IPC REGS ioremap error");
93 goto ipc_regs_remap_fail;
96 /* Reserve the MMIO scratchpad memory resources.
97 * Remap the memory into CPU space. Arrange for the physical address
98 * (BAR) to be visible from this driver.
99 * pci_ioremap_bar() ensures that the memory is marked uncachable.
101 ipc_pcie->scratchpad =
102 pci_ioremap_bar(pci, ipc_pcie->scratchpad_bar_nr);
104 if (!ipc_pcie->scratchpad) {
105 dev_err(ipc_pcie->dev, "doorbell scratchpad ioremap error");
107 goto scratch_remap_fail;
110 /* Install the irq handler triggered by CP. */
111 ret = ipc_acquire_irq(ipc_pcie);
113 dev_err(ipc_pcie->dev, "acquiring MSI irq failed!");
114 goto irq_acquire_fail;
117 /* Enable bus-mastering for the IOSM IPC device. */
120 /* Enable LTR if possible
121 * This is needed for L1.2!
123 pcie_capability_read_dword(ipc_pcie->pci, PCI_EXP_DEVCAP2, &cap);
124 if (cap & PCI_EXP_DEVCAP2_LTR)
125 pcie_capability_set_word(ipc_pcie->pci, PCI_EXP_DEVCTL2,
126 PCI_EXP_DEVCTL2_LTR_EN);
128 dev_dbg(ipc_pcie->dev, "link between AP and CP is fully on");
133 iounmap(ipc_pcie->scratchpad);
135 iounmap(ipc_pcie->ipc_regs);
137 pci_release_regions(pci);
138 pci_request_region_fail:
142 bool ipc_pcie_check_aspm_enabled(struct iosm_pcie *ipc_pcie,
145 struct pci_dev *pdev;
150 pdev = ipc_pcie->pci->bus->self;
152 pdev = ipc_pcie->pci;
154 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &value);
155 enabled = value & PCI_EXP_LNKCTL_ASPMC;
156 dev_dbg(ipc_pcie->dev, "ASPM L1: 0x%04X 0x%03X", pdev->device, value);
158 return (enabled == PCI_EXP_LNKCTL_ASPM_L1 ||
159 enabled == PCI_EXP_LNKCTL_ASPMC);
162 bool ipc_pcie_check_data_link_active(struct iosm_pcie *ipc_pcie)
164 struct pci_dev *parent;
167 if (!ipc_pcie->pci->bus || !ipc_pcie->pci->bus->self) {
168 dev_err(ipc_pcie->dev, "root port not found");
172 parent = ipc_pcie->pci->bus->self;
174 pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &link_status);
175 dev_dbg(ipc_pcie->dev, "Link status: 0x%04X", link_status);
177 return link_status & PCI_EXP_LNKSTA_DLLLA;
180 static bool ipc_pcie_check_aspm_supported(struct iosm_pcie *ipc_pcie,
183 struct pci_dev *pdev;
188 pdev = ipc_pcie->pci->bus->self;
190 pdev = ipc_pcie->pci;
191 pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &cap);
192 support = u32_get_bits(cap, PCI_EXP_LNKCAP_ASPMS);
193 if (support < PCI_EXP_LNKCTL_ASPM_L1) {
194 dev_dbg(ipc_pcie->dev, "ASPM L1 not supported: 0x%04X",
201 void ipc_pcie_config_aspm(struct iosm_pcie *ipc_pcie)
203 bool parent_aspm_enabled, dev_aspm_enabled;
205 /* check if both root port and child supports ASPM L1 */
206 if (!ipc_pcie_check_aspm_supported(ipc_pcie, true) ||
207 !ipc_pcie_check_aspm_supported(ipc_pcie, false))
210 parent_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, true);
211 dev_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, false);
213 dev_dbg(ipc_pcie->dev, "ASPM parent: %s device: %s",
214 parent_aspm_enabled ? "Enabled" : "Disabled",
215 dev_aspm_enabled ? "Enabled" : "Disabled");
218 /* Initializes PCIe endpoint configuration */
219 static void ipc_pcie_config_init(struct iosm_pcie *ipc_pcie)
221 /* BAR0 is used for doorbell */
222 ipc_pcie->ipc_regs_bar_nr = IPC_DOORBELL_BAR0;
224 /* update HW configuration */
225 ipc_pcie->scratchpad_bar_nr = IPC_SCRATCHPAD_BAR2;
226 ipc_pcie->doorbell_reg_offset = IPC_DOORBELL_CH_OFFSET;
227 ipc_pcie->doorbell_write = IPC_WRITE_PTR_REG_0;
228 ipc_pcie->doorbell_capture = IPC_CAPTURE_PTR_REG_0;
231 /* This will read the BIOS WWAN RTD3 settings:
232 * D0L1.2/D3L2/Disabled
234 static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
236 enum ipc_pcie_sleep_state sleep_state = IPC_PCIE_D0L12;
237 union acpi_object *object;
238 acpi_handle handle_acpi;
240 handle_acpi = ACPI_HANDLE(dev);
242 pr_debug("pci device is NOT ACPI supporting device\n");
246 object = acpi_evaluate_dsm(handle_acpi, &wwan_acpi_guid, 0, 3, NULL);
250 if (object->integer.value == 3)
251 sleep_state = IPC_PCIE_D3L2;
259 static int ipc_pcie_probe(struct pci_dev *pci,
260 const struct pci_device_id *pci_id)
262 struct iosm_pcie *ipc_pcie = kzalloc(sizeof(*ipc_pcie), GFP_KERNEL);
265 pr_debug("Probing device 0x%X from the vendor 0x%X", pci_id->device,
271 /* Initialize ipc dbg component for the PCIe device */
272 ipc_pcie->dev = &pci->dev;
274 /* Set the driver specific data. */
275 pci_set_drvdata(pci, ipc_pcie);
277 /* Save the address of the PCI device configuration. */
280 /* Update platform configuration */
281 ipc_pcie_config_init(ipc_pcie);
283 /* Initialize the device before it is used. Ask low-level code
284 * to enable I/O and memory. Wake up the device if it was suspended.
286 if (pci_enable_device(pci)) {
287 dev_err(ipc_pcie->dev, "failed to enable the AP PCIe device");
288 /* If enable of PCIe device has failed then calling
289 * ipc_pcie_cleanup will panic the system. More over
290 * ipc_pcie_cleanup() is required to be called after
293 goto pci_enable_fail;
296 ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
298 dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
302 ipc_pcie_config_aspm(ipc_pcie);
303 dev_dbg(ipc_pcie->dev, "PCIe device enabled.");
305 /* Read WWAN RTD3 BIOS Setting
307 ipc_pcie->d3l2_support = ipc_pcie_read_bios_cfg(&pci->dev);
309 ipc_pcie->suspend = 0;
311 if (ipc_pcie_resources_request(ipc_pcie))
312 goto resources_req_fail;
314 /* Establish the link to the imem layer. */
315 ipc_pcie->imem = ipc_imem_init(ipc_pcie, pci->device,
316 ipc_pcie->scratchpad, ipc_pcie->dev);
317 if (!ipc_pcie->imem) {
318 dev_err(ipc_pcie->dev, "failed to init imem");
325 ipc_pcie_resources_release(ipc_pcie);
328 pci_disable_device(pci);
335 static const struct pci_device_id iosm_ipc_ids[] = {
336 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7560_ID) },
337 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7360_ID) },
340 MODULE_DEVICE_TABLE(pci, iosm_ipc_ids);
342 /* Enter sleep in s2idle case
344 static int __maybe_unused ipc_pcie_suspend_s2idle(struct iosm_pcie *ipc_pcie)
346 ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_SLEEP);
348 /* Complete all memory stores before setting bit */
349 smp_mb__before_atomic();
351 set_bit(0, &ipc_pcie->suspend);
353 /* Complete all memory stores after setting bit */
354 smp_mb__after_atomic();
356 ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, true);
361 /* Resume from sleep in s2idle case
363 static int __maybe_unused ipc_pcie_resume_s2idle(struct iosm_pcie *ipc_pcie)
365 ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_ACTIVE);
367 ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, false);
369 /* Complete all memory stores before clearing bit. */
370 smp_mb__before_atomic();
372 clear_bit(0, &ipc_pcie->suspend);
374 /* Complete all memory stores after clearing bit. */
375 smp_mb__after_atomic();
379 int __maybe_unused ipc_pcie_suspend(struct iosm_pcie *ipc_pcie)
381 /* The HAL shall ask the shared memory layer whether D3 is allowed. */
382 ipc_imem_pm_suspend(ipc_pcie->imem);
384 dev_dbg(ipc_pcie->dev, "SUSPEND done");
388 int __maybe_unused ipc_pcie_resume(struct iosm_pcie *ipc_pcie)
390 /* The HAL shall inform the shared memory layer that the device is
393 ipc_imem_pm_resume(ipc_pcie->imem);
395 dev_dbg(ipc_pcie->dev, "RESUME done");
399 static int __maybe_unused ipc_pcie_suspend_cb(struct device *dev)
401 struct iosm_pcie *ipc_pcie;
402 struct pci_dev *pdev;
404 pdev = to_pci_dev(dev);
406 ipc_pcie = pci_get_drvdata(pdev);
408 switch (ipc_pcie->d3l2_support) {
410 ipc_pcie_suspend_s2idle(ipc_pcie);
413 ipc_pcie_suspend(ipc_pcie);
420 static int __maybe_unused ipc_pcie_resume_cb(struct device *dev)
422 struct iosm_pcie *ipc_pcie;
423 struct pci_dev *pdev;
425 pdev = to_pci_dev(dev);
427 ipc_pcie = pci_get_drvdata(pdev);
429 switch (ipc_pcie->d3l2_support) {
431 ipc_pcie_resume_s2idle(ipc_pcie);
434 ipc_pcie_resume(ipc_pcie);
441 static DEFINE_RUNTIME_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb,
442 ipc_pcie_resume_cb, NULL);
444 static struct pci_driver iosm_ipc_driver = {
445 .name = KBUILD_MODNAME,
446 .probe = ipc_pcie_probe,
447 .remove = ipc_pcie_remove,
451 .id_table = iosm_ipc_ids,
453 module_pci_driver(iosm_ipc_driver);
455 int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
456 size_t size, dma_addr_t *mapping, int direction)
459 *mapping = dma_map_single(&ipc_pcie->pci->dev, data, size,
461 if (dma_mapping_error(&ipc_pcie->pci->dev, *mapping)) {
462 dev_err(ipc_pcie->dev, "dma mapping failed");
469 void ipc_pcie_addr_unmap(struct iosm_pcie *ipc_pcie, size_t size,
470 dma_addr_t mapping, int direction)
475 dma_unmap_single(&ipc_pcie->pci->dev, mapping, size, direction);
478 struct sk_buff *ipc_pcie_alloc_local_skb(struct iosm_pcie *ipc_pcie,
479 gfp_t flags, size_t size)
483 if (!ipc_pcie || !size) {
484 pr_err("invalid pcie object or size");
488 skb = __netdev_alloc_skb(NULL, size, flags);
492 IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
493 IPC_CB(skb)->mapping = 0;
498 struct sk_buff *ipc_pcie_alloc_skb(struct iosm_pcie *ipc_pcie, size_t size,
499 gfp_t flags, dma_addr_t *mapping,
500 int direction, size_t headroom)
502 struct sk_buff *skb = ipc_pcie_alloc_local_skb(ipc_pcie, flags,
508 skb_reserve(skb, headroom);
510 if (ipc_pcie_addr_map(ipc_pcie, skb->data, size, mapping, direction)) {
515 BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
517 /* Store the mapping address in skb scratch pad for later usage */
518 IPC_CB(skb)->mapping = *mapping;
519 IPC_CB(skb)->direction = direction;
520 IPC_CB(skb)->len = size;
525 void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
530 ipc_pcie_addr_unmap(ipc_pcie, IPC_CB(skb)->len, IPC_CB(skb)->mapping,
531 IPC_CB(skb)->direction);
532 IPC_CB(skb)->mapping = 0;