// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/aer.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"

#define CREATE_TRACE_POINTS
#include <trace/events/cxl.h>

/**
 * DOC: cxl pci
 *
 * This implements the PCI exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even if it isn't CXL enabled. While this driver is
 * focused around the PCI specific aspects of a CXL device, it binds to the
 * specific CXL memory device class code, and therefore the implementation of
 * cxl_pci is focused around CXL memory devices.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register on the CXL bus.
 *  - Enumerate the device's register interfaces and map them.
 *  - Register an nvdimm bridge device with cxl_core.
 *  - Register a CXL mailbox with cxl_core.
 */

#define cxl_doorbell_busy(cxlds)                                                \
	(readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                  \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)

/*
 * CXL 2.0 ECN "Add Mailbox Ready Time" defines a capability field to
 * dictate how long to wait for the mailbox to become ready. The new
 * field allows the device to tell software the amount of time to wait
 * before mailbox ready. This field per the spec theoretically allows
 * for up to 255 seconds. 255 seconds is unreasonably long, it's longer
 * than the maximum SATA port link recovery wait. Default to 60 seconds
 * until someone builds a CXL device that needs more time in practice.
 */
static unsigned short mbox_ready_timeout = 60;
module_param(mbox_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(mbox_ready_timeout, "seconds to wait for mailbox ready");

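/*
 * Usage note (illustrative, not part of the driver): with the 0644
 * permissions above, the timeout can be raised for a slow device at load
 * time, e.g. "modprobe cxl_pci mbox_ready_timeout=120", or adjusted at
 * runtime via /sys/module/cxl_pci/parameters/mbox_ready_timeout.
 */
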
static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
{
	const unsigned long start = jiffies;
	unsigned long end = start;

	while (cxl_doorbell_busy(cxlds)) {
		end = jiffies;

		if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
			/* Check again in case preempted before timeout test */
			if (!cxl_doorbell_busy(cxlds))
				break;
			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	dev_dbg(cxlds->dev, "Doorbell wait took %dms",
		jiffies_to_msecs(end) - jiffies_to_msecs(start));
	return 0;
}

#define cxl_err(dev, status, msg)                                        \
	dev_err_ratelimited(dev, msg ", device state %s%s\n",                  \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "",        \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

#define cxl_cmd_err(dev, cmd, status, msg)                               \
	dev_err_ratelimited(dev, msg " (opcode: %#x), device state %s%s\n",    \
			    (cmd)->opcode,                                     \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "",        \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

/**
 * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
 * @cxlds: The device state to communicate with.
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command thus only using the
 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
 * devices, and perhaps other types of CXL devices may have further information
 * available upon error conditions. Driver facilities wishing to send mailbox
 * commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the primary
 * mailbox to be OS controlled and the secondary mailbox to be used by system
 * firmware. This allows the OS and firmware to communicate with the device and
 * not need to coordinate with each other. The driver only uses the primary
 * mailbox.
 */
static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
				   struct cxl_mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	struct device *dev = cxlds->dev;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlds->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlds)) {
		u64 md_status =
			readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status,
			    "mailbox queue busy");
		return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(dev, "Sending command\n");
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
	if (rc == -ETIMEDOUT) {
		u64 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status, "mailbox timeout");
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
		dev_dbg(dev, "Mailbox operation had an error: %s\n",
			cxl_mbox_cmd_rc2str(mbox_cmd));
		return 0; /* completed but caller must check return_code */
	}

	/* #7 */
	cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}

static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	int rc;

	mutex_lock_io(&cxlds->mbox_mutex);
	rc = __cxl_pci_mbox_send_cmd(cxlds, cmd);
	mutex_unlock(&cxlds->mbox_mutex);

	return rc;
}

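/*
 * Illustrative sketch (not part of the driver): once cxl_pci_setup_mailbox()
 * below has registered cxl_pci_mbox_send() as ->mbox_send(), a caller in the
 * CXL core issues a command roughly as follows. The opcode and payload
 * buffers are hypothetical stand-ins; real callers go through the command
 * helpers and definitions in cxlmem.h.
 *
 *	struct cxl_mbox_cmd cmd = {
 *		.opcode = 0x4300,		// hypothetical opcode
 *		.payload_in = &in,		// caller-owned input buffer
 *		.size_in = sizeof(in),
 *		.payload_out = &out,		// caller-owned output buffer
 *		.size_out = sizeof(out),	// trimmed to actual length on return
 *	};
 *	int rc = cxlds->mbox_send(cxlds, &cmd);
 *
 *	if (rc)					// transport error, e.g. -ETIMEDOUT
 *		return rc;
 *	if (cmd.return_code != CXL_MBOX_CMD_RC_SUCCESS)
 *		return -ENXIO;			// device-reported error
 */
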
static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
{
	const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
	unsigned long timeout;
	u64 md_status;

	timeout = jiffies + mbox_ready_timeout * HZ;
	do {
		md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
		if (md_status & CXLMDEV_MBOX_IF_READY)
			break;
		if (msleep_interruptible(100))
			break;
	} while (!time_after(jiffies, timeout));

	if (!(md_status & CXLMDEV_MBOX_IF_READY)) {
		cxl_err(cxlds->dev, md_status,
			"timeout awaiting mailbox ready");
		return -ETIMEDOUT;
	}

	/*
	 * A command may be in flight from a previous driver instance,
	 * think kexec, do one doorbell wait so that
	 * __cxl_pci_mbox_send_cmd() can assume that it is the only
	 * source for future doorbell busy events.
	 */
	if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) {
		cxl_err(cxlds->dev, md_status, "timeout awaiting mailbox idle");
		return -ETIMEDOUT;
	}

	cxlds->mbox_send = cxl_pci_mbox_send;
	cxlds->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);

	/*
	 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
	 *
	 * If the size is too small, mandatory commands will not work and so
	 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft limiting it.
	 */
	cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M);
	if (cxlds->payload_size < 256) {
		dev_err(cxlds->dev, "Mailbox is too small (%zub)",
			cxlds->payload_size);
		return -ENXIO;
	}

	dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
		cxlds->payload_size);

	return 0;
}

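/*
 * Worked example (illustrative): the Payload Size field is a power-of-2
 * encoding, so a capability field value of n yields (1 << n) bytes. n = 8
 * gives the 256 byte spec minimum enforced above, and n = 20 gives the 1M
 * spec maximum that the min_t() soft limit mirrors.
 */
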
static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map)
{
	struct device *dev = &pdev->dev;

	map->base = ioremap(map->resource, map->max_size);
	if (!map->base) {
		dev_err(dev, "failed to map registers\n");
		return -ENOMEM;
	}

	dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
	return 0;
}

static void cxl_unmap_regblock(struct pci_dev *pdev,
			       struct cxl_register_map *map)
{
	iounmap(map->base);
	map->base = NULL;
}

static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
{
	struct cxl_component_reg_map *comp_map;
	struct cxl_device_reg_map *dev_map;
	struct device *dev = &pdev->dev;
	void __iomem *base = map->base;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		comp_map = &map->component_map;
		cxl_probe_component_regs(dev, base, comp_map);
		if (!comp_map->hdm_decoder.valid) {
			dev_err(dev, "HDM decoder registers not found\n");
			return -ENXIO;
		}

		if (!comp_map->ras.valid)
			dev_dbg(dev, "RAS registers not found\n");

		dev_dbg(dev, "Set up component registers\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		dev_map = &map->device_map;
		cxl_probe_device_regs(dev, base, dev_map);
		if (!dev_map->status.valid || !dev_map->mbox.valid ||
		    !dev_map->memdev.valid) {
			dev_err(dev, "registers not found: %s%s%s\n",
				!dev_map->status.valid ? "status " : "",
				!dev_map->mbox.valid ? "mbox " : "",
				!dev_map->memdev.valid ? "memdev " : "");
			return -ENXIO;
		}

		dev_dbg(dev, "Probing device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
			  struct cxl_register_map *map)
{
	int rc;

	rc = cxl_find_regblock(pdev, type, map);
	if (rc)
		return rc;

	rc = cxl_map_regblock(pdev, map);
	if (rc)
		return rc;

	rc = cxl_probe_regs(pdev, map);
	cxl_unmap_regblock(pdev, map);

	return rc;
}

static void cxl_pci_destroy_doe(void *mbs)
{
	xa_destroy(mbs);
}

static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds)
{
	struct device *dev = cxlds->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 off = 0;

	xa_init(&cxlds->doe_mbs);
	if (devm_add_action(&pdev->dev, cxl_pci_destroy_doe, &cxlds->doe_mbs)) {
		dev_err(dev, "Failed to create XArray for DOE's\n");
		return;
	}

	/*
	 * Mailbox creation is best effort. Higher layers must determine if
	 * the lack of a mailbox for their protocol is a device failure or not.
	 */
	pci_doe_for_each_off(pdev, off) {
		struct pci_doe_mb *doe_mb;

		doe_mb = pcim_doe_create_mb(pdev, off);
		if (IS_ERR(doe_mb)) {
			dev_err(dev, "Failed to create MB object for MB @ %x\n",
				off);
			continue;
		}

		if (!pci_request_config_region_exclusive(pdev, off,
							 PCI_DOE_CAP_SIZEOF,
							 dev_name(dev)))
			pci_err(pdev, "Failed to exclude DOE registers\n");

		if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) {
			dev_err(dev, "xa_insert failed to insert MB @ %x\n",
				off);
			continue;
		}

		dev_dbg(dev, "Created DOE mailbox @%x\n", off);
	}
}

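/*
 * Illustrative sketch (not part of the driver): a consumer such as the CDAT
 * retrieval code looks a mailbox back up by its config space offset and
 * submits work to it. Here "off" and "task" are hypothetical stand-ins for
 * the caller's state, roughly:
 *
 *	struct pci_doe_mb *doe_mb = xa_load(&cxlds->doe_mbs, off);
 *
 *	if (doe_mb)
 *		rc = pci_doe_submit_task(doe_mb, &task);
 */
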
/*
 * Assume that any RCIEP that emits the CXL memory expander class code
 * is an RCD
 */
static bool is_cxl_restricted(struct pci_dev *pdev)
{
	return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
}

static void disable_aer(void *pdev)
{
	pci_disable_pcie_error_reporting(pdev);
}

static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_register_map map;
	struct cxl_memdev *cxlmd;
	struct cxl_dev_state *cxlds;
	int rc;

	/*
	 * Double check the anonymous union trickery in struct cxl_regs
	 * FIXME switch to struct_group()
	 */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	cxlds = cxl_dev_state_create(&pdev->dev);
	if (IS_ERR(cxlds))
		return PTR_ERR(cxlds);
	pci_set_drvdata(pdev, cxlds);

	cxlds->rcd = is_cxl_restricted(pdev);
	cxlds->serial = pci_get_dsn(pdev);
	cxlds->cxl_dvsec = pci_find_dvsec_capability(
		pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
	if (!cxlds->cxl_dvsec)
		dev_warn(&pdev->dev,
			 "Device DVSEC not present, skip CXL.mem init\n");

	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
	if (rc)
		return rc;

	rc = cxl_map_device_regs(&pdev->dev, &cxlds->regs.device_regs, &map);
	if (rc)
		return rc;

	/*
	 * If the component registers can't be found, the cxl_pci driver may
	 * still be useful for management functions so don't return an error.
	 */
	cxlds->component_reg_phys = CXL_RESOURCE_NONE;
	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_warn(&pdev->dev, "No component registers (%d)\n", rc);
	else
		cxlds->component_reg_phys = map.resource;

	devm_cxl_pci_create_doe(cxlds);

	rc = cxl_map_component_regs(&pdev->dev, &cxlds->regs.component,
				    &map, BIT(CXL_CM_CAP_CAP_ID_RAS));
	if (rc)
		dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");

	rc = cxl_pci_setup_mailbox(cxlds);
	if (rc)
		return rc;

	rc = cxl_enumerate_cmds(cxlds);
	if (rc)
		return rc;

	rc = cxl_dev_state_identify(cxlds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlds);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	if (cxlds->regs.ras) {
		pci_enable_pcie_error_reporting(pdev);
		rc = devm_add_action_or_reset(&pdev->dev, disable_aer, pdev);
		if (rc)
			return rc;
	}
	pci_save_state(pdev);

	return rc;
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

/* CXL spec rev3.0 8.2.4.16.1 */
static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
{
	void __iomem *addr;
	u32 *log_addr;
	int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);

	addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
	log_addr = log;

	for (i = 0; i < log_u32_size; i++) {
		*log_addr = readl(addr);
		log_addr++;
		addr += sizeof(u32);
	}
}

/*
 * Log the state of the RAS status registers and prepare them to log the
 * next error status. Return true if a reset is needed.
 */
static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
{
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;
	u32 hl[CXL_HEADERLOG_SIZE_U32];
	void __iomem *addr;
	u32 status;
	u32 fe;

	if (!cxlds->regs.ras)
		return false;

	addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
		return false;

	/* If multiple errors, log header points to first error from ctrl reg */
	if (hweight32(status) > 1) {
		void __iomem *rcc_addr =
			cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;

		fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
				   readl(rcc_addr)));
	} else {
		fe = status;
	}

	header_log_copy(cxlds, hl);
	trace_cxl_aer_uncorrectable_error(dev, status, fe, hl);
	writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);

	return true;
}

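/*
 * Worked example (illustrative): if the uncorrectable status register reads
 * 0x88 (two error bits set), the First Error Pointer field of the RAS
 * Capabilities Control register identifies which arrived first; a pointer
 * value of 3 makes fe = BIT(3), so the trace event flags only bit 3 as the
 * first error while still reporting the full status mask.
 */
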
static pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;
	bool ue;

	/*
	 * A frozen channel indicates an impending reset which is fatal to
	 * CXL.mem operation, and will likely crash the system. On the off
	 * chance the situation is recoverable dump the status of the RAS
	 * capability registers and bounce the active state of the memdev.
	 */
	ue = cxl_report_and_clear(cxlds);

	switch (state) {
	case pci_channel_io_normal:
		if (ue) {
			device_release_driver(dev);
			return PCI_ERS_RESULT_NEED_RESET;
		}
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		dev_warn(&pdev->dev,
			 "%s: frozen state error detected, disable CXL.mem\n",
			 dev_name(dev));
		device_release_driver(dev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t cxl_slot_reset(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;

	dev_info(&pdev->dev, "%s: restart CXL.mem after slot reset\n",
		 dev_name(dev));
	pci_restore_state(pdev);
	if (device_attach(dev) <= 0)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void cxl_error_resume(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;

	dev_info(&pdev->dev, "%s: error resume %s\n", dev_name(dev),
		 dev->driver ? "successful" : "failed");
}

static void cxl_cor_error_detected(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;
	void __iomem *addr;
	u32 status;

	if (!cxlds->regs.ras)
		return;

	addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
		writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
		trace_cxl_aer_correctable_error(dev, status);
	}
}

static const struct pci_error_handlers cxl_error_handlers = {
	.error_detected	= cxl_error_detected,
	.slot_reset	= cxl_slot_reset,
	.resume		= cxl_error_resume,
	.cor_error_detected	= cxl_cor_error_detected,
};

static struct pci_driver cxl_pci_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_pci_probe,
	.err_handler		= &cxl_error_handlers,
	.driver	= {
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};

MODULE_LICENSE("GPL v2");
module_pci_driver(cxl_pci_driver);
MODULE_IMPORT_NS(CXL);