return xa_load(&port->regions, (unsigned long)cxlr);
}
+ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
+ {
+ if (!cpu_cache_has_invalidate_memregion()) {
+ if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
+ dev_warn_once(
+ &cxlr->dev,
+ "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
+ return 0;
+ } else {
+ dev_err(&cxlr->dev,
+ "Failed to synchronize CPU cache state\n");
+ return -ENXIO;
+ }
+ }
+
+ cpu_cache_invalidate_memregion(IORES_DESC_CXL);
+ return 0;
+ }
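/*
 * A minimal userspace model of the gate implemented above; the
 * has_invalidate and invalidation_test booleans are hypothetical
 * stand-ins for cpu_cache_has_invalidate_memregion() and
 * IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST). A sketch only,
 * not kernel code.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool has_invalidate;	/* models cpu_cache_has_invalidate_memregion() */
static bool invalidation_test;	/* models the CONFIG test override */

static int model_invalidate_memregion(void)
{
	if (!has_invalidate) {
		if (invalidation_test) {
			fprintf(stderr, "Bypassing invalidate for testing!\n");
			return 0;
		}
		fprintf(stderr, "Failed to synchronize CPU cache state\n");
		return -ENXIO;
	}
	/* a real kernel would call cpu_cache_invalidate_memregion() here */
	return 0;
}

int main(void)
{
	invalidation_test = true;
	return model_invalidate_memregion() ? 1 : 0;	/* bypass path returns 0 */
}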
+
static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
struct cxl_region_params *p = &cxlr->params;
- int i;
+ int i, rc = 0;
+
+ /*
+ * Before region teardown, attempt to flush the CPU caches; if the
+ * flush fails, cancel the teardown to preserve data consistency.
+ */
+ rc = cxl_region_invalidate_memregion(cxlr);
+ if (rc)
+ return rc;
for (i = count - 1; i >= 0; i--) {
struct cxl_endpoint_decoder *cxled = p->targets[i];
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_port *iter = cxled_to_port(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_ep *ep;
- int rc = 0;
if (cxlds->rcd)
goto endpoint_reset;
rc = cxld->reset(cxld);
if (rc)
return rc;
+ set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}
endpoint_reset:
rc = cxled->cxld.reset(&cxled->cxld);
if (rc)
return rc;
+ set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}
+ /* all decoders associated with this region have been torn down */
+ clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
+
return 0;
}
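/*
 * A standalone sketch of the CXL_REGION_F_NEEDS_RESET lifecycle coded
 * above: the bit latches after the first successful decoder reset and
 * only clears once every decoder is torn down. Plain bit operations
 * stand in for set_bit()/clear_bit(), and decoder_reset() with its
 * failure injection is hypothetical.
 */
#include <stdio.h>

#define F_NEEDS_RESET (1UL << 1)

static int decoder_reset(int i, int fail_at)
{
	return i == fail_at ? -1 : 0;	/* injected failure for demonstration */
}

static int teardown(unsigned long *flags, int count, int fail_at)
{
	for (int i = count - 1; i >= 0; i--) {
		if (decoder_reset(i, fail_at))
			return -1;	/* bail out; the latch stays set */
		*flags |= F_NEEDS_RESET;
	}
	*flags &= ~F_NEEDS_RESET;	/* all decoders torn down: clear the latch */
	return 0;
}

int main(void)
{
	unsigned long flags = 0;

	teardown(&flags, 4, 2);	/* partial teardown leaves the latch set */
	printf("needs reset: %s\n", flags & F_NEEDS_RESET ? "yes" : "no");
	flags = 0;
	teardown(&flags, 4, -1);	/* full teardown clears it */
	printf("needs reset: %s\n", flags & F_NEEDS_RESET ? "yes" : "no");
	return 0;
}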
goto out;
}
- if (commit)
+ /*
+ * Invalidate caches before region setup to drop any speculative
+ * consumption of this address space
+ */
+ rc = cxl_region_invalidate_memregion(cxlr);
+ if (rc)
+ return rc;
+
+ if (commit) {
rc = cxl_region_decode_commit(cxlr);
- else {
+ if (rc == 0)
+ p->state = CXL_CONFIG_COMMIT;
+ } else {
p->state = CXL_CONFIG_RESET_PENDING;
up_write(&cxl_region_rwsem);
device_release_driver(&cxlr->dev);
* The lock was dropped, so need to revalidate that the reset is
* still pending.
*/
- if (p->state == CXL_CONFIG_RESET_PENDING)
+ if (p->state == CXL_CONFIG_RESET_PENDING) {
rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
+ /*
+ * On failure, revert to COMMIT since there may still be
+ * active decoders associated with this region; on success,
+ * move forward to ACTIVE to mark the reset complete.
+ */
+ if (rc)
+ p->state = CXL_CONFIG_COMMIT;
+ else
+ p->state = CXL_CONFIG_ACTIVE;
+ }
}
- if (rc)
- goto out;
-
- if (commit)
- p->state = CXL_CONFIG_COMMIT;
- else if (p->state == CXL_CONFIG_RESET_PENDING)
- p->state = CXL_CONFIG_ACTIVE;
-
out:
up_write(&cxl_region_rwsem);
return -EBUSY;
}
+ /*
+ * Endpoints should already match the region type, but backstop that
+ * assumption with an assertion. Switch-decoders change mapping-type
+ * based on what is mapped when they are assigned to a region.
+ */
+ dev_WARN_ONCE(&cxlr->dev,
+ port == cxled_to_port(cxled) &&
+ cxld->target_type != cxlr->type,
+ "%s:%s mismatch decoder type %d -> %d\n",
+ dev_name(&cxled_to_memdev(cxled)->dev),
+ dev_name(&cxld->dev), cxld->target_type, cxlr->type);
+ cxld->target_type = cxlr->type;
cxl_rr->decoder = cxld;
return 0;
}
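/*
 * A userspace sketch of the type backstop above: endpoint decoders are
 * expected to already match the region's type, while switch decoders
 * simply adopt it on assignment. The enum values mirror
 * cxl_decoder_type; everything else here is hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

enum decoder_type { DEVMEM = 2, HOSTONLYMEM = 3 };

static enum decoder_type assign_type(enum decoder_type cur,
				     enum decoder_type region,
				     bool is_endpoint)
{
	if (is_endpoint && cur != region)	/* models dev_WARN_ONCE() */
		fprintf(stderr, "mismatch decoder type %d -> %d\n", cur, region);
	return region;	/* switch decoders change mapping-type here */
}

int main(void)
{
	enum decoder_type d = assign_type(DEVMEM, HOSTONLYMEM, true);

	printf("decoder type: %d\n", d);	/* warns, then prints 3 */
	return 0;
}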
if (rc)
goto err_decrement;
p->state = CXL_CONFIG_ACTIVE;
- set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
}
cxled->cxld.interleave_ways = p->interleave_ways;
return ERR_PTR(-EBUSY);
}
- return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_EXPANDER);
+ return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
}
static ssize_t create_pmem_region_store(struct device *dev,
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
- static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
- {
- if (!test_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags))
- return 0;
-
- if (!cpu_cache_has_invalidate_memregion()) {
- if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
- dev_warn_once(
- &cxlr->dev,
- "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
- clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
- return 0;
- } else {
- dev_err(&cxlr->dev,
- "Failed to synchronize CPU cache state\n");
- return -ENXIO;
- }
- }
-
- cpu_cache_invalidate_memregion(IORES_DESC_CXL);
- clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
- return 0;
- }
-
static int is_system_ram(struct resource *res, void *arg)
{
struct cxl_region *cxlr = arg;
goto out;
}
- rc = cxl_region_invalidate_memregion(cxlr);
+ if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
+ dev_err(&cxlr->dev,
+ "failed to activate, re-commit region and retry\n");
+ rc = -ENXIO;
+ goto out;
+ }
/*
* From this point on any path that changes the region's state away from
#define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9)
#define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10)
#define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11)
-#define CXL_HDM_DECODER0_CTRL_TYPE BIT(12)
+#define CXL_HDM_DECODER0_CTRL_HOSTONLY BIT(12)
#define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24)
#define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28)
#define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i)
/* CXL 2.0 8.2.8.4 Mailbox Registers */
#define CXLDEV_MBOX_CAPS_OFFSET 0x00
#define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
+#define CXLDEV_MBOX_CAP_BG_CMD_IRQ BIT(6)
+#define CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK GENMASK(10, 7)
#define CXLDEV_MBOX_CTRL_OFFSET 0x04
#define CXLDEV_MBOX_CTRL_DOORBELL BIT(0)
+#define CXLDEV_MBOX_CTRL_BG_CMD_IRQ BIT(2)
#define CXLDEV_MBOX_CMD_OFFSET 0x08
#define CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
#define CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16)
#define CXLDEV_MBOX_STATUS_OFFSET 0x10
+#define CXLDEV_MBOX_STATUS_BG_CMD BIT(0)
#define CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32)
#define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
+#define CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
+#define CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK GENMASK_ULL(22, 16)
+#define CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK GENMASK_ULL(47, 32)
+#define CXLDEV_MBOX_BG_CMD_COMMAND_VENDOR_MASK GENMASK_ULL(63, 48)
#define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
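/*
 * A standalone sketch of unpacking CXLDEV_MBOX_BG_CMD_STATUS_OFFSET
 * using the field layout defined above. GENMASK_ULL and the FIELD_GET
 * arithmetic are re-derived here for a userspace build, and the sample
 * register value is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_GET_ULL(mask, val) (((val) & (mask)) >> __builtin_ctzll(mask))

#define BG_OPCODE GENMASK_ULL(15, 0)	/* ..._BG_CMD_COMMAND_OPCODE_MASK */
#define BG_PCT    GENMASK_ULL(22, 16)	/* ..._BG_CMD_COMMAND_PCT_MASK */
#define BG_RC     GENMASK_ULL(47, 32)	/* ..._BG_CMD_COMMAND_RC_MASK */

int main(void)
{
	uint64_t status = 0x0000000100320400ULL;	/* hypothetical readout */

	printf("opcode=%#llx pct=%llu%% rc=%llu\n",
	       (unsigned long long)FIELD_GET_ULL(BG_OPCODE, status),
	       (unsigned long long)FIELD_GET_ULL(BG_PCT, status),
	       (unsigned long long)FIELD_GET_ULL(BG_RC, status));
	return 0;
}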
/*
void cxl_probe_device_regs(struct device *dev, void __iomem *base,
struct cxl_device_reg_map *map);
int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs,
- struct cxl_register_map *map,
+ const struct cxl_register_map *map,
unsigned long map_mask);
int cxl_map_device_regs(struct device *dev, struct cxl_device_regs *regs,
- struct cxl_register_map *map);
+ const struct cxl_register_map *map);
enum cxl_regloc_type;
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
#define CXL_DECODER_F_MASK GENMASK(5, 0)
enum cxl_decoder_type {
- CXL_DECODER_ACCELERATOR = 2,
- CXL_DECODER_EXPANDER = 3,
+ CXL_DECODER_DEVMEM = 2,
+ CXL_DECODER_HOSTONLYMEM = 3,
};
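/*
 * A sketch of how the renamed control bit plausibly maps onto the
 * renamed enum: a set HOSTONLY bit selects CXL_DECODER_HOSTONLYMEM and
 * a clear bit selects CXL_DECODER_DEVMEM. A standalone model under
 * that assumption, not the driver's actual decode path.
 */
#include <stdint.h>
#include <stdio.h>

#define CTRL_HOSTONLY (1u << 12)	/* mirrors CXL_HDM_DECODER0_CTRL_HOSTONLY */

enum cxl_decoder_type { CXL_DECODER_DEVMEM = 2, CXL_DECODER_HOSTONLYMEM = 3 };

static enum cxl_decoder_type type_from_ctrl(uint32_t ctrl)
{
	return (ctrl & CTRL_HOSTONLY) ? CXL_DECODER_HOSTONLYMEM
				      : CXL_DECODER_DEVMEM;
}

int main(void)
{
	printf("%d %d\n", type_from_ctrl(CTRL_HOSTONLY), type_from_ctrl(0));
	return 0;
}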
/*
};
/*
- * Flag whether this region needs to have its HPA span synchronized with
- * CPU cache state at region activation time.
- */
- #define CXL_REGION_F_INCOHERENT 0
-
- /*
* Indicate whether this region has been assembled by autodetection or
* userspace assembly. Prevent endpoint decoders outside of automatic
* detection from being added to the region.
*/
- #define CXL_REGION_F_AUTO 1
+ #define CXL_REGION_F_AUTO 0
+
+ /*
+ * Require that a committed region successfully complete a teardown once
+ * any of its associated decoders have been torn down. This maintains
+ * the commit state for the region since there are committed decoders,
+ * but blocks cxl_region_probe().
+ */
+ #define CXL_REGION_F_NEEDS_RESET 1
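/*
 * A sketch of the probe gate this flag enables: while the bit is set,
 * region probe refuses with -ENXIO until a re-commit completes the
 * teardown. test_bit() is modeled with a plain bitmask; the function
 * names here are hypothetical.
 */
#include <errno.h>
#include <stdio.h>

#define F_NEEDS_RESET (1UL << 1)

static int model_region_probe(unsigned long flags)
{
	if (flags & F_NEEDS_RESET) {
		fprintf(stderr, "failed to activate, re-commit region and retry\n");
		return -ENXIO;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", model_region_probe(F_NEEDS_RESET));	/* -ENXIO */
	printf("%d\n", model_region_probe(0));			/* 0 */
	return 0;
}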
/**
* struct cxl_region - CXL region
struct cxl_hdm;
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info);
-int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm);
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
struct cxl_endpoint_dvsec_info *info);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);