 	return xa_load(&port->regions, (unsigned long)cxlr);
 }
 
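+/*
+ * Ensure CPU caches are coherent with the CXL-mapped address space
+ * before it is reconfigured. Platforms without
+ * cpu_cache_has_invalidate_memregion() support fail with -ENXIO,
+ * unless CONFIG_CXL_REGION_INVALIDATION_TEST explicitly opts in to
+ * bypassing the invalidation for test environments.
+ */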
+static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
+{
+	if (!cpu_cache_has_invalidate_memregion()) {
+		if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
+			dev_warn_once(
+				&cxlr->dev,
+				"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
+			return 0;
+		} else {
+			dev_err(&cxlr->dev,
+				"Failed to synchronize CPU cache state\n");
+			return -ENXIO;
+		}
+	}
+
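+	/* Invalidate CPU caches over every IORES_DESC_CXL address range */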
+	cpu_cache_invalidate_memregion(IORES_DESC_CXL);
+	return 0;
+}
+
 static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
 {
 	struct cxl_region_params *p = &cxlr->params;
-	int i;
+	int i, rc = 0;
+
+	/*
+	 * Before region teardown, attempt to flush; if the flush fails,
+	 * cancel the teardown for data consistency concerns.
+	 */
+	rc = cxl_region_invalidate_memregion(cxlr);
+	if (rc)
+		return rc;
 
 	for (i = count - 1; i >= 0; i--) {
 		struct cxl_endpoint_decoder *cxled = p->targets[i];
 		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
 		struct cxl_port *iter = cxled_to_port(cxled);
 		struct cxl_dev_state *cxlds = cxlmd->cxlds;
 		struct cxl_ep *ep;
-		int rc = 0;
 
 		if (cxlds->rcd)
 			goto endpoint_reset;
 		goto out;
 	}
 
+	/*
+	 * Invalidate caches before region setup to drop any speculative
+	 * consumption of this address space.
+	 */
+	rc = cxl_region_invalidate_memregion(cxlr);
+	if (rc)
+		goto out;
+
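+	/* With caches invalidated, proceed to commit or tear down the decode */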
 	if (commit)
 		rc = cxl_region_decode_commit(cxlr);
 	else {
 		if (rc)
 			goto err_decrement;
 		p->state = CXL_CONFIG_ACTIVE;
-		set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
 	}
 	cxled->cxld.interleave_ways = p->interleave_ways;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
 
-static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
-{
-	if (!test_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags))
-		return 0;
-
-	if (!cpu_cache_has_invalidate_memregion()) {
-		if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
-			dev_warn_once(
-				&cxlr->dev,
-				"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
-			clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
-			return 0;
-		} else {
-			dev_err(&cxlr->dev,
-				"Failed to synchronize CPU cache state\n");
-			return -ENXIO;
-		}
-	}
-
-	cpu_cache_invalidate_memregion(IORES_DESC_CXL);
-	clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
-	return 0;
-}
-
 static int is_system_ram(struct resource *res, void *arg)
 {
 	struct cxl_region *cxlr = arg;
 		goto out;
 	}
 
-	rc = cxl_region_invalidate_memregion(cxlr);
-
 	/*
 	 * From this point on any path that changes the region's state away from
 	 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
 };
 
 /*
- * Flag whether this region needs to have its HPA span synchronized with
- * CPU cache state at region activation time.
- */
-#define CXL_REGION_F_INCOHERENT 0
-
-/*
  * Indicate whether this region has been assembled by autodetection or
  * userspace assembly. Prevent endpoint decoders outside of automatic
  * detection from being added to the region.
  */
-#define CXL_REGION_F_AUTO 1
+#define CXL_REGION_F_AUTO 0
 
 /**
  * struct cxl_region - CXL region