nvdimm/region: Move cache management to the region driver
author     Dan Williams <dan.j.williams@intel.com>
           Thu, 1 Dec 2022 22:03:35 +0000 (14:03 -0800)
committer  Dan Williams <dan.j.williams@intel.com>
           Sat, 3 Dec 2022 07:52:32 +0000 (23:52 -0800)
Now that cpu_cache_invalidate_memregion() is generically available, use
it to centralize CPU cache management in the nvdimm region driver.

This trades the removal of redundant per-DIMM CPU cache flushing for an
opportunistic flush on every region disable event, covering the case of
sensitive dirty data in the cache being written back to media after a
secure erase / overwrite event.
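
The resulting flow, condensed from the hunks below (all names are taken
from this patch; this is a summary of the change, not additional code):

    security_erase() / security_overwrite() / __nvdimm_security_unlock()
        -> on success, set_bit(NDD_INCOHERENT, &nvdimm->flags)

    nd_region_activate()
        -> nd_region_invalidate_memregion()
           -> if any mapped DIMM is NDD_INCOHERENT, call
              cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY)
              and clear the flag; otherwise fail activation with -ENXIO
              when invalidation is unavailable (CONFIG_NVDIMM_SECURITY_TEST
              bypasses that failure for testing)

    nd_region_remove()
        -> opportunistic cpu_cache_invalidate_memregion() so that dirty
           cache lines cannot reach media sanitized while the region was
           disabled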

Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/166993221550.1995348.16843505129579060258.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
drivers/acpi/nfit/intel.c
drivers/nvdimm/region.c
drivers/nvdimm/region_devs.c
drivers/nvdimm/security.c
include/linux/libnvdimm.h

diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index fa0e57e..3902759 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -212,9 +212,6 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
        if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
                return -ENOTTY;
 
-       if (!cpu_cache_has_invalidate_memregion())
-               return -EINVAL;
-
        memcpy(nd_cmd.cmd.passphrase, key_data->data,
                        sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -229,9 +226,6 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
                return -EIO;
        }
 
-       /* DIMM unlocked, invalidate all CPU caches before we read it */
-       cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
-
        return 0;
 }
 
@@ -299,11 +293,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
        if (!test_bit(cmd, &nfit_mem->dsm_mask))
                return -ENOTTY;
 
-       if (!cpu_cache_has_invalidate_memregion())
-               return -EINVAL;
-
-       /* flush all cache before we erase DIMM */
-       cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
        memcpy(nd_cmd.cmd.passphrase, key->data,
                        sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -322,8 +311,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
                return -ENXIO;
        }
 
-       /* DIMM erased, invalidate all CPU caches before we read it */
-       cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
        return 0;
 }
 
@@ -346,9 +333,6 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
        if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
                return -ENOTTY;
 
-       if (!cpu_cache_has_invalidate_memregion())
-               return -EINVAL;
-
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0)
                return rc;
@@ -362,8 +346,6 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
                return -ENXIO;
        }
 
-       /* flush all cache before we make the nvdimms available */
-       cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
        return 0;
 }
 
@@ -388,11 +370,6 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
        if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
                return -ENOTTY;
 
-       if (!cpu_cache_has_invalidate_memregion())
-               return -EINVAL;
-
-       /* flush all cache before we erase DIMM */
-       cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
        memcpy(nd_cmd.cmd.passphrase, nkey->data,
                        sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -770,5 +747,3 @@ static const struct nvdimm_fw_ops __intel_fw_ops = {
 };
 
 const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;
-
-MODULE_IMPORT_NS(DEVMEM);
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 390123d..88dc062 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -2,6 +2,7 @@
 /*
  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  */
+#include <linux/memregion.h>
 #include <linux/cpumask.h>
 #include <linux/module.h>
 #include <linux/device.h>
@@ -100,6 +101,16 @@ static void nd_region_remove(struct device *dev)
         */
        sysfs_put(nd_region->bb_state);
        nd_region->bb_state = NULL;
+
+       /*
+        * Try to flush caches here since a disabled region may be subject to
+        * secure erase while disabled, and previous dirty data should not be
+        * written back to a new instance of the region. This only matters on
+        * bare metal where security commands are available, so silent failure
+        * here is ok.
+        */
+       if (cpu_cache_has_invalidate_memregion())
+               cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
 }
 
 static int child_notify(struct device *dev, void *data)
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index e0875d3..83dbf39 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -59,9 +59,51 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
        return 0;
 }
 
+static int nd_region_invalidate_memregion(struct nd_region *nd_region)
+{
+       int i, incoherent = 0;
+
+       for (i = 0; i < nd_region->ndr_mappings; i++) {
+               struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+               struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+               if (test_bit(NDD_INCOHERENT, &nvdimm->flags)) {
+                       incoherent++;
+                       break;
+               }
+       }
+
+       if (!incoherent)
+               return 0;
+
+       if (!cpu_cache_has_invalidate_memregion()) {
+               if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST)) {
+                       dev_warn(
+                               &nd_region->dev,
+                               "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
+                       goto out;
+               } else {
+                       dev_err(&nd_region->dev,
+                               "Failed to synchronize CPU cache state\n");
+                       return -ENXIO;
+               }
+       }
+
+       cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
+out:
+       for (i = 0; i < nd_region->ndr_mappings; i++) {
+               struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+               struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+               clear_bit(NDD_INCOHERENT, &nvdimm->flags);
+       }
+
+       return 0;
+}
+
 int nd_region_activate(struct nd_region *nd_region)
 {
-       int i, j, num_flush = 0;
+       int i, j, rc, num_flush = 0;
        struct nd_region_data *ndrd;
        struct device *dev = &nd_region->dev;
        size_t flush_data_size = sizeof(void *);
@@ -85,6 +127,10 @@ int nd_region_activate(struct nd_region *nd_region)
        }
        nvdimm_bus_unlock(&nd_region->dev);
 
+       rc = nd_region_invalidate_memregion(nd_region);
+       if (rc)
+               return rc;
+
        ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
        if (!ndrd)
                return -ENOMEM;
@@ -1222,3 +1268,5 @@ int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
 
        return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
 }
+
+MODULE_IMPORT_NS(DEVMEM);
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 6814339..a03e3c4 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -208,6 +208,8 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
        rc = nvdimm->sec.ops->unlock(nvdimm, data);
        dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
+       if (rc == 0)
+               set_bit(NDD_INCOHERENT, &nvdimm->flags);
 
        nvdimm_put_key(key);
        nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
@@ -374,6 +376,8 @@ static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
                return -ENOKEY;
 
        rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
+       if (rc == 0)
+               set_bit(NDD_INCOHERENT, &nvdimm->flags);
        dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
                        pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
                        rc == 0 ? "success" : "fail");
@@ -408,6 +412,8 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
                return -ENOKEY;
 
        rc = nvdimm->sec.ops->overwrite(nvdimm, data);
+       if (rc == 0)
+               set_bit(NDD_INCOHERENT, &nvdimm->flags);
        dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
 
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 3bf658a..af38252 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -35,6 +35,11 @@ enum {
        NDD_WORK_PENDING = 4,
        /* dimm supports namespace labels */
        NDD_LABELING = 6,
+       /*
+        * DIMM contents have changed, requiring invalidation of CPU caches
+        * prior to activation of a region that includes this device
+        */
+       NDD_INCOHERENT = 7,
 
        /* need to set a limit somewhere, but yes, this is likely overkill */
        ND_IOCTL_MAX_BUFLEN = SZ_4M,