cxl/memdev: Change cxl_mem to a more descriptive name
Author:     Ira Weiny <ira.weiny@intel.com>
AuthorDate: Tue, 2 Nov 2021 20:29:01 +0000 (13:29 -0700)
Commit:     Dan Williams <dan.j.williams@intel.com>
CommitDate: Mon, 15 Nov 2021 19:02:58 +0000 (11:02 -0800)
The 'struct cxl_mem' object actually represents the state of a CXL
device within the driver. Comments indicating that 'struct cxl_mem' is a
device itself are incorrect. It is data layered on top of a CXL Memory
Expander class device. Rename it 'struct cxl_dev_state'. The 'struct
cxl_memdev' structure represents a Linux CXL memory device object, and
it uses services and information provided by 'struct cxl_dev_state'.

Update the structure name, function names, and the kdocs to reflect the
real uses of this structure.

Helper functions that were previously prefixed 'cxl_mem_' are renamed to
either a bare 'cxl_' prefix (e.g. cxl_mbox_send_cmd(), cxl_enumerate_cmds())
or a 'cxl_dev_state_' prefix (e.g. cxl_dev_state_create(),
cxl_dev_state_identify()).
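
For orientation, a condensed sketch of the two objects after the rename
(field lists abbreviated; the full definitions are in the
drivers/cxl/cxlmem.h hunks below):

    /* Driver-private device state; formerly 'struct cxl_mem' */
    struct cxl_dev_state {
            struct device *dev;
            struct cxl_memdev *cxlmd;
            size_t payload_size;
            /* ... cached identify/partition data, command bitmaps ... */
            int (*mbox_send)(struct cxl_dev_state *cxlds,
                             struct cxl_mbox_cmd *cmd);
    };

    /* The Linux CXL memory device object, layered on top of cxl_dev_state */
    struct cxl_memdev {
            struct device dev;
            struct cdev cdev;
            struct cxl_dev_state *cxlds;    /* formerly 'struct cxl_mem *cxlm' */
            int id;
    };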

Acked-by: Ben Widawsky <ben.widawsky@intel.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Link: https://lore.kernel.org/r/20211102202901.3675568-3-ira.weiny@intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
drivers/cxl/core/mbox.c
drivers/cxl/core/memdev.c
drivers/cxl/cxlmem.h
drivers/cxl/pci.c
drivers/cxl/pmem.c
tools/testing/cxl/test/mem.c

diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 79bb642..0806ba8 100644
@@ -128,8 +128,8 @@ static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
 }
 
 /**
- * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device.
- * @cxlm: The CXL memory device to communicate with.
+ * cxl_mbox_send_cmd() - Send a mailbox command to a device.
+ * @cxlds: The device data for the operation
  * @opcode: Opcode for the mailbox command.
  * @in: The input payload for the mailbox command.
  * @in_size: The length of the input payload
@@ -149,8 +149,8 @@ static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
  * error. While this distinction can be useful for commands from userspace, the
  * kernel will only be able to use results when both are successful.
  */
-int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
-                         size_t in_size, void *out, size_t out_size)
+int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
+                     size_t in_size, void *out, size_t out_size)
 {
        const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
        struct cxl_mbox_cmd mbox_cmd = {
@@ -162,10 +162,10 @@ int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
        };
        int rc;
 
-       if (out_size > cxlm->payload_size)
+       if (out_size > cxlds->payload_size)
                return -E2BIG;
 
-       rc = cxlm->mbox_send(cxlm, &mbox_cmd);
+       rc = cxlds->mbox_send(cxlds, &mbox_cmd);
        if (rc)
                return rc;
 
@@ -182,7 +182,7 @@ int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(cxl_mem_mbox_send_cmd);
+EXPORT_SYMBOL_GPL(cxl_mbox_send_cmd);
 
 static bool cxl_mem_raw_command_allowed(u16 opcode)
 {
@@ -209,7 +209,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
 
 /**
  * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
- * @cxlm: &struct cxl_mem device whose mailbox will be used.
+ * @cxlds: The device data for the operation
  * @send_cmd: &struct cxl_send_command copied in from userspace.
  * @out_cmd: Sanitized and populated &struct cxl_mem_command.
  *
@@ -226,7 +226,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
  *
  * See handle_mailbox_cmd_from_user()
  */
-static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
+static int cxl_validate_cmd_from_user(struct cxl_dev_state *cxlds,
                                      const struct cxl_send_command *send_cmd,
                                      struct cxl_mem_command *out_cmd)
 {
@@ -241,7 +241,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
         * supports, but output can be arbitrarily large (simply write out as
         * much data as the hardware provides).
         */
-       if (send_cmd->in.size > cxlm->payload_size)
+       if (send_cmd->in.size > cxlds->payload_size)
                return -EINVAL;
 
        /*
@@ -267,7 +267,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
                 * gets passed along without further checking, so it must be
                 * validated here.
                 */
-               if (send_cmd->out.size > cxlm->payload_size)
+               if (send_cmd->out.size > cxlds->payload_size)
                        return -EINVAL;
 
                if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
@@ -292,11 +292,11 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
        info = &c->info;
 
        /* Check that the command is enabled for hardware */
-       if (!test_bit(info->id, cxlm->enabled_cmds))
+       if (!test_bit(info->id, cxlds->enabled_cmds))
                return -ENOTTY;
 
        /* Check that the command is not claimed for exclusive kernel use */
-       if (test_bit(info->id, cxlm->exclusive_cmds))
+       if (test_bit(info->id, cxlds->exclusive_cmds))
                return -EBUSY;
 
        /* Check the input buffer is the expected size */
@@ -354,7 +354,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
 
 /**
  * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
- * @cxlm: The CXL memory device to communicate with.
+ * @cxlds: The device data for the operation
  * @cmd: The validated command.
  * @in_payload: Pointer to userspace's input payload.
  * @out_payload: Pointer to userspace's output payload.
@@ -377,12 +377,12 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
  *
  * See cxl_send_cmd().
  */
-static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
+static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
                                        const struct cxl_mem_command *cmd,
                                        u64 in_payload, u64 out_payload,
                                        s32 *size_out, u32 *retval)
 {
-       struct device *dev = cxlm->dev;
+       struct device *dev = cxlds->dev;
        struct cxl_mbox_cmd mbox_cmd = {
                .opcode = cmd->opcode,
                .size_in = cmd->info.size_in,
@@ -415,7 +415,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
        dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW,
                      "raw command path used\n");
 
-       rc = cxlm->mbox_send(cxlm, &mbox_cmd);
+       rc = cxlds->mbox_send(cxlds, &mbox_cmd);
        if (rc)
                goto out;
 
@@ -445,7 +445,7 @@ out:
 
 int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
 {
-       struct cxl_mem *cxlm = cxlmd->cxlm;
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxlmd->dev;
        struct cxl_send_command send;
        struct cxl_mem_command c;
@@ -456,15 +456,15 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
        if (copy_from_user(&send, s, sizeof(send)))
                return -EFAULT;
 
-       rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c);
+       rc = cxl_validate_cmd_from_user(cxlmd->cxlds, &send, &c);
        if (rc)
                return rc;
 
        /* Prepare to handle a full payload for variable sized output */
        if (c.info.size_out < 0)
-               c.info.size_out = cxlm->payload_size;
+               c.info.size_out = cxlds->payload_size;
 
-       rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload,
+       rc = handle_mailbox_cmd_from_user(cxlds, &c, send.in.payload,
                                          send.out.payload, &send.out.size,
                                          &send.retval);
        if (rc)
@@ -476,13 +476,13 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
        return 0;
 }
 
-static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
+static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
 {
        u32 remaining = size;
        u32 offset = 0;
 
        while (remaining) {
-               u32 xfer_size = min_t(u32, remaining, cxlm->payload_size);
+               u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
                struct cxl_mbox_get_log log = {
                        .uuid = *uuid,
                        .offset = cpu_to_le32(offset),
@@ -490,8 +490,8 @@ static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
                };
                int rc;
 
-               rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log,
-                                          sizeof(log), out, xfer_size);
+               rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LOG, &log, sizeof(log),
+                                      out, xfer_size);
                if (rc < 0)
                        return rc;
 
@@ -505,14 +505,14 @@ static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
 
 /**
  * cxl_walk_cel() - Walk through the Command Effects Log.
- * @cxlm: Device.
+ * @cxlds: The device data for the operation
  * @size: Length of the Command Effects Log.
  * @cel: CEL
  *
  * Iterate over each entry in the CEL and determine if the driver supports the
  * command. If so, the command is enabled for the device and can be used later.
  */
-static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
+static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
 {
        struct cxl_cel_entry *cel_entry;
        const int cel_entries = size / sizeof(*cel_entry);
@@ -525,26 +525,26 @@ static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
                struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
 
                if (!cmd) {
-                       dev_dbg(cxlm->dev,
+                       dev_dbg(cxlds->dev,
                                "Opcode 0x%04x unsupported by driver", opcode);
                        continue;
                }
 
-               set_bit(cmd->info.id, cxlm->enabled_cmds);
+               set_bit(cmd->info.id, cxlds->enabled_cmds);
        }
 }
 
-static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
+static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
 {
        struct cxl_mbox_get_supported_logs *ret;
        int rc;
 
-       ret = kvmalloc(cxlm->payload_size, GFP_KERNEL);
+       ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
        if (!ret)
                return ERR_PTR(-ENOMEM);
 
-       rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL,
-                                  0, ret, cxlm->payload_size);
+       rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 0, ret,
+                              cxlds->payload_size);
        if (rc < 0) {
                kvfree(ret);
                return ERR_PTR(rc);
@@ -565,23 +565,23 @@ static const uuid_t log_uuid[] = {
 };
 
 /**
- * cxl_mem_enumerate_cmds() - Enumerate commands for a device.
- * @cxlm: The device.
+ * cxl_enumerate_cmds() - Enumerate commands for a device.
+ * @cxlds: The device data for the operation
  *
  * Returns 0 if enumerate completed successfully.
  *
  * CXL devices have optional support for certain commands. This function will
  * determine the set of supported commands for the hardware and update the
- * enabled_cmds bitmap in the @cxlm.
+ * enabled_cmds bitmap in the @cxlds.
  */
-int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
+int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
 {
        struct cxl_mbox_get_supported_logs *gsl;
-       struct device *dev = cxlm->dev;
+       struct device *dev = cxlds->dev;
        struct cxl_mem_command *cmd;
        int i, rc;
 
-       gsl = cxl_get_gsl(cxlm);
+       gsl = cxl_get_gsl(cxlds);
        if (IS_ERR(gsl))
                return PTR_ERR(gsl);
 
@@ -602,19 +602,19 @@ int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
                        goto out;
                }
 
-               rc = cxl_xfer_log(cxlm, &uuid, size, log);
+               rc = cxl_xfer_log(cxlds, &uuid, size, log);
                if (rc) {
                        kvfree(log);
                        goto out;
                }
 
-               cxl_walk_cel(cxlm, size, log);
+               cxl_walk_cel(cxlds, size, log);
                kvfree(log);
 
                /* In case CEL was bogus, enable some default commands. */
                cxl_for_each_cmd(cmd)
                        if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
-                               set_bit(cmd->info.id, cxlm->enabled_cmds);
+                               set_bit(cmd->info.id, cxlds->enabled_cmds);
 
                /* Found the required CEL */
                rc = 0;
@@ -624,11 +624,11 @@ out:
        kvfree(gsl);
        return rc;
 }
-EXPORT_SYMBOL_GPL(cxl_mem_enumerate_cmds);
+EXPORT_SYMBOL_GPL(cxl_enumerate_cmds);
 
 /**
  * cxl_mem_get_partition_info - Get partition info
- * @cxlm: cxl_mem instance to update partition info
+ * @cxlds: The device data for the operation
  *
  * Retrieve the current partition info for the device specified.  The active
  * values are the current capacity in bytes.  If not 0, the 'next' values are
@@ -638,7 +638,7 @@ EXPORT_SYMBOL_GPL(cxl_mem_enumerate_cmds);
  *
  * See CXL @8.2.9.5.2.1 Get Partition Info
  */
-static int cxl_mem_get_partition_info(struct cxl_mem *cxlm)
+static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
 {
        struct cxl_mbox_get_partition_info {
                __le64 active_volatile_cap;
@@ -648,124 +648,124 @@ static int cxl_mem_get_partition_info(struct cxl_mem *cxlm)
        } __packed pi;
        int rc;
 
-       rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO,
-                                  NULL, 0, &pi, sizeof(pi));
+       rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
+                              &pi, sizeof(pi));
 
        if (rc)
                return rc;
 
-       cxlm->active_volatile_bytes =
+       cxlds->active_volatile_bytes =
                le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
-       cxlm->active_persistent_bytes =
+       cxlds->active_persistent_bytes =
                le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
-       cxlm->next_volatile_bytes =
+       cxlds->next_volatile_bytes =
                le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
-       cxlm->next_persistent_bytes =
+       cxlds->next_persistent_bytes =
                le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
 
        return 0;
 }
 
 /**
- * cxl_mem_identify() - Send the IDENTIFY command to the device.
- * @cxlm: The device to identify.
+ * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
+ * @cxlds: The device data for the operation
  *
  * Return: 0 if identify was executed successfully.
  *
  * This will dispatch the identify command to the device and on success populate
  * structures to be exported to sysfs.
  */
-int cxl_mem_identify(struct cxl_mem *cxlm)
+int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
 {
        /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
        struct cxl_mbox_identify id;
        int rc;
 
-       rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
-                                  sizeof(id));
+       rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
+                              sizeof(id));
        if (rc < 0)
                return rc;
 
-       cxlm->total_bytes =
+       cxlds->total_bytes =
                le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
-       cxlm->volatile_only_bytes =
+       cxlds->volatile_only_bytes =
                le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
-       cxlm->persistent_only_bytes =
+       cxlds->persistent_only_bytes =
                le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
-       cxlm->partition_align_bytes =
+       cxlds->partition_align_bytes =
                le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
 
-       dev_dbg(cxlm->dev,
+       dev_dbg(cxlds->dev,
                "Identify Memory Device\n"
                "     total_bytes = %#llx\n"
                "     volatile_only_bytes = %#llx\n"
                "     persistent_only_bytes = %#llx\n"
                "     partition_align_bytes = %#llx\n",
-               cxlm->total_bytes, cxlm->volatile_only_bytes,
-               cxlm->persistent_only_bytes, cxlm->partition_align_bytes);
+               cxlds->total_bytes, cxlds->volatile_only_bytes,
+               cxlds->persistent_only_bytes, cxlds->partition_align_bytes);
 
-       cxlm->lsa_size = le32_to_cpu(id.lsa_size);
-       memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
+       cxlds->lsa_size = le32_to_cpu(id.lsa_size);
+       memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(cxl_mem_identify);
+EXPORT_SYMBOL_GPL(cxl_dev_state_identify);
 
-int cxl_mem_create_range_info(struct cxl_mem *cxlm)
+int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
 {
        int rc;
 
-       if (cxlm->partition_align_bytes == 0) {
-               cxlm->ram_range.start = 0;
-               cxlm->ram_range.end = cxlm->volatile_only_bytes - 1;
-               cxlm->pmem_range.start = cxlm->volatile_only_bytes;
-               cxlm->pmem_range.end = cxlm->volatile_only_bytes +
-                                      cxlm->persistent_only_bytes - 1;
+       if (cxlds->partition_align_bytes == 0) {
+               cxlds->ram_range.start = 0;
+               cxlds->ram_range.end = cxlds->volatile_only_bytes - 1;
+               cxlds->pmem_range.start = cxlds->volatile_only_bytes;
+               cxlds->pmem_range.end = cxlds->volatile_only_bytes +
+                                      cxlds->persistent_only_bytes - 1;
                return 0;
        }
 
-       rc = cxl_mem_get_partition_info(cxlm);
+       rc = cxl_mem_get_partition_info(cxlds);
        if (rc) {
-               dev_err(cxlm->dev, "Failed to query partition information\n");
+               dev_err(cxlds->dev, "Failed to query partition information\n");
                return rc;
        }
 
-       dev_dbg(cxlm->dev,
+       dev_dbg(cxlds->dev,
                "Get Partition Info\n"
                "     active_volatile_bytes = %#llx\n"
                "     active_persistent_bytes = %#llx\n"
                "     next_volatile_bytes = %#llx\n"
                "     next_persistent_bytes = %#llx\n",
-               cxlm->active_volatile_bytes, cxlm->active_persistent_bytes,
-               cxlm->next_volatile_bytes, cxlm->next_persistent_bytes);
+               cxlds->active_volatile_bytes, cxlds->active_persistent_bytes,
+               cxlds->next_volatile_bytes, cxlds->next_persistent_bytes);
 
-       cxlm->ram_range.start = 0;
-       cxlm->ram_range.end = cxlm->active_volatile_bytes - 1;
+       cxlds->ram_range.start = 0;
+       cxlds->ram_range.end = cxlds->active_volatile_bytes - 1;
 
-       cxlm->pmem_range.start = cxlm->active_volatile_bytes;
-       cxlm->pmem_range.end =
-               cxlm->active_volatile_bytes + cxlm->active_persistent_bytes - 1;
+       cxlds->pmem_range.start = cxlds->active_volatile_bytes;
+       cxlds->pmem_range.end =
+               cxlds->active_volatile_bytes + cxlds->active_persistent_bytes - 1;
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(cxl_mem_create_range_info);
 
-struct cxl_mem *cxl_mem_create(struct device *dev)
+struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
 {
-       struct cxl_mem *cxlm;
+       struct cxl_dev_state *cxlds;
 
-       cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL);
-       if (!cxlm) {
+       cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
+       if (!cxlds) {
                dev_err(dev, "No memory available\n");
                return ERR_PTR(-ENOMEM);
        }
 
-       mutex_init(&cxlm->mbox_mutex);
-       cxlm->dev = dev;
+       mutex_init(&cxlds->mbox_mutex);
+       cxlds->dev = dev;
 
-       return cxlm;
+       return cxlds;
 }
-EXPORT_SYMBOL_GPL(cxl_mem_create);
+EXPORT_SYMBOL_GPL(cxl_dev_state_create);
 
 static struct dentry *cxl_debugfs;
 
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index bf1b04d..6d6c738 100644
@@ -37,9 +37,9 @@ static ssize_t firmware_version_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_mem *cxlm = cxlmd->cxlm;
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
 
-       return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
+       return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version);
 }
 static DEVICE_ATTR_RO(firmware_version);
 
@@ -47,9 +47,9 @@ static ssize_t payload_max_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_mem *cxlm = cxlmd->cxlm;
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
 
-       return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
+       return sysfs_emit(buf, "%zu\n", cxlds->payload_size);
 }
 static DEVICE_ATTR_RO(payload_max);
 
@@ -57,9 +57,9 @@ static ssize_t label_storage_size_show(struct device *dev,
                                       struct device_attribute *attr, char *buf)
 {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_mem *cxlm = cxlmd->cxlm;
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
 
-       return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
+       return sysfs_emit(buf, "%zu\n", cxlds->lsa_size);
 }
 static DEVICE_ATTR_RO(label_storage_size);
 
@@ -67,8 +67,8 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
 {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_mem *cxlm = cxlmd->cxlm;
-       unsigned long long len = range_len(&cxlm->ram_range);
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       unsigned long long len = range_len(&cxlds->ram_range);
 
        return sysfs_emit(buf, "%#llx\n", len);
 }
@@ -80,8 +80,8 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
 {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_mem *cxlm = cxlmd->cxlm;
-       unsigned long long len = range_len(&cxlm->pmem_range);
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       unsigned long long len = range_len(&cxlds->pmem_range);
 
        return sysfs_emit(buf, "%#llx\n", len);
 }
@@ -136,17 +136,17 @@ static const struct device_type cxl_memdev_type = {
 
 /**
  * set_exclusive_cxl_commands() - atomically disable user cxl commands
- * @cxlm: cxl_mem instance to modify
+ * @cxlds: The device state to operate on
  * @cmds: bitmap of commands to mark exclusive
  *
  * Grab the cxl_memdev_rwsem in write mode to flush in-flight
  * invocations of the ioctl path and then disable future execution of
  * commands with the command ids set in @cmds.
  */
-void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
+void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
 {
        down_write(&cxl_memdev_rwsem);
-       bitmap_or(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
+       bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
                  CXL_MEM_COMMAND_ID_MAX);
        up_write(&cxl_memdev_rwsem);
 }
@@ -154,13 +154,13 @@ EXPORT_SYMBOL_GPL(set_exclusive_cxl_commands);
 
 /**
  * clear_exclusive_cxl_commands() - atomically enable user cxl commands
- * @cxlm: cxl_mem instance to modify
+ * @cxlds: The device state to modify
  * @cmds: bitmap of commands to mark available for userspace
  */
-void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
+void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
 {
        down_write(&cxl_memdev_rwsem);
-       bitmap_andnot(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
+       bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
                      CXL_MEM_COMMAND_ID_MAX);
        up_write(&cxl_memdev_rwsem);
 }
@@ -171,7 +171,7 @@ static void cxl_memdev_shutdown(struct device *dev)
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 
        down_write(&cxl_memdev_rwsem);
-       cxlmd->cxlm = NULL;
+       cxlmd->cxlds = NULL;
        up_write(&cxl_memdev_rwsem);
 }
 
@@ -185,7 +185,7 @@ static void cxl_memdev_unregister(void *_cxlmd)
        put_device(dev);
 }
 
-static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
+static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
                                           const struct file_operations *fops)
 {
        struct cxl_memdev *cxlmd;
@@ -204,7 +204,7 @@ static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
 
        dev = &cxlmd->dev;
        device_initialize(dev);
-       dev->parent = cxlm->dev;
+       dev->parent = cxlds->dev;
        dev->bus = &cxl_bus_type;
        dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
        dev->type = &cxl_memdev_type;
@@ -239,7 +239,7 @@ static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
        int rc = -ENXIO;
 
        down_read(&cxl_memdev_rwsem);
-       if (cxlmd->cxlm)
+       if (cxlmd->cxlds)
                rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
        up_read(&cxl_memdev_rwsem);
 
@@ -276,15 +276,14 @@ static const struct file_operations cxl_memdev_fops = {
        .llseek = noop_llseek,
 };
 
-struct cxl_memdev *
-devm_cxl_add_memdev(struct cxl_mem *cxlm)
+struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
 {
        struct cxl_memdev *cxlmd;
        struct device *dev;
        struct cdev *cdev;
        int rc;
 
-       cxlmd = cxl_memdev_alloc(cxlm, &cxl_memdev_fops);
+       cxlmd = cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
        if (IS_ERR(cxlmd))
                return cxlmd;
 
@@ -297,14 +296,14 @@ devm_cxl_add_memdev(struct cxl_mem *cxlm)
         * Activate ioctl operations, no cxl_memdev_rwsem manipulation
         * needed as this is ordered with cdev_add() publishing the device.
         */
-       cxlmd->cxlm = cxlm;
+       cxlmd->cxlds = cxlds;
 
        cdev = &cxlmd->cdev;
        rc = cdev_device_add(cdev, dev);
        if (rc)
                goto err;
 
-       rc = devm_add_action_or_reset(cxlm->dev, cxl_memdev_unregister, cxlmd);
+       rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
        if (rc)
                return ERR_PTR(rc);
        return cxlmd;
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index c4f450a..f978a04 100644
  * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
  * @dev: driver core device object
  * @cdev: char dev core object for ioctl operations
- * @cxlm: pointer to the parent device driver data
+ * @cxlds: The device state backing this device
  * @id: id number of this memdev instance.
  */
 struct cxl_memdev {
        struct device dev;
        struct cdev cdev;
-       struct cxl_mem *cxlm;
+       struct cxl_dev_state *cxlds;
        int id;
 };
 
@@ -48,7 +48,7 @@ static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
        return container_of(dev, struct cxl_memdev, dev);
 }
 
-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_mem *cxlm);
+struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
 
 /**
  * struct cxl_mbox_cmd - A command to be submitted to hardware.
@@ -90,8 +90,13 @@ struct cxl_mbox_cmd {
 #define CXL_CAPACITY_MULTIPLIER SZ_256M
 
 /**
- * struct cxl_mem - A CXL memory device
- * @dev: The device associated with this CXL device.
+ * struct cxl_dev_state - The driver device state
+ *
+ * cxl_dev_state represents the CXL driver/device state.  It provides an
+ * interface to mailbox commands as well as some cached data about the device.
+ * Currently only memory devices are represented.
+ *
+ * @dev: The device associated with this CXL state
  * @cxlmd: Logical memory device chardev / interface
  * @regs: Parsed register blocks
  * @payload_size: Size of space for payload
@@ -117,7 +122,7 @@ struct cxl_mbox_cmd {
  * See section 8.2.9.5.2 Capacity Configuration and Label Storage for
  * details on capacity parameters.
  */
-struct cxl_mem {
+struct cxl_dev_state {
        struct device *dev;
        struct cxl_memdev *cxlmd;
 
@@ -142,7 +147,7 @@ struct cxl_mem {
        u64 next_volatile_bytes;
        u64 next_persistent_bytes;
 
-       int (*mbox_send)(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd);
+       int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
 };
 
 enum cxl_opcode {
@@ -253,12 +258,12 @@ struct cxl_mem_command {
 #define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
 };
 
-int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
-                         size_t in_size, void *out, size_t out_size);
-int cxl_mem_identify(struct cxl_mem *cxlm);
-int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm);
-int cxl_mem_create_range_info(struct cxl_mem *cxlm);
-struct cxl_mem *cxl_mem_create(struct device *dev);
-void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
-void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
+int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
+                     size_t in_size, void *out, size_t out_size);
+int cxl_dev_state_identify(struct cxl_dev_state *cxlds);
+int cxl_enumerate_cmds(struct cxl_dev_state *cxlds);
+int cxl_mem_create_range_info(struct cxl_dev_state *cxlds);
+struct cxl_dev_state *cxl_dev_state_create(struct device *dev);
+void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
+void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
 #endif /* __CXL_MEM_H__ */
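
Taken together, an endpoint driver consumes the renamed API roughly as
follows. This is a condensed sketch of the probe flow (example_probe() and
example_mbox_send() are hypothetical names), mirroring the drivers/cxl/pci.c
and tools/testing/cxl/test/mem.c changes below:

    static int example_probe(struct device *dev)
    {
            struct cxl_dev_state *cxlds;
            struct cxl_memdev *cxlmd;
            int rc;

            cxlds = cxl_dev_state_create(dev);      /* was cxl_mem_create() */
            if (IS_ERR(cxlds))
                    return PTR_ERR(cxlds);

            /* The transport driver supplies the mailbox and payload size */
            cxlds->mbox_send = example_mbox_send;
            cxlds->payload_size = SZ_4K;

            rc = cxl_enumerate_cmds(cxlds);         /* was cxl_mem_enumerate_cmds() */
            if (rc)
                    return rc;

            rc = cxl_dev_state_identify(cxlds);     /* was cxl_mem_identify() */
            if (rc)
                    return rc;

            rc = cxl_mem_create_range_info(cxlds);
            if (rc)
                    return rc;

            cxlmd = devm_cxl_add_memdev(cxlds);
            return PTR_ERR_OR_ZERO(cxlmd);
    }
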
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index c734e21..8dc91fd 100644
  *  - Registers a CXL mailbox with cxl_core.
  */
 
-#define cxl_doorbell_busy(cxlm)                                                \
-       (readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                  \
+#define cxl_doorbell_busy(cxlds)                                                \
+       (readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                  \
         CXLDEV_MBOX_CTRL_DOORBELL)
 
 /* CXL 2.0 - 8.2.8.4 */
 #define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)
 
-static int cxl_pci_mbox_wait_for_doorbell(struct cxl_mem *cxlm)
+static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
 {
        const unsigned long start = jiffies;
        unsigned long end = start;
 
-       while (cxl_doorbell_busy(cxlm)) {
+       while (cxl_doorbell_busy(cxlds)) {
                end = jiffies;
 
                if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
                        /* Check again in case preempted before timeout test */
-                       if (!cxl_doorbell_busy(cxlm))
+                       if (!cxl_doorbell_busy(cxlds))
                                break;
                        return -ETIMEDOUT;
                }
                cpu_relax();
        }
 
-       dev_dbg(cxlm->dev, "Doorbell wait took %dms",
+       dev_dbg(cxlds->dev, "Doorbell wait took %dms",
                jiffies_to_msecs(end) - jiffies_to_msecs(start));
        return 0;
 }
 
-static void cxl_pci_mbox_timeout(struct cxl_mem *cxlm,
+static void cxl_pci_mbox_timeout(struct cxl_dev_state *cxlds,
                                 struct cxl_mbox_cmd *mbox_cmd)
 {
-       struct device *dev = cxlm->dev;
+       struct device *dev = cxlds->dev;
 
        dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n",
                mbox_cmd->opcode, mbox_cmd->size_in);
@@ -68,7 +68,7 @@ static void cxl_pci_mbox_timeout(struct cxl_mem *cxlm,
 
 /**
  * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
- * @cxlm: The CXL memory device to communicate with.
+ * @cxlds: The device state to communicate with.
  * @mbox_cmd: Command to send to the memory device.
  *
  * Context: Any context. Expects mbox_mutex to be held.
@@ -88,16 +88,16 @@ static void cxl_pci_mbox_timeout(struct cxl_mem *cxlm,
  * not need to coordinate with each other. The driver only uses the primary
  * mailbox.
  */
-static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
+static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
                                   struct cxl_mbox_cmd *mbox_cmd)
 {
-       void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
-       struct device *dev = cxlm->dev;
+       void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
+       struct device *dev = cxlds->dev;
        u64 cmd_reg, status_reg;
        size_t out_len;
        int rc;
 
-       lockdep_assert_held(&cxlm->mbox_mutex);
+       lockdep_assert_held(&cxlds->mbox_mutex);
 
        /*
         * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
@@ -117,7 +117,7 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
         */
 
        /* #1 */
-       if (cxl_doorbell_busy(cxlm)) {
+       if (cxl_doorbell_busy(cxlds)) {
                dev_err_ratelimited(dev, "Mailbox re-busy after acquiring\n");
                return -EBUSY;
        }
@@ -134,22 +134,22 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
        }
 
        /* #2, #3 */
-       writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
+       writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
 
        /* #4 */
        dev_dbg(dev, "Sending command\n");
        writel(CXLDEV_MBOX_CTRL_DOORBELL,
-              cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+              cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
 
        /* #5 */
-       rc = cxl_pci_mbox_wait_for_doorbell(cxlm);
+       rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
        if (rc == -ETIMEDOUT) {
-               cxl_pci_mbox_timeout(cxlm, mbox_cmd);
+               cxl_pci_mbox_timeout(cxlds, mbox_cmd);
                return rc;
        }
 
        /* #6 */
-       status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
+       status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
        mbox_cmd->return_code =
                FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
 
@@ -159,7 +159,7 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
        }
 
        /* #7 */
-       cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
+       cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
        out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);
 
        /* #8 */
@@ -171,7 +171,7 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
                 * have requested less data than the hardware supplied even
                 * within spec.
                 */
-               size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len);
+               size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len);
 
                memcpy_fromio(mbox_cmd->payload_out, payload, n);
                mbox_cmd->size_out = n;
@@ -184,18 +184,18 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
 
 /**
  * cxl_pci_mbox_get() - Acquire exclusive access to the mailbox.
- * @cxlm: The memory device to gain access to.
+ * @cxlds: The device state to gain access to.
  *
  * Context: Any context. Takes the mbox_mutex.
  * Return: 0 if exclusive access was acquired.
  */
-static int cxl_pci_mbox_get(struct cxl_mem *cxlm)
+static int cxl_pci_mbox_get(struct cxl_dev_state *cxlds)
 {
-       struct device *dev = cxlm->dev;
+       struct device *dev = cxlds->dev;
        u64 md_status;
        int rc;
 
-       mutex_lock_io(&cxlm->mbox_mutex);
+       mutex_lock_io(&cxlds->mbox_mutex);
 
        /*
         * XXX: There is some amount of ambiguity in the 2.0 version of the spec
@@ -214,13 +214,13 @@ static int cxl_pci_mbox_get(struct cxl_mem *cxlm)
         *    Mailbox Interface Ready bit. Therefore, waiting for the doorbell
         *    to be ready is sufficient.
         */
-       rc = cxl_pci_mbox_wait_for_doorbell(cxlm);
+       rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
        if (rc) {
                dev_warn(dev, "Mailbox interface not ready\n");
                goto out;
        }
 
-       md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET);
+       md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
        if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
                dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
                rc = -EBUSY;
@@ -249,41 +249,41 @@ static int cxl_pci_mbox_get(struct cxl_mem *cxlm)
        return 0;
 
 out:
-       mutex_unlock(&cxlm->mbox_mutex);
+       mutex_unlock(&cxlds->mbox_mutex);
        return rc;
 }
 
 /**
  * cxl_pci_mbox_put() - Release exclusive access to the mailbox.
- * @cxlm: The CXL memory device to communicate with.
+ * @cxlds: The device state to communicate with.
  *
  * Context: Any context. Expects mbox_mutex to be held.
  */
-static void cxl_pci_mbox_put(struct cxl_mem *cxlm)
+static void cxl_pci_mbox_put(struct cxl_dev_state *cxlds)
 {
-       mutex_unlock(&cxlm->mbox_mutex);
+       mutex_unlock(&cxlds->mbox_mutex);
 }
 
-static int cxl_pci_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
+static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
 {
        int rc;
 
-       rc = cxl_pci_mbox_get(cxlm);
+       rc = cxl_pci_mbox_get(cxlds);
        if (rc)
                return rc;
 
-       rc = __cxl_pci_mbox_send_cmd(cxlm, cmd);
-       cxl_pci_mbox_put(cxlm);
+       rc = __cxl_pci_mbox_send_cmd(cxlds, cmd);
+       cxl_pci_mbox_put(cxlds);
 
        return rc;
 }
 
-static int cxl_pci_setup_mailbox(struct cxl_mem *cxlm)
+static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
 {
-       const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
+       const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
 
-       cxlm->mbox_send = cxl_pci_mbox_send;
-       cxlm->payload_size =
+       cxlds->mbox_send = cxl_pci_mbox_send;
+       cxlds->payload_size =
                1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);
 
        /*
@@ -293,15 +293,15 @@ static int cxl_pci_setup_mailbox(struct cxl_mem *cxlm)
         * there's no point in going forward. If the size is too large, there's
         * no harm is soft limiting it.
         */
-       cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M);
-       if (cxlm->payload_size < 256) {
-               dev_err(cxlm->dev, "Mailbox is too small (%zub)",
-                       cxlm->payload_size);
+       cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M);
+       if (cxlds->payload_size < 256) {
+               dev_err(cxlds->dev, "Mailbox is too small (%zub)",
+                       cxlds->payload_size);
                return -ENXIO;
        }
 
-       dev_dbg(cxlm->dev, "Mailbox payload sized %zu",
-               cxlm->payload_size);
+       dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
+               cxlds->payload_size);
 
        return 0;
 }
@@ -379,18 +379,18 @@ static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
        return 0;
 }
 
-static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map)
+static int cxl_map_regs(struct cxl_dev_state *cxlds, struct cxl_register_map *map)
 {
-       struct device *dev = cxlm->dev;
+       struct device *dev = cxlds->dev;
        struct pci_dev *pdev = to_pci_dev(dev);
 
        switch (map->reg_type) {
        case CXL_REGLOC_RBI_COMPONENT:
-               cxl_map_component_regs(pdev, &cxlm->regs.component, map);
+               cxl_map_component_regs(pdev, &cxlds->regs.component, map);
                dev_dbg(dev, "Mapping component registers...\n");
                break;
        case CXL_REGLOC_RBI_MEMDEV:
-               cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map);
+               cxl_map_device_regs(pdev, &cxlds->regs.device_regs, map);
                dev_dbg(dev, "Probing device registers...\n");
                break;
        default:
@@ -475,7 +475,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct cxl_register_map map;
        struct cxl_memdev *cxlmd;
-       struct cxl_mem *cxlm;
+       struct cxl_dev_state *cxlds;
        int rc;
 
        /*
@@ -489,39 +489,39 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (rc)
                return rc;
 
-       cxlm = cxl_mem_create(&pdev->dev);
-       if (IS_ERR(cxlm))
-               return PTR_ERR(cxlm);
+       cxlds = cxl_dev_state_create(&pdev->dev);
+       if (IS_ERR(cxlds))
+               return PTR_ERR(cxlds);
 
        rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
        if (rc)
                return rc;
 
-       rc = cxl_map_regs(cxlm, &map);
+       rc = cxl_map_regs(cxlds, &map);
        if (rc)
                return rc;
 
-       rc = cxl_pci_setup_mailbox(cxlm);
+       rc = cxl_pci_setup_mailbox(cxlds);
        if (rc)
                return rc;
 
-       rc = cxl_mem_enumerate_cmds(cxlm);
+       rc = cxl_enumerate_cmds(cxlds);
        if (rc)
                return rc;
 
-       rc = cxl_mem_identify(cxlm);
+       rc = cxl_dev_state_identify(cxlds);
        if (rc)
                return rc;
 
-       rc = cxl_mem_create_range_info(cxlm);
+       rc = cxl_mem_create_range_info(cxlds);
        if (rc)
                return rc;
 
-       cxlmd = devm_cxl_add_memdev(cxlm);
+       cxlmd = devm_cxl_add_memdev(cxlds);
        if (IS_ERR(cxlmd))
                return PTR_ERR(cxlmd);
 
-       if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
+       if (range_len(&cxlds->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
                rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);
 
        return rc;
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 38bcbb4..17e82ae 100644
@@ -19,9 +19,9 @@ static struct workqueue_struct *cxl_pmem_wq;
 
 static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
 
-static void clear_exclusive(void *cxlm)
+static void clear_exclusive(void *cxlds)
 {
-       clear_exclusive_cxl_commands(cxlm, exclusive_cmds);
+       clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
 }
 
 static void unregister_nvdimm(void *nvdimm)
@@ -34,7 +34,7 @@ static int cxl_nvdimm_probe(struct device *dev)
        struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
        struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
        unsigned long flags = 0, cmd_mask = 0;
-       struct cxl_mem *cxlm = cxlmd->cxlm;
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_nvdimm_bridge *cxl_nvb;
        struct nvdimm *nvdimm;
        int rc;
@@ -49,8 +49,8 @@ static int cxl_nvdimm_probe(struct device *dev)
                goto out;
        }
 
-       set_exclusive_cxl_commands(cxlm, exclusive_cmds);
-       rc = devm_add_action_or_reset(dev, clear_exclusive, cxlm);
+       set_exclusive_cxl_commands(cxlds, exclusive_cmds);
+       rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
        if (rc)
                goto out;
 
@@ -80,7 +80,7 @@ static struct cxl_driver cxl_nvdimm_driver = {
        .id = CXL_DEVICE_NVDIMM,
 };
 
-static int cxl_pmem_get_config_size(struct cxl_mem *cxlm,
+static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
                                    struct nd_cmd_get_config_size *cmd,
                                    unsigned int buf_len)
 {
@@ -88,14 +88,14 @@ static int cxl_pmem_get_config_size(struct cxl_mem *cxlm,
                return -EINVAL;
 
        *cmd = (struct nd_cmd_get_config_size) {
-                .config_size = cxlm->lsa_size,
-                .max_xfer = cxlm->payload_size,
+                .config_size = cxlds->lsa_size,
+                .max_xfer = cxlds->payload_size,
        };
 
        return 0;
 }
 
-static int cxl_pmem_get_config_data(struct cxl_mem *cxlm,
+static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
                                    struct nd_cmd_get_config_data_hdr *cmd,
                                    unsigned int buf_len)
 {
@@ -112,15 +112,14 @@ static int cxl_pmem_get_config_data(struct cxl_mem *cxlm,
                .length = cmd->in_length,
        };
 
-       rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LSA, &get_lsa,
-                                  sizeof(get_lsa), cmd->out_buf,
-                                  cmd->in_length);
+       rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa,
+                              sizeof(get_lsa), cmd->out_buf, cmd->in_length);
        cmd->status = 0;
 
        return rc;
 }
 
-static int cxl_pmem_set_config_data(struct cxl_mem *cxlm,
+static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
                                    struct nd_cmd_set_config_hdr *cmd,
                                    unsigned int buf_len)
 {
@@ -144,9 +143,9 @@ static int cxl_pmem_set_config_data(struct cxl_mem *cxlm,
        };
        memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);
 
-       rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_SET_LSA, set_lsa,
-                                  struct_size(set_lsa, data, cmd->in_length),
-                                  NULL, 0);
+       rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_LSA, set_lsa,
+                              struct_size(set_lsa, data, cmd->in_length),
+                              NULL, 0);
 
        /*
         * Set "firmware" status (4-packed bytes at the end of the input
@@ -164,18 +163,18 @@ static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
        struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
        unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
        struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-       struct cxl_mem *cxlm = cxlmd->cxlm;
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
 
        if (!test_bit(cmd, &cmd_mask))
                return -ENOTTY;
 
        switch (cmd) {
        case ND_CMD_GET_CONFIG_SIZE:
-               return cxl_pmem_get_config_size(cxlm, buf, buf_len);
+               return cxl_pmem_get_config_size(cxlds, buf, buf_len);
        case ND_CMD_GET_CONFIG_DATA:
-               return cxl_pmem_get_config_data(cxlm, buf, buf_len);
+               return cxl_pmem_get_config_data(cxlds, buf, buf_len);
        case ND_CMD_SET_CONFIG_DATA:
-               return cxl_pmem_set_config_data(cxlm, buf, buf_len);
+               return cxl_pmem_set_config_data(cxlds, buf, buf_len);
        default:
                return -ENOTTY;
        }
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 12a8437..555faa4 100644
@@ -54,7 +54,7 @@ static int mock_gsl(struct cxl_mbox_cmd *cmd)
        return 0;
 }
 
-static int mock_get_log(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
+static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
 {
        struct cxl_mbox_get_log *gl = cmd->payload_in;
        u32 offset = le32_to_cpu(gl->offset);
@@ -64,7 +64,7 @@ static int mock_get_log(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
 
        if (cmd->size_in < sizeof(*gl))
                return -EINVAL;
-       if (length > cxlm->payload_size)
+       if (length > cxlds->payload_size)
                return -EINVAL;
        if (offset + length > sizeof(mock_cel))
                return -EINVAL;
@@ -78,9 +78,9 @@ static int mock_get_log(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
        return 0;
 }
 
-static int mock_id(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
+static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
 {
-       struct platform_device *pdev = to_platform_device(cxlm->dev);
+       struct platform_device *pdev = to_platform_device(cxlds->dev);
        struct cxl_mbox_identify id = {
                .fw_revision = { "mock fw v1 " },
                .lsa_size = cpu_to_le32(LSA_SIZE),
@@ -120,10 +120,10 @@ static int mock_id(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
        return 0;
 }
 
-static int mock_get_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
+static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
 {
        struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
-       void *lsa = dev_get_drvdata(cxlm->dev);
+       void *lsa = dev_get_drvdata(cxlds->dev);
        u32 offset, length;
 
        if (sizeof(*get_lsa) > cmd->size_in)
@@ -139,10 +139,10 @@ static int mock_get_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
        return 0;
 }
 
-static int mock_set_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
+static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
 {
        struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
-       void *lsa = dev_get_drvdata(cxlm->dev);
+       void *lsa = dev_get_drvdata(cxlds->dev);
        u32 offset, length;
 
        if (sizeof(*set_lsa) > cmd->size_in)
@@ -156,9 +156,9 @@ static int mock_set_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
        return 0;
 }
 
-static int cxl_mock_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
+static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
 {
-       struct device *dev = cxlm->dev;
+       struct device *dev = cxlds->dev;
        int rc = -EIO;
 
        switch (cmd->opcode) {
@@ -166,16 +166,16 @@ static int cxl_mock_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
                rc = mock_gsl(cmd);
                break;
        case CXL_MBOX_OP_GET_LOG:
-               rc = mock_get_log(cxlm, cmd);
+               rc = mock_get_log(cxlds, cmd);
                break;
        case CXL_MBOX_OP_IDENTIFY:
-               rc = mock_id(cxlm, cmd);
+               rc = mock_id(cxlds, cmd);
                break;
        case CXL_MBOX_OP_GET_LSA:
-               rc = mock_get_lsa(cxlm, cmd);
+               rc = mock_get_lsa(cxlds, cmd);
                break;
        case CXL_MBOX_OP_SET_LSA:
-               rc = mock_set_lsa(cxlm, cmd);
+               rc = mock_set_lsa(cxlds, cmd);
                break;
        default:
                break;
@@ -196,7 +196,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct cxl_memdev *cxlmd;
-       struct cxl_mem *cxlm;
+       struct cxl_dev_state *cxlds;
        void *lsa;
        int rc;
 
@@ -208,30 +208,30 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
                return rc;
        dev_set_drvdata(dev, lsa);
 
-       cxlm = cxl_mem_create(dev);
-       if (IS_ERR(cxlm))
-               return PTR_ERR(cxlm);
+       cxlds = cxl_dev_state_create(dev);
+       if (IS_ERR(cxlds))
+               return PTR_ERR(cxlds);
 
-       cxlm->mbox_send = cxl_mock_mbox_send;
-       cxlm->payload_size = SZ_4K;
+       cxlds->mbox_send = cxl_mock_mbox_send;
+       cxlds->payload_size = SZ_4K;
 
-       rc = cxl_mem_enumerate_cmds(cxlm);
+       rc = cxl_enumerate_cmds(cxlds);
        if (rc)
                return rc;
 
-       rc = cxl_mem_identify(cxlm);
+       rc = cxl_dev_state_identify(cxlds);
        if (rc)
                return rc;
 
-       rc = cxl_mem_create_range_info(cxlm);
+       rc = cxl_mem_create_range_info(cxlds);
        if (rc)
                return rc;
 
-       cxlmd = devm_cxl_add_memdev(cxlm);
+       cxlmd = devm_cxl_add_memdev(cxlds);
        if (IS_ERR(cxlmd))
                return PTR_ERR(cxlmd);
 
-       if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
+       if (range_len(&cxlds->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
                rc = devm_cxl_add_nvdimm(dev, cxlmd);
 
        return 0;