Merge tag 'scmi-updates-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep...
authorArnd Bergmann <arnd@arndb.de>
Tue, 24 Jan 2023 20:44:03 +0000 (21:44 +0100)
committerArnd Bergmann <arnd@arndb.de>
Tue, 24 Jan 2023 20:44:52 +0000 (21:44 +0100)
Arm SCMI updates for v6.3

The main addition is a unified userspace interface for SCMI, irrespective
of the underlying transport, along with some changes to refactor the
SCMI stack probing sequence.

1. SCMI unified userspace interface

   This is to have a unified way of testing an SCMI platform firmware
   implementation for compliance, fuzzing, etc., from the perspective of
   the non-secure OSPM, irrespective of the underlying transport supporting
   SCMI. It is just for testing/development and not a feature intended for
   use in production.

   Currently, an SCMI Compliance Suite[1] can only work by injecting SCMI
   messages using the mailbox test driver, which makes it transport
   specific and unusable with any other transport like virtio, smc/hvc,
   or optee. The shared memory layout can also be transport specific, and
   it is better to abstract/hide those details while providing userspace
   access. So, in order to scale with any transport, a unified interface
   is needed.

   In order to achieve that, SCMI "raw mode support" is being added through
   debugfs, which is also more configurable. A userspace application can
   inject bare SCMI binary messages into the SCMI core stack; such messages
   will be routed by the regular SCMI kernel stack to the backend platform
   firmware using the configured transport, transparently. This eliminates
   the need to know about the specific underlying transport internals,
   which are taken care of by the SCMI core stack itself. Further, unlike
   with the mailbox-test driver, no additional device tree changes are
   needed.
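
   As an illustration, a minimal, hypothetical userspace sketch follows,
   assuming a little-endian host and a single SCMI instance exposed as
   /sys/kernel/debug/scmi/0; it injects a Base protocol PROTOCOL_VERSION
   command and reads back the reply, per the debugfs ABI documented in
   this series:

       #include <fcntl.h>
       #include <stdint.h>
       #include <stdio.h>
       #include <unistd.h>

       int main(void)
       {
               /*
                * 32-bit SCMI header: msg_id[7:0] = 0x0 (PROTOCOL_VERSION),
                * msg_type[9:8] = 0 (command), protocol_id[17:10] = 0x10
                * (Base protocol).
                */
               uint32_t hdr = 0x10 << 10;
               uint32_t rx[32];
               ssize_t n;
               int fd;

               fd = open("/sys/kernel/debug/scmi/0/raw/message", O_RDWR);
               if (fd < 0)
                       return 1;

               /* Each write builds and sends exactly one command request. */
               if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
                       return 1;

               /* Each read gives back one reply message (EOF at boundary);
                * a sync reply carries header, status and return values. */
               n = read(fd, rx, sizeof(rx));
               if (n >= 12)
                       printf("status %d version 0x%08x\n",
                              (int32_t)rx[1], rx[2]);

               close(fd);
               return 0;
       }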

[1] https://gitlab.arm.com/tests/scmi-tests

2. Refactoring of the SCMI stack probing sequence

   On some platforms, the SCMI transport can be provided by OPTEE/TEE,
   which introduces a certain dependency in the probe ordering. In order
   to address that, the SCMI bus is split into its own module, which
   continues to be initialized at subsys_initcall, while the SCMI core
   stack, including its various transport backends (like optee, mailbox,
   virtio, smc), is now moved into a separate module initialized at
   module_init level.

   This allows the other, possibly dependent, subsystems to register with
   and/or access the SCMI bus well before the core SCMI stack and its
   dependent transport backends.
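
   For example, with this split a (hypothetical) dependent SCMI driver can
   be built and loaded independently, registering with the bus early and
   getting probed only once the core stack and its transport come up; all
   the names below are made up for illustration:

       #include <linux/module.h>
       #include <linux/scmi_protocol.h>

       static int example_probe(struct scmi_device *sdev)
       {
               /* Runs only after the SCMI core stack has probed. */
               return 0;
       }

       static const struct scmi_device_id example_ids[] = {
               { SCMI_PROTOCOL_CLOCK, "example-clocks" },
               { },
       };
       MODULE_DEVICE_TABLE(scmi, example_ids);

       static struct scmi_driver example_driver = {
               .name = "example-scmi-driver",
               .probe = example_probe,
               .id_table = example_ids,
       };
       module_scmi_driver(example_driver);

       MODULE_LICENSE("GPL");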

* tag 'scmi-updates-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux: (31 commits)
  firmware: arm_scmi: Clarify raw per-channel ABI documentation
  firmware: arm_scmi: Add per-channel raw injection support
  firmware: arm_scmi: Add the raw mode co-existence support
  firmware: arm_scmi: Call raw mode hooks from the core stack
  firmware: arm_scmi: Reject SCMI drivers when configured in raw mode
  firmware: arm_scmi: Add debugfs ABI documentation for raw mode
  firmware: arm_scmi: Add core raw transmission support
  firmware: arm_scmi: Add debugfs ABI documentation for common entries
  firmware: arm_scmi: Populate a common SCMI debugfs root
  debugfs: Export debugfs_create_str symbol
  include: trace: Add platform and channel instance references
  firmware: arm_scmi: Add internal platform/channel identifiers
  firmware: arm_scmi: Move errors defs and code to common.h
  firmware: arm_scmi: Add xfer helpers to provide raw access
  firmware: arm_scmi: Add flags field to xfer
  firmware: arm_scmi: Refactor scmi_wait_for_message_response
  firmware: arm_scmi: Refactor polling helpers
  firmware: arm_scmi: Refactor xfer in-flight registration routines
  firmware: arm_scmi: Split bus and driver into distinct modules
  firmware: arm_scmi: Introduce a new lifecycle for protocol devices
  ...

Link: https://lore.kernel.org/r/20230120162152.1438456-1-sudeep.holla@arm.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
18 files changed:
Documentation/ABI/testing/debugfs-scmi [new file with mode: 0644]
Documentation/ABI/testing/debugfs-scmi-raw [new file with mode: 0644]
drivers/firmware/arm_scmi/Kconfig
drivers/firmware/arm_scmi/Makefile
drivers/firmware/arm_scmi/bus.c
drivers/firmware/arm_scmi/common.h
drivers/firmware/arm_scmi/driver.c
drivers/firmware/arm_scmi/mailbox.c
drivers/firmware/arm_scmi/optee.c
drivers/firmware/arm_scmi/protocols.h
drivers/firmware/arm_scmi/raw_mode.c [new file with mode: 0644]
drivers/firmware/arm_scmi/raw_mode.h [new file with mode: 0644]
drivers/firmware/arm_scmi/shmem.c
drivers/firmware/arm_scmi/smc.c
drivers/firmware/arm_scmi/virtio.c
fs/debugfs/file.c
include/linux/scmi_protocol.h
include/trace/events/scmi.h

diff --git a/Documentation/ABI/testing/debugfs-scmi b/Documentation/ABI/testing/debugfs-scmi
new file mode 100644 (file)
index 0000000..ee7179a
--- /dev/null
@@ -0,0 +1,70 @@
+What:          /sys/kernel/debug/scmi/<n>/instance_name
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   The name of the underlying SCMI instance <n> described by
+               all the debugfs accessors rooted at /sys/kernel/debug/scmi/<n>,
+               expressed as the full name of the top DT SCMI node under which
+               this SCMI instance is rooted.
+Users:         Debugging, any userspace test suite
+
+What:          /sys/kernel/debug/scmi/<n>/atomic_threshold_us
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   An optional time value, expressed in microseconds, representing,
+               on this SCMI instance <n>, the threshold above which any SCMI
+               command, advertised to have a higher-than-threshold execution
+               latency, should not be considered for atomic mode of operation,
+               even if requested.
+Users:         Debugging, any userspace test suite
+
+What:          /sys/kernel/debug/scmi/<n>/transport/type
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   A string representing the type of transport configured for this
+               SCMI instance <n>.
+Users:         Debugging, any userspace test suite
+
+What:          /sys/kernel/debug/scmi/<n>/transport/is_atomic
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   A boolean stating if the transport configured on the underlying
+               SCMI instance <n> is capable of atomic mode of operation.
+Users:         Debugging, any userspace test suite
+
+What:          /sys/kernel/debug/scmi/<n>/transport/max_rx_timeout_ms
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   Timeout in milliseconds allowed for SCMI synchronous replies
+               for the currently configured SCMI transport for instance <n>.
+Users:         Debugging, any userspace test suite
+
+What:          /sys/kernel/debug/scmi/<n>/transport/max_msg_size
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   Maximum size of the SCMI messages allowed by the currently
+               configured SCMI transport for instance <n>.
+Users:         Debugging, any userspace test suite
+
+What:          /sys/kernel/debug/scmi/<n>/transport/tx_max_msg
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   Max number of concurrently allowed in-flight SCMI messages for
+               the currently configured SCMI transport for instance <n> on the
+               TX channels.
+Users:         Debugging, any userspace test suite
+
+What:          /sys/kernel/debug/scmi/<n>/transport/rx_max_msg
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   Max number of concurrently allowed in-flight SCMI messages for
+               the currently configured SCMI transport for instance <n> on the
+               RX channels.
+Users:         Debugging, any userspace test suite
diff --git a/Documentation/ABI/testing/debugfs-scmi-raw b/Documentation/ABI/testing/debugfs-scmi-raw
new file mode 100644 (file)
index 0000000..97678cc
--- /dev/null
@@ -0,0 +1,117 @@
+What:          /sys/kernel/debug/scmi/<n>/raw/message
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   SCMI Raw synchronous message injection/snooping facility; write
+               a complete SCMI synchronous command message (header included)
+               in little-endian binary format to have it sent to the configured
+               backend SCMI server for instance <n>.
+               Any subsequently received response can be read from this same
+               entry if it arrived within the configured timeout.
+               Each write to the entry causes one command request to be built
+               and sent while the replies are read back one message at a time
+               (receiving an EOF at each message boundary).
+Users:         Debugging, any userspace test suite
+
+What:          /sys/kernel/debug/scmi/<n>/raw/message_async
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   SCMI Raw asynchronous message injection/snooping facility; write
+               a complete SCMI asynchronous command message (header included)
+               in little-endian binary format to have it sent to the configured
+               backend SCMI server for instance <n>.
+               Any subsequently received response can be read from this same
+               entry if it arrived within the configured timeout.
+               Any additional delayed response received afterwards can be read
+               from this same entry too if it arrived within the configured
+               timeout.
+               Each write to the entry causes one command request to be built
+               and sent while the replies are read back one message at a time
+               (receiving an EOF at each message boundary).
+Users:         Debugging, any userspace test suite
+
+What:          /sys/kernel/debug/scmi/<n>/raw/errors
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   SCMI Raw message errors facility; any kind of timed-out or
+               generally unexpectedly received SCMI message, for instance <n>,
+               can be read from this entry.
+               Each read gives back one message at a time (receiving an EOF at
+               each message boundary).
+Users:         Debugging, any userspace test suite
+
+What:          /sys/kernel/debug/scmi/<n>/raw/notification
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   SCMI Raw notification snooping facility; any notification
+               emitted by the backend SCMI server, for instance <n>, can be
+               read from this entry.
+               Each read gives back one message at a time (receiving an EOF at
+               each message boundary).
+Users:         Debugging, any userspace test suite
+
+What:          /sys/kernel/debug/scmi/<n>/raw/reset
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   SCMI Raw stack reset facility; writing a value to this entry
+               causes the internal queues of any kind of received message,
+               still pending to be read out for instance <n>, to be immediately
+               flushed.
+               Can be used to reset and clean the SCMI Raw stack between two
+               different test runs.
+Users:         Debugging, any userspace test suite
+
+What:          /sys/kernel/debug/scmi/<n>/raw/channels/<m>/message
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   SCMI Raw synchronous message injection/snooping facility; write
+               a complete SCMI synchronous command message (header included)
+               in little-endian binary format to have it sent to the configured
+               backend SCMI server for instance <n> through the <m> transport
+               channel.
+               Any subsequently received response can be read from this same
+               entry if it arrived on channel <m> within the configured
+               timeout.
+               Each write to the entry causes one command request to be built
+               and sent while the replies are read back one message at a time
+               (receiving an EOF at each message boundary).
+               Channel identifier <m> matches the SCMI protocol number which
+               has been associated with this transport channel in the DT
+               description, with base protocol number 0x10 being the default
+               channel for this instance.
+               Note that these per-channel entries rooted at <..>/channels
+               exist only if the transport is configured to have more than
+               one default channel.
+Users:         Debugging, any userspace test suite
+
+What:          /sys/kernel/debug/scmi/<n>/raw/channels/<m>/message_async
+Date:          March 2023
+KernelVersion: 6.3
+Contact:       cristian.marussi@arm.com
+Description:   SCMI Raw asynchronous message injection/snooping facility; write
+               a complete SCMI asynchronous command message (header included)
+               in little-endian binary format to have it sent to the configured
+               backend SCMI server for instance <n> through the <m> transport
+               channel.
+               Any subsequently received response can be read from this same
+               entry if it arrived on channel <m> within the configured
+               timeout.
+               Any additional delayed response received afterwards can be read
+               from this same entry too if it arrived within the configured
+               timeout.
+               Each write to the entry causes one command request to be built
+               and sent while the replies are read back one message at a time
+               (receiving an EOF at each message boundary).
+               Channel identifier <m> matches the SCMI protocol number which
+               has been associated with this transport channel in the DT
+               description, with base protocol number 0x10 being the default
+               channel for this instance.
+               Note that these per-channel entries rooted at <..>/channels
+               exist only if the transport is configured to have more than
+               one default channel.
+Users:         Debugging, any userspace test suite
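
By way of example, a minimal, hypothetical reader for the notification
snooping entry documented above (assuming SCMI instance <n> = 0) could
look like:

       #include <fcntl.h>
       #include <stdint.h>
       #include <stdio.h>
       #include <unistd.h>

       int main(void)
       {
               uint32_t msg[64];
               ssize_t n;
               int fd;

               fd = open("/sys/kernel/debug/scmi/0/raw/notification",
                         O_RDONLY);
               if (fd < 0)
                       return 1;

               /*
                * One read gives back one whole message (header included);
                * a further read would hit EOF at the message boundary.
                */
               n = read(fd, msg, sizeof(msg));
               if (n >= 4)
                       printf("notification hdr 0x%08x (%zd bytes)\n",
                              msg[0], n);

               close(fd);
               return 0;
       }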
index a14f654..ea0f508 100644 (file)
@@ -23,6 +23,38 @@ config ARM_SCMI_PROTOCOL
 
 if ARM_SCMI_PROTOCOL
 
+config ARM_SCMI_NEED_DEBUGFS
+       bool
+       help
+         This declares whether at least one SCMI facility is configured
+         which needs debugfs support. When selected, this causes the
+         creation of a common SCMI debugfs root directory.
+
+config ARM_SCMI_RAW_MODE_SUPPORT
+       bool "Enable support for SCMI Raw transmission mode"
+       depends on DEBUG_FS
+       select ARM_SCMI_NEED_DEBUGFS
+       help
+         Enable support for SCMI Raw transmission mode.
+
+         If enabled, this allows the direct injection and snooping of SCMI
+         bare messages through a dedicated debugfs interface.
+         It is meant to be used by SCMI compliance/testing suites.
+
+         When enabled, regular SCMI driver interactions are inhibited in
+         order to avoid unexpected interactions with the SCMI Raw message
+         flow. If unsure, say N.
+
+config ARM_SCMI_RAW_MODE_SUPPORT_COEX
+       bool "Allow SCMI Raw mode coexistence with normal SCMI stack"
+       depends on ARM_SCMI_RAW_MODE_SUPPORT
+       help
+         Allow SCMI Raw transmission mode to coexist with normal SCMI stack.
+
+         This will allow regular SCMI drivers to register with the core and
+         operate normally, which could make an SCMI test suite using the
+         SCMI Raw mode support unreliable. If unsure, say N.
+
 config ARM_SCMI_HAVE_TRANSPORT
        bool
        help
index 9ea86f8..b31d78f 100644 (file)
@@ -1,6 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0-only
 scmi-bus-y = bus.o
+scmi-core-objs := $(scmi-bus-y)
+
 scmi-driver-y = driver.o notify.o
+scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o
 scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
 scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o
 scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
@@ -8,9 +11,11 @@ scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
 scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
 scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
 scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o
-scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
-                   $(scmi-transport-y)
+scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y)
+
+obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o
 obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
+
 obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
 obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o
 
index 35bb707..68cc4b4 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/atomic.h>
 #include <linux/types.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/device.h>
 
 #include "common.h"
 
+BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh);
+EXPORT_SYMBOL_GPL(scmi_requested_devices_nh);
+
 static DEFINE_IDA(scmi_bus_id);
-static DEFINE_IDR(scmi_protocols);
-static DEFINE_SPINLOCK(protocol_lock);
+
+static DEFINE_IDR(scmi_requested_devices);
+/* Protect access to scmi_requested_devices */
+static DEFINE_MUTEX(scmi_requested_devices_mtx);
+
+struct scmi_requested_dev {
+       const struct scmi_device_id *id_table;
+       struct list_head node;
+};
+
+/* Track globally the creation of SCMI SystemPower related devices */
+static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
+
+/**
+ * scmi_protocol_device_request  - Helper to request a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be created.
+ *
+ * This helper lets an SCMI driver request specific devices identified by the
+ * @id_table to be created for each active SCMI instance.
+ *
+ * The requested device name MUST NOT already exist for any protocol;
+ * at first the freshly requested @id_table is annotated in the IDR table
+ * @scmi_requested_devices and then the requested device is advertised to any
+ * registered party via the @scmi_requested_devices_nh notification chain.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
+{
+       int ret = 0;
+       unsigned int id = 0;
+       struct list_head *head, *phead = NULL;
+       struct scmi_requested_dev *rdev;
+
+       pr_debug("Requesting SCMI device (%s) for protocol %x\n",
+                id_table->name, id_table->protocol_id);
+
+       if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) &&
+           !IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX)) {
+               pr_warn("SCMI Raw mode active. Rejecting '%s'/0x%02X\n",
+                       id_table->name, id_table->protocol_id);
+               return -EINVAL;
+       }
+
+       /*
+        * Search for the matching protocol rdev list and then search
+        * for any existing equally named device... fails if any duplicate is found.
+        */
+       mutex_lock(&scmi_requested_devices_mtx);
+       idr_for_each_entry(&scmi_requested_devices, head, id) {
+               if (!phead) {
+                       /* A list found registered in the IDR is never empty */
+                       rdev = list_first_entry(head, struct scmi_requested_dev,
+                                               node);
+                       if (rdev->id_table->protocol_id ==
+                           id_table->protocol_id)
+                               phead = head;
+               }
+               list_for_each_entry(rdev, head, node) {
+                       if (!strcmp(rdev->id_table->name, id_table->name)) {
+                               pr_err("Ignoring duplicate request [%d] %s\n",
+                                      rdev->id_table->protocol_id,
+                                      rdev->id_table->name);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+               }
+       }
+
+       /*
+        * No duplicate found for requested id_table, so let's create a new
+        * requested device entry for this new valid request.
+        */
+       rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+       if (!rdev) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       rdev->id_table = id_table;
+
+       /*
+        * Append the new requested device table descriptor to the head of the
+        * related protocol list, creating the list head first if it is not
+        * already there.
+        */
+       if (!phead) {
+               phead = kzalloc(sizeof(*phead), GFP_KERNEL);
+               if (!phead) {
+                       kfree(rdev);
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               INIT_LIST_HEAD(phead);
+
+               ret = idr_alloc(&scmi_requested_devices, (void *)phead,
+                               id_table->protocol_id,
+                               id_table->protocol_id + 1, GFP_KERNEL);
+               if (ret != id_table->protocol_id) {
+                       pr_err("Failed to save SCMI device - ret:%d\n", ret);
+                       kfree(rdev);
+                       kfree(phead);
+                       ret = -EINVAL;
+                       goto out;
+               }
+               ret = 0;
+       }
+       list_add(&rdev->node, phead);
+
+out:
+       mutex_unlock(&scmi_requested_devices_mtx);
+
+       if (!ret)
+               blocking_notifier_call_chain(&scmi_requested_devices_nh,
+                                            SCMI_BUS_NOTIFY_DEVICE_REQUEST,
+                                            (void *)rdev->id_table);
+
+       return ret;
+}
+
+/**
+ * scmi_protocol_device_unrequest  - Helper to unrequest a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be unrequested.
+ *
+ * The unrequested device, described by the provided id_table, is at first
+ * removed from the IDR @scmi_requested_devices and then the removal is
+ * advertised to any registered party via the @scmi_requested_devices_nh
+ * notification chain.
+ */
+static void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
+{
+       struct list_head *phead;
+
+       pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
+                id_table->name, id_table->protocol_id);
+
+       mutex_lock(&scmi_requested_devices_mtx);
+       phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+       if (phead) {
+               struct scmi_requested_dev *victim, *tmp;
+
+               list_for_each_entry_safe(victim, tmp, phead, node) {
+                       if (!strcmp(victim->id_table->name, id_table->name)) {
+                               list_del(&victim->node);
+
+                               mutex_unlock(&scmi_requested_devices_mtx);
+                               blocking_notifier_call_chain(&scmi_requested_devices_nh,
+                                                            SCMI_BUS_NOTIFY_DEVICE_UNREQUEST,
+                                                            (void *)victim->id_table);
+                               kfree(victim);
+                               mutex_lock(&scmi_requested_devices_mtx);
+                               break;
+                       }
+               }
+
+               if (list_empty(phead)) {
+                       idr_remove(&scmi_requested_devices,
+                                  id_table->protocol_id);
+                       kfree(phead);
+               }
+       }
+       mutex_unlock(&scmi_requested_devices_mtx);
+}
 
 static const struct scmi_device_id *
 scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
@@ -57,11 +224,11 @@ static int scmi_match_by_id_table(struct device *dev, void *data)
        struct scmi_device_id *id_table = data;
 
        return sdev->protocol_id == id_table->protocol_id &&
-               !strcmp(sdev->name, id_table->name);
+               (id_table->name && !strcmp(sdev->name, id_table->name));
 }
 
-struct scmi_device *scmi_child_dev_find(struct device *parent,
-                                       int prot_id, const char *name)
+static struct scmi_device *scmi_child_dev_find(struct device *parent,
+                                              int prot_id, const char *name)
 {
        struct scmi_device_id id_table;
        struct device *dev;
@@ -76,30 +243,6 @@ struct scmi_device *scmi_child_dev_find(struct device *parent,
        return to_scmi_dev(dev);
 }
 
-const struct scmi_protocol *scmi_protocol_get(int protocol_id)
-{
-       const struct scmi_protocol *proto;
-
-       proto = idr_find(&scmi_protocols, protocol_id);
-       if (!proto || !try_module_get(proto->owner)) {
-               pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
-               return NULL;
-       }
-
-       pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
-
-       return proto;
-}
-
-void scmi_protocol_put(int protocol_id)
-{
-       const struct scmi_protocol *proto;
-
-       proto = idr_find(&scmi_protocols, protocol_id);
-       if (proto)
-               module_put(proto->owner);
-}
-
 static int scmi_dev_probe(struct device *dev)
 {
        struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
@@ -120,12 +263,13 @@ static void scmi_dev_remove(struct device *dev)
                scmi_drv->remove(scmi_dev);
 }
 
-static struct bus_type scmi_bus_type = {
+struct bus_type scmi_bus_type = {
        .name = "scmi_protocol",
        .match = scmi_dev_match,
        .probe = scmi_dev_probe,
        .remove = scmi_dev_remove,
 };
+EXPORT_SYMBOL_GPL(scmi_bus_type);
 
 int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
                         const char *mod_name)
@@ -146,7 +290,7 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
 
        retval = driver_register(&driver->driver);
        if (!retval)
-               pr_debug("registered new scmi driver %s\n", driver->name);
+               pr_debug("Registered new scmi driver %s\n", driver->name);
 
        return retval;
 }
@@ -164,13 +308,53 @@ static void scmi_device_release(struct device *dev)
        kfree(to_scmi_dev(dev));
 }
 
-struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol,
-                  const char *name)
+static void __scmi_device_destroy(struct scmi_device *scmi_dev)
+{
+       pr_debug("(%s) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
+                of_node_full_name(scmi_dev->dev.parent->of_node),
+                dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+                scmi_dev->name);
+
+       if (scmi_dev->protocol_id == SCMI_PROTOCOL_SYSTEM)
+               atomic_set(&scmi_syspower_registered, 0);
+
+       kfree_const(scmi_dev->name);
+       ida_free(&scmi_bus_id, scmi_dev->id);
+       device_unregister(&scmi_dev->dev);
+}
+
+static struct scmi_device *
+__scmi_device_create(struct device_node *np, struct device *parent,
+                    int protocol, const char *name)
 {
        int id, retval;
        struct scmi_device *scmi_dev;
 
+       /*
+        * If the same protocol/name device already exists under the same parent
+        * (i.e. SCMI instance) just return the existing device.
+        * This avoids any race between the SCMI driver, creating devices for
+        * each DT defined protocol at probe time, and the concurrent
+        * registration of SCMI drivers.
+        */
+       scmi_dev = scmi_child_dev_find(parent, protocol, name);
+       if (scmi_dev)
+               return scmi_dev;
+
+       /*
+        * Ignore any possible subsequent failures while creating the device
+        * since we are doomed anyway at that point; not using a mutex which
+        * spans across this whole function to keep things simple and to avoid
+        * serializing all the __scmi_device_create calls across possibly
+        * different SCMI server instances (parent)
+        */
+       if (protocol == SCMI_PROTOCOL_SYSTEM &&
+           atomic_cmpxchg(&scmi_syspower_registered, 0, 1)) {
+               dev_warn(parent,
+                        "SCMI SystemPower protocol device must be unique !\n");
+               return NULL;
+       }
+
        scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL);
        if (!scmi_dev)
                return NULL;
@@ -200,6 +384,10 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol,
        if (retval)
                goto put_dev;
 
+       pr_debug("(%s) Created SCMI device '%s' for protocol 0x%x (%s)\n",
+                of_node_full_name(parent->of_node),
+                dev_name(&scmi_dev->dev), protocol, name);
+
        return scmi_dev;
 put_dev:
        kfree_const(scmi_dev->name);
@@ -208,77 +396,85 @@ put_dev:
        return NULL;
 }
 
-void scmi_device_destroy(struct scmi_device *scmi_dev)
-{
-       kfree_const(scmi_dev->name);
-       scmi_handle_put(scmi_dev->handle);
-       ida_free(&scmi_bus_id, scmi_dev->id);
-       device_unregister(&scmi_dev->dev);
-}
-
-void scmi_device_link_add(struct device *consumer, struct device *supplier)
-{
-       struct device_link *link;
-
-       link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
-
-       WARN_ON(!link);
-}
-
-void scmi_set_handle(struct scmi_device *scmi_dev)
-{
-       scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
-       if (scmi_dev->handle)
-               scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
-}
-
-int scmi_protocol_register(const struct scmi_protocol *proto)
+/**
+ * scmi_device_create  - A method to create one or more SCMI devices
+ *
+ * @np: A reference to the device node to use for the new device(s)
+ * @parent: The parent device to use identifying a specific SCMI instance
+ * @protocol: The SCMI protocol to be associated with this device
+ * @name: The requested name of the device to be created; this is optional
+ *       and if no @name is provided, all the devices currently known to
+ *       be requested on the SCMI bus for @protocol will be created.
+ *
+ * This method can be invoked to create a single well-defined device (like
+ * a transport device or a device requested by an SCMI driver loaded after
+ * the core SCMI stack has been probed), or to create all the devices currently
+ * known to have been requested by the loaded SCMI drivers for a specific
+ * protocol (typically during SCMI core protocol enumeration at probe time).
+ *
+ * Return: The created device (or one of them if @name was NOT provided and
+ *        multiple devices were created) or NULL if no device was created;
+ *        note that NULL indicates an error ONLY in case a specific @name
+ *        was provided: when the @name param was not provided, a number of
+ *        devices could potentially have been created for a whole protocol,
+ *        unless no device was found to have been requested for that
+ *        specific protocol.
+ */
+struct scmi_device *scmi_device_create(struct device_node *np,
+                                      struct device *parent, int protocol,
+                                      const char *name)
 {
-       int ret;
-
-       if (!proto) {
-               pr_err("invalid protocol\n");
-               return -EINVAL;
-       }
-
-       if (!proto->instance_init) {
-               pr_err("missing init for protocol 0x%x\n", proto->id);
-               return -EINVAL;
+       struct list_head *phead;
+       struct scmi_requested_dev *rdev;
+       struct scmi_device *scmi_dev = NULL;
+
+       if (name)
+               return __scmi_device_create(np, parent, protocol, name);
+
+       mutex_lock(&scmi_requested_devices_mtx);
+       phead = idr_find(&scmi_requested_devices, protocol);
+       /* Nothing to do. */
+       if (!phead) {
+               mutex_unlock(&scmi_requested_devices_mtx);
+               return scmi_dev;
        }
 
-       spin_lock(&protocol_lock);
-       ret = idr_alloc(&scmi_protocols, (void *)proto,
-                       proto->id, proto->id + 1, GFP_ATOMIC);
-       spin_unlock(&protocol_lock);
-       if (ret != proto->id) {
-               pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
-                      proto->id, ret);
-               return ret;
+       /* Walk the list of requested devices for protocol and create them */
+       list_for_each_entry(rdev, phead, node) {
+               struct scmi_device *sdev;
+
+               sdev = __scmi_device_create(np, parent,
+                                           rdev->id_table->protocol_id,
+                                           rdev->id_table->name);
+               /* Report errors and carry on... */
+               if (sdev)
+                       scmi_dev = sdev;
+               else
+                       pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
+                              of_node_full_name(parent->of_node),
+                              rdev->id_table->protocol_id,
+                              rdev->id_table->name);
        }
+       mutex_unlock(&scmi_requested_devices_mtx);
 
-       pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
-
-       return 0;
+       return scmi_dev;
 }
-EXPORT_SYMBOL_GPL(scmi_protocol_register);
+EXPORT_SYMBOL_GPL(scmi_device_create);
 
-void scmi_protocol_unregister(const struct scmi_protocol *proto)
+void scmi_device_destroy(struct device *parent, int protocol, const char *name)
 {
-       spin_lock(&protocol_lock);
-       idr_remove(&scmi_protocols, proto->id);
-       spin_unlock(&protocol_lock);
-
-       pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
+       struct scmi_device *scmi_dev;
 
-       return;
+       scmi_dev = scmi_child_dev_find(parent, protocol, name);
+       if (scmi_dev)
+               __scmi_device_destroy(scmi_dev);
 }
-EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
+EXPORT_SYMBOL_GPL(scmi_device_destroy);
 
 static int __scmi_devices_unregister(struct device *dev, void *data)
 {
        struct scmi_device *scmi_dev = to_scmi_dev(dev);
 
-       scmi_device_destroy(scmi_dev);
+       __scmi_device_destroy(scmi_dev);
        return 0;
 }
 
@@ -287,20 +483,33 @@ static void scmi_devices_unregister(void)
        bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
 }
 
-int __init scmi_bus_init(void)
+static int __init scmi_bus_init(void)
 {
        int retval;
 
        retval = bus_register(&scmi_bus_type);
        if (retval)
-               pr_err("scmi protocol bus register failed (%d)\n", retval);
+               pr_err("SCMI protocol bus register failed (%d)\n", retval);
+
+       pr_info("SCMI protocol bus registered\n");
 
        return retval;
 }
+subsys_initcall(scmi_bus_init);
 
-void __exit scmi_bus_exit(void)
+static void __exit scmi_bus_exit(void)
 {
+       /*
+        * Destroy all remaining devices: just in case the drivers were
+        * manually unbound first and then the modules unloaded.
+        */
        scmi_devices_unregister();
        bus_unregister(&scmi_bus_type);
        ida_destroy(&scmi_bus_id);
 }
+module_exit(scmi_bus_exit);
+
+MODULE_ALIAS("scmi-core");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI protocol bus");
+MODULE_LICENSE("GPL");
index a1c0154..c46dc52 100644 (file)
 #include "protocols.h"
 #include "notify.h"
 
+#define SCMI_MAX_CHANNELS              256
+
+#define SCMI_MAX_RESPONSE_TIMEOUT      (2 * MSEC_PER_SEC)
+
+enum scmi_error_codes {
+       SCMI_SUCCESS = 0,       /* Success */
+       SCMI_ERR_SUPPORT = -1,  /* Not supported */
+       SCMI_ERR_PARAMS = -2,   /* Invalid Parameters */
+       SCMI_ERR_ACCESS = -3,   /* Invalid access/permission denied */
+       SCMI_ERR_ENTRY = -4,    /* Not found */
+       SCMI_ERR_RANGE = -5,    /* Value out of range */
+       SCMI_ERR_BUSY = -6,     /* Device busy */
+       SCMI_ERR_COMMS = -7,    /* Communication Error */
+       SCMI_ERR_GENERIC = -8,  /* Generic Error */
+       SCMI_ERR_HARDWARE = -9, /* Hardware Error */
+       SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
+};
+
+static const int scmi_linux_errmap[] = {
+       /* better than switch case as long as return value is continuous */
+       0,                      /* SCMI_SUCCESS */
+       -EOPNOTSUPP,            /* SCMI_ERR_SUPPORT */
+       -EINVAL,                /* SCMI_ERR_PARAMS */
+       -EACCES,                /* SCMI_ERR_ACCESS */
+       -ENOENT,                /* SCMI_ERR_ENTRY */
+       -ERANGE,                /* SCMI_ERR_RANGE */
+       -EBUSY,                 /* SCMI_ERR_BUSY */
+       -ECOMM,                 /* SCMI_ERR_COMMS */
+       -EIO,                   /* SCMI_ERR_GENERIC */
+       -EREMOTEIO,             /* SCMI_ERR_HARDWARE */
+       -EPROTO,                /* SCMI_ERR_PROTOCOL */
+};
+
+static inline int scmi_to_linux_errno(int errno)
+{
+       int err_idx = -errno;
+
+       if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+               return scmi_linux_errmap[err_idx];
+       return -EIO;
+}
+
 #define MSG_ID_MASK            GENMASK(7, 0)
 #define MSG_XTRACT_ID(hdr)     FIELD_GET(MSG_ID_MASK, (hdr))
 #define MSG_TYPE_MASK          GENMASK(9, 8)
@@ -96,18 +138,19 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
 
 struct scmi_revision_info *
 scmi_revision_area_get(const struct scmi_protocol_handle *ph);
-int scmi_handle_put(const struct scmi_handle *handle);
-void scmi_device_link_add(struct device *consumer, struct device *supplier);
-struct scmi_handle *scmi_handle_get(struct device *dev);
-void scmi_set_handle(struct scmi_device *scmi_dev);
 void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
                                     u8 *prot_imp);
 
-int __init scmi_bus_init(void);
-void __exit scmi_bus_exit(void);
+extern struct bus_type scmi_bus_type;
+
+#define SCMI_BUS_NOTIFY_DEVICE_REQUEST         0
+#define SCMI_BUS_NOTIFY_DEVICE_UNREQUEST       1
+extern struct blocking_notifier_head scmi_requested_devices_nh;
 
-const struct scmi_protocol *scmi_protocol_get(int protocol_id);
-void scmi_protocol_put(int protocol_id);
+struct scmi_device *scmi_device_create(struct device_node *np,
+                                      struct device *parent, int protocol,
+                                      const char *name);
+void scmi_device_destroy(struct device *parent, int protocol, const char *name);
 
 int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id);
 void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
@@ -116,6 +159,8 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
 /**
  * struct scmi_chan_info - Structure representing a SCMI channel information
  *
+ * @id: An identifier for this channel: this matches the protocol number
+ *      used to initialize this channel
  * @dev: Reference to device in the SCMI hierarchy corresponding to this
  *      channel
  * @rx_timeout_ms: The configured RX timeout in milliseconds.
@@ -127,6 +172,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
  * @transport_info: Transport layer related information
  */
 struct scmi_chan_info {
+       int id;
        struct device *dev;
        unsigned int rx_timeout_ms;
        struct scmi_handle *handle;
@@ -153,7 +199,7 @@ struct scmi_chan_info {
  */
 struct scmi_transport_ops {
        int (*link_supplier)(struct device *dev);
-       bool (*chan_available)(struct device *dev, int idx);
+       bool (*chan_available)(struct device_node *of_node, int idx);
        int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
                          bool tx);
        int (*chan_free)(int id, void *p, void *data);
@@ -170,11 +216,6 @@ struct scmi_transport_ops {
        bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
 };
 
-int scmi_protocol_device_request(const struct scmi_device_id *id_table);
-void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table);
-struct scmi_device *scmi_child_dev_find(struct device *parent,
-                                       int prot_id, const char *name);
-
 /**
  * struct scmi_desc - Description of SoC integration
  *
@@ -215,6 +256,36 @@ struct scmi_desc {
        const bool atomic_enabled;
 };
 
+static inline bool is_polling_required(struct scmi_chan_info *cinfo,
+                                      const struct scmi_desc *desc)
+{
+       return cinfo->no_completion_irq || desc->force_polling;
+}
+
+static inline bool is_transport_polling_capable(const struct scmi_desc *desc)
+{
+       return desc->ops->poll_done || desc->sync_cmds_completed_on_ret;
+}
+
+static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
+                                     const struct scmi_desc *desc)
+{
+       return is_polling_required(cinfo, desc) &&
+               is_transport_polling_capable(desc);
+}
+
+void scmi_xfer_raw_put(const struct scmi_handle *handle,
+                      struct scmi_xfer *xfer);
+struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle);
+struct scmi_chan_info *
+scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id);
+
+int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
+                                   struct scmi_xfer *xfer);
+
+int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
+                                           struct scmi_xfer *xfer,
+                                           unsigned int timeout_ms);
 #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
 extern const struct scmi_desc scmi_mailbox_desc;
 #endif
@@ -229,7 +300,6 @@ extern const struct scmi_desc scmi_optee_desc;
 #endif
 
 void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
-void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
 
 /* shmem related declarations */
 struct scmi_shared_mem;
index f818d00..d21c7ea 100644 (file)
  * Copyright (C) 2018-2021 ARM Ltd.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/bitmap.h>
+#include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/idr.h>
 #include "common.h"
 #include "notify.h"
 
+#include "raw_mode.h"
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/scmi.h>
 
-enum scmi_error_codes {
-       SCMI_SUCCESS = 0,       /* Success */
-       SCMI_ERR_SUPPORT = -1,  /* Not supported */
-       SCMI_ERR_PARAMS = -2,   /* Invalid Parameters */
-       SCMI_ERR_ACCESS = -3,   /* Invalid access/permission denied */
-       SCMI_ERR_ENTRY = -4,    /* Not found */
-       SCMI_ERR_RANGE = -5,    /* Value out of range */
-       SCMI_ERR_BUSY = -6,     /* Device busy */
-       SCMI_ERR_COMMS = -7,    /* Communication Error */
-       SCMI_ERR_GENERIC = -8,  /* Generic Error */
-       SCMI_ERR_HARDWARE = -9, /* Hardware Error */
-       SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
-};
+static DEFINE_IDA(scmi_id);
+
+static DEFINE_IDR(scmi_protocols);
+static DEFINE_SPINLOCK(protocol_lock);
 
 /* List of all SCMI devices active in system */
 static LIST_HEAD(scmi_list);
@@ -58,18 +54,7 @@ static DEFINE_MUTEX(scmi_list_mutex);
 /* Track the unique id for the transfers for debug & profiling purpose */
 static atomic_t transfer_last_id;
 
-static DEFINE_IDR(scmi_requested_devices);
-static DEFINE_MUTEX(scmi_requested_devices_mtx);
-
-/* Track globally the creation of SCMI SystemPower related devices */
-static bool scmi_syspower_registered;
-/* Protect access to scmi_syspower_registered */
-static DEFINE_MUTEX(scmi_syspower_mtx);
-
-struct scmi_requested_dev {
-       const struct scmi_device_id *id_table;
-       struct list_head node;
-};
+static struct dentry *scmi_top_dentry;
 
 /**
  * struct scmi_xfers_info - Structure to manage transfer information
@@ -118,8 +103,23 @@ struct scmi_protocol_instance {
 #define ph_to_pi(h)    container_of(h, struct scmi_protocol_instance, ph)
 
 /**
+ * struct scmi_debug_info  - Debug common info
+ * @top_dentry: A reference to the top debugfs dentry
+ * @name: Name of this SCMI instance
+ * @type: Type of this SCMI instance
+ * @is_atomic: Flag to state if the transport of this instance is atomic
+ */
+struct scmi_debug_info {
+       struct dentry *top_dentry;
+       const char *name;
+       const char *type;
+       bool is_atomic;
+};
+
+/**
  * struct scmi_info - Structure representing a SCMI instance
  *
+ * @id: A sequence number starting from zero identifying this instance
  * @dev: Device pointer
  * @desc: SoC description for this instance
  * @version: SCMI revision information containing protocol version,
@@ -147,8 +147,15 @@ struct scmi_protocol_instance {
  * @notify_priv: Pointer to private data structure specific to notifications.
  * @node: List head
  * @users: Number of users of this instance
+ * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
+ * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
+ *             bus
+ * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
+ * @dbg: A pointer to debugfs related data (if any)
+ * @raw: An opaque reference handle used by SCMI Raw mode.
  */
 struct scmi_info {
+       int id;
        struct device *dev;
        const struct scmi_desc *desc;
        struct scmi_revision_info version;
@@ -166,32 +173,114 @@ struct scmi_info {
        void *notify_priv;
        struct list_head node;
        int users;
+       struct notifier_block bus_nb;
+       struct notifier_block dev_req_nb;
+       /* Serialize device creation process for this instance */
+       struct mutex devreq_mtx;
+       struct scmi_debug_info *dbg;
+       void *raw;
 };
 
 #define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
+#define bus_nb_to_scmi_info(nb)        container_of(nb, struct scmi_info, bus_nb)
+#define req_nb_to_scmi_info(nb)        container_of(nb, struct scmi_info, dev_req_nb)
 
-static const int scmi_linux_errmap[] = {
-       /* better than switch case as long as return value is continuous */
-       0,                      /* SCMI_SUCCESS */
-       -EOPNOTSUPP,            /* SCMI_ERR_SUPPORT */
-       -EINVAL,                /* SCMI_ERR_PARAM */
-       -EACCES,                /* SCMI_ERR_ACCESS */
-       -ENOENT,                /* SCMI_ERR_ENTRY */
-       -ERANGE,                /* SCMI_ERR_RANGE */
-       -EBUSY,                 /* SCMI_ERR_BUSY */
-       -ECOMM,                 /* SCMI_ERR_COMMS */
-       -EIO,                   /* SCMI_ERR_GENERIC */
-       -EREMOTEIO,             /* SCMI_ERR_HARDWARE */
-       -EPROTO,                /* SCMI_ERR_PROTOCOL */
-};
+static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
+{
+       const struct scmi_protocol *proto;
+
+       proto = idr_find(&scmi_protocols, protocol_id);
+       if (!proto || !try_module_get(proto->owner)) {
+               pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
+               return NULL;
+       }
+
+       pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
+
+       return proto;
+}
+
+static void scmi_protocol_put(int protocol_id)
+{
+       const struct scmi_protocol *proto;
+
+       proto = idr_find(&scmi_protocols, protocol_id);
+       if (proto)
+               module_put(proto->owner);
+}
+
+int scmi_protocol_register(const struct scmi_protocol *proto)
+{
+       int ret;
+
+       if (!proto) {
+               pr_err("invalid protocol\n");
+               return -EINVAL;
+       }
+
+       if (!proto->instance_init) {
+               pr_err("missing init for protocol 0x%x\n", proto->id);
+               return -EINVAL;
+       }
+
+       spin_lock(&protocol_lock);
+       ret = idr_alloc(&scmi_protocols, (void *)proto,
+                       proto->id, proto->id + 1, GFP_ATOMIC);
+       spin_unlock(&protocol_lock);
+       if (ret != proto->id) {
+               pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
+                      proto->id, ret);
+               return ret;
+       }
+
+       pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_register);
+
+void scmi_protocol_unregister(const struct scmi_protocol *proto)
+{
+       spin_lock(&protocol_lock);
+       idr_remove(&scmi_protocols, proto->id);
+       spin_unlock(&protocol_lock);
 
-static inline int scmi_to_linux_errno(int errno)
+       pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
+
+/**
+ * scmi_create_protocol_devices  - Create devices for all pending requests for
+ * this SCMI instance.
+ *
+ * @np: The device node describing the protocol
+ * @info: The SCMI instance descriptor
+ * @prot_id: The protocol ID
+ * @name: The optional name of the device to be created: if not provided this
+ *       call will lead to the creation of all the devices currently requested
+ *       for the specified protocol.
+ */
+static void scmi_create_protocol_devices(struct device_node *np,
+                                        struct scmi_info *info,
+                                        int prot_id, const char *name)
 {
-       int err_idx = -errno;
+       struct scmi_device *sdev;
 
-       if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
-               return scmi_linux_errmap[err_idx];
-       return -EIO;
+       mutex_lock(&info->devreq_mtx);
+       sdev = scmi_device_create(np, info->dev, prot_id, name);
+       if (name && !sdev)
+               dev_err(info->dev,
+                       "failed to create device for protocol 0x%X (%s)\n",
+                       prot_id, name);
+       mutex_unlock(&info->devreq_mtx);
+}
+
+static void scmi_destroy_protocol_devices(struct scmi_info *info,
+                                         int prot_id, const char *name)
+{
+       mutex_lock(&info->devreq_mtx);
+       scmi_device_destroy(info->dev, prot_id, name);
+       mutex_unlock(&info->devreq_mtx);
 }
 
 void scmi_notification_instance_data_set(const struct scmi_handle *handle,
@@ -311,8 +400,6 @@ static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
        if (xfer_id != next_token)
                atomic_add((int)(xfer_id - next_token), &transfer_last_id);
 
-       /* Set in-flight */
-       set_bit(xfer_id, minfo->xfer_alloc_table);
        xfer->hdr.seq = (u16)xfer_id;
 
        return 0;
@@ -331,32 +418,123 @@ static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
 }
 
 /**
+ * scmi_xfer_inflight_register_unlocked  - Register the xfer as in-flight
+ *
+ * @xfer: The xfer to register
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Note that this helper assumes that the xfer to be registered as in-flight
+ * had been built using an xfer sequence number which still corresponds to a
+ * free slot in the xfer_alloc_table.
+ *
+ * Context: Assumes to be called with @xfer_lock already acquired.
+ */
+static inline void
+scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
+                                    struct scmi_xfers_info *minfo)
+{
+       /* Set in-flight */
+       set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+       hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
+       xfer->pending = true;
+}
+
+/**
+ * scmi_xfer_inflight_register  - Try to register an xfer as in-flight
+ *
+ * @xfer: The xfer to register
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Note that this helper does NOT assume anything about the sequence number
+ * that was baked into the provided xfer, so it checks at first if it can
+ * be mapped to a free slot and fails with an error if another xfer with the
+ * same sequence number is currently still registered as in-flight.
+ *
+ * Return: 0 on Success or -EBUSY if the sequence number embedded in the xfer
+ *        could not be mapped to a free slot in the xfer_alloc_table.
+ */
+static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
+                                      struct scmi_xfers_info *minfo)
+{
+       int ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&minfo->xfer_lock, flags);
+       if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
+               scmi_xfer_inflight_register_unlocked(xfer, minfo);
+       else
+               ret = -EBUSY;
+       spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+       return ret;
+}
+
+/**
+ * scmi_xfer_raw_inflight_register  - A helper to register the given xfer as in
+ * flight on the TX channel, if possible.
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: The xfer to register
+ *
+ * Return: 0 on Success, error otherwise
+ */
+int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
+                                   struct scmi_xfer *xfer)
+{
+       struct scmi_info *info = handle_to_scmi_info(handle);
+
+       return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
+}
+
+/**
+ * scmi_xfer_pending_set  - Pick a proper sequence number and mark the xfer
+ * as pending in-flight
+ *
+ * @xfer: The xfer to act upon
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Return: 0 on Success or error otherwise
+ */
+static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
+                                       struct scmi_xfers_info *minfo)
+{
+       int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&minfo->xfer_lock, flags);
+       /* Set a new monotonic token as the xfer sequence number */
+       ret = scmi_xfer_token_set(minfo, xfer);
+       if (!ret)
+               scmi_xfer_inflight_register_unlocked(xfer, minfo);
+       spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+       return ret;
+}
+
+/**
  * scmi_xfer_get() - Allocate one message
  *
  * @handle: Pointer to SCMI entity handle
  * @minfo: Pointer to Tx/Rx Message management info based on channel type
- * @set_pending: If true a monotonic token is picked and the xfer is added to
- *              the pending hash table.
  *
  * Helper function which is used by various message functions that are
  * exposed to clients of this driver for allocating a message traffic event.
  *
- * Picks an xfer from the free list @free_xfers (if any available) and, if
- * required, sets a monotonically increasing token and stores the inflight xfer
- * into the @pending_xfers hashtable for later retrieval.
+ * Picks an xfer from the free list @free_xfers (if any available) and performs
+ * a basic initialization.
+ *
+ * Note that, at this point, still no sequence number is assigned to the
+ * allocated xfer, nor is it registered as a pending transaction.
  *
  * The successfully initialized xfer is refcounted.
  *
- * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
- *         @free_xfers.
+ * Context: Holds @xfer_lock while manipulating @free_xfers.
  *
- * Return: 0 if all went fine, else corresponding error.
+ * Return: An initialized xfer if all went fine, else an error pointer.
  */
 static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
-                                      struct scmi_xfers_info *minfo,
-                                      bool set_pending)
+                                      struct scmi_xfers_info *minfo)
 {
-       int ret;
        unsigned long flags;
        struct scmi_xfer *xfer;
 
@@ -376,31 +554,71 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
         */
        xfer->transfer_id = atomic_inc_return(&transfer_last_id);
 
-       if (set_pending) {
-               /* Pick and set monotonic token */
-               ret = scmi_xfer_token_set(minfo, xfer);
-               if (!ret) {
-                       hash_add(minfo->pending_xfers, &xfer->node,
-                                xfer->hdr.seq);
-                       xfer->pending = true;
-               } else {
-                       dev_err(handle->dev,
-                               "Failed to get monotonic token %d\n", ret);
-                       hlist_add_head(&xfer->node, &minfo->free_xfers);
-                       xfer = ERR_PTR(ret);
-               }
-       }
-
-       if (!IS_ERR(xfer)) {
-               refcount_set(&xfer->users, 1);
-               atomic_set(&xfer->busy, SCMI_XFER_FREE);
-       }
+       refcount_set(&xfer->users, 1);
+       atomic_set(&xfer->busy, SCMI_XFER_FREE);
        spin_unlock_irqrestore(&minfo->xfer_lock, flags);
 
        return xfer;
 }
 
 /**
+ * scmi_xfer_raw_get  - Helper to get a bare free xfer from the TX channel
+ *
+ * @handle: Pointer to SCMI entity handle
+ *
+ * Note that xfer is taken from the TX channel structures.
+ *
+ * Return: A valid xfer on Success, or an error-pointer otherwise
+ */
+struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
+{
+       struct scmi_xfer *xfer;
+       struct scmi_info *info = handle_to_scmi_info(handle);
+
+       xfer = scmi_xfer_get(handle, &info->tx_minfo);
+       if (!IS_ERR(xfer))
+               xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
+
+       return xfer;
+}
+
+/**
+ * scmi_xfer_raw_channel_get  - Helper to get a reference to the proper channel
+ * to use for a specific protocol_id Raw transaction.
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @protocol_id: Identifier of the protocol
+ *
+ * Note that in a regular SCMI stack, usually, a protocol has to be defined in
+ * the DT to have an associated channel and be usable; but in Raw mode any
+ * protocol in range is allowed, re-using the Base channel, so as to enable
+ * fuzzing on any protocol without the need for a fully compiled DT.
+ *
+ * Return: A reference to the channel to use, or an ERR_PTR
+ */
+struct scmi_chan_info *
+scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
+{
+       struct scmi_chan_info *cinfo;
+       struct scmi_info *info = handle_to_scmi_info(handle);
+
+       cinfo = idr_find(&info->tx_idr, protocol_id);
+       if (!cinfo) {
+               if (protocol_id == SCMI_PROTOCOL_BASE)
+                       return ERR_PTR(-EINVAL);
+               /* Use the Base channel for protocols not defined in the DT */
+               cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
+               if (!cinfo)
+                       return ERR_PTR(-EINVAL);
+               dev_warn_once(handle->dev,
+                             "Using Base channel for protocol 0x%X\n",
+                             protocol_id);
+       }
+
+       return cinfo;
+}
+
+/**
  * __scmi_xfer_put() - Release a message
  *
  * @minfo: Pointer to Tx/Rx Message management info based on channel type
@@ -429,6 +647,24 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
 }
 
 /**
+ * scmi_xfer_raw_put  - Release an xfer that was taken by @scmi_xfer_raw_get
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: A reference to the xfer to put
+ *
+ * Note that, as with other xfer_put() handlers, the xfer is effectively
+ * released only once there are no more users on the system.
+ */
+void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+       struct scmi_info *info = handle_to_scmi_info(handle);
+
+       xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
+       xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
+       return __scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
+/**
  * scmi_xfer_lookup_unlocked  -  Helper to lookup an xfer_id
  *
  * @minfo: Pointer to Tx/Rx Message management info based on channel type
@@ -623,25 +859,6 @@ static inline void scmi_clear_channel(struct scmi_info *info,
                info->desc->ops->clear_channel(cinfo);
 }
 
-static inline bool is_polling_required(struct scmi_chan_info *cinfo,
-                                      struct scmi_info *info)
-{
-       return cinfo->no_completion_irq || info->desc->force_polling;
-}
-
-static inline bool is_transport_polling_capable(struct scmi_info *info)
-{
-       return info->desc->ops->poll_done ||
-               info->desc->sync_cmds_completed_on_ret;
-}
-
-static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
-                                     struct scmi_info *info)
-{
-       return is_polling_required(cinfo, info) &&
-               is_transport_polling_capable(info);
-}
-
 static void scmi_handle_notification(struct scmi_chan_info *cinfo,
                                     u32 msg_hdr, void *priv)
 {
@@ -652,7 +869,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
        ktime_t ts;
 
        ts = ktime_get_boottime();
-       xfer = scmi_xfer_get(cinfo->handle, minfo, false);
+       xfer = scmi_xfer_get(cinfo->handle, minfo);
        if (IS_ERR(xfer)) {
                dev_err(dev, "failed to get free message slot (%ld)\n",
                        PTR_ERR(xfer));
@@ -667,9 +884,9 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
        info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
                                            xfer);
 
-       trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI",
-                           xfer->hdr.seq, xfer->hdr.status,
-                           xfer->rx.buf, xfer->rx.len);
+       trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+                           xfer->hdr.id, "NOTI", xfer->hdr.seq,
+                           xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
 
        scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
                    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -678,6 +895,12 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
                           xfer->hdr.protocol_id, xfer->hdr.seq,
                           MSG_TYPE_NOTIFICATION);
 
+       if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+               xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
+               scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
+                                       cinfo->id);
+       }
+
        __scmi_xfer_put(minfo, xfer);
 
        scmi_clear_channel(info, cinfo);
@@ -691,6 +914,9 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
 
        xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
        if (IS_ERR(xfer)) {
+               if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+                       scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);
+
                if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
                        scmi_clear_channel(info, cinfo);
                return;
@@ -705,9 +931,11 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
                smp_store_mb(xfer->priv, priv);
        info->desc->ops->fetch_response(cinfo, xfer);
 
-       trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
+       trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+                           xfer->hdr.id,
                            xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
-                           "DLYD" : "RESP",
+                           (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
+                           (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
                            xfer->hdr.seq, xfer->hdr.status,
                            xfer->rx.buf, xfer->rx.len);
 
@@ -722,6 +950,18 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
                complete(&xfer->done);
        }
 
+       if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+               /*
+                * When in polling mode avoid to queue the Raw xfer on the IRQ
+                * RX path since it will be already queued at the end of the TX
+                * poll loop.
+                */
+               if (!xfer->hdr.poll_completion)
+                       scmi_raw_message_report(info->raw, xfer,
+                                               SCMI_RAW_REPLY_QUEUE,
+                                               cinfo->id);
+       }
+
        scmi_xfer_command_release(info, xfer);
 }
 
@@ -785,36 +1025,18 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
               ktime_after(ktime_get(), stop);
 }
 
-/**
- * scmi_wait_for_message_response  - An helper to group all the possible ways of
- * waiting for a synchronous message response.
- *
- * @cinfo: SCMI channel info
- * @xfer: Reference to the transfer being waited for.
- *
- * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
- * configuration flags like xfer->hdr.poll_completion.
- *
- * Return: 0 on Success, error otherwise.
- */
-static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
-                                         struct scmi_xfer *xfer)
+static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
+                              struct scmi_chan_info *cinfo,
+                              struct scmi_xfer *xfer, unsigned int timeout_ms)
 {
-       struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
-       struct device *dev = info->dev;
-       int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;
-
-       trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
-                                     xfer->hdr.protocol_id, xfer->hdr.seq,
-                                     timeout_ms,
-                                     xfer->hdr.poll_completion);
+       int ret = 0;
 
        if (xfer->hdr.poll_completion) {
                /*
                 * Real polling is needed only if transport has NOT declared
                 * itself to support synchronous commands replies.
                 */
-               if (!info->desc->sync_cmds_completed_on_ret) {
+               if (!desc->sync_cmds_completed_on_ret) {
                        /*
                         * Poll on xfer using transport provided .poll_done();
                         * assumes no completion interrupt was available.
@@ -833,6 +1055,8 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
 
                if (!ret) {
                        unsigned long flags;
+                       struct scmi_info *info =
+                               handle_to_scmi_info(cinfo->handle);
 
                        /*
                         * Do not fetch_response if an out-of-order delayed
@@ -840,16 +1064,27 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
                         */
                        spin_lock_irqsave(&xfer->lock, flags);
                        if (xfer->state == SCMI_XFER_SENT_OK) {
-                               info->desc->ops->fetch_response(cinfo, xfer);
+                               desc->ops->fetch_response(cinfo, xfer);
                                xfer->state = SCMI_XFER_RESP_OK;
                        }
                        spin_unlock_irqrestore(&xfer->lock, flags);
 
                        /* Trace polled replies. */
-                       trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
-                                           "RESP",
+                       trace_scmi_msg_dump(info->id, cinfo->id,
+                                           xfer->hdr.protocol_id, xfer->hdr.id,
+                                           !SCMI_XFER_IS_RAW(xfer) ?
+                                           "RESP" : "resp",
                                            xfer->hdr.seq, xfer->hdr.status,
                                            xfer->rx.buf, xfer->rx.len);
+
+                       if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+                               struct scmi_info *info =
+                                       handle_to_scmi_info(cinfo->handle);
+
+                               scmi_raw_message_report(info->raw, xfer,
+                                                       SCMI_RAW_REPLY_QUEUE,
+                                                       cinfo->id);
+                       }
                }
        } else {
                /* And we wait for the response. */
@@ -865,6 +1100,59 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
 }
 
 /**
+ * scmi_wait_for_message_response  - A helper to group all the possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ *
+ * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
+ * configuration flags like xfer->hdr.poll_completion.
+ *
+ * Return: 0 on Success, error otherwise.
+ */
+static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
+                                         struct scmi_xfer *xfer)
+{
+       struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+       struct device *dev = info->dev;
+
+       trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
+                                     xfer->hdr.protocol_id, xfer->hdr.seq,
+                                     info->desc->max_rx_timeout_ms,
+                                     xfer->hdr.poll_completion);
+
+       return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
+                                  info->desc->max_rx_timeout_ms);
+}
+
+/**
+ * scmi_xfer_raw_wait_for_message_response  - A helper to wait for a message
+ * reply to a raw xfer request on a specific channel for the required timeout.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ * @timeout_ms: The maximum timeout in milliseconds
+ *
+ * Return: 0 on Success, error otherwise.
+ */
+int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
+                                           struct scmi_xfer *xfer,
+                                           unsigned int timeout_ms)
+{
+       int ret;
+       struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+       struct device *dev = info->dev;
+
+       ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
+       if (ret)
+               dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
+                       pack_scmi_header(&xfer->hdr));
+
+       return ret;
+}
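
Taken together, the raw helpers above compose into a minimal TX path: pick a
bare xfer, fill its header from the injected message, choose the channel by
protocol, register the caller-chosen sequence number as in-flight, send, then
wait and release. The sketch below is illustrative only and not part of this
series; it assumes the transport provides a ->send_message() op (not visible
in this hunk) and omits any retry logic that real Raw mode code may need.

    static int raw_send_sketch(const struct scmi_handle *handle,
                               const struct scmi_desc *desc, u8 protocol_id,
                               unsigned int timeout_ms)
    {
            int ret;
            struct scmi_xfer *xfer;
            struct scmi_chan_info *cinfo;

            xfer = scmi_xfer_raw_get(handle);   /* bare xfer, no token yet */
            if (IS_ERR(xfer))
                    return PTR_ERR(xfer);

            /* xfer->hdr (id/protocol/type/seq) assumed already filled here */

            cinfo = scmi_xfer_raw_channel_get(handle, protocol_id);
            if (IS_ERR(cinfo)) {
                    ret = PTR_ERR(cinfo);
                    goto put;
            }

            /* Register the caller-chosen hdr.seq as in-flight on TX */
            ret = scmi_xfer_raw_inflight_register(handle, xfer);
            if (ret)
                    goto put;

            ret = desc->ops->send_message(cinfo, xfer);   /* assumed op */
            if (!ret)
                    ret = scmi_xfer_raw_wait_for_message_response(cinfo, xfer,
                                                                  timeout_ms);
    put:
            scmi_xfer_raw_put(handle, xfer);
            return ret;
    }
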
+
+/**
  * do_xfer() - Do one transfer
  *
  * @ph: Pointer to SCMI protocol handle
@@ -884,7 +1172,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
        struct scmi_chan_info *cinfo;
 
        /* Check for polling request on custom command xfers at first */
-       if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
+       if (xfer->hdr.poll_completion &&
+           !is_transport_polling_capable(info->desc)) {
                dev_warn_once(dev,
                              "Polling mode is not supported by transport.\n");
                return -EINVAL;
@@ -895,7 +1184,7 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
                return -EINVAL;
 
        /* True ONLY if also supported by transport. */
-       if (is_polling_enabled(cinfo, info))
+       if (is_polling_enabled(cinfo, info->desc))
                xfer->hdr.poll_completion = true;
 
        /*
@@ -910,6 +1199,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
                              xfer->hdr.protocol_id, xfer->hdr.seq,
                              xfer->hdr.poll_completion);
 
+       /* Clear any stale status */
+       xfer->hdr.status = SCMI_SUCCESS;
        xfer->state = SCMI_XFER_SENT_OK;
        /*
         * Even though spinlocking is not needed here since no race is possible
@@ -926,9 +1217,9 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
                return ret;
        }
 
-       trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND",
-                           xfer->hdr.seq, xfer->hdr.status,
-                           xfer->tx.buf, xfer->tx.len);
+       trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+                           xfer->hdr.id, "CMND", xfer->hdr.seq,
+                           xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
 
        ret = scmi_wait_for_message_response(cinfo, xfer);
        if (!ret && xfer->hdr.status)
@@ -952,8 +1243,6 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
        xfer->rx.len = info->desc->max_msg_size;
 }
 
-#define SCMI_MAX_RESPONSE_TIMEOUT      (2 * MSEC_PER_SEC)
-
 /**
  * do_xfer_with_response() - Do one transfer and wait until the delayed
  *     response is received
@@ -1041,13 +1330,22 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph,
            tx_size > info->desc->max_msg_size)
                return -ERANGE;
 
-       xfer = scmi_xfer_get(pi->handle, minfo, true);
+       xfer = scmi_xfer_get(pi->handle, minfo);
        if (IS_ERR(xfer)) {
                ret = PTR_ERR(xfer);
                dev_err(dev, "failed to get free message slot(%d)\n", ret);
                return ret;
        }
 
+       /* Pick a sequence number and register this xfer as in-flight */
+       ret = scmi_xfer_pending_set(xfer, minfo);
+       if (ret) {
+               dev_err(pi->handle->dev,
+                       "Failed to get monotonic token %d\n", ret);
+               __scmi_xfer_put(minfo, xfer);
+               return ret;
+       }
+
        xfer->tx.len = tx_size;
        xfer->rx.len = rx_size ? : info->desc->max_msg_size;
        xfer->hdr.type = MSG_TYPE_COMMAND;
@@ -1820,20 +2118,14 @@ static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
        bool ret;
        struct scmi_info *info = handle_to_scmi_info(handle);
 
-       ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
+       ret = info->desc->atomic_enabled &&
+               is_transport_polling_capable(info->desc);
        if (ret && atomic_threshold)
                *atomic_threshold = info->atomic_threshold;
 
        return ret;
 }
 
-static inline
-struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
-{
-       info->users++;
-       return &info->handle;
-}
-
 /**
  * scmi_handle_get() - Get the SCMI handle for a device
  *
@@ -1845,7 +2137,7 @@ struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
  *
  * Return: pointer to handle if successful, NULL on error
  */
-struct scmi_handle *scmi_handle_get(struct device *dev)
+static struct scmi_handle *scmi_handle_get(struct device *dev)
 {
        struct list_head *p;
        struct scmi_info *info;
@@ -1855,7 +2147,8 @@ struct scmi_handle *scmi_handle_get(struct device *dev)
        list_for_each(p, &scmi_list) {
                info = list_entry(p, struct scmi_info, node);
                if (dev->parent == info->dev) {
-                       handle = scmi_handle_get_from_info_unlocked(info);
+                       info->users++;
+                       handle = &info->handle;
                        break;
                }
        }
@@ -1876,7 +2169,7 @@ struct scmi_handle *scmi_handle_get(struct device *dev)
  * Return: 0 is successfully released
  *     if null was passed, it returns -EINVAL;
  */
-int scmi_handle_put(const struct scmi_handle *handle)
+static int scmi_handle_put(const struct scmi_handle *handle)
 {
        struct scmi_info *info;
 
@@ -1892,6 +2185,23 @@ int scmi_handle_put(const struct scmi_handle *handle)
        return 0;
 }
 
+static void scmi_device_link_add(struct device *consumer,
+                                struct device *supplier)
+{
+       struct device_link *link;
+
+       link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
+
+       WARN_ON(!link);
+}
+
+static void scmi_set_handle(struct scmi_device *scmi_dev)
+{
+       scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
+       if (scmi_dev->handle)
+               scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
+}
+
 static int __scmi_xfer_info_init(struct scmi_info *sinfo,
                                 struct scmi_xfers_info *info)
 {
@@ -1985,23 +2295,20 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
        return ret;
 }
 
-static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
+static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
                           int prot_id, bool tx)
 {
        int ret, idx;
+       char name[32];
        struct scmi_chan_info *cinfo;
        struct idr *idr;
+       struct scmi_device *tdev = NULL;
 
        /* Transmit channel is first entry i.e. index 0 */
        idx = tx ? 0 : 1;
        idr = tx ? &info->tx_idr : &info->rx_idr;
 
-       /* check if already allocated, used for multiple device per protocol */
-       cinfo = idr_find(idr, prot_id);
-       if (cinfo)
-               return 0;
-
-       if (!info->desc->ops->chan_available(dev, idx)) {
+       if (!info->desc->ops->chan_available(of_node, idx)) {
                cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
                if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
                        return -EINVAL;
@@ -2012,27 +2319,52 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
        if (!cinfo)
                return -ENOMEM;
 
-       cinfo->dev = dev;
        cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
 
+       /* Create a unique name for this transport device */
+       snprintf(name, 32, "__scmi_transport_device_%s_%02X",
+                idx ? "rx" : "tx", prot_id);
+       /* Create a uniquely named, dedicated transport device for this chan */
+       tdev = scmi_device_create(of_node, info->dev, prot_id, name);
+       if (!tdev) {
+               dev_err(info->dev,
+                       "failed to create transport device (%s)\n", name);
+               devm_kfree(info->dev, cinfo);
+               return -EINVAL;
+       }
+       of_node_get(of_node);
+
+       cinfo->id = prot_id;
+       cinfo->dev = &tdev->dev;
        ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
-       if (ret)
+       if (ret) {
+               of_node_put(of_node);
+               scmi_device_destroy(info->dev, prot_id, name);
+               devm_kfree(info->dev, cinfo);
                return ret;
+       }
 
-       if (tx && is_polling_required(cinfo, info)) {
-               if (is_transport_polling_capable(info))
-                       dev_info(dev,
+       if (tx && is_polling_required(cinfo, info->desc)) {
+               if (is_transport_polling_capable(info->desc))
+                       dev_info(&tdev->dev,
                                 "Enabled polling mode TX channel - prot_id:%d\n",
                                 prot_id);
                else
-                       dev_warn(dev,
+                       dev_warn(&tdev->dev,
                                 "Polling mode NOT supported by transport.\n");
        }
 
 idr_alloc:
        ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
        if (ret != prot_id) {
-               dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
+               dev_err(info->dev,
+                       "unable to allocate SCMI idr slot err %d\n", ret);
+               /* Destroy channel and device only if created by this call. */
+               if (tdev) {
+                       of_node_put(of_node);
+                       scmi_device_destroy(info->dev, prot_id, name);
+                       devm_kfree(info->dev, cinfo);
+               }
                return ret;
        }
 
@@ -2041,13 +2373,14 @@ idr_alloc:
 }
 
 static inline int
-scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
+scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
+               int prot_id)
 {
-       int ret = scmi_chan_setup(info, dev, prot_id, true);
+       int ret = scmi_chan_setup(info, of_node, prot_id, true);
 
        if (!ret) {
                /* Rx is optional, report only memory errors */
-               ret = scmi_chan_setup(info, dev, prot_id, false);
+               ret = scmi_chan_setup(info, of_node, prot_id, false);
                if (ret && ret != -ENOMEM)
                        ret = 0;
        }
@@ -2056,306 +2389,264 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
 }
 
 /**
- * scmi_get_protocol_device  - Helper to get/create an SCMI device.
- *
- * @np: A device node representing a valid active protocols for the referred
- * SCMI instance.
- * @info: The referred SCMI instance for which we are getting/creating this
- * device.
- * @prot_id: The protocol ID.
- * @name: The device name.
- *
- * Referring to the specific SCMI instance identified by @info, this helper
- * takes care to return a properly initialized device matching the requested
- * @proto_id and @name: if device was still not existent it is created as a
- * child of the specified SCMI instance @info and its transport properly
- * initialized as usual.
- *
- * Return: A properly initialized scmi device, NULL otherwise.
+ * scmi_channels_setup  - Helper to initialize all required channels
+ *
+ * @info: The SCMI instance descriptor.
+ *
+ * Initialize all the channels described in the DT against the underlying
+ * configured transport, using custom defined dedicated devices instead of
+ * borrowing devices from the SCMI drivers; this way channels are initialized
+ * upfront during core SCMI stack probing and are no longer coupled with the
+ * SCMI devices used by SCMI drivers.
+ *
+ * Note that, even though a pair of TX/RX channels is associated with each
+ * protocol defined in the DT, a distinct freshly initialized channel is
+ * created only if the DT node for the protocol at hand describes a dedicated
+ * channel: in all other cases the common BASE protocol channel is reused.
+ *
+ * Return: 0 on Success
  */
-static inline struct scmi_device *
-scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
-                        int prot_id, const char *name)
+static int scmi_channels_setup(struct scmi_info *info)
 {
-       struct scmi_device *sdev;
+       int ret;
+       struct device_node *child, *top_np = info->dev->of_node;
 
-       /* Already created for this parent SCMI instance ? */
-       sdev = scmi_child_dev_find(info->dev, prot_id, name);
-       if (sdev)
-               return sdev;
+       /* Initialize a common generic channel at first */
+       ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
+       if (ret)
+               return ret;
 
-       mutex_lock(&scmi_syspower_mtx);
-       if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) {
-               dev_warn(info->dev,
-                        "SCMI SystemPower protocol device must be unique !\n");
-               mutex_unlock(&scmi_syspower_mtx);
+       for_each_available_child_of_node(top_np, child) {
+               u32 prot_id;
 
-               return NULL;
+               if (of_property_read_u32(child, "reg", &prot_id))
+                       continue;
+
+               if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
+                       dev_err(info->dev,
+                               "Out of range protocol %d\n", prot_id);
+
+               ret = scmi_txrx_setup(info, child, prot_id);
+               if (ret) {
+                       of_node_put(child);
+                       return ret;
+               }
        }
 
-       pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
+       return 0;
+}
 
-       sdev = scmi_device_create(np, info->dev, prot_id, name);
-       if (!sdev) {
-               dev_err(info->dev, "failed to create %d protocol device\n",
-                       prot_id);
-               mutex_unlock(&scmi_syspower_mtx);
+static int scmi_chan_destroy(int id, void *p, void *idr)
+{
+       struct scmi_chan_info *cinfo = p;
 
-               return NULL;
+       if (cinfo->dev) {
+               struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+               struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
+
+               of_node_put(cinfo->dev->of_node);
+               scmi_device_destroy(info->dev, id, sdev->name);
+               cinfo->dev = NULL;
        }
 
-       if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
-               dev_err(&sdev->dev, "failed to setup transport\n");
-               scmi_device_destroy(sdev);
-               mutex_unlock(&scmi_syspower_mtx);
+       idr_remove(idr, id);
 
-               return NULL;
-       }
+       return 0;
+}
 
-       if (prot_id == SCMI_PROTOCOL_SYSTEM)
-               scmi_syspower_registered = true;
+static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
+{
+       /* At first free all channels at the transport layer ... */
+       idr_for_each(idr, info->desc->ops->chan_free, idr);
 
-       mutex_unlock(&scmi_syspower_mtx);
+       /* ...then destroy all underlying devices */
+       idr_for_each(idr, scmi_chan_destroy, idr);
 
-       return sdev;
+       idr_destroy(idr);
 }
 
-static inline void
-scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
-                           int prot_id, const char *name)
+static void scmi_cleanup_txrx_channels(struct scmi_info *info)
 {
-       struct scmi_device *sdev;
-
-       sdev = scmi_get_protocol_device(np, info, prot_id, name);
-       if (!sdev)
-               return;
+       scmi_cleanup_channels(info, &info->tx_idr);
 
-       /* setup handle now as the transport is ready */
-       scmi_set_handle(sdev);
+       scmi_cleanup_channels(info, &info->rx_idr);
 }
 
-/**
- * scmi_create_protocol_devices  - Create devices for all pending requests for
- * this SCMI instance.
- *
- * @np: The device node describing the protocol
- * @info: The SCMI instance descriptor
- * @prot_id: The protocol ID
- *
- * All devices previously requested for this instance (if any) are found and
- * created by scanning the proper @&scmi_requested_devices entry.
- */
-static void scmi_create_protocol_devices(struct device_node *np,
-                                        struct scmi_info *info, int prot_id)
+static int scmi_bus_notifier(struct notifier_block *nb,
+                            unsigned long action, void *data)
 {
-       struct list_head *phead;
+       struct scmi_info *info = bus_nb_to_scmi_info(nb);
+       struct scmi_device *sdev = to_scmi_dev(data);
 
-       mutex_lock(&scmi_requested_devices_mtx);
-       phead = idr_find(&scmi_requested_devices, prot_id);
-       if (phead) {
-               struct scmi_requested_dev *rdev;
+       /* Skip transport devices and devices of different SCMI instances */
+       if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
+           sdev->dev.parent != info->dev)
+               return NOTIFY_DONE;
 
-               list_for_each_entry(rdev, phead, node)
-                       scmi_create_protocol_device(np, info, prot_id,
-                                                   rdev->id_table->name);
+       switch (action) {
+       case BUS_NOTIFY_BIND_DRIVER:
+               /* setup handle now as the transport is ready */
+               scmi_set_handle(sdev);
+               break;
+       case BUS_NOTIFY_UNBOUND_DRIVER:
+               scmi_handle_put(sdev->handle);
+               sdev->handle = NULL;
+               break;
+       default:
+               return NOTIFY_DONE;
        }
-       mutex_unlock(&scmi_requested_devices_mtx);
+
+       dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
+               sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
+               "about to be BOUND." : "UNBOUND.");
+
+       return NOTIFY_OK;
 }
 
-/**
- * scmi_protocol_device_request  - Helper to request a device
- *
- * @id_table: A protocol/name pair descriptor for the device to be created.
- *
- * This helper let an SCMI driver request specific devices identified by the
- * @id_table to be created for each active SCMI instance.
- *
- * The requested device name MUST NOT be already existent for any protocol;
- * at first the freshly requested @id_table is annotated in the IDR table
- * @scmi_requested_devices, then a matching device is created for each already
- * active SCMI instance. (if any)
- *
- * This way the requested device is created straight-away for all the already
- * initialized(probed) SCMI instances (handles) and it remains also annotated
- * as pending creation if the requesting SCMI driver was loaded before some
- * SCMI instance and related transports were available: when such late instance
- * is probed, its probe will take care to scan the list of pending requested
- * devices and create those on its own (see @scmi_create_protocol_devices and
- * its enclosing loop)
- *
- * Return: 0 on Success
- */
-int scmi_protocol_device_request(const struct scmi_device_id *id_table)
+static int scmi_device_request_notifier(struct notifier_block *nb,
+                                       unsigned long action, void *data)
 {
-       int ret = 0;
-       unsigned int id = 0;
-       struct list_head *head, *phead = NULL;
-       struct scmi_requested_dev *rdev;
-       struct scmi_info *info;
+       struct device_node *np;
+       struct scmi_device_id *id_table = data;
+       struct scmi_info *info = req_nb_to_scmi_info(nb);
 
-       pr_debug("Requesting SCMI device (%s) for protocol %x\n",
-                id_table->name, id_table->protocol_id);
+       np = idr_find(&info->active_protocols, id_table->protocol_id);
+       if (!np)
+               return NOTIFY_DONE;
 
-       /*
-        * Search for the matching protocol rdev list and then search
-        * of any existent equally named device...fails if any duplicate found.
-        */
-       mutex_lock(&scmi_requested_devices_mtx);
-       idr_for_each_entry(&scmi_requested_devices, head, id) {
-               if (!phead) {
-                       /* A list found registered in the IDR is never empty */
-                       rdev = list_first_entry(head, struct scmi_requested_dev,
-                                               node);
-                       if (rdev->id_table->protocol_id ==
-                           id_table->protocol_id)
-                               phead = head;
-               }
-               list_for_each_entry(rdev, head, node) {
-                       if (!strcmp(rdev->id_table->name, id_table->name)) {
-                               pr_err("Ignoring duplicate request [%d] %s\n",
-                                      rdev->id_table->protocol_id,
-                                      rdev->id_table->name);
-                               ret = -EINVAL;
-                               goto out;
-                       }
-               }
-       }
+       dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
+               action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
+               id_table->name, id_table->protocol_id);
 
-       /*
-        * No duplicate found for requested id_table, so let's create a new
-        * requested device entry for this new valid request.
-        */
-       rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
-       if (!rdev) {
-               ret = -ENOMEM;
-               goto out;
+       switch (action) {
+       case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
+               scmi_create_protocol_devices(np, info, id_table->protocol_id,
+                                            id_table->name);
+               break;
+       case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
+               scmi_destroy_protocol_devices(info, id_table->protocol_id,
+                                             id_table->name);
+               break;
+       default:
+               return NOTIFY_DONE;
        }
-       rdev->id_table = id_table;
 
-       /*
-        * Append the new requested device table descriptor to the head of the
-        * related protocol list, eventually creating such head if not already
-        * there.
-        */
-       if (!phead) {
-               phead = kzalloc(sizeof(*phead), GFP_KERNEL);
-               if (!phead) {
-                       kfree(rdev);
-                       ret = -ENOMEM;
-                       goto out;
-               }
-               INIT_LIST_HEAD(phead);
-
-               ret = idr_alloc(&scmi_requested_devices, (void *)phead,
-                               id_table->protocol_id,
-                               id_table->protocol_id + 1, GFP_KERNEL);
-               if (ret != id_table->protocol_id) {
-                       pr_err("Failed to save SCMI device - ret:%d\n", ret);
-                       kfree(rdev);
-                       kfree(phead);
-                       ret = -EINVAL;
-                       goto out;
-               }
-               ret = 0;
-       }
-       list_add(&rdev->node, phead);
+       return NOTIFY_OK;
+}
 
-       /*
-        * Now effectively create and initialize the requested device for every
-        * already initialized SCMI instance which has registered the requested
-        * protocol as a valid active one: i.e. defined in DT and supported by
-        * current platform FW.
-        */
-       mutex_lock(&scmi_list_mutex);
-       list_for_each_entry(info, &scmi_list, node) {
-               struct device_node *child;
-
-               child = idr_find(&info->active_protocols,
-                                id_table->protocol_id);
-               if (child) {
-                       struct scmi_device *sdev;
-
-                       sdev = scmi_get_protocol_device(child, info,
-                                                       id_table->protocol_id,
-                                                       id_table->name);
-                       if (sdev) {
-                               /* Set handle if not already set: device existed */
-                               if (!sdev->handle)
-                                       sdev->handle =
-                                               scmi_handle_get_from_info_unlocked(info);
-                               /* Relink consumer and suppliers */
-                               if (sdev->handle)
-                                       scmi_device_link_add(&sdev->dev,
-                                                            sdev->handle->dev);
-                       }
-               } else {
-                       dev_err(info->dev,
-                               "Failed. SCMI protocol %d not active.\n",
-                               id_table->protocol_id);
-               }
-       }
-       mutex_unlock(&scmi_list_mutex);
+static void scmi_debugfs_common_cleanup(void *d)
+{
+       struct scmi_debug_info *dbg = d;
 
-out:
-       mutex_unlock(&scmi_requested_devices_mtx);
+       if (!dbg)
+               return;
 
-       return ret;
+       debugfs_remove_recursive(dbg->top_dentry);
+       kfree(dbg->name);
+       kfree(dbg->type);
 }
 
-/**
- * scmi_protocol_device_unrequest  - Helper to unrequest a device
- *
- * @id_table: A protocol/name pair descriptor for the device to be unrequested.
- *
- * An helper to let an SCMI driver release its request about devices; note that
- * devices are created and initialized once the first SCMI driver request them
- * but they destroyed only on SCMI core unloading/unbinding.
- *
- * The current SCMI transport layer uses such devices as internal references and
- * as such they could be shared as same transport between multiple drivers so
- * that cannot be safely destroyed till the whole SCMI stack is removed.
- * (unless adding further burden of refcounting.)
- */
-void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
+static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
 {
-       struct list_head *phead;
+       char top_dir[16];
+       struct dentry *trans, *top_dentry;
+       struct scmi_debug_info *dbg;
+       const char *c_ptr = NULL;
 
-       pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
-                id_table->name, id_table->protocol_id);
+       dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
+       if (!dbg)
+               return NULL;
 
-       mutex_lock(&scmi_requested_devices_mtx);
-       phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
-       if (phead) {
-               struct scmi_requested_dev *victim, *tmp;
+       dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
+       if (!dbg->name) {
+               devm_kfree(info->dev, dbg);
+               return NULL;
+       }
 
-               list_for_each_entry_safe(victim, tmp, phead, node) {
-                       if (!strcmp(victim->id_table->name, id_table->name)) {
-                               list_del(&victim->node);
-                               kfree(victim);
-                               break;
-                       }
-               }
+       of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
+       dbg->type = kstrdup(c_ptr, GFP_KERNEL);
+       if (!dbg->type) {
+               kfree(dbg->name);
+               devm_kfree(info->dev, dbg);
+               return NULL;
+       }
 
-               if (list_empty(phead)) {
-                       idr_remove(&scmi_requested_devices,
-                                  id_table->protocol_id);
-                       kfree(phead);
-               }
+       snprintf(top_dir, 16, "%d", info->id);
+       top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
+       trans = debugfs_create_dir("transport", top_dentry);
+
+       dbg->is_atomic = info->desc->atomic_enabled &&
+                               is_transport_polling_capable(info->desc);
+
+       debugfs_create_str("instance_name", 0400, top_dentry,
+                          (char **)&dbg->name);
+
+       debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
+                          &info->atomic_threshold);
+
+       debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
+
+       debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
+
+       debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
+                          (u32 *)&info->desc->max_rx_timeout_ms);
+
+       debugfs_create_u32("max_msg_size", 0400, trans,
+                          (u32 *)&info->desc->max_msg_size);
+
+       debugfs_create_u32("tx_max_msg", 0400, trans,
+                          (u32 *)&info->tx_minfo.max_msg);
+
+       debugfs_create_u32("rx_max_msg", 0400, trans,
+                          (u32 *)&info->rx_minfo.max_msg);
+
+       dbg->top_dentry = top_dentry;
+
+       if (devm_add_action_or_reset(info->dev,
+                                    scmi_debugfs_common_cleanup, dbg)) {
+               scmi_debugfs_common_cleanup(dbg);
+               return NULL;
        }
-       mutex_unlock(&scmi_requested_devices_mtx);
+
+       return dbg;
 }
 
-static int scmi_cleanup_txrx_channels(struct scmi_info *info)
+static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
 {
-       int ret;
-       struct idr *idr = &info->tx_idr;
+       int id, num_chans = 0, ret = 0;
+       struct scmi_chan_info *cinfo;
+       u8 channels[SCMI_MAX_CHANNELS] = {};
+       DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
+
+       if (!info->dbg)
+               return -EINVAL;
+
+       /* Enumerate all channels to collect their ids */
+       idr_for_each_entry(&info->tx_idr, cinfo, id) {
+               /*
+                * This cannot happen, but be defensive.
+                * Zero num_chans is OK: warn and carry on.
+                */
+               if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
+                       dev_warn(info->dev,
+                                "SCMI RAW - Error enumerating channels\n");
+                       break;
+               }
 
-       ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
-       idr_destroy(&info->tx_idr);
+               if (!test_bit(cinfo->id, protos)) {
+                       channels[num_chans++] = cinfo->id;
+                       set_bit(cinfo->id, protos);
+               }
+       }
 
-       idr = &info->rx_idr;
-       ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
-       idr_destroy(&info->rx_idr);
+       info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
+                                      info->id, channels, num_chans,
+                                      info->desc, info->tx_minfo.max_msg);
+       if (IS_ERR(info->raw)) {
+               dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
+               ret = PTR_ERR(info->raw);
+               info->raw = NULL;
+       }
 
        return ret;
 }
@@ -2377,12 +2668,19 @@ static int scmi_probe(struct platform_device *pdev)
        if (!info)
                return -ENOMEM;
 
+       info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
+       if (info->id < 0)
+               return info->id;
+
        info->dev = dev;
        info->desc = desc;
+       info->bus_nb.notifier_call = scmi_bus_notifier;
+       info->dev_req_nb.notifier_call = scmi_device_request_notifier;
        INIT_LIST_HEAD(&info->node);
        idr_init(&info->protocols);
        mutex_init(&info->protocols_mtx);
        idr_init(&info->active_protocols);
+       mutex_init(&info->devreq_mtx);
 
        platform_set_drvdata(pdev, info);
        idr_init(&info->tx_idr);
@@ -2406,21 +2704,55 @@ static int scmi_probe(struct platform_device *pdev)
        if (desc->ops->link_supplier) {
                ret = desc->ops->link_supplier(dev);
                if (ret)
-                       return ret;
+                       goto clear_ida;
        }
 
-       ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
+       /* Setup all channels described in the DT at first */
+       ret = scmi_channels_setup(info);
        if (ret)
-               return ret;
+               goto clear_ida;
 
-       ret = scmi_xfer_info_init(info);
+       ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
        if (ret)
                goto clear_txrx_setup;
 
+       ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
+                                              &info->dev_req_nb);
+       if (ret)
+               goto clear_bus_notifier;
+
+       ret = scmi_xfer_info_init(info);
+       if (ret)
+               goto clear_dev_req_notifier;
+
+       if (scmi_top_dentry) {
+               info->dbg = scmi_debugfs_common_setup(info);
+               if (!info->dbg)
+                       dev_warn(dev, "Failed to setup SCMI debugfs.\n");
+
+               if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+                       bool coex =
+                             IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
+
+                       ret = scmi_debugfs_raw_mode_setup(info);
+                       if (!coex) {
+                               if (ret)
+                                       goto clear_dev_req_notifier;
+
+                               /* Bail out anyway when coex is disabled */
+                               return ret;
+                       }
+
+                       /* Coex enabled, carry on in any case. */
+                       dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
+               }
+       }
+
        if (scmi_notification_init(handle))
                dev_err(dev, "SCMI Notifications NOT available.\n");
 
-       if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
+       if (info->desc->atomic_enabled &&
+           !is_transport_polling_capable(info->desc))
                dev_err(dev,
                        "Transport is not polling capable. Atomic mode not supported.\n");
 
@@ -2467,29 +2799,36 @@ static int scmi_probe(struct platform_device *pdev)
                }
 
                of_node_get(child);
-               scmi_create_protocol_devices(child, info, prot_id);
+               scmi_create_protocol_devices(child, info, prot_id, NULL);
        }
 
        return 0;
 
 notification_exit:
+       if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+               scmi_raw_mode_cleanup(info->raw);
        scmi_notification_exit(&info->handle);
+clear_dev_req_notifier:
+       blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
+                                          &info->dev_req_nb);
+clear_bus_notifier:
+       bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
 clear_txrx_setup:
        scmi_cleanup_txrx_channels(info);
+clear_ida:
+       ida_free(&scmi_id, info->id);
        return ret;
 }
 
-void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
-{
-       idr_remove(idr, id);
-}
-
 static int scmi_remove(struct platform_device *pdev)
 {
-       int ret, id;
+       int id;
        struct scmi_info *info = platform_get_drvdata(pdev);
        struct device_node *child;
 
+       if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+               scmi_raw_mode_cleanup(info->raw);
+
        mutex_lock(&scmi_list_mutex);
        if (info->users)
                dev_warn(&pdev->dev,
@@ -2507,10 +2846,14 @@ static int scmi_remove(struct platform_device *pdev)
                of_node_put(child);
        idr_destroy(&info->active_protocols);
 
+       blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
+                                          &info->dev_req_nb);
+       bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
+
        /* Safe to free channels since no more users */
-       ret = scmi_cleanup_txrx_channels(info);
-       if (ret)
-               dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n");
+       scmi_cleanup_txrx_channels(info);
+
+       ida_free(&scmi_id, info->id);
 
        return 0;
 }
@@ -2639,6 +2982,19 @@ static void __exit scmi_transports_exit(void)
        __scmi_transports_setup(false);
 }
 
+static struct dentry *scmi_debugfs_init(void)
+{
+       struct dentry *d;
+
+       d = debugfs_create_dir("scmi", NULL);
+       if (IS_ERR(d)) {
+               pr_err("Could NOT create SCMI top dentry.\n");
+               return NULL;
+       }
+
+       return d;
+}
+
 static int __init scmi_driver_init(void)
 {
        int ret;
@@ -2647,13 +3003,14 @@ static int __init scmi_driver_init(void)
        if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
                return -EINVAL;
 
-       scmi_bus_init();
-
        /* Initialize any compiled-in transport which provided an init/exit */
        ret = scmi_transports_init();
        if (ret)
                return ret;
 
+       if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
+               scmi_top_dentry = scmi_debugfs_init();
+
        scmi_base_register();
 
        scmi_clock_register();
@@ -2667,7 +3024,7 @@ static int __init scmi_driver_init(void)
 
        return platform_driver_register(&scmi_driver);
 }
-subsys_initcall(scmi_driver_init);
+module_init(scmi_driver_init);
 
 static void __exit scmi_driver_exit(void)
 {
@@ -2682,11 +3039,11 @@ static void __exit scmi_driver_exit(void)
        scmi_system_unregister();
        scmi_powercap_unregister();
 
-       scmi_bus_exit();
-
        scmi_transports_exit();
 
        platform_driver_unregister(&scmi_driver);
+
+       debugfs_remove_recursive(scmi_top_dentry);
 }
 module_exit(scmi_driver_exit);
 
index 1e40cb0..0d9c953 100644
@@ -46,9 +46,9 @@ static void rx_callback(struct mbox_client *cl, void *m)
        scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
 }
 
-static bool mailbox_chan_available(struct device *dev, int idx)
+static bool mailbox_chan_available(struct device_node *of_node, int idx)
 {
-       return !of_parse_phandle_with_args(dev->of_node, "mboxes",
+       return !of_parse_phandle_with_args(of_node, "mboxes",
                                           "#mbox-cells", idx, NULL);
 }
 
@@ -120,8 +120,6 @@ static int mailbox_chan_free(int id, void *p, void *data)
                smbox->cinfo = NULL;
        }
 
-       scmi_free_channel(cinfo, data, id);
-
        return 0;
 }
 
index 2a7aeab..9297203 100644
@@ -328,11 +328,11 @@ static int scmi_optee_link_supplier(struct device *dev)
        return 0;
 }
 
-static bool scmi_optee_chan_available(struct device *dev, int idx)
+static bool scmi_optee_chan_available(struct device_node *of_node, int idx)
 {
        u32 channel_id;
 
-       return !of_property_read_u32_index(dev->of_node, "linaro,optee-channel-id",
+       return !of_property_read_u32_index(of_node, "linaro,optee-channel-id",
                                           idx, &channel_id);
 }
 
@@ -481,8 +481,6 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
        cinfo->transport_info = NULL;
        channel->cinfo = NULL;
 
-       scmi_free_channel(cinfo, data, id);
-
        return 0;
 }
 
index 2f3bf69..78e1a01 100644
@@ -115,6 +115,7 @@ struct scmi_msg_hdr {
  *         - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
  *         - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
  *           (Missing synchronous response is assumed OK and ignored)
+ * @flags: Optional flags associated with this xfer.
  * @lock: A spinlock to protect state and busy fields.
  * @priv: A pointer for transport private usage.
  */
@@ -135,6 +136,12 @@ struct scmi_xfer {
 #define SCMI_XFER_RESP_OK      1
 #define SCMI_XFER_DRESP_OK     2
        int state;
+#define SCMI_XFER_FLAG_IS_RAW  BIT(0)
+#define SCMI_XFER_IS_RAW(x)    ((x)->flags & SCMI_XFER_FLAG_IS_RAW)
+#define SCMI_XFER_FLAG_CHAN_SET        BIT(1)
+#define SCMI_XFER_IS_CHAN_SET(x)       \
+       ((x)->flags & SCMI_XFER_FLAG_CHAN_SET)
+       int flags;
        /* A lock to protect state and busy fields */
        spinlock_t lock;
        void *priv;
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
new file mode 100644
index 0000000..d40df09
--- /dev/null
@@ -0,0 +1,1443 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Raw mode support
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+/**
+ * DOC: Theory of operation
+ *
+ * When enabled, the SCMI Raw mode support exposes a userspace API which allows
+ * a user application to send and receive SCMI commands, replies and
+ * notifications through injection and snooping of bare SCMI messages in binary
+ * little-endian format.
+ *
+ * Such injected SCMI transactions will then be routed through the SCMI core
+ * stack towards the SCMI backend server using whatever SCMI transport is
+ * currently configured on the system under test.
+ *
+ * It is meant to help in running any sort of SCMI backend server testing, no
+ * matter where the server is placed, as long as it is normally reachable via
+ * the transport configured on the system.
+ *
+ * It is activated by a kernel configuration option since it is NOT meant to
+ * be used in production but only during development and in CI deployments.
+ *
+ * In order to avoid possible interferences between the SCMI Raw transactions
+ * originated from a test-suite and the normal operations of the SCMI drivers,
+ * when Raw mode is enabled, by default, all the regular SCMI drivers are
+ * inhibited, unless CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX is enabled: in this
+ * latter case the regular SCMI stack drivers will be loaded as usual and it is
+ * up to the user of this interface to take care of manually inhibiting the
+ * regular SCMI drivers in order to avoid interferences during the test runs.
+ *
+ * The exposed API is as follows.
+ *
+ * All SCMI Raw entries are rooted under a common /raw debugfs top directory
+ * which in turn is rooted under the corresponding underlying SCMI instance.
+ *
+ * /sys/kernel/debug/scmi/
+ * `-- 0
+ *     |-- atomic_threshold_us
+ *     |-- instance_name
+ *     |-- raw
+ *     |   |-- channels
+ *     |   |   |-- 0x10
+ *     |   |   |   |-- message
+ *     |   |   |   `-- message_async
+ *     |   |   `-- 0x13
+ *     |   |       |-- message
+ *     |   |       `-- message_async
+ *     |   |-- errors
+ *     |   |-- message
+ *     |   |-- message_async
+ *     |   |-- notification
+ *     |   `-- reset
+ *     `-- transport
+ *         |-- is_atomic
+ *         |-- max_msg_size
+ *         |-- max_rx_timeout_ms
+ *         |-- rx_max_msg
+ *         |-- tx_max_msg
+ *         `-- type
+ *
+ * where:
+ *
+ *  - errors: used to read back timed-out and unexpected replies
+ *  - message*: used to send sync/async commands and read back immediate and
+ *             delayed responses (if any)
+ *  - notification: used to read any notification being emitted by the system
+ *                 (if previously enabled by the user app)
+ *  - reset: used to flush the queues of messages (of any kind) still pending
+ *          to be read; this is useful at test-suite start/stop to get
+ *          rid of any unread messages from the previous run.
+ *
+ * with the per-channel entries rooted at /channels being present only on a
+ * system where multiple transport channels have been configured.
+ *
+ * Such per-channel entries can be used to explicitly choose a specific channel
+ * for SCMI bare message injection, in contrast with the general entries above
+ * where, instead, the selection of the proper channel to use is automatically
+ * performed based on the protocol embedded in the injected message and on how
+ * the transport is configured on the system.
+ *
+ * Note that other common general entries are available under transport/ to let
+ * the user applications properly set their expectations in terms of
+ * timeouts and message characteristics.
+ *
+ * Each write to the message* entries causes one command request to be built
+ * and sent while the replies or delayed response are read back from those same
+ * entries one message at a time (receiving an EOF at each message boundary).
+ *
+ * The user application running the test is in charge of handling timeouts
+ * on replies and properly choosing SCMI sequence numbers for the outgoing
+ * requests (using the same sequence number is supported but discouraged).
+ *
+ * Injection of multiple in-flight requests is supported as long as the user
+ * application uses properly distinct sequence numbers for concurrent requests
+ * and takes care to properly manage all the related issues about concurrency
+ * and command/reply pairing. Keep in mind that, in any case, the real level
+ * of parallelism attainable in such a scenario depends on the
+ * characteristics of the underlying transport being used.
+ *
+ * Since the SCMI core regular stack is partially used to deliver and collect
+ * the messages, late replies arriving after timeouts and any other sort of
+ * unexpected message can be identified by the SCMI core as usual, and they
+ * will be reported as messages under "errors" for later analysis.
+ */
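
As an editorial aid, here is a self-contained userspace sketch of the flow described above: pack a bare little-endian SCMI message, inject it through the common message entry, and read back the reply. The header bit layout (message_id[7:0], type[9:8], protocol_id[17:10], token[27:18]) follows the SCMI specification; the instance path "0" and the BASE protocol (0x10) PROTOCOL_VERSION command are used only as plausible examples.

  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          uint8_t reply[128];
          uint8_t msg[4];
          uint32_t hdr;
          ssize_t n;
          int fd;

          fd = open("/sys/kernel/debug/scmi/0/raw/message", O_RDWR);
          if (fd < 0)
                  return 1;

          /* BASE (0x10) PROTOCOL_VERSION (0x0) command, token 0x1 */
          hdr = (0x0 & 0xff) | ((0x10 & 0xff) << 10) | ((0x1 & 0x3ff) << 18);

          /* Serialize the header in little-endian order; no payload here */
          msg[0] = hdr & 0xff;
          msg[1] = (hdr >> 8) & 0xff;
          msg[2] = (hdr >> 16) & 0xff;
          msg[3] = (hdr >> 24) & 0xff;

          /* One write() builds and sends exactly one command request */
          if (write(fd, msg, sizeof(msg)) != sizeof(msg))
                  return 1;

          /* One read() drains one whole reply; blocks until it is queued */
          n = read(fd, reply, sizeof(reply));
          if (n > 0)
                  printf("reply of %zd bytes received\n", n);

          close(fd);
          return 0;
  }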
+
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#include "common.h"
+
+#include "raw_mode.h"
+
+#include <trace/events/scmi.h>
+
+#define SCMI_XFER_RAW_MAX_RETRIES      10
+
+/**
+ * struct scmi_raw_queue  - Generic Raw queue descriptor
+ *
+ * @free_bufs: A freelist listhead used to keep unused raw buffers
+ * @free_bufs_lock: Spinlock used to protect access to @free_bufs
+ * @msg_q: A listhead to a queue of snooped messages waiting to be read out
+ * @msg_q_lock: Spinlock used to protect access to @msg_q
+ * @wq: A waitqueue used to wait and poll on related @msg_q
+ */
+struct scmi_raw_queue {
+       struct list_head free_bufs;
+       /* Protect the free_bufs list */
+       spinlock_t free_bufs_lock;
+       struct list_head msg_q;
+       /* Protect the msg_q list */
+       spinlock_t msg_q_lock;
+       wait_queue_head_t wq;
+};
+
+/**
+ * struct scmi_raw_mode_info  - Structure holding SCMI Raw instance data
+ *
+ * @id: Sequential Raw instance ID.
+ * @handle: Pointer to SCMI entity handle to use
+ * @desc: Pointer to the transport descriptor to use
+ * @tx_max_msg: Maximum number of concurrent TX in-flight messages
+ * @q: An array of Raw queue descriptors
+ * @chans_q: An XArray mapping optional additional per-channel queues
+ * @free_waiters: Head of freelist for unused waiters
+ * @free_mtx: A mutex to protect the waiters freelist
+ * @active_waiters: Head of list for currently active and used waiters
+ * @active_mtx: A mutex to protect the active waiters list
+ * @waiters_work: A work descriptor to be used with the workqueue machinery
+ * @wait_wq: A workqueue reference to the created workqueue
+ * @dentry: Top debugfs root dentry for SCMI Raw
+ * @gid: A group ID used for devres accounting
+ *
+ * Note that this descriptor is passed back to the core after SCMI Raw is
+ * initialized as an opaque handle to be used by subsequent SCMI Raw call hooks.
+ *
+ */
+struct scmi_raw_mode_info {
+       unsigned int id;
+       const struct scmi_handle *handle;
+       const struct scmi_desc *desc;
+       int tx_max_msg;
+       struct scmi_raw_queue *q[SCMI_RAW_MAX_QUEUE];
+       struct xarray chans_q;
+       struct list_head free_waiters;
+       /* Protect free_waiters list */
+       struct mutex free_mtx;
+       struct list_head active_waiters;
+       /* Protect active_waiters list */
+       struct mutex active_mtx;
+       struct work_struct waiters_work;
+       struct workqueue_struct *wait_wq;
+       struct dentry *dentry;
+       void *gid;
+};
+
+/**
+ * struct scmi_xfer_raw_waiter  - Structure to describe an xfer to be waited for
+ *
+ * @start_jiffies: The timestamp in jiffies of when this structure was queued.
+ * @cinfo: A reference to the channel to use for this transaction
+ * @xfer: A reference to the xfer to be waited for
+ * @async_response: A completion to be, optionally, used for async waits: it
+ *                 will be set up by @scmi_do_xfer_raw_start, if needed, to be
+ *                 pointed at by xfer->async_done.
+ * @node: A list node.
+ */
+struct scmi_xfer_raw_waiter {
+       unsigned long start_jiffies;
+       struct scmi_chan_info *cinfo;
+       struct scmi_xfer *xfer;
+       struct completion async_response;
+       struct list_head node;
+};
+
+/**
+ * struct scmi_raw_buffer  - Structure to hold a full SCMI message
+ *
+ * @max_len: The maximum allowed message size (header included) that can be
+ *          stored into @msg
+ * @msg: A message buffer used to collect a full message grabbed from an xfer.
+ * @node: A list node.
+ */
+struct scmi_raw_buffer {
+       size_t max_len;
+       struct scmi_msg msg;
+       struct list_head node;
+};
+
+/**
+ * struct scmi_dbg_raw_data  - Structure holding data needed by the debugfs
+ * layer
+ *
+ * @chan_id: The preferred channel to use: if zero the channel is automatically
+ *          selected based on protocol.
+ * @raw: A reference to the Raw instance.
+ * @tx: A message buffer used to collect TX message on write.
+ * @tx_size: The effective size of the TX message.
+ * @tx_req_size: The final expected size of the complete TX message.
+ * @rx: A message buffer to collect RX message on read.
+ * @rx_size: The effective size of the RX message.
+ */
+struct scmi_dbg_raw_data {
+       u8 chan_id;
+       struct scmi_raw_mode_info *raw;
+       struct scmi_msg tx;
+       size_t tx_size;
+       size_t tx_req_size;
+       struct scmi_msg rx;
+       size_t rx_size;
+};
+
+static struct scmi_raw_queue *
+scmi_raw_queue_select(struct scmi_raw_mode_info *raw, unsigned int idx,
+                     unsigned int chan_id)
+{
+       if (!chan_id)
+               return raw->q[idx];
+
+       return xa_load(&raw->chans_q, chan_id);
+}
+
+static struct scmi_raw_buffer *scmi_raw_buffer_get(struct scmi_raw_queue *q)
+{
+       unsigned long flags;
+       struct scmi_raw_buffer *rb = NULL;
+       struct list_head *head = &q->free_bufs;
+
+       spin_lock_irqsave(&q->free_bufs_lock, flags);
+       if (!list_empty(head)) {
+               rb = list_first_entry(head, struct scmi_raw_buffer, node);
+               list_del_init(&rb->node);
+       }
+       spin_unlock_irqrestore(&q->free_bufs_lock, flags);
+
+       return rb;
+}
+
+static void scmi_raw_buffer_put(struct scmi_raw_queue *q,
+                               struct scmi_raw_buffer *rb)
+{
+       unsigned long flags;
+
+       /* Reset to full buffer length */
+       rb->msg.len = rb->max_len;
+
+       spin_lock_irqsave(&q->free_bufs_lock, flags);
+       list_add_tail(&rb->node, &q->free_bufs);
+       spin_unlock_irqrestore(&q->free_bufs_lock, flags);
+}
+
+static void scmi_raw_buffer_enqueue(struct scmi_raw_queue *q,
+                                   struct scmi_raw_buffer *rb)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&q->msg_q_lock, flags);
+       list_add_tail(&rb->node, &q->msg_q);
+       spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+       wake_up_interruptible(&q->wq);
+}
+
+static struct scmi_raw_buffer*
+scmi_raw_buffer_dequeue_unlocked(struct scmi_raw_queue *q)
+{
+       struct scmi_raw_buffer *rb = NULL;
+
+       if (!list_empty(&q->msg_q)) {
+               rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node);
+               list_del_init(&rb->node);
+       }
+
+       return rb;
+}
+
+static struct scmi_raw_buffer *scmi_raw_buffer_dequeue(struct scmi_raw_queue *q)
+{
+       unsigned long flags;
+       struct scmi_raw_buffer *rb;
+
+       spin_lock_irqsave(&q->msg_q_lock, flags);
+       rb = scmi_raw_buffer_dequeue_unlocked(q);
+       spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+       return rb;
+}
+
+static void scmi_raw_buffer_queue_flush(struct scmi_raw_queue *q)
+{
+       struct scmi_raw_buffer *rb;
+
+       do {
+               rb = scmi_raw_buffer_dequeue(q);
+               if (rb)
+                       scmi_raw_buffer_put(q, rb);
+       } while (rb);
+}
+
+static struct scmi_xfer_raw_waiter *
+scmi_xfer_raw_waiter_get(struct scmi_raw_mode_info *raw, struct scmi_xfer *xfer,
+                        struct scmi_chan_info *cinfo, bool async)
+{
+       struct scmi_xfer_raw_waiter *rw = NULL;
+
+       mutex_lock(&raw->free_mtx);
+       if (!list_empty(&raw->free_waiters)) {
+               rw = list_first_entry(&raw->free_waiters,
+                                     struct scmi_xfer_raw_waiter, node);
+               list_del_init(&rw->node);
+
+               if (async) {
+                       reinit_completion(&rw->async_response);
+                       xfer->async_done = &rw->async_response;
+               }
+
+               rw->cinfo = cinfo;
+               rw->xfer = xfer;
+       }
+       mutex_unlock(&raw->free_mtx);
+
+       return rw;
+}
+
+static void scmi_xfer_raw_waiter_put(struct scmi_raw_mode_info *raw,
+                                    struct scmi_xfer_raw_waiter *rw)
+{
+       if (rw->xfer) {
+               rw->xfer->async_done = NULL;
+               rw->xfer = NULL;
+       }
+
+       mutex_lock(&raw->free_mtx);
+       list_add_tail(&rw->node, &raw->free_waiters);
+       mutex_unlock(&raw->free_mtx);
+}
+
+static void scmi_xfer_raw_waiter_enqueue(struct scmi_raw_mode_info *raw,
+                                        struct scmi_xfer_raw_waiter *rw)
+{
+       /* A timestamp for the deferred worker to know how much this has aged */
+       rw->start_jiffies = jiffies;
+
+       trace_scmi_xfer_response_wait(rw->xfer->transfer_id, rw->xfer->hdr.id,
+                                     rw->xfer->hdr.protocol_id,
+                                     rw->xfer->hdr.seq,
+                                     raw->desc->max_rx_timeout_ms,
+                                     rw->xfer->hdr.poll_completion);
+
+       mutex_lock(&raw->active_mtx);
+       list_add_tail(&rw->node, &raw->active_waiters);
+       mutex_unlock(&raw->active_mtx);
+
+       /* kick waiter work */
+       queue_work(raw->wait_wq, &raw->waiters_work);
+}
+
+static struct scmi_xfer_raw_waiter *
+scmi_xfer_raw_waiter_dequeue(struct scmi_raw_mode_info *raw)
+{
+       struct scmi_xfer_raw_waiter *rw = NULL;
+
+       mutex_lock(&raw->active_mtx);
+       if (!list_empty(&raw->active_waiters)) {
+               rw = list_first_entry(&raw->active_waiters,
+                                     struct scmi_xfer_raw_waiter, node);
+               list_del_init(&rw->node);
+       }
+       mutex_unlock(&raw->active_mtx);
+
+       return rw;
+}
+
+/**
+ * scmi_xfer_raw_worker  - Work function to wait for Raw xfers completions
+ *
+ * @work: A reference to the work.
+ *
+ * In SCMI Raw mode, once a user-provided injected SCMI message is sent, we
+ * cannot wait to receive its response (if any) in the context of the injection
+ * routines so as not to leave the userspace write syscall, which delivered the
+ * SCMI message to send, pending until a reply is eventually received.
+ * Userspace should and will poll/wait instead on the read syscalls which will
+ * be in charge of reading a received reply (if any).
+ *
+ * Even though reply messages are collected and reported to the SCMI Raw layer
+ * on the RX path, we nonetheless have to properly wait for their completion as
+ * usual (and for async_completion too if needed) in order to release the
+ * xfer structure at the end: to do this outside of the write/send context,
+ * these waiting jobs are delegated to this deferred worker.
+ *
+ * Any sent xfer, to be waited for, is timestamped and queued for later
+ * consumption by this worker: queue aging is accounted for while choosing a
+ * timeout for the completion, BUT we do not really care here if we end up
+ * accidentally waiting for a bit too long.
+ */
+static void scmi_xfer_raw_worker(struct work_struct *work)
+{
+       struct scmi_raw_mode_info *raw;
+       struct device *dev;
+       unsigned long max_tmo;
+
+       raw = container_of(work, struct scmi_raw_mode_info, waiters_work);
+       dev = raw->handle->dev;
+       max_tmo = msecs_to_jiffies(raw->desc->max_rx_timeout_ms);
+
+       do {
+               int ret = 0;
+               unsigned int timeout_ms;
+               unsigned long aging;
+               struct scmi_xfer *xfer;
+               struct scmi_xfer_raw_waiter *rw;
+               struct scmi_chan_info *cinfo;
+
+               rw = scmi_xfer_raw_waiter_dequeue(raw);
+               if (!rw)
+                       return;
+
+               cinfo = rw->cinfo;
+               xfer = rw->xfer;
+               /*
+                * Waiters are queued by wait-deadline at the end, so some of
+                * them could have been already expired when processed, BUT we
+                * have to check the completion status anyway just in case a
+                * virtually expired (aged) transaction was indeed completed
+                * fine and we'll have to wait for the asynchronous part (if
+                * any): for this reason a 1 ms timeout is used for already
+                * expired/aged xfers.
+                */
+               aging = jiffies - rw->start_jiffies;
+               timeout_ms = max_tmo > aging ?
+                       jiffies_to_msecs(max_tmo - aging) : 1;
+
+               ret = scmi_xfer_raw_wait_for_message_response(cinfo, xfer,
+                                                             timeout_ms);
+               if (!ret && xfer->hdr.status)
+                       ret = scmi_to_linux_errno(xfer->hdr.status);
+
+               if (raw->desc->ops->mark_txdone)
+                       raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer);
+
+               trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
+                                   xfer->hdr.protocol_id, xfer->hdr.seq, ret);
+
+               /* Wait also for an async delayed response if needed */
+               if (!ret && xfer->async_done) {
+                       unsigned long tmo = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+
+                       if (!wait_for_completion_timeout(xfer->async_done, tmo))
+                               dev_err(dev,
+                                       "timed out in RAW delayed resp - HDR:%08X\n",
+                                       pack_scmi_header(&xfer->hdr));
+               }
+
+               /* Release waiter and xfer */
+               scmi_xfer_raw_put(raw->handle, xfer);
+               scmi_xfer_raw_waiter_put(raw, rw);
+       } while (1);
+}
+
+static void scmi_xfer_raw_reset(struct scmi_raw_mode_info *raw)
+{
+       int i;
+
+       dev_info(raw->handle->dev, "Resetting SCMI Raw stack.\n");
+
+       for (i = 0; i < SCMI_RAW_MAX_QUEUE; i++)
+               scmi_raw_buffer_queue_flush(raw->q[i]);
+}
+
+/**
+ * scmi_xfer_raw_get_init  - A helper to build a valid xfer from the provided
+ * bare SCMI message.
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer containing the whole SCMI message to send (including the
+ *      header) in little-endian binary format.
+ * @len: Length of the message in @buf.
+ * @p: A pointer to return the initialized Raw xfer.
+ *
+ * After an xfer is picked from the TX pool and filled in with the message
+ * content, the xfer is registered as pending with the core in the usual way
+ * using the original sequence number provided by the user with the message.
+ *
+ * Note that, in case the testing user application is NOT using distinct
+ * sequence-numbers between successive SCMI messages, such registration could
+ * fail temporarily if the previous message using the same sequence number
+ * has not yet been released; in such a case we just wait and retry.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_xfer_raw_get_init(struct scmi_raw_mode_info *raw, void *buf,
+                                 size_t len, struct scmi_xfer **p)
+{
+       u32 msg_hdr;
+       size_t tx_size;
+       struct scmi_xfer *xfer;
+       int ret, retry = SCMI_XFER_RAW_MAX_RETRIES;
+       struct device *dev = raw->handle->dev;
+
+       if (!buf || len < sizeof(u32))
+               return -EINVAL;
+
+       tx_size = len - sizeof(u32);
+       /* Ensure we have sane transfer sizes */
+       if (tx_size > raw->desc->max_msg_size)
+               return -ERANGE;
+
+       xfer = scmi_xfer_raw_get(raw->handle);
+       if (IS_ERR(xfer)) {
+               dev_warn(dev, "RAW - Cannot get a free RAW xfer !\n");
+               return PTR_ERR(xfer);
+       }
+
+       /* Build xfer from the provided SCMI bare LE message */
+       msg_hdr = le32_to_cpu(*((__le32 *)buf));
+       unpack_scmi_header(msg_hdr, &xfer->hdr);
+       xfer->hdr.seq = (u16)MSG_XTRACT_TOKEN(msg_hdr);
+       /* Polling not supported */
+       xfer->hdr.poll_completion = false;
+       xfer->hdr.status = SCMI_SUCCESS;
+       xfer->tx.len = tx_size;
+       xfer->rx.len = raw->desc->max_msg_size;
+       /* Clear the whole TX buffer */
+       memset(xfer->tx.buf, 0x00, raw->desc->max_msg_size);
+       if (xfer->tx.len)
+               memcpy(xfer->tx.buf, (u8 *)buf + sizeof(msg_hdr), xfer->tx.len);
+       *p = xfer;
+
+       /*
+        * In-flight registration can temporarily fail in case of Raw messages
+        * if the user injects messages without using monotonically increasing
+        * sequence numbers since, in Raw mode, the xfer (and the token) is
+        * finally released later by a deferred worker. Just retry for a while.
+        */
+       do {
+               ret = scmi_xfer_raw_inflight_register(raw->handle, xfer);
+               if (ret) {
+                       dev_dbg(dev,
+                               "...retrying[%d] inflight registration\n",
+                               retry);
+                       msleep(raw->desc->max_rx_timeout_ms /
+                              SCMI_XFER_RAW_MAX_RETRIES);
+               }
+       } while (ret && --retry);
+
+       if (ret) {
+               dev_warn(dev,
+                        "RAW - Could NOT register xfer %d in-flight HDR:0x%08X\n",
+                        xfer->hdr.seq, msg_hdr);
+               scmi_xfer_raw_put(raw->handle, xfer);
+       }
+
+       return ret;
+}
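
Since in-flight registration stalls and retries when a token is reused before the previous xfer has been released, a test application is better off allocating monotonically increasing sequence numbers. A hedged userspace sketch (the 10-bit width matches the token field extracted above, so values wrap at 1024):

  #include <stdint.h>

  /* Illustrative only: trivial token allocator avoiding immediate reuse. */
  static uint32_t scmi_next_token(void)
  {
          static uint32_t tok;

          tok = (tok + 1) & 0x3ff;        /* token lives in header bits [27:18] */
          return tok;
  }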
+
+/**
+ * scmi_do_xfer_raw_start  - A helper to send a valid raw xfer
+ *
+ * @raw: A reference to the Raw instance.
+ * @xfer: The xfer to send
+ * @chan_id: The channel ID to use, if zero the channel is automatically
+ *          selected based on the protocol used.
+ * @async: A flag stating if an asynchronous command is required.
+ *
+ * This function sends a previously built raw xfer using an appropriate channel
+ * and queues the related waiting work.
+ *
+ * Note that we need to know explicitly if the required command is meant to be
+ * asynchronous in kind since we have to properly set up the waiter
+ * (and deducing this from the payload is weak and does not scale given there
+ *  is NOT a common header-flag stating if the command is asynchronous or not).
+ *
+ * Return: 0 on Success
+ */
+static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
+                                 struct scmi_xfer *xfer, u8 chan_id,
+                                 bool async)
+{
+       int ret;
+       struct scmi_chan_info *cinfo;
+       struct scmi_xfer_raw_waiter *rw;
+       struct device *dev = raw->handle->dev;
+
+       if (!chan_id)
+               chan_id = xfer->hdr.protocol_id;
+       else
+               xfer->flags |= SCMI_XFER_FLAG_CHAN_SET;
+
+       cinfo = scmi_xfer_raw_channel_get(raw->handle, chan_id);
+       if (IS_ERR(cinfo))
+               return PTR_ERR(cinfo);
+
+       rw = scmi_xfer_raw_waiter_get(raw, xfer, cinfo, async);
+       if (!rw) {
+               dev_warn(dev, "RAW - Cannot get a free waiter !\n");
+               return -ENOMEM;
+       }
+
+       /* True ONLY if also supported by transport. */
+       if (is_polling_enabled(cinfo, raw->desc))
+               xfer->hdr.poll_completion = true;
+
+       reinit_completion(&xfer->done);
+       /* Make sure xfer state update is visible before sending */
+       smp_store_mb(xfer->state, SCMI_XFER_SENT_OK);
+
+       trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
+                             xfer->hdr.protocol_id, xfer->hdr.seq,
+                             xfer->hdr.poll_completion);
+
+       ret = raw->desc->ops->send_message(rw->cinfo, xfer);
+       if (ret) {
+               dev_err(dev, "Failed to send RAW message %d\n", ret);
+               scmi_xfer_raw_waiter_put(raw, rw);
+               return ret;
+       }
+
+       trace_scmi_msg_dump(raw->id, cinfo->id, xfer->hdr.protocol_id,
+                           xfer->hdr.id, "cmnd", xfer->hdr.seq,
+                           xfer->hdr.status,
+                           xfer->tx.buf, xfer->tx.len);
+
+       scmi_xfer_raw_waiter_enqueue(raw, rw);
+
+       return ret;
+}
+
+/**
+ * scmi_raw_message_send  - A helper to build and send an SCMI command using
+ * the provided SCMI bare message buffer
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer containing the whole SCMI message to send (including the
+ *      header) in little-endian binary format.
+ * @len: Length of the message in @buf.
+ * @chan_id: The channel ID to use.
+ * @async: A flag stating if an asynchronous command is required.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
+                                void *buf, size_t len, u8 chan_id, bool async)
+{
+       int ret;
+       struct scmi_xfer *xfer;
+
+       ret = scmi_xfer_raw_get_init(raw, buf, len, &xfer);
+       if (ret)
+               return ret;
+
+       ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
+       if (ret)
+               scmi_xfer_raw_put(raw->handle, xfer);
+
+       return ret;
+}
+
+static struct scmi_raw_buffer *
+scmi_raw_message_dequeue(struct scmi_raw_queue *q, bool o_nonblock)
+{
+       unsigned long flags;
+       struct scmi_raw_buffer *rb;
+
+       spin_lock_irqsave(&q->msg_q_lock, flags);
+       while (list_empty(&q->msg_q)) {
+               spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+               if (o_nonblock)
+                       return ERR_PTR(-EAGAIN);
+
+               if (wait_event_interruptible(q->wq, !list_empty(&q->msg_q)))
+                       return ERR_PTR(-ERESTARTSYS);
+
+               spin_lock_irqsave(&q->msg_q_lock, flags);
+       }
+
+       rb = scmi_raw_buffer_dequeue_unlocked(q);
+
+       spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+       return rb;
+}
+
+/**
+ * scmi_raw_message_receive  - A helper to dequeue and report the next
+ * available enqueued raw message payload that has been collected.
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer to get hold of the whole SCMI message received and represented
+ *      in little-endian binary format.
+ * @len: Length of @buf.
+ * @size: The effective size of the message copied into @buf
+ * @idx: The index of the queue to pick the next queued message from.
+ * @chan_id: The channel ID to use.
+ * @o_nonblock: A flag to request a non-blocking message dequeue.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_raw_message_receive(struct scmi_raw_mode_info *raw,
+                                   void *buf, size_t len, size_t *size,
+                                   unsigned int idx, unsigned int chan_id,
+                                   bool o_nonblock)
+{
+       int ret = 0;
+       struct scmi_raw_buffer *rb;
+       struct scmi_raw_queue *q;
+
+       q = scmi_raw_queue_select(raw, idx, chan_id);
+       if (!q)
+               return -ENODEV;
+
+       rb = scmi_raw_message_dequeue(q, o_nonblock);
+       if (IS_ERR(rb)) {
+               dev_dbg(raw->handle->dev, "RAW - No message available!\n");
+               return PTR_ERR(rb);
+       }
+
+       if (rb->msg.len <= len) {
+               memcpy(buf, rb->msg.buf, rb->msg.len);
+               *size = rb->msg.len;
+       } else {
+               ret = -ENOSPC;
+       }
+
+       scmi_raw_buffer_put(q, rb);
+
+       return ret;
+}
+
+/* SCMI Raw debugfs helpers */
+
+static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
+                                            char __user *buf,
+                                            size_t count, loff_t *ppos,
+                                            unsigned int idx)
+{
+       ssize_t cnt;
+       struct scmi_dbg_raw_data *rd = filp->private_data;
+
+       if (!rd->rx_size) {
+               int ret;
+
+               ret = scmi_raw_message_receive(rd->raw, rd->rx.buf, rd->rx.len,
+                                              &rd->rx_size, idx, rd->chan_id,
+                                              filp->f_flags & O_NONBLOCK);
+               if (ret) {
+                       rd->rx_size = 0;
+                       return ret;
+               }
+
+               /* Reset any previous filepos change, including writes */
+               *ppos = 0;
+       } else if (*ppos == rd->rx_size) {
+               /* Return EOF once the whole message has been read out */
+               rd->rx_size = 0;
+               return 0;
+       }
+
+       cnt = simple_read_from_buffer(buf, count, ppos,
+                                     rd->rx.buf, rd->rx_size);
+
+       return cnt;
+}
+
+static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
+                                             const char __user *buf,
+                                             size_t count, loff_t *ppos,
+                                             bool async)
+{
+       int ret;
+       struct scmi_dbg_raw_data *rd = filp->private_data;
+
+       if (count > rd->tx.len - rd->tx_size)
+               return -ENOSPC;
+
+       /* On first write attempt @count carries the total full message size. */
+       if (!rd->tx_size)
+               rd->tx_req_size = count;
+
+       /*
+        * Gather a full message, possibly across multiple interrupted writes,
+        * before sending it with a single RAW xfer.
+        */
+       if (rd->tx_size < rd->tx_req_size) {
+               size_t cnt;
+
+               cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos,
+                                            buf, count);
+               rd->tx_size += cnt;
+               if (cnt < count)
+                       return cnt;
+       }
+
+       ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
+                                   rd->chan_id, async);
+
+       /* Reset ppos for next message ... */
+       rd->tx_size = 0;
+       *ppos = 0;
+
+       return ret ?: count;
+}
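
From the userspace side, this means the first write() establishes the expected total message size, with interrupted writes resumed until the whole message has been gathered and sent. A hedged sketch of a sender robust to short writes (helper name illustrative):

  #include <stdint.h>
  #include <unistd.h>

  /* Illustrative only: deliver one full raw message, resuming short writes. */
  static int scmi_raw_send(int fd, const uint8_t *msg, size_t len)
  {
          size_t off = 0;

          while (off < len) {
                  ssize_t n = write(fd, msg + off, len - off);

                  if (n < 0)
                          return -1;
                  off += n;
          }
          return 0;
  }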
+
+static __poll_t scmi_test_dbg_raw_common_poll(struct file *filp,
+                                             struct poll_table_struct *wait,
+                                             unsigned int idx)
+{
+       unsigned long flags;
+       struct scmi_dbg_raw_data *rd = filp->private_data;
+       struct scmi_raw_queue *q;
+       __poll_t mask = 0;
+
+       q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id);
+       if (!q)
+               return mask;
+
+       poll_wait(filp, &q->wq, wait);
+
+       spin_lock_irqsave(&q->msg_q_lock, flags);
+       if (!list_empty(&q->msg_q))
+               mask = EPOLLIN | EPOLLRDNORM;
+       spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+       return mask;
+}
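
A hedged userspace counterpart of this poll support, draining one queue; note that, per the read path above, a zero-byte read just marks the boundary between messages rather than end of stream:

  #include <poll.h>
  #include <stdint.h>
  #include <unistd.h>

  /* Illustrative only: wait for queued messages, drain them one at a time. */
  static void scmi_raw_drain(int fd)
  {
          struct pollfd pfd = { .fd = fd, .events = POLLIN };
          uint8_t msg[128];

          while (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN)) {
                  ssize_t n = read(fd, msg, sizeof(msg));

                  if (n < 0)
                          break;
                  if (n == 0)
                          continue;       /* EOF: message boundary */
                  /* ...process one whole message of n bytes... */
          }
  }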
+
+static ssize_t scmi_dbg_raw_mode_message_read(struct file *filp,
+                                             char __user *buf,
+                                             size_t count, loff_t *ppos)
+{
+       return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+                                            SCMI_RAW_REPLY_QUEUE);
+}
+
+static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
+                                              const char __user *buf,
+                                              size_t count, loff_t *ppos)
+{
+       return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
+}
+
+static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
+                                              struct poll_table_struct *wait)
+{
+       return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_REPLY_QUEUE);
+}
+
+static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
+{
+       u8 id;
+       struct scmi_raw_mode_info *raw;
+       struct scmi_dbg_raw_data *rd;
+       const char *id_str = filp->f_path.dentry->d_parent->d_name.name;
+
+       if (!inode->i_private)
+               return -ENODEV;
+
+       raw = inode->i_private;
+       rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+       if (!rd)
+               return -ENOMEM;
+
+       rd->rx.len = raw->desc->max_msg_size + sizeof(u32);
+       rd->rx.buf = kzalloc(rd->rx.len, GFP_KERNEL);
+       if (!rd->rx.buf) {
+               kfree(rd);
+               return -ENOMEM;
+       }
+
+       rd->tx.len = raw->desc->max_msg_size + sizeof(u32);
+       rd->tx.buf = kzalloc(rd->tx.len, GFP_KERNEL);
+       if (!rd->tx.buf) {
+               kfree(rd->rx.buf);
+               kfree(rd);
+               return -ENOMEM;
+       }
+
+       /* Grab channel ID from debugfs entry naming if any */
+       if (!kstrtou8(id_str, 16, &id))
+               rd->chan_id = id;
+
+       rd->raw = raw;
+       filp->private_data = rd;
+
+       return 0;
+}
+
+static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
+{
+       struct scmi_dbg_raw_data *rd = filp->private_data;
+
+       kfree(rd->rx.buf);
+       kfree(rd->tx.buf);
+       kfree(rd);
+
+       return 0;
+}
+
+static ssize_t scmi_dbg_raw_mode_reset_write(struct file *filp,
+                                            const char __user *buf,
+                                            size_t count, loff_t *ppos)
+{
+       struct scmi_dbg_raw_data *rd = filp->private_data;
+
+       scmi_xfer_raw_reset(rd->raw);
+
+       return count;
+}
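
Usage-wise, a test harness would typically flush all the pending queues at start and stop; any write to the reset entry triggers the flush. A minimal sketch, with the instance path assumed as in the layout documented above:

  #include <fcntl.h>
  #include <unistd.h>

  /* Illustrative only: flush all pending raw messages around a test run. */
  static void scmi_raw_reset(void)
  {
          int fd = open("/sys/kernel/debug/scmi/0/raw/reset", O_WRONLY);

          if (fd >= 0) {
                  write(fd, "1", 1);      /* any payload works */
                  close(fd);
          }
  }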
+
+static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
+       .open = scmi_dbg_raw_mode_open,
+       .release = scmi_dbg_raw_mode_release,
+       .write = scmi_dbg_raw_mode_reset_write,
+       .owner = THIS_MODULE,
+};
+
+static const struct file_operations scmi_dbg_raw_mode_message_fops = {
+       .open = scmi_dbg_raw_mode_open,
+       .release = scmi_dbg_raw_mode_release,
+       .read = scmi_dbg_raw_mode_message_read,
+       .write = scmi_dbg_raw_mode_message_write,
+       .poll = scmi_dbg_raw_mode_message_poll,
+       .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
+                                                    const char __user *buf,
+                                                    size_t count, loff_t *ppos)
+{
+       return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
+       .open = scmi_dbg_raw_mode_open,
+       .release = scmi_dbg_raw_mode_release,
+       .read = scmi_dbg_raw_mode_message_read,
+       .write = scmi_dbg_raw_mode_message_async_write,
+       .poll = scmi_dbg_raw_mode_message_poll,
+       .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
+                                                char __user *buf,
+                                                size_t count, loff_t *ppos)
+{
+       return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+                                            SCMI_RAW_NOTIF_QUEUE);
+}
+
+static __poll_t
+scmi_test_dbg_raw_mode_notif_poll(struct file *filp,
+                                 struct poll_table_struct *wait)
+{
+       return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_NOTIF_QUEUE);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
+       .open = scmi_dbg_raw_mode_open,
+       .release = scmi_dbg_raw_mode_release,
+       .read = scmi_test_dbg_raw_mode_notif_read,
+       .poll = scmi_test_dbg_raw_mode_notif_poll,
+       .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_test_dbg_raw_mode_errors_read(struct file *filp,
+                                                 char __user *buf,
+                                                 size_t count, loff_t *ppos)
+{
+       return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+                                            SCMI_RAW_ERRS_QUEUE);
+}
+
+static __poll_t
+scmi_test_dbg_raw_mode_errors_poll(struct file *filp,
+                                  struct poll_table_struct *wait)
+{
+       return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_ERRS_QUEUE);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
+       .open = scmi_dbg_raw_mode_open,
+       .release = scmi_dbg_raw_mode_release,
+       .read = scmi_test_dbg_raw_mode_errors_read,
+       .poll = scmi_test_dbg_raw_mode_errors_poll,
+       .owner = THIS_MODULE,
+};
+
+static struct scmi_raw_queue *
+scmi_raw_queue_init(struct scmi_raw_mode_info *raw)
+{
+       int i;
+       struct scmi_raw_buffer *rb;
+       struct device *dev = raw->handle->dev;
+       struct scmi_raw_queue *q;
+
+       q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
+       if (!q)
+               return ERR_PTR(-ENOMEM);
+
+       rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL);
+       if (!rb)
+               return ERR_PTR(-ENOMEM);
+
+       spin_lock_init(&q->free_bufs_lock);
+       INIT_LIST_HEAD(&q->free_bufs);
+       for (i = 0; i < raw->tx_max_msg; i++, rb++) {
+               rb->max_len = raw->desc->max_msg_size + sizeof(u32);
+               rb->msg.buf = devm_kzalloc(dev, rb->max_len, GFP_KERNEL);
+               if (!rb->msg.buf)
+                       return ERR_PTR(-ENOMEM);
+               scmi_raw_buffer_put(q, rb);
+       }
+
+       spin_lock_init(&q->msg_q_lock);
+       INIT_LIST_HEAD(&q->msg_q);
+       init_waitqueue_head(&q->wq);
+
+       return q;
+}
+
+static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw)
+{
+       int i;
+       struct scmi_xfer_raw_waiter *rw;
+       struct device *dev = raw->handle->dev;
+
+       rw = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rw), GFP_KERNEL);
+       if (!rw)
+               return -ENOMEM;
+
+       raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d",
+                                      WQ_UNBOUND | WQ_FREEZABLE |
+                                      WQ_HIGHPRI | WQ_SYSFS, 0, raw->id);
+       if (!raw->wait_wq)
+               return -ENOMEM;
+
+       mutex_init(&raw->free_mtx);
+       INIT_LIST_HEAD(&raw->free_waiters);
+       mutex_init(&raw->active_mtx);
+       INIT_LIST_HEAD(&raw->active_waiters);
+
+       for (i = 0; i < raw->tx_max_msg; i++, rw++) {
+               init_completion(&rw->async_response);
+               scmi_xfer_raw_waiter_put(raw, rw);
+       }
+       INIT_WORK(&raw->waiters_work, scmi_xfer_raw_worker);
+
+       return 0;
+}
+
+static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
+                              u8 *channels, int num_chans)
+{
+       int ret, idx;
+       void *gid;
+       struct device *dev = raw->handle->dev;
+
+       gid = devres_open_group(dev, NULL, GFP_KERNEL);
+       if (!gid)
+               return -ENOMEM;
+
+       for (idx = 0; idx < SCMI_RAW_MAX_QUEUE; idx++) {
+               raw->q[idx] = scmi_raw_queue_init(raw);
+               if (IS_ERR(raw->q[idx])) {
+                       ret = PTR_ERR(raw->q[idx]);
+                       goto err;
+               }
+       }
+
+       xa_init(&raw->chans_q);
+       if (num_chans > 1) {
+               int i;
+
+               for (i = 0; i < num_chans; i++) {
+                       void *xret;
+                       struct scmi_raw_queue *q;
+
+                       q = scmi_raw_queue_init(raw);
+                       if (IS_ERR(q)) {
+                               ret = PTR_ERR(q);
+                               goto err_xa;
+                       }
+
+                       xret = xa_store(&raw->chans_q, channels[i], q,
+                                       GFP_KERNEL);
+                       if (xa_err(xret)) {
+                               dev_err(dev,
+                                       "Failed to allocate Raw queue 0x%02X\n",
+                                       channels[i]);
+                               ret = xa_err(xret);
+                               goto err_xa;
+                       }
+               }
+       }
+
+       ret = scmi_xfer_raw_worker_init(raw);
+       if (ret)
+               goto err_xa;
+
+       devres_close_group(dev, gid);
+       raw->gid = gid;
+
+       return 0;
+
+err_xa:
+       xa_destroy(&raw->chans_q);
+err:
+       devres_release_group(dev, gid);
+       return ret;
+}
+
+/**
+ * scmi_raw_mode_init  - Function to initialize the SCMI Raw stack
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @top_dentry: A reference to the top Raw debugfs dentry
+ * @instance_id: The ID of the underlying SCMI platform instance represented by
+ *              this Raw instance
+ * @channels: The list of the existing channels
+ * @num_chans: The number of entries in @channels
+ * @desc: Reference to the transport operations
+ * @tx_max_msg: Max number of in-flight messages allowed by the transport
+ *
+ * This function prepares the SCMI Raw stack and creates the debugfs API.
+ *
+ * Return: An opaque handle to the Raw instance on Success, an ERR_PTR otherwise
+ */
+void *scmi_raw_mode_init(const struct scmi_handle *handle,
+                        struct dentry *top_dentry, int instance_id,
+                        u8 *channels, int num_chans,
+                        const struct scmi_desc *desc, int tx_max_msg)
+{
+       int ret;
+       struct scmi_raw_mode_info *raw;
+       struct device *dev;
+
+       if (!handle || !desc)
+               return ERR_PTR(-EINVAL);
+
+       dev = handle->dev;
+       raw = devm_kzalloc(dev, sizeof(*raw), GFP_KERNEL);
+       if (!raw)
+               return ERR_PTR(-ENOMEM);
+
+       raw->handle = handle;
+       raw->desc = desc;
+       raw->tx_max_msg = tx_max_msg;
+       raw->id = instance_id;
+
+       ret = scmi_raw_mode_setup(raw, channels, num_chans);
+       if (ret) {
+               devm_kfree(dev, raw);
+               return ERR_PTR(ret);
+       }
+
+       raw->dentry = debugfs_create_dir("raw", top_dentry);
+
+       debugfs_create_file("reset", 0200, raw->dentry, raw,
+                           &scmi_dbg_raw_mode_reset_fops);
+
+       debugfs_create_file("message", 0600, raw->dentry, raw,
+                           &scmi_dbg_raw_mode_message_fops);
+
+       debugfs_create_file("message_async", 0600, raw->dentry, raw,
+                           &scmi_dbg_raw_mode_message_async_fops);
+
+       debugfs_create_file("notification", 0400, raw->dentry, raw,
+                           &scmi_dbg_raw_mode_notification_fops);
+
+       debugfs_create_file("errors", 0400, raw->dentry, raw,
+                           &scmi_dbg_raw_mode_errors_fops);
+
+       /*
+        * Expose per-channel entries if multiple channels are available.
+        * Just ignore errors while setting up these interfaces since we
+        * already have working core Raw support anyway.
+        */
+       if (num_chans > 1) {
+               int i;
+               struct dentry *top_chans;
+
+               top_chans = debugfs_create_dir("channels", raw->dentry);
+
+               for (i = 0; i < num_chans; i++) {
+                       char cdir[8];
+                       struct dentry *chd;
+
+                       snprintf(cdir, 8, "0x%02X", channels[i]);
+                       chd = debugfs_create_dir(cdir, top_chans);
+
+                       debugfs_create_file("message", 0600, chd, raw,
+                                           &scmi_dbg_raw_mode_message_fops);
+
+                       debugfs_create_file("message_async", 0600, chd, raw,
+                                           &scmi_dbg_raw_mode_message_async_fops);
+               }
+       }
+
+       dev_info(dev, "SCMI RAW Mode initialized for instance %d\n", raw->id);
+
+       return raw;
+}
+
+/**
+ * scmi_raw_mode_cleanup  - Function to cleanup the SCMI Raw stack
+ *
+ * @r: An opaque handle to an initialized SCMI Raw instance
+ */
+void scmi_raw_mode_cleanup(void *r)
+{
+       struct scmi_raw_mode_info *raw = r;
+
+       if (!raw)
+               return;
+
+       debugfs_remove_recursive(raw->dentry);
+
+       cancel_work_sync(&raw->waiters_work);
+       destroy_workqueue(raw->wait_wq);
+       xa_destroy(&raw->chans_q);
+}
+
+static int scmi_xfer_raw_collect(void *msg, size_t *msg_len,
+                                struct scmi_xfer *xfer)
+{
+       __le32 *m;
+       size_t msg_size;
+
+       if (!xfer || !msg || !msg_len)
+               return -EINVAL;
+
+       /* Account for hdr ...*/
+       msg_size = xfer->rx.len + sizeof(u32);
+       /* ... and status if needed */
+       if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
+               msg_size += sizeof(u32);
+
+       if (msg_size > *msg_len)
+               return -ENOSPC;
+
+       m = msg;
+       *m = cpu_to_le32(pack_scmi_header(&xfer->hdr));
+       if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
+               *++m = cpu_to_le32(xfer->hdr.status);
+
+       memcpy(++m, xfer->rx.buf, xfer->rx.len);
+
+       *msg_len = msg_size;
+
+       return 0;
+}
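
The resulting layout read back by userspace is therefore: a 32-bit little-endian header, a 32-bit status word (absent for notifications), then the payload. A hedged parsing sketch (struct and function names are illustrative, and a little-endian host is assumed):

  #include <stddef.h>
  #include <stdint.h>
  #include <string.h>

  struct raw_msg {
          uint32_t hdr;
          int32_t status;         /* replies only, not notifications */
          const uint8_t *payload;
          size_t payload_len;
  };

  /* Illustrative only: parse one reply as laid out by scmi_xfer_raw_collect(). */
  static int parse_raw_reply(const uint8_t *buf, size_t len, struct raw_msg *m)
  {
          if (len < 2 * sizeof(uint32_t))
                  return -1;

          memcpy(&m->hdr, buf, sizeof(uint32_t)); /* little-endian host assumed */
          memcpy(&m->status, buf + 4, sizeof(int32_t));
          m->payload = buf + 8;
          m->payload_len = len - 8;
          return 0;
  }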
+
+/**
+ * scmi_raw_message_report  - Helper to report back valid responses/notifications
+ * to raw message requests.
+ *
+ * @r: An opaque reference to the raw instance configuration
+ * @xfer: The xfer containing the message to be reported
+ * @idx: The index of the queue.
+ * @chan_id: The channel ID to use.
+ *
+ * If Raw mode is enabled, this is called from the SCMI core on the regular RX
+ * path to save and enqueue the response/notification payload carried by this
+ * xfer into a dedicated scmi_raw_buffer for later consumption by the user.
+ *
+ * This way the caller can free the related xfer immediately afterwards and the
+ * user can read back the raw message payload at its own pace (if ever) without
+ * holding an xfer for too long.
+ */
+void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
+                            unsigned int idx, unsigned int chan_id)
+{
+       int ret;
+       unsigned long flags;
+       struct scmi_raw_buffer *rb;
+       struct device *dev;
+       struct scmi_raw_queue *q;
+       struct scmi_raw_mode_info *raw = r;
+
+       if (!raw || (idx == SCMI_RAW_REPLY_QUEUE && !SCMI_XFER_IS_RAW(xfer)))
+               return;
+
+       dev = raw->handle->dev;
+       q = scmi_raw_queue_select(raw, idx,
+                                 SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
+
+       /*
+        * Grab the msg_q_lock upfront to avoid a possible race between
+        * realizing the free list was empty and effectively picking the next
+        * buffer to use from the oldest one enqueued and still unread on this
+        * msg_q.
+        *
+        * Note that these locks are taken together nowhere else, so there is
+        * no risk of deadlocks due to inversion.
+        */
+       spin_lock_irqsave(&q->msg_q_lock, flags);
+       rb = scmi_raw_buffer_get(q);
+       if (!rb) {
+               /*
+                * Immediate and delayed replies to previously injected Raw
+                * commands MUST be read back from userspace to free the buffers:
+                * if this is not happening something is seriously broken and
+                * must be fixed at the application level: complain loudly.
+                */
+               if (idx == SCMI_RAW_REPLY_QUEUE) {
+                       spin_unlock_irqrestore(&q->msg_q_lock, flags);
+                       dev_warn(dev,
+                                "RAW[%d] - Buffers exhausted. Dropping report.\n",
+                                idx);
+                       return;
+               }
+
+               /*
+                * Notifications and errors queues are instead handled in a
+                * circular manner: unread old buffers are just overwritten by
+                * newer ones.
+                *
+                * The main reason for this is that notifications originating
+                * from Raw requests cannot be distinguished from normal ones,
+                * so the Raw buffer queues risk being flooded and depleted by
+                * notifications if these are mistakenly left enabled or when
+                * running in coexistence mode.
+                */
+               rb = scmi_raw_buffer_dequeue_unlocked(q);
+               if (WARN_ON(!rb)) {
+                       spin_unlock_irqrestore(&q->msg_q_lock, flags);
+                       return;
+               }
+
+               /* Reset to full buffer length */
+               rb->msg.len = rb->max_len;
+
+               dev_warn_once(dev,
+                             "RAW[%d] - Buffers exhausted. Re-using oldest.\n",
+                             idx);
+       }
+       spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+       ret = scmi_xfer_raw_collect(rb->msg.buf, &rb->msg.len, xfer);
+       if (ret) {
+               dev_warn(dev, "RAW - Cannot collect xfer into buffer !\n");
+               scmi_raw_buffer_put(q, rb);
+               return;
+       }
+
+       scmi_raw_buffer_enqueue(q, rb);
+}
+
+static void scmi_xfer_raw_fill(struct scmi_raw_mode_info *raw,
+                              struct scmi_chan_info *cinfo,
+                              struct scmi_xfer *xfer, u32 msg_hdr)
+{
+       /* Unpack received HDR as it is */
+       unpack_scmi_header(msg_hdr, &xfer->hdr);
+       xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
+
+       memset(xfer->rx.buf, 0x00, xfer->rx.len);
+
+       raw->desc->ops->fetch_response(cinfo, xfer);
+}
+
+/**
+ * scmi_raw_error_report  - Helper to report back timed-out or generally
+ * unexpected replies.
+ *
+ * @r: An opaque reference to the raw instance configuration
+ * @cinfo: A reference to the channel to use to retrieve the broken xfer
+ * @msg_hdr: The SCMI message header of the message to fetch and report
+ * @priv: Any private data related to the xfer.
+ *
+ * If Raw mode is enabled, this is called from the SCMI core on the RX path in
+ * case of errors to save and enqueue the bad message payload carried by the
+ * message that has just been received.
+ *
+ * Note that we have to manually fetch any available payload into a temporary
+ * xfer to be able to save and enqueue the message, since the regular RX error
+ * path which had called this would not have fetched the message payload, having
+ * classified it as an error.
+ */
+void scmi_raw_error_report(void *r, struct scmi_chan_info *cinfo,
+                          u32 msg_hdr, void *priv)
+{
+       struct scmi_xfer xfer;
+       struct scmi_raw_mode_info *raw = r;
+
+       if (!raw)
+               return;
+
+       xfer.rx.len = raw->desc->max_msg_size;
+       xfer.rx.buf = kzalloc(xfer.rx.len, GFP_ATOMIC);
+       if (!xfer.rx.buf) {
+               dev_info(raw->handle->dev,
+                        "Cannot report Raw error for HDR:0x%X - ENOMEM\n",
+                        msg_hdr);
+               return;
+       }
+
+       /* Any transport-provided priv must be passed back down to transport */
+       if (priv)
+               /* Ensure priv is visible */
+               smp_store_mb(xfer.priv, priv);
+
+       scmi_xfer_raw_fill(raw, cinfo, &xfer, msg_hdr);
+       scmi_raw_message_report(raw, &xfer, SCMI_RAW_ERRS_QUEUE, 0);
+
+       kfree(xfer.rx.buf);
+}
diff --git a/drivers/firmware/arm_scmi/raw_mode.h b/drivers/firmware/arm_scmi/raw_mode.h
new file mode 100644 (file)
index 0000000..8af756a
--- /dev/null
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * Raw mode support header.
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+#ifndef _SCMI_RAW_MODE_H
+#define _SCMI_RAW_MODE_H
+
+#include "common.h"
+
+enum {
+       SCMI_RAW_REPLY_QUEUE,
+       SCMI_RAW_NOTIF_QUEUE,
+       SCMI_RAW_ERRS_QUEUE,
+       SCMI_RAW_MAX_QUEUE
+};
+
+void *scmi_raw_mode_init(const struct scmi_handle *handle,
+                        struct dentry *top_dentry, int instance_id,
+                        u8 *channels, int num_chans,
+                        const struct scmi_desc *desc, int tx_max_msg);
+void scmi_raw_mode_cleanup(void *raw);
+
+void scmi_raw_message_report(void *raw, struct scmi_xfer *xfer,
+                            unsigned int idx, unsigned int chan_id);
+void scmi_raw_error_report(void *raw, struct scmi_chan_info *cinfo,
+                          u32 msg_hdr, void *priv);
+
+#endif /* _SCMI_RAW_MODE_H */
index 1dfe534..87b4f4d 100644 (file)
@@ -81,10 +81,11 @@ u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
 void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
                          struct scmi_xfer *xfer)
 {
+       size_t len = ioread32(&shmem->length);
+
        xfer->hdr.status = ioread32(shmem->msg_payload);
        /* Skip the length of header and status in shmem area i.e 8 bytes */
-       xfer->rx.len = min_t(size_t, xfer->rx.len,
-                            ioread32(&shmem->length) - 8);
+       xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
 
        /* Take a copy to the rx buffer.. */
        memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
@@ -93,8 +94,10 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
 void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
                              size_t max_len, struct scmi_xfer *xfer)
 {
+       size_t len = ioread32(&shmem->length);
+
        /* Skip only the length of header in shmem area i.e 4 bytes */
-       xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);
+       xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
 
        /* Take a copy to the rx buffer.. */
        memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
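
These two hunks guard the same unsigned underflow: a misbehaving platform reporting a shmem length smaller than the header (less than 8 bytes for responses, 4 for notifications) would previously wrap the subtraction to a huge value; min_t() then picked the other operand, so up to xfer->rx.len stale bytes were copied. With the clamp this degrades to an empty payload. A one-function illustration of the wrap, using the response case:

  #include <stddef.h>

  /* Illustrative only: the unsigned wrap the clamp guards against. */
  static size_t response_payload_len(size_t shmem_len)
  {
          size_t bad = shmem_len - 8;     /* wraps to SIZE_MAX - 7 when 0 */
          size_t good = shmem_len > 8 ? shmem_len - 8 : 0;

          (void)bad;
          return good;
  }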
index 87a7b13..93272e4 100644 (file)
@@ -52,9 +52,9 @@ static irqreturn_t smc_msg_done_isr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static bool smc_chan_available(struct device *dev, int idx)
+static bool smc_chan_available(struct device_node *of_node, int idx)
 {
-       struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0);
+       struct device_node *np = of_parse_phandle(of_node, "shmem", 0);
        if (!np)
                return false;
 
@@ -171,8 +171,6 @@ static int smc_chan_free(int id, void *p, void *data)
        cinfo->transport_info = NULL;
        scmi_info->cinfo = NULL;
 
-       scmi_free_channel(cinfo, data, id);
-
        return 0;
 }
 
index 33c9b81..d68c01c 100644 (file)
@@ -160,7 +160,6 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
        }
 
        vioch->shutdown_done = &vioch_shutdown_done;
-       virtio_break_device(vioch->vqueue->vdev);
        if (!vioch->is_rx && vioch->deferred_tx_wq)
                /* Cannot be kicked anymore after this...*/
                vioch->deferred_tx_wq = NULL;
@@ -386,7 +385,7 @@ static int virtio_link_supplier(struct device *dev)
        return 0;
 }
 
-static bool virtio_chan_available(struct device *dev, int idx)
+static bool virtio_chan_available(struct device_node *of_node, int idx)
 {
        struct scmi_vio_channel *channels, *vioch = NULL;
 
@@ -482,10 +481,14 @@ static int virtio_chan_free(int id, void *p, void *data)
        struct scmi_chan_info *cinfo = p;
        struct scmi_vio_channel *vioch = cinfo->transport_info;
 
+       /*
+        * Break device to inhibit further traffic flowing while shutting down
+        * the channels: doing it later holding vioch->lock creates unsafe
+        * locking dependency chains as reported by LOCKDEP.
+        */
+       virtio_break_device(vioch->vqueue->vdev);
        scmi_vio_channel_cleanup_sync(vioch);
 
-       scmi_free_channel(cinfo, data, id);
-
        return 0;
 }
 
index b54f470..1f971c8 100644 (file)
@@ -899,6 +899,7 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(debugfs_create_str);
 
 static ssize_t debugfs_write_file_str(struct file *file, const char __user *user_buf,
                                      size_t count, loff_t *ppos)
index 4f765bc..0ce5746 100644 (file)
@@ -804,11 +804,6 @@ struct scmi_device {
 
 #define to_scmi_dev(d) container_of(d, struct scmi_device, dev)
 
-struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol,
-                  const char *name);
-void scmi_device_destroy(struct scmi_device *scmi_dev);
-
 struct scmi_device_id {
        u8 protocol_id;
        const char *name;
index f160d68..422c1ad 100644 (file)
@@ -139,11 +139,15 @@ TRACE_EVENT(scmi_rx_done,
 );
 
 TRACE_EVENT(scmi_msg_dump,
-       TP_PROTO(u8 protocol_id, u8 msg_id, unsigned char *tag, u16 seq,
-                int status, void *buf, size_t len),
-       TP_ARGS(protocol_id, msg_id, tag, seq, status, buf, len),
+       TP_PROTO(int id, u8 channel_id, u8 protocol_id, u8 msg_id,
+                unsigned char *tag, u16 seq, int status,
+                void *buf, size_t len),
+       TP_ARGS(id, channel_id, protocol_id, msg_id, tag, seq, status,
+               buf, len),
 
        TP_STRUCT__entry(
+               __field(int, id)
+               __field(u8, channel_id)
                __field(u8, protocol_id)
                __field(u8, msg_id)
                __array(char, tag, 5)
@@ -154,6 +158,8 @@ TRACE_EVENT(scmi_msg_dump,
        ),
 
        TP_fast_assign(
+               __entry->id = id;
+               __entry->channel_id = channel_id;
                __entry->protocol_id = protocol_id;
                __entry->msg_id = msg_id;
                strscpy(__entry->tag, tag, 5);
@@ -163,9 +169,9 @@ TRACE_EVENT(scmi_msg_dump,
                memcpy(__get_dynamic_array(cmd), buf, __entry->len);
        ),
 
-       TP_printk("pt=%02X t=%s msg_id=%02X seq=%04X s=%d pyld=%s",
-                 __entry->protocol_id, __entry->tag, __entry->msg_id,
-                 __entry->seq, __entry->status,
+       TP_printk("id=%d ch=%02X pt=%02X t=%s msg_id=%02X seq=%04X s=%d pyld=%s",
+                 __entry->id, __entry->channel_id, __entry->protocol_id,
+                 __entry->tag, __entry->msg_id, __entry->seq, __entry->status,
                __print_hex_str(__get_dynamic_array(cmd), __entry->len))
 );
 #endif /* _TRACE_SCMI_H */
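
With the two extra fields, a rendered scmi_msg_dump line now identifies the SCMI instance and channel up front. An illustrative line, with made-up values, following the TP_printk() format above:

  scmi_msg_dump: id=0 ch=10 pt=10 t=cmnd msg_id=00 seq=0001 s=0 pyld=00000001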