ASoC: SOF: Intel: ipc4: Wait for channel to be free before sending a message
authorPeter Ujfalusi <peter.ujfalusi@linux.intel.com>
Tue, 18 Oct 2022 12:40:07 +0000 (15:40 +0300)
committerMark Brown <broonie@kernel.org>
Tue, 18 Oct 2022 18:16:44 +0000 (19:16 +0100)
Before attempting to send a message to the DSP we need to check if the
downstream BUSY flag has been cleared by the firmware, to avoid IPC
messages being lost by the firmware.

This is required by a firmware which only acks the received message after
it has sent a reply to the host.
With bad luck, the host could send a message before the firmware gets to
clear the flag, thus losing a message.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
Reviewed-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
Reviewed-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
Reviewed-by: Rander Wang <rander.wang@intel.com>
Link: https://lore.kernel.org/r/20221018124008.6846-4-peter.ujfalusi@linux.intel.com
Signed-off-by: Mark Brown <broonie@kernel.org>
sound/soc/sof/intel/cnl.c
sound/soc/sof/intel/hda-ipc.c
sound/soc/sof/intel/hda.c
sound/soc/sof/intel/hda.h
sound/soc/sof/intel/mtl.c

index 2f2bcde..4bf2337 100644 (file)
@@ -37,6 +37,7 @@ irqreturn_t cnl_ipc4_irq_thread(int irq, void *context)
 {
        struct sof_ipc4_msg notification_data = {{ 0 }};
        struct snd_sof_dev *sdev = context;
+       bool ack_received = false;
        bool ipc_irq = false;
        u32 hipcida, hipctdr;
 
@@ -50,6 +51,7 @@ irqreturn_t cnl_ipc4_irq_thread(int irq, void *context)
                cnl_ipc_dsp_done(sdev);
 
                ipc_irq = true;
+               ack_received = true;
        }
 
        if (hipctdr & CNL_DSP_REG_HIPCTDR_BUSY) {
@@ -98,6 +100,13 @@ irqreturn_t cnl_ipc4_irq_thread(int irq, void *context)
                /* This interrupt is not shared so no need to return IRQ_NONE. */
                dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");
 
+       if (ack_received) {
+               struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+               if (hdev->delayed_ipc_tx_msg)
+                       cnl_ipc4_send_msg(sdev, hdev->delayed_ipc_tx_msg);
+       }
+
        return IRQ_HANDLED;
 }
 
@@ -251,8 +260,16 @@ static bool cnl_compact_ipc_compress(struct snd_sof_ipc_msg *msg,
 
 int cnl_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
 {
+       struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
        struct sof_ipc4_msg *msg_data = msg->msg_data;
 
+       if (hda_ipc4_tx_is_busy(sdev)) {
+               hdev->delayed_ipc_tx_msg = msg;
+               return 0;
+       }
+
+       hdev->delayed_ipc_tx_msg = NULL;
+
        /* send the message via mailbox */
        if (msg_data->data_size)
                sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
index 4118532..b4668c9 100644 (file)
@@ -69,8 +69,16 @@ int hda_dsp_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
 
 int hda_dsp_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
 {
+       struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
        struct sof_ipc4_msg *msg_data = msg->msg_data;
 
+       if (hda_ipc4_tx_is_busy(sdev)) {
+               hdev->delayed_ipc_tx_msg = msg;
+               return 0;
+       }
+
+       hdev->delayed_ipc_tx_msg = NULL;
+
        /* send the message via mailbox */
        if (msg_data->data_size)
                sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
@@ -122,6 +130,7 @@ irqreturn_t hda_dsp_ipc4_irq_thread(int irq, void *context)
 {
        struct sof_ipc4_msg notification_data = {{ 0 }};
        struct snd_sof_dev *sdev = context;
+       bool ack_received = false;
        bool ipc_irq = false;
        u32 hipcie, hipct;
 
@@ -135,6 +144,7 @@ irqreturn_t hda_dsp_ipc4_irq_thread(int irq, void *context)
                hda_dsp_ipc_dsp_done(sdev);
 
                ipc_irq = true;
+               ack_received = true;
        }
 
        if (hipct & HDA_DSP_REG_HIPCT_BUSY) {
@@ -187,6 +197,13 @@ irqreturn_t hda_dsp_ipc4_irq_thread(int irq, void *context)
                /* This interrupt is not shared so no need to return IRQ_NONE. */
                dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");
 
+       if (ack_received) {
+               struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+               if (hdev->delayed_ipc_tx_msg)
+                       hda_dsp_ipc4_send_msg(sdev, hdev->delayed_ipc_tx_msg);
+       }
+
        return IRQ_HANDLED;
 }
 
index 1188ec5..eddd20b 100644 (file)
@@ -681,6 +681,17 @@ void hda_ipc4_dump(struct snd_sof_dev *sdev)
                hipci, hipcie, hipct, hipcte, hipcctl);
 }
 
+bool hda_ipc4_tx_is_busy(struct snd_sof_dev *sdev)
+{
+       struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+       const struct sof_intel_dsp_desc *chip = hda->desc;
+       u32 val;
+
+       val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->ipc_req);
+
+       return !!(val & chip->ipc_req_mask);
+}
+
 static int hda_init(struct snd_sof_dev *sdev)
 {
        struct hda_bus *hbus;
index 2ab3c38..65657d1 100644 (file)
@@ -521,6 +521,14 @@ struct sof_intel_hda_dev {
 
        /* Intel NHLT information */
        struct nhlt_acpi_table *nhlt;
+
+       /*
+        * Pointing to the IPC message if immediate sending was not possible
+        * because the downlink communication channel was BUSY at the time.
+        * The message will be re-tried when the channel becomes free (the ACK
+        * is received from the DSP for the previous message)
+        */
+       struct snd_sof_ipc_msg *delayed_ipc_tx_msg;
 };
 
 static inline struct hdac_bus *sof_to_bus(struct snd_sof_dev *s)
@@ -852,6 +860,7 @@ int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask);
 irqreturn_t cnl_ipc4_irq_thread(int irq, void *context);
 int cnl_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
 irqreturn_t hda_dsp_ipc4_irq_thread(int irq, void *context);
+bool hda_ipc4_tx_is_busy(struct snd_sof_dev *sdev);
 int hda_dsp_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
 void hda_ipc4_dump(struct snd_sof_dev *sdev);
 extern struct sdw_intel_ops sdw_callback;
index a9b31b3..9d1bc74 100644 (file)
@@ -90,8 +90,16 @@ static bool mtl_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
 
 static int mtl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
 {
+       struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
        struct sof_ipc4_msg *msg_data = msg->msg_data;
 
+       if (hda_ipc4_tx_is_busy(sdev)) {
+               hdev->delayed_ipc_tx_msg = msg;
+               return 0;
+       }
+
+       hdev->delayed_ipc_tx_msg = NULL;
+
        /* send the message via mailbox */
        if (msg_data->data_size)
                sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
@@ -492,6 +500,7 @@ static irqreturn_t mtl_ipc_irq_thread(int irq, void *context)
 {
        struct sof_ipc4_msg notification_data = {{ 0 }};
        struct snd_sof_dev *sdev = context;
+       bool ack_received = false;
        bool ipc_irq = false;
        u32 hipcida;
        u32 hipctdr;
@@ -508,6 +517,7 @@ static irqreturn_t mtl_ipc_irq_thread(int irq, void *context)
                mtl_ipc_dsp_done(sdev);
 
                ipc_irq = true;
+               ack_received = true;
        }
 
        if (hipctdr & MTL_DSP_REG_HFIPCXTDR_BUSY) {
@@ -558,6 +568,13 @@ static irqreturn_t mtl_ipc_irq_thread(int irq, void *context)
                dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");
        }
 
+       if (ack_received) {
+               struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+               if (hdev->delayed_ipc_tx_msg)
+                       mtl_ipc_send_msg(sdev, hdev->delayed_ipc_tx_msg);
+       }
+
        return IRQ_HANDLED;
 }