ASoC: Intel: Skylake: Use SG allocation for SKL-based firmware load
Author: Cezary Rojewski <cezary.rojewski@intel.com>
Mon, 5 Dec 2022 08:53:30 +0000 (09:53 +0100)
Committer: Mark Brown <broonie@kernel.org>
Wed, 7 Dec 2022 14:19:05 +0000 (14:19 +0000)
Resign from ->alloc_dma_buf() and use snd_dma_alloc_pages() directly.
For data, i.e. the base firmware binary transfer, make use of SG
allocation to better adapt to memory-limited environments. For the BDL
descriptor, given its small size, this is not required.

Signed-off-by: Cezary Rojewski <cezary.rojewski@intel.com>
Tested-by: Lukasz Majczak <lma@semihalf.com>
Link: https://lore.kernel.org/r/20221205085330.857665-7-cezary.rojewski@intel.com
Signed-off-by: Mark Brown <broonie@kernel.org>
sound/soc/intel/skylake/skl-sst-cldma.c

index b91f7a6..b0204ea 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
+#include <sound/hda_register.h>
 #include "../common/sst-dsp.h"
 #include "../common/sst-dsp-priv.h"
 
@@ -79,21 +80,25 @@ static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
                __le32 **bdlp, int size, int with_ioc)
 {
        __le32 *bdl = *bdlp;
+       int remaining = ctx->cl_dev.bufsize;
+       int offset = 0;
 
        ctx->cl_dev.frags = 0;
-       while (size > 0) {
-               phys_addr_t addr = virt_to_phys(dmab_data->area +
-                               (ctx->cl_dev.frags * ctx->cl_dev.bufsize));
+       while (remaining > 0) {
+               phys_addr_t addr;
+               int chunk;
 
+               addr = snd_sgbuf_get_addr(dmab_data, offset);
                bdl[0] = cpu_to_le32(lower_32_bits(addr));
                bdl[1] = cpu_to_le32(upper_32_bits(addr));
+               chunk = snd_sgbuf_get_chunk_size(dmab_data, offset, size);
+               bdl[2] = cpu_to_le32(chunk);
 
-               bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);
-
-               size -= ctx->cl_dev.bufsize;
-               bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
+               remaining -= chunk;
+               bdl[3] = (remaining > 0) ? 0 : cpu_to_le32(0x01);
 
                bdl += 4;
+               offset += chunk;
                ctx->cl_dev.frags++;
        }
 }
@@ -338,15 +343,15 @@ int skl_cldma_prepare(struct sst_dsp *ctx)
        ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;
 
        /* Allocate buffer*/
-       ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
-                       &ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
+       ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, ctx->dev, ctx->cl_dev.bufsize,
+                                 &ctx->cl_dev.dmab_data);
        if (ret < 0) {
                dev_err(ctx->dev, "Alloc buffer for base fw failed: %x\n", ret);
                return ret;
        }
+
        /* Setup Code loader BDL */
-       ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
-                       &ctx->cl_dev.dmab_bdl, PAGE_SIZE);
+       ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, ctx->dev, BDL_SIZE, &ctx->cl_dev.dmab_bdl);
        if (ret < 0) {
                dev_err(ctx->dev, "Alloc buffer for blde failed: %x\n", ret);
                ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);