ALSA: memalloc: Support for non-coherent page allocation
author Takashi Iwai <tiwai@suse.de>
Sun, 17 Oct 2021 07:48:58 +0000 (09:48 +0200)
committer Takashi Iwai <tiwai@suse.de>
Mon, 18 Oct 2021 11:32:12 +0000 (13:32 +0200)
Following the addition of non-contiguous page support, this patch adds
a new contiguous non-coherent page allocation type to the standard
memalloc helper.  Like the previous non-contig type, the non-coherent
type is directional and requires explicit sync.  Hence a driver using
this type of buffer may also need to set the
SNDRV_PCM_INFO_EXPLICIT_SYNC flag in its PCM hardware.info field,
unless the buffer is set up in managed mode.

Link: https://lore.kernel.org/r/20211017074859.24112-3-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
include/sound/memalloc.h
sound/core/memalloc.c
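
For illustration (a sketch, not part of the patch), here is how a
driver might use the new type.  snd_dma_alloc_dir_pages(),
snd_dma_free_pages() and snd_pcm_set_managed_buffer_all() are the
existing memalloc/PCM entry points; the device pointer and the buffer
sizes are placeholders.

	#include <linux/dma-mapping.h>
	#include <sound/memalloc.h>
	#include <sound/pcm.h>

	/* One-shot allocation: the DMA direction is fixed at
	 * allocation time, hence the _dir_ variant of the allocator.
	 * Release later with snd_dma_free_pages(dmab).
	 */
	static int alloc_playback_buffer(struct device *dev,
					 struct snd_dma_buffer *dmab)
	{
		return snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_NONCOHERENT,
					       dev, DMA_TO_DEVICE,
					       64 * 1024, dmab);
	}

	/* Or preallocate for all substreams in managed mode, where,
	 * per the commit message, the driver does not need to set the
	 * SNDRV_PCM_INFO_EXPLICIT_SYNC flag itself.
	 */
	static void preallocate(struct snd_pcm *pcm, struct device *dev)
	{
		snd_pcm_set_managed_buffer_all(pcm,
					       SNDRV_DMA_TYPE_NONCOHERENT,
					       dev, 64 * 1024, 128 * 1024);
	}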

diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 1457eba..0bdaa67 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -50,6 +50,7 @@ struct snd_dma_device {
 #endif
 #define SNDRV_DMA_TYPE_VMALLOC         7       /* vmalloc'ed buffer */
 #define SNDRV_DMA_TYPE_NONCONTIG       8       /* non-coherent SG buffer */
+#define SNDRV_DMA_TYPE_NONCOHERENT     9       /* non-coherent buffer */
 
 /*
  * info for buffer allocation
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 11f9a68..99681e6 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -560,6 +560,52 @@ static const struct snd_malloc_ops snd_dma_noncontig_ops = {
        .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
 };
 
+/*
+ * Non-coherent pages allocator
+ */
+static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+       dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
+       return dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
+                                    dmab->dev.dir, DEFAULT_GFP);
+}
+
+static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
+{
+       dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
+                            dmab->addr, dmab->dev.dir);
+}
+
+static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
+                                   struct vm_area_struct *area)
+{
+       area->vm_page_prot = vm_get_page_prot(area->vm_flags);
+       return dma_mmap_pages(dmab->dev.dev, area,
+                             area->vm_end - area->vm_start,
+                             virt_to_page(dmab->area));
+}
+
+static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
+                                    enum snd_dma_sync_mode mode)
+{
+       if (mode == SNDRV_DMA_SYNC_CPU) {
+               if (dmab->dev.dir != DMA_TO_DEVICE)
+                       dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
+                                               dmab->bytes, dmab->dev.dir);
+       } else {
+               if (dmab->dev.dir != DMA_FROM_DEVICE)
+                       dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
+                                                  dmab->bytes, dmab->dev.dir);
+       }
+}
+
+static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
+       .alloc = snd_dma_noncoherent_alloc,
+       .free = snd_dma_noncoherent_free,
+       .mmap = snd_dma_noncoherent_mmap,
+       .sync = snd_dma_noncoherent_sync,
+};
+
 #endif /* CONFIG_HAS_DMA */
 
 /*
@@ -572,6 +618,7 @@ static const struct snd_malloc_ops *dma_ops[] = {
        [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
        [SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
        [SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
+       [SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
 #ifdef CONFIG_GENERIC_ALLOCATOR
        [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
 #endif /* CONFIG_GENERIC_ALLOCATOR */