This patch adds support for the allocation of non-contiguous DMA pages
in the common memalloc helper. It's another SG-buffer type, but unlike
the existing one, it is directional and requires the explicit sync /
invalidation of dirty pages on non-coherent architectures.
For this enhancement, the following points are changed:
- snd_dma_device stores the DMA direction.
- snd_dma_device stores a need_sync flag indicating whether the
explicit sync is required or not.
- A new variant of helper functions, snd_dma_alloc_dir_pages() and
*_all(), is introduced; the old snd_dma_alloc_pages() and *_all() are
kept as wrappers that pass DMA_BIDIRECTIONAL.
- A new helper snd_dma_buffer_sync() is introduced; this gets called
in the appropriate places.
- A new allocation type, SNDRV_DMA_TYPE_NONCONTIG, is introduced.
When the driver allocates pages with this new type, it may need to set
the SNDRV_PCM_INFO_EXPLICIT_SYNC flag in the PCM hardware.info for
taking full control of the PCM applptr and hwptr changes (which implies
disabling the mmap of control/status data). When the buffer allocation
is managed by snd_pcm_set_managed_buffer(), this flag is set
automatically depending on the result of dma_need_sync() internally.
Otherwise, if the buffer is managed manually, the driver has to set the
flag explicitly, too.
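As an illustration, a driver managing its buffer manually could do
roughly the following (a hypothetical sketch, not part of this patch;
the chip, substream and size variables are assumed to come from the
driver):

    struct snd_dma_buffer *dmab = &substream->dma_buffer;
    int err;

    /* playback buffer: data flows from the CPU to the device */
    err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_NONCONTIG,
                                  chip->dev, DMA_TO_DEVICE,
                                  size, dmab);
    if (err < 0)
            return err;
    /* announce the explicit sync requirement to user-space */
    if (dmab->dev.need_sync)
            substream->runtime->hw.info |= SNDRV_PCM_INFO_EXPLICIT_SYNC;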
The explicit sync between CPU and device for non-coherent memory is
performed at the points before and after the read/write transfer as
well as at the applptr/hwptr syncptr ioctl. In the case of mmap mode,
user-space is supposed to call the syncptr ioctl with the hwptr flag
first, to update and fetch the status; this corresponds to the CPU
sync. Then user-space advances the applptr via the syncptr ioctl
again with the applptr flag, and this corresponds to the device sync
with flushing.
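In terms of the raw ioctl interface, that mmap-mode flow would look
roughly like the snippet below from user-space (a sketch only; fd is
an already set-up PCM file descriptor and new_appl_ptr a placeholder
for the advanced pointer, both assumptions for illustration):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sound/asound.h>

    struct snd_pcm_sync_ptr sp;

    /* 1) update and fetch status/hwptr first: the sync for CPU access */
    memset(&sp, 0, sizeof(sp));
    sp.flags = SNDRV_PCM_SYNC_PTR_HWSYNC | SNDRV_PCM_SYNC_PTR_APPL;
    ioctl(fd, SNDRV_PCM_IOCTL_SYNC_PTR, &sp);

    /* ... read or write samples in the mmapped data area ... */

    /* 2) push the advanced applptr: the sync toward the device;
     * per asound.h, a cleared SNDRV_PCM_SYNC_PTR_APPL bit means that
     * appl_ptr is passed from user-space to the kernel
     */
    sp.flags = 0;
    sp.c.control.appl_ptr = new_appl_ptr;
    ioctl(fd, SNDRV_PCM_IOCTL_SYNC_PTR, &sp);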
Other than the DMA direction and the explicit sync, the usage of this
new buffer type is almost equivalent to the existing
SNDRV_DMA_TYPE_DEV_SG; you can get the page and the address via
snd_sgbuf_get_page() and snd_sgbuf_get_addr(), and calculate the
contiguous page run via snd_sgbuf_get_chunk_size().
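For instance, a driver setting up a (hypothetical) hardware SG table
could walk the buffer in contiguous chunks like below (a sketch; dmab
and hw_append_sg_entry() are assumptions for illustration):

    unsigned int ofs = 0, rest = dmab->bytes;

    while (rest > 0) {
            /* longest contiguous run starting at ofs, capped to rest */
            unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs, rest);

            /* program one hardware SG entry for this chunk */
            hw_append_sg_entry(snd_sgbuf_get_addr(dmab, ofs), chunk);
            ofs += chunk;
            rest -= chunk;
    }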
For the SG-page handling, the non-contig type shares the same ops with
the vmalloc handler. As we always vmap the SG pages at allocation
time, the actual page and address can be deduced from the vmapped
address easily without iterating the SG-list.
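For reference, the shared get_page helper of the vmalloc type boils
down to a vmalloc_to_page() lookup on the vmapped address, roughly:

    static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
                                                 size_t offset)
    {
            /* dmab->area holds the vmap'ed address of the pages */
            return vmalloc_to_page(dmab->area + offset);
    }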
Link: https://lore.kernel.org/r/20211017074859.24112-2-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
#ifndef __SOUND_MEMALLOC_H
#define __SOUND_MEMALLOC_H
+#include <linux/dma-direction.h>
#include <asm/page.h>
struct device;
struct vm_area_struct;
+struct sg_table;
/*
* buffer device info
*/
struct snd_dma_device {
int type; /* SNDRV_DMA_TYPE_XXX */
+ enum dma_data_direction dir; /* DMA direction */
+ bool need_sync; /* explicit sync needed? */
struct device *dev; /* generic device */
};
#define SNDRV_DMA_TYPE_DEV_IRAM SNDRV_DMA_TYPE_DEV
#endif
#define SNDRV_DMA_TYPE_VMALLOC 7 /* vmalloc'ed buffer */
+#define SNDRV_DMA_TYPE_NONCONTIG 8 /* non-contiguous buffer */
/*
* info for buffer allocation
}
/* allocate/release a buffer */
-int snd_dma_alloc_pages(int type, struct device *dev, size_t size,
- struct snd_dma_buffer *dmab);
+int snd_dma_alloc_dir_pages(int type, struct device *dev,
+ enum dma_data_direction dir, size_t size,
+ struct snd_dma_buffer *dmab);
+
+static inline int snd_dma_alloc_pages(int type, struct device *dev,
+ size_t size, struct snd_dma_buffer *dmab)
+{
+ return snd_dma_alloc_dir_pages(type, dev, DMA_BIDIRECTIONAL, size, dmab);
+}
+
int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
struct snd_dma_buffer *dmab);
void snd_dma_free_pages(struct snd_dma_buffer *dmab);
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
struct vm_area_struct *area);
+enum snd_dma_sync_mode { SNDRV_DMA_SYNC_CPU, SNDRV_DMA_SYNC_DEVICE };
+#ifdef CONFIG_HAS_DMA
+void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode);
+#else
+static inline void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode) {}
+#endif
+
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset);
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset);
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
unsigned int ofs, unsigned int size);
/* device-managed memory allocator */
-struct snd_dma_buffer *snd_devm_alloc_pages(struct device *dev, int type,
- size_t size);
+struct snd_dma_buffer *snd_devm_alloc_dir_pages(struct device *dev, int type,
+ enum dma_data_direction dir,
+ size_t size);
+
+static inline struct snd_dma_buffer *
+snd_devm_alloc_pages(struct device *dev, int type, size_t size)
+{
+ return snd_devm_alloc_dir_pages(dev, type, DMA_BIDIRECTIONAL, size);
+}
+
+static inline struct sg_table *
+snd_dma_noncontig_sg_table(struct snd_dma_buffer *dmab)
+{
+ return dmab->private_data;
+}
#endif /* __SOUND_MEMALLOC_H */
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
+#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
}
/**
- * snd_dma_alloc_pages - allocate the buffer area according to the given type
+ * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
+ * type and direction
* @type: the DMA buffer type
* @device: the device pointer
+ * @dir: DMA direction
* @size: the buffer size to allocate
* @dmab: buffer allocation record to store the allocated data
*
* Return: Zero if the buffer with the given size is allocated successfully,
* otherwise a negative value on error.
*/
-int snd_dma_alloc_pages(int type, struct device *device, size_t size,
- struct snd_dma_buffer *dmab)
+int snd_dma_alloc_dir_pages(int type, struct device *device,
+ enum dma_data_direction dir, size_t size,
+ struct snd_dma_buffer *dmab)
{
if (WARN_ON(!size))
return -ENXIO;
size = PAGE_ALIGN(size);
dmab->dev.type = type;
dmab->dev.dev = device;
+ dmab->dev.dir = dir;
dmab->bytes = 0;
dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
return 0;
}
-EXPORT_SYMBOL(snd_dma_alloc_pages);
+EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
/**
* snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
}
/**
- * snd_devm_alloc_pages - allocate the buffer and manage with devres
+ * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
* @dev: the device pointer
* @type: the DMA buffer type
+ * @dir: DMA direction
* @size: the buffer size to allocate
*
* Allocate buffer pages depending on the given type and manage using devres.
* The function returns the snd_dma_buffer object at success, or NULL if failed.
*/
struct snd_dma_buffer *
-snd_devm_alloc_pages(struct device *dev, int type, size_t size)
+snd_devm_alloc_dir_pages(struct device *dev, int type,
+ enum dma_data_direction dir, size_t size)
{
struct snd_dma_buffer *dmab;
int err;
	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
return NULL;
- err = snd_dma_alloc_pages(type, dev, size, dmab);
+ err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
if (err < 0) {
devres_free(dmab);
		return NULL;
	}
devres_add(dev, dmab);
return dmab;
}
-EXPORT_SYMBOL_GPL(snd_devm_alloc_pages);
+EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
/**
* snd_dma_buffer_mmap - perform mmap of the given DMA buffer
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
+#ifdef CONFIG_HAS_DMA
+/**
+ * snd_dma_buffer_sync - sync DMA buffer between CPU and device
+ * @dmab: buffer allocation information
+ * @mode: sync mode
+ */
+void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode)
+{
+ const struct snd_malloc_ops *ops;
+
+ if (!dmab || !dmab->dev.need_sync)
+ return;
+ ops = snd_dma_get_ops(dmab);
+ if (ops && ops->sync)
+ ops->sync(dmab, mode);
+}
+EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
+#endif /* CONFIG_HAS_DMA */
+
/**
* snd_sgbuf_get_addr - return the physical address at the corresponding offset
* @dmab: buffer allocation information
.mmap = snd_dma_wc_mmap,
};
#endif /* CONFIG_X86 */
+
+/*
+ * Non-contiguous pages allocator
+ */
+static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ struct sg_table *sgt;
+ void *p;
+
+ sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
+ DEFAULT_GFP, 0);
+ if (!sgt)
+ return NULL;
+ dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
+ p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
+ if (p)
+ dmab->private_data = sgt;
+ else
+ dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
+ return p;
+}
+
+static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
+{
+ dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
+ dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
+ dmab->dev.dir);
+}
+
+static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ return dma_mmap_noncontiguous(dmab->dev.dev, area,
+ dmab->bytes, dmab->private_data);
+}
+
+static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
+ enum snd_dma_sync_mode mode)
+{
+ if (mode == SNDRV_DMA_SYNC_CPU) {
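+		/* a TO_DEVICE-only buffer is never written by the device */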
+ if (dmab->dev.dir == DMA_TO_DEVICE)
+ return;
+ dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
+ dmab->dev.dir);
+ invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
+ } else {
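+		/* a FROM_DEVICE-only buffer is never written by the CPU */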
+ if (dmab->dev.dir == DMA_FROM_DEVICE)
+ return;
+ flush_kernel_vmap_range(dmab->area, dmab->bytes);
+ dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
+ dmab->dev.dir);
+ }
+}
+
+static const struct snd_malloc_ops snd_dma_noncontig_ops = {
+ .alloc = snd_dma_noncontig_alloc,
+ .free = snd_dma_noncontig_free,
+ .mmap = snd_dma_noncontig_mmap,
+ .sync = snd_dma_noncontig_sync,
+ /* re-use vmalloc helpers for get_* ops */
+ .get_addr = snd_dma_vmalloc_get_addr,
+ .get_page = snd_dma_vmalloc_get_page,
+ .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
+
#endif /* CONFIG_HAS_DMA */
/*
#ifdef CONFIG_HAS_DMA
[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
+ [SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
#ifdef CONFIG_GENERIC_ALLOCATOR
[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
unsigned int ofs, unsigned int size);
int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
+ void (*sync)(struct snd_dma_buffer *dmab, enum snd_dma_sync_mode mode);
};
#ifdef CONFIG_SND_DMA_SGBUF
frames -= transfer;
ofs = 0;
}
+ snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
}
#ifdef CONFIG_SND_DEBUG
goto _end_unlock;
}
snd_pcm_stream_unlock_irq(substream);
+ if (!is_playback)
+ snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
err = writer(substream, appl_ofs, data, offset, frames,
transfer);
+ if (is_playback)
+ snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
snd_pcm_stream_lock_irq(substream);
if (err < 0)
goto _end_unlock;
for ((subs) = (pcm)->streams[str].substream; (subs); \
(subs) = (subs)->next)
+static inline void snd_pcm_dma_buffer_sync(struct snd_pcm_substream *substream,
+ enum snd_dma_sync_mode mode)
+{
+ if (substream->runtime->info & SNDRV_PCM_INFO_EXPLICIT_SYNC)
+ snd_dma_buffer_sync(snd_pcm_get_dma_buf(substream), mode);
+}
+
#endif /* __SOUND_CORE_PCM_LOCAL_H */
MODULE_PARM_DESC(max_alloc_per_card, "Max total allocation bytes per card.");
static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
- size_t size, struct snd_dma_buffer *dmab)
+ int str, size_t size, struct snd_dma_buffer *dmab)
{
+ enum dma_data_direction dir;
int err;
if (max_alloc_per_card &&
card->total_pcm_alloc_bytes + size > max_alloc_per_card)
return -ENOMEM;
- err = snd_dma_alloc_pages(type, dev, size, dmab);
+ if (str == SNDRV_PCM_STREAM_PLAYBACK)
+ dir = DMA_TO_DEVICE;
+ else
+ dir = DMA_FROM_DEVICE;
+ err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
if (!err) {
mutex_lock(&card->memory_mutex);
card->total_pcm_alloc_bytes += dmab->bytes;
do {
err = do_alloc_pages(card, dmab->dev.type, dmab->dev.dev,
- size, dmab);
+ substream->stream, size, dmab);
if (err != -ENOMEM)
return err;
if (no_fallback)
if (do_alloc_pages(card,
substream->dma_buffer.dev.type,
substream->dma_buffer.dev.dev,
+ substream->stream,
size, &new_dmab) < 0) {
buffer->error = -ENOMEM;
pr_debug("ALSA pcmC%dD%d%c,%d:%s: cannot preallocate for size %zu\n",
if (do_alloc_pages(card,
substream->dma_buffer.dev.type,
substream->dma_buffer.dev.dev,
+ substream->stream,
size, dmab) < 0) {
kfree(dmab);
pr_debug("ALSA pcmC%dD%d%c,%d:%s: cannot preallocate for size %zu\n",
goto error;
}
+ /* automatically set EXPLICIT_SYNC flag in the managed mode whenever
+ * the DMA buffer requires it
+ */
+ if (substream->managed_buffer_alloc &&
+ substream->dma_buffer.dev.need_sync)
+ substream->runtime->hw.info |= SNDRV_PCM_INFO_EXPLICIT_SYNC;
+
*rsubstream = substream;
return 0;
ret = rewind_appl_ptr(substream, frames,
snd_pcm_hw_avail(substream));
snd_pcm_stream_unlock_irq(substream);
+ if (ret >= 0)
+ snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
return ret;
}
ret = forward_appl_ptr(substream, frames,
snd_pcm_avail(substream));
snd_pcm_stream_unlock_irq(substream);
+ if (ret >= 0)
+ snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
return ret;
}
if (delay && !err)
*delay = snd_pcm_calc_delay(substream);
snd_pcm_stream_unlock_irq(substream);
+ snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
+
return err;
}
sync_ptr.s.status.suspended_state = status->suspended_state;
sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
snd_pcm_stream_unlock_irq(substream);
+ if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL))
+ snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
return -EFAULT;
return 0;
sstatus.suspended_state = status->suspended_state;
sstatus.audio_tstamp = status->audio_tstamp;
snd_pcm_stream_unlock_irq(substream);
+ if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
+ snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
if (put_user(sstatus.state, &src->s.status.state) ||
put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
put_user(sstatus.tstamp.tv_sec, &src->s.status.tstamp_sec) ||