// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
					  gfp_t default_gfp)
{
	if (!dmab->dev.dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dmab->dev.dev;
}
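
/*
 * An illustrative sketch (not part of the original code): for the
 * CONTINUOUS and VMALLOC buffer types the "device pointer" is really the
 * gfp mask in disguise.  Assuming the snd_dma_continuous_data() cast
 * helper from <sound/memalloc.h>, a caller encodes it like:
 *
 *	snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
 *			    snd_dma_continuous_data(GFP_KERNEL),
 *			    size, dmab);
 *
 * snd_mem_get_gfp_flags() above merely undoes that cast, falling back to
 * default_gfp when no pointer was passed.
 */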

static int __snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return -EINVAL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	int err;

	if (WARN_ON(!size) || WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = NULL;
	err = __snd_dma_alloc_pages(dmab, size);
	if (err < 0)
		return err;
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages);
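
/*
 * An illustrative usage sketch (not part of the original code): a driver
 * typically allocates a device-visible buffer once and releases it on
 * shutdown; "chip->dev" is a hypothetical device pointer:
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, chip->dev,
 *				  8192, &buf);
 *	if (err < 0)
 *		return err;
 *	...
 *	snd_dma_free_pages(&buf);
 */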

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * the bytes field of @dmab.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
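
/*
 * An illustrative sketch (not part of the original code): with the
 * fallback variant the caller must consult dmab->bytes to learn how much
 * was actually granted, since the request may have been shrunk:
 *
 *	if (!snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
 *					  1024 * 1024, &buf))
 *		dev_dbg(dev, "got %zu bytes\n", buf.bytes);
 */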

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
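
/*
 * An illustrative sketch (not part of the original code): an mmap handler
 * can delegate entirely to this helper; "my_buf" stands for a hypothetical,
 * previously allocated snd_dma_buffer:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return snd_dma_buffer_mmap(&my_buf, vma);
 *	}
 */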

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
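
/*
 * An illustrative sketch (not part of the original code): the helpers
 * above are typically combined to walk a possibly scattered buffer in
 * hardware-contiguous chunks; setup_descriptor() is a hypothetical
 * driver-specific routine:
 *
 *	unsigned int ofs = 0, rest = dmab->bytes, chunk;
 *
 *	while (rest > 0) {
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, rest);
 *		setup_descriptor(snd_sgbuf_get_addr(dmab, ofs), chunk);
 *		ofs += chunk;
 *		rest -= chunk;
 *	}
 */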

/*
 * Continuous pages allocator
 */
static int snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);

	dmab->area = alloc_pages_exact(size, gfp);
	return 0;
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	free_pages_exact(dmab->area, dmab->bytes);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       page_to_pfn(virt_to_page(dmab->area)),
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static int snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

	dmab->area = __vmalloc(size, gfp);
	return 0;
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return page_to_phys(vmalloc_to_page(dmab->area + offset)) +
		offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	ofs %= PAGE_SIZE;
	size += ofs;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;
	return size - ofs;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
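
/*
 * A worked example (illustrative, not part of the original code): with
 * 4 KiB pages, a request at ofs = 4000 for size = 300 gives
 * ofs % PAGE_SIZE = 4000 and size = 4300, clamped to 4096, so the chunk
 * is 4096 - 4000 = 96 bytes.  A vmalloc chunk never crosses a page
 * boundary, since adjacent pages need not be physically contiguous.
 */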

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static int snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
						      PAGE_SIZE);
		if (dmab->area)
			return 0;
	}

	/* Internal memory might be limited in size and lack enough space,
	 * so if the allocation fails, fall back to a normal device buffer.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */
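
/*
 * An illustrative sketch (assumed, not part of the original code): the
 * IRAM gen_pool is looked up through an "iram" phandle property on the
 * device node, i.e. a device tree fragment along the lines of:
 *
 *	audio@2028000 {
 *		...
 *		iram = <&ocram>;
 *	};
 *
 * where &ocram points at a mmio-sram node; the names here are hypothetical.
 */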

/*
 * Coherent device pages allocator
 */
static int snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp_flags;

	gfp_flags = GFP_KERNEL
		| __GFP_COMP	/* compound page lets parts be mapped */
		| __GFP_NORETRY /* don't trigger OOM-killer */
		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
	dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
					gfp_flags);
#ifdef CONFIG_X86
	if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wc((unsigned long)dmab->area,
			      PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
	return 0;
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wb((unsigned long)dmab->area,
			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
#endif
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};
#endif /* CONFIG_HAS_DMA */
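
/*
 * An illustrative sketch (not part of the original code): as the code
 * above shows, SNDRV_DMA_TYPE_DEV_UC behaves like SNDRV_DMA_TYPE_DEV but
 * additionally remaps the pages as write-combined on x86, for buffers
 * that should not be CPU-cached:
 *
 *	snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_UC, dev, size, &buf);
 */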

/*
 * Entry points
 */
static const struct snd_malloc_ops *dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_UC] = &snd_dma_dev_ops,
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
	[SNDRV_DMA_TYPE_DEV_UC_SG] = &snd_dma_sg_ops,
#endif /* CONFIG_SND_DMA_SGBUF */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
		return NULL;
	return dma_ops[dmab->dev.type];
}
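
/*
 * An illustrative sketch (not part of the original code): supporting a
 * new buffer type means providing another ops table and wiring it into
 * dma_ops[] above, e.g.
 *
 *	static const struct snd_malloc_ops snd_dma_foo_ops = {
 *		.alloc = snd_dma_foo_alloc,
 *		.free = snd_dma_foo_free,
 *	};
 *
 * plus a [SNDRV_DMA_TYPE_FOO] = &snd_dma_foo_ops entry; all callers then
 * dispatch through snd_dma_get_ops() without further changes.  The "foo"
 * names are hypothetical.
 */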