// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
					  gfp_t default_gfp)
{
	if (!dmab->dev.dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dmab->dev.dev;
}
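
/*
 * Illustrative sketch (not part of the original file): for the CONTINUOUS
 * and VMALLOC buffer types, the caller encodes the gfp flags in the fake
 * device pointer.  Historically <sound/memalloc.h> provided the
 * snd_dma_continuous_data() macro for this; assuming that helper, an
 * allocation would look like:
 *
 *	struct snd_dma_buffer buf;
 *
 *	snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
 *			    snd_dma_continuous_data(GFP_KERNEL),
 *			    4096, &buf);
 */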

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages);
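
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * allocates a device-coherent buffer and releases it again with
 * snd_dma_free_pages() ("mydev" is a hypothetical struct device pointer):
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, mydev, 64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	...			use buf.area (CPU) and buf.addr (DMA)
 *	snd_dma_free_pages(&buf);
 */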

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
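
/*
 * Illustrative sketch (not part of the original file): a driver that can
 * cope with a smaller ring buffer may use the fallback variant and read
 * the size actually granted from the bytes field:
 *
 *	struct snd_dma_buffer buf;
 *
 *	if (!snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, mydev,
 *					  256 * 1024, &buf))
 *		dev_info(mydev, "allocated %zu bytes\n", buf.bytes);
 */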

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically at the device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC type.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure.
 */
struct snd_dma_buffer *
snd_devm_alloc_pages(struct device *dev, int type, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_pages(type, dev, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_pages);
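
/*
 * Illustrative sketch (not part of the original file): with the devres-
 * managed variant no explicit free is needed; the buffer is released
 * automatically when the device goes away:
 *
 *	struct snd_dma_buffer *buf;
 *
 *	buf = snd_devm_alloc_pages(dev, SNDRV_DMA_TYPE_DEV, 64 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 */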

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
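
/*
 * Illustrative sketch (not part of the original file): a PCM driver would
 * typically call this from its mmap callback (my_pcm_mmap is a made-up
 * name; snd_pcm_get_dma_buf() is the PCM helper returning the substream's
 * snd_dma_buffer):
 *
 *	static int my_pcm_mmap(struct snd_pcm_substream *substream,
 *			       struct vm_area_struct *area)
 *	{
 *		return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream),
 *					   area);
 *	}
 */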

/**
 * snd_sgbuf_get_addr - return the DMA address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
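
/*
 * Illustrative sketch (not part of the original file): together with
 * snd_sgbuf_get_addr(), this allows walking a possibly non-contiguous
 * buffer in physically continuous chunks (program_hw() is a hypothetical
 * driver helper):
 *
 *	unsigned int ofs = 0, chunk;
 *
 *	while (ofs < dmab->bytes) {
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs,
 *						 dmab->bytes - ofs);
 *		program_hw(snd_sgbuf_get_addr(dmab, ofs), chunk);
 *		ofs += chunk;
 *	}
 */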

/*
 * Continuous pages allocator
 */
static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
	void *p = alloc_pages_exact(size, gfp);

	if (p)
		dmab->addr = page_to_phys(virt_to_page(p));
	return p;
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	free_pages_exact(dmab->area, dmab->bytes);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

	return __vmalloc(size, gfp);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* Internal memory might be of limited size and can run out of space,
	 * so if the allocation fails, fall back to the normal page allocation.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_COMP |    /* compound page lets parts be mapped */ \
	 __GFP_NORETRY | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
#ifdef CONFIG_X86
	if (p && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
		set_memory_wc((unsigned long)p, PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
	return p;
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
		set_memory_wb((unsigned long)dmab->area,
			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
#endif
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
#endif
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
#ifdef CONFIG_X86
/* On x86, share the same ops as the standard dev ops */
#define snd_dma_wc_ops	snd_dma_dev_ops
#else /* CONFIG_X86 */
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};
#endif /* CONFIG_X86 */
#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
#endif /* CONFIG_SND_DMA_SGBUF */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
		return NULL;
	return dma_ops[dmab->dev.type];
}