// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatter-Gather buffer
 *
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>
#include "memalloc_local.h"
struct snd_sg_page {
	void *buf;
	dma_addr_t addr;
};

struct snd_sg_buf {
	int size;	/* allocated byte size */
	int pages;	/* allocated pages */
	int tblsize;	/* allocated table size */
	struct snd_sg_page *table;	/* address table */
	struct page **page_table;	/* page table (for vmap/vunmap) */
	struct device *dev;
};

/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)
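
/*
 * Chunk encoding: each physically continuous chunk is recorded page by
 * page in sgbuf->table[].  The entry for the first page of a chunk has
 * the chunk's page count OR-ed into the low (sub-PAGE_SIZE) bits of its
 * addr; the entries for the remaining pages keep those bits zero.
 * snd_dma_sg_free() below relies on this marker to free each chunk
 * exactly once.
 */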
static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return;

	vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
	tmpb.dev.dev = sgbuf->dev;
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* continuous pages */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;
}
#define MAX_ALLOC_PAGES		32
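
/*
 * Build the SG buffer: allocate physically continuous chunks of at most
 * MAX_ALLOC_PAGES pages each, shrinking the chunk size whenever an
 * allocation comes back smaller than requested, then vmap() all of the
 * collected pages into a single virtually continuous kernel mapping in
 * dmab->area.
 */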
static int snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;
	int type = SNDRV_DMA_TYPE_DEV;
	pgprot_t prot = PAGE_KERNEL;

	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return -ENOMEM;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
		type = SNDRV_DMA_TYPE_DEV_UC;
#ifdef pgprot_noncached
		prot = pgprot_noncached(PAGE_KERNEL);
#endif
	}
	sgbuf->dev = dmab->dev.dev;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark head */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
	if (!dmab->area)
		goto _failed;
	return 0;

 _failed:
	snd_dma_sg_free(dmab); /* free the table */
	return -ENOMEM;
}
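
/*
 * Translate a byte offset in the vmap()'ed buffer to the DMA address of
 * the underlying page; the chunk-head marker bits are masked off before
 * the in-page offset is added back.
 */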
static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
				      size_t offset)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	dma_addr_t addr;

	addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
	addr &= ~((dma_addr_t)PAGE_SIZE - 1);
	return addr + offset % PAGE_SIZE;
}
static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
					size_t offset)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	unsigned int idx = offset >> PAGE_SHIFT;

	if (idx >= (unsigned int)sgbuf->pages)
		return NULL;
	return sgbuf->page_table[idx];
}
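
/*
 * Return how many bytes from ofs onward lie on physically continuous
 * pages, capped at the requested size; callers can use this to size a
 * single DMA transfer.
 */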
static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
					      unsigned int ofs,
					      unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}
const struct snd_malloc_ops snd_dma_sg_ops = {
	.alloc = snd_dma_sg_alloc,
	.free = snd_dma_sg_free,
	.get_addr = snd_dma_sg_get_addr,
	.get_page = snd_dma_sg_get_page,
	.get_chunk_size = snd_dma_sg_get_chunk_size,
};
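
/*
 * Illustrative usage (a sketch, not part of this file): drivers do not
 * call snd_dma_sg_ops directly; they go through the generic memalloc
 * API, which dispatches to these ops for the SG buffer types.  The
 * "card->dev" pointer and the 64 KiB size below are hypothetical:
 *
 *	struct snd_dma_buffer dmab;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, card->dev,
 *				  64 * 1024, &dmab);
 *	if (err < 0)
 *		return err;
 *	... dmab.area now points to the vmap()'ed, virtually
 *	... continuous buffer
 *	snd_dma_free_pages(&dmab);
 */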