sound/core/sgbuf.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatter-Gather buffer
 *
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>
#include "memalloc_local.h"

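/* one page of the buffer: its kernel virtual address and DMA address */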
struct snd_sg_page {
        void *buf;
        dma_addr_t addr;
};

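/* the SG buffer instance, kept in dmab->private_data */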
struct snd_sg_buf {
        int size;       /* allocated byte size */
        int pages;      /* allocated pages */
        int tblsize;    /* allocated table size */
        struct snd_sg_page *table;      /* address table */
        struct page **page_table;       /* page table (for vmap/vunmap) */
        struct device *dev;
};

/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN         32
#define sgbuf_align_table(tbl)  ALIGN((tbl), SGBUF_TBL_ALIGN)

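/*
 * Free the SG buffer: unmap the vmap'ed area, then release each allocated
 * chunk.  Chunk heads are recognized by the page count stored in the low
 * bits of the entry's DMA address.
 */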
static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
{
        struct snd_sg_buf *sgbuf = dmab->private_data;
        struct snd_dma_buffer tmpb;
        int i;

        if (!sgbuf)
                return;

        vunmap(dmab->area);
        dmab->area = NULL;

        tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
                tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
        tmpb.dev.dev = sgbuf->dev;
        for (i = 0; i < sgbuf->pages; i++) {
                if (!(sgbuf->table[i].addr & ~PAGE_MASK))
                        continue; /* not a chunk head, skip */
                tmpb.area = sgbuf->table[i].buf;
                tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
                /* the low bits of addr hold the chunk size in pages */
                tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
                snd_dma_free_pages(&tmpb);
        }

        kfree(sgbuf->table);
        kfree(sgbuf->page_table);
        kfree(sgbuf);
        dmab->private_data = NULL;
}

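/* upper limit (in pages) for a single contiguous allocation attempt */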
#define MAX_ALLOC_PAGES         32

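/*
 * Allocate the SG buffer: grab physically contiguous chunks (at most
 * MAX_ALLOC_PAGES pages each, shrinking on failure), record each page in
 * the tables, then vmap all pages into one virtually contiguous area.
 */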
static int snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        struct snd_sg_buf *sgbuf;
        unsigned int i, pages, chunk, maxpages;
        struct snd_dma_buffer tmpb;
        struct snd_sg_page *table;
        struct page **pgtable;
        int type = SNDRV_DMA_TYPE_DEV;
        pgprot_t prot = PAGE_KERNEL;

        dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
        if (!sgbuf)
                return -ENOMEM;
        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
                type = SNDRV_DMA_TYPE_DEV_UC;
#ifdef pgprot_noncached
                prot = pgprot_noncached(PAGE_KERNEL);
#endif
        }
        sgbuf->dev = dmab->dev.dev;
        pages = snd_sgbuf_aligned_pages(size);
        sgbuf->tblsize = sgbuf_align_table(pages);
        table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
        if (!table)
                goto _failed;
        sgbuf->table = table;
        pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
        if (!pgtable)
                goto _failed;
        sgbuf->page_table = pgtable;

        /* allocate pages */
        maxpages = MAX_ALLOC_PAGES;
        while (pages > 0) {
                chunk = pages;
                /* don't be too eager to take a huge chunk */
                if (chunk > maxpages)
                        chunk = maxpages;
                chunk <<= PAGE_SHIFT;
                if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
                                                 chunk, &tmpb) < 0) {
                        if (!sgbuf->pages)
                                goto _failed;
                        /* partial allocation: settle for what we got so far */
                        size = sgbuf->pages * PAGE_SIZE;
                        break;
                }
                chunk = tmpb.bytes >> PAGE_SHIFT;
                for (i = 0; i < chunk; i++) {
                        table->buf = tmpb.area;
                        table->addr = tmpb.addr;
                        if (!i)
                                table->addr |= chunk; /* mark head */
                        table++;
                        *pgtable++ = virt_to_page(tmpb.area);
                        tmpb.area += PAGE_SIZE;
                        tmpb.addr += PAGE_SIZE;
                }
                sgbuf->pages += chunk;
                pages -= chunk;
                if (chunk < maxpages)
                        maxpages = chunk;
        }

        sgbuf->size = size;
        dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
        if (!dmab->area)
                goto _failed;
        return 0;

 _failed:
        snd_dma_sg_free(dmab); /* free the table */
        return -ENOMEM;
}

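/* return the DMA address at the given byte offset within the buffer */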
static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
                                      size_t offset)
{
        struct snd_sg_buf *sgbuf = dmab->private_data;
        dma_addr_t addr;

        addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
        addr &= ~((dma_addr_t)PAGE_SIZE - 1);
        return addr + offset % PAGE_SIZE;
}

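/* return the page corresponding to the given byte offset */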
static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
                                        size_t offset)
{
        struct snd_sg_buf *sgbuf = dmab->private_data;
        unsigned int idx = offset >> PAGE_SHIFT;

        if (idx >= (unsigned int)sgbuf->pages)
                return NULL;
        return sgbuf->page_table[idx];
}

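/*
 * Return how many bytes starting at ofs lie on physically contiguous pages,
 * capped at size, i.e. the largest span usable as a single DMA region.
 */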
static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
                                              unsigned int ofs,
                                              unsigned int size)
{
        struct snd_sg_buf *sg = dmab->private_data;
        unsigned int start, end, pg;

        start = ofs >> PAGE_SHIFT;
        end = (ofs + size - 1) >> PAGE_SHIFT;
        /* check page continuity */
        pg = sg->table[start].addr >> PAGE_SHIFT;
        for (;;) {
                start++;
                if (start > end)
                        break;
                pg++;
                if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
                        return (start << PAGE_SHIFT) - ofs;
        }
        /* ok, all on contiguous pages */
        return size;
}

const struct snd_malloc_ops snd_dma_sg_ops = {
        .alloc = snd_dma_sg_alloc,
        .free = snd_dma_sg_free,
        .get_addr = snd_dma_sg_get_addr,
        .get_page = snd_dma_sg_get_page,
        .get_chunk_size = snd_dma_sg_get_chunk_size,
};