// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatter-Gather buffer
 *
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
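 *
 *  A scatter-gather buffer is assembled from chunks of DMA-contiguous
 *  pages and stitched together with vmap(), so callers see one virtually
 *  contiguous buffer even when a single physically contiguous allocation
 *  of the full size would fail.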
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/pgtable.h>
#include <sound/memalloc.h>


/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN         32
#define sgbuf_align_table(tbl)  ALIGN((tbl), SGBUF_TBL_ALIGN)

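/**
 * snd_free_sgbuf_pages - release an SG buffer created by snd_malloc_sgbuf_pages()
 * @dmab: the DMA buffer record whose private_data holds the SG buffer
 *
 * Unmaps the vmap()ed area, frees each allocated chunk via its head entry
 * in the page table, then frees the tables themselves.
 *
 * Return: 0 on success, or -EINVAL if no SG buffer is attached.
 */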
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return -EINVAL;

	vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
	tmpb.dev.dev = sgbuf->dev;
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* continuation page, freed with its chunk head */
		/* a head entry carries the chunk size (in pages) in its
		 * low address bits
		 */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;

	return 0;
}

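/* cap each contiguous chunk; asking the allocator for huge contiguous
 * runs is far more likely to fail
 */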
#define MAX_ALLOC_PAGES         32

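/**
 * snd_malloc_sgbuf_pages - allocate an SG buffer and map it into kernel space
 * @device: the DMA device
 * @size: the requested buffer size in bytes
 * @dmab: the DMA buffer record to fill
 * @res_size: if non-NULL, a partial allocation is acceptable, and the
 *	actually allocated size is stored here
 *
 * Pages are allocated in chunks of up to MAX_ALLOC_PAGES, recorded in the
 * SG page table, and finally vmap()ed into one contiguous virtual area.
 *
 * Return: the mapped area, or NULL on failure.
 */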
void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;
	int type = SNDRV_DMA_TYPE_DEV;
	pgprot_t prot = PAGE_KERNEL;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
		type = SNDRV_DMA_TYPE_DEV_UC;
#ifdef pgprot_noncached
		prot = pgprot_noncached(PAGE_KERNEL);
#endif
	}
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(type, device,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			if (!res_size)
				goto _failed;
			/* settle for what we got so far; the caller allowed
			 * a partial allocation by passing res_size
			 */
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			/* the chunk head encodes the chunk size (in pages)
			 * in its low address bits; snd_free_sgbuf_pages()
			 * relies on this
			 */
			if (!i)
				table->addr |= chunk; /* mark head */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk; /* don't retry with a larger chunk */
	}

	sgbuf->size = size;
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
	if (!dmab->area)
		goto _failed;
	if (res_size)
		*res_size = sgbuf->size;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous
 *	pages on an SG buffer
 * @dmab: the DMA buffer record holding the SG buffer
 * @ofs: the byte offset inside the buffer
 * @size: the maximum size to check, in bytes
 *
 * Return: the size of the run of physically continuous pages starting at
 * @ofs, clamped to @size.
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);