drivers/media/common/videobuf2/videobuf2-dma-sg.c
/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)                                     \
        do {                                                            \
                if (debug >= level)                                     \
                        printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);  \
        } while (0)

struct vb2_dma_sg_buf {
        struct device                   *dev;
        void                            *vaddr;
        struct page                     **pages;
        struct frame_vector             *vec;
        int                             offset;
        enum dma_data_direction         dma_dir;
        struct sg_table                 sg_table;
        /*
         * This will point to sg_table when used with the MMAP or USERPTR
         * memory model, and to the dma_buf sglist when used with the
         * DMABUF memory model.
         */
        struct sg_table                 *dma_sgt;
        size_t                          size;
        unsigned int                    num_pages;
        refcount_t                      refcount;
        struct vb2_vmarea_handler       handler;

        struct dma_buf_attachment       *db_attach;
};

static void vb2_dma_sg_put(void *buf_priv);

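/*
 * Allocate the buffer from as few, as large as possible, physically
 * contiguous chunks: start at the highest order that still fits the
 * remaining size and fall back to smaller orders on allocation failure.
 * Each successfully allocated compound page is split into order-0 pages,
 * so the rest of the allocator can treat the buffer as a plain page array.
 */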
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
                gfp_t gfp_flags)
{
        unsigned int last_page = 0;
        unsigned long size = buf->size;

        while (size > 0) {
                struct page *pages;
                int order;
                int i;

                order = get_order(size);
                /* Don't over-allocate */
                if ((PAGE_SIZE << order) > size)
                        order--;

                pages = NULL;
                while (!pages) {
                        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
                                        __GFP_NOWARN | gfp_flags, order);
                        if (pages)
                                break;

                        if (order == 0) {
                                while (last_page--)
                                        __free_page(buf->pages[last_page]);
                                return -ENOMEM;
                        }
                        order--;
                }

                split_page(pages, order);
                for (i = 0; i < (1 << order); i++)
                        buf->pages[last_page++] = &pages[i];

                size -= PAGE_SIZE << order;
        }

        return 0;
}

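/*
 * MMAP allocation: allocate anonymous pages, build an sg_table over them
 * and map it for the device. dma-sg gives no memory consistency guarantee,
 * so DMA attributes from the upper layer are ignored here.
 */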
static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
                              unsigned long size, enum dma_data_direction dma_dir,
                              gfp_t gfp_flags)
{
        struct vb2_dma_sg_buf *buf;
        struct sg_table *sgt;
        int ret;
        int num_pages;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->vaddr = NULL;
        buf->dma_dir = dma_dir;
        buf->offset = 0;
        buf->size = size;
        /* size is already page aligned */
        buf->num_pages = size >> PAGE_SHIFT;
        buf->dma_sgt = &buf->sg_table;

        /*
         * NOTE: dma-sg allocates memory using the page allocator directly, so
         * there is no memory consistency guarantee, hence dma-sg ignores DMA
         * attributes passed from the upper layer.
         */
        buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
                                    GFP_KERNEL | __GFP_ZERO);
        if (!buf->pages)
                goto fail_pages_array_alloc;

        ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
        if (ret)
                goto fail_pages_alloc;

        ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
                        buf->num_pages, 0, size, GFP_KERNEL);
        if (ret)
                goto fail_table_alloc;

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);

        sgt = &buf->sg_table;
        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
                            DMA_ATTR_SKIP_CPU_SYNC))
                goto fail_map;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dma_sg_put;
        buf->handler.arg = buf;

        refcount_set(&buf->refcount, 1);

        dprintk(1, "%s: Allocated buffer of %d pages\n",
                __func__, buf->num_pages);
        return buf;

fail_map:
        put_device(buf->dev);
        sg_free_table(buf->dma_sgt);
fail_table_alloc:
        num_pages = buf->num_pages;
        while (num_pages--)
                __free_page(buf->pages[num_pages]);
fail_pages_alloc:
        kvfree(buf->pages);
fail_pages_array_alloc:
        kfree(buf);
        return ERR_PTR(-ENOMEM);
}

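/*
 * Drop one reference to an MMAP buffer; on the last put, unmap it from
 * the device, release the kernel mapping (if any) and free the pages.
 */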
static void vb2_dma_sg_put(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;
        int i = buf->num_pages;

        if (refcount_dec_and_test(&buf->refcount)) {
                dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
                        buf->num_pages);
                dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
                                  DMA_ATTR_SKIP_CPU_SYNC);
                if (buf->vaddr)
                        vm_unmap_ram(buf->vaddr, buf->num_pages);
                sg_free_table(buf->dma_sgt);
                while (--i >= 0)
                        __free_page(buf->pages[i]);
                kvfree(buf->pages);
                put_device(buf->dev);
                kfree(buf);
        }
}

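/*
 * Cache synchronization around hardware access: prepare() hands the
 * buffer to the device, finish() hands it back to the CPU.
 */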
static void vb2_dma_sg_prepare(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

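/*
 * USERPTR support: pin the user pages backing [vaddr, vaddr + size) with
 * a frame vector, build an sg_table over them and map it for the device.
 */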
static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
                                    unsigned long size,
                                    enum dma_data_direction dma_dir)
{
        struct vb2_dma_sg_buf *buf;
        struct sg_table *sgt;
        struct frame_vector *vec;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->vaddr = NULL;
        buf->dev = dev;
        buf->dma_dir = dma_dir;
        buf->offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        buf->dma_sgt = &buf->sg_table;
        vec = vb2_create_framevec(vaddr, size);
        if (IS_ERR(vec))
                goto userptr_fail_pfnvec;
        buf->vec = vec;

        buf->pages = frame_vector_pages(vec);
        if (IS_ERR(buf->pages))
                goto userptr_fail_sgtable;
        buf->num_pages = frame_vector_count(vec);

        if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
                        buf->num_pages, buf->offset, size, 0))
                goto userptr_fail_sgtable;

        sgt = &buf->sg_table;
        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
                            DMA_ATTR_SKIP_CPU_SYNC))
                goto userptr_fail_map;

        return buf;

userptr_fail_map:
        sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
        vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
        kfree(buf);
        return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *               be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;
        int i = buf->num_pages;

        dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
               __func__, buf->num_pages);
        dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (buf->vaddr)
                vm_unmap_ram(buf->vaddr, buf->num_pages);
        sg_free_table(buf->dma_sgt);
        if (buf->dma_dir == DMA_FROM_DEVICE ||
            buf->dma_dir == DMA_BIDIRECTIONAL)
                while (--i >= 0)
                        set_page_dirty_lock(buf->pages[i]);
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

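/*
 * Return a kernel virtual address for the buffer, creating the mapping
 * lazily on first use: vmap the dma-buf for DMABUF buffers, otherwise
 * vm_map_ram() the page array.
 */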
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct dma_buf_map map;
        int ret;

        BUG_ON(!buf);

        if (!buf->vaddr) {
                if (buf->db_attach) {
                        ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
                        buf->vaddr = ret ? NULL : map.vaddr;
                } else {
                        buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
                }
        }

        /* add offset in case userptr is not page-aligned */
        return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return refcount_read(&buf->refcount);
}

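/*
 * Map the buffer's pages into a userspace VMA and hook up the common
 * vm_operations so the mapping holds a reference on the buffer.
 */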
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        int err;

        if (!buf) {
                printk(KERN_ERR "No memory to map\n");
                return -EINVAL;
        }

        err = vm_map_pages(vma, buf->pages, buf->num_pages);
        if (err) {
                printk(KERN_ERR "Remapping memory, error: %d\n", err);
                return err;
        }

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dma_sg_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dma_sg_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /*
         * Copy the buf->dma_sgt scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->dma_sgt->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dma_sg_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

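/*
 * Map the attachment's scatter list for the importing device, caching the
 * mapping per attachment: a repeated map in the same direction returns the
 * cached table, a direction change unmaps and remaps it.
 */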
static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dma_sg_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dma_sg_get_dmabuf */
        vb2_dma_sg_put(dbuf->priv);
}

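/*
 * Importers bracket CPU access with begin/end_cpu_access so the exporter
 * can synchronize CPU caches with the device mapping.
 */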
static int
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
                                       enum dma_data_direction direction)
{
        struct vb2_dma_sg_buf *buf = dbuf->priv;
        struct sg_table *sgt = buf->dma_sgt;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
        return 0;
}

static int
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
                                     enum dma_data_direction direction)
{
        struct vb2_dma_sg_buf *buf = dbuf->priv;
        struct sg_table *sgt = buf->dma_sgt;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
        return 0;
}

static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
        struct vb2_dma_sg_buf *buf = dbuf->priv;

        dma_buf_map_set_vaddr(map, buf->vaddr);

        return 0;
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
        .attach = vb2_dma_sg_dmabuf_ops_attach,
        .detach = vb2_dma_sg_dmabuf_ops_detach,
        .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
        .begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
        .end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
        .vmap = vb2_dma_sg_dmabuf_ops_vmap,
        .mmap = vb2_dma_sg_dmabuf_ops_mmap,
        .release = vb2_dma_sg_dmabuf_ops_release,
};

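/*
 * Export the buffer as a dma-buf. The exported dma-buf takes its own
 * reference on the vb2 buffer, dropped again in the release() op.
 */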
static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dma_sg_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (WARN_ON(!buf->dma_sgt))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        refcount_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

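/*
 * Pin an imported dma-buf: ask the exporter for a device-mapped scatter
 * list via dma_buf_map_attachment() and remember it in dma_sgt.
 */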
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
        struct vb2_dma_sg_buf *buf = mem_priv;
        struct sg_table *sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dma_sg_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;
        struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, &map);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
        struct vb2_dma_sg_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_sgt))
                vb2_dma_sg_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

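/*
 * Start using an imported dma-buf: create the attachment for the user's
 * device; the scatter list is only fetched later, in map_dmabuf().
 */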
static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dma_sg_buf *buf;
        struct dma_buf_attachment *dba;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

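/*
 * The allocator cookie for dma-sg buffers is the sg_table pointer;
 * drivers retrieve it through the vb2_dma_sg_plane_desc() helper.
 */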
static void *vb2_dma_sg_cookie(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return buf->dma_sgt;
}

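/*
 * The vb2_mem_ops table exported to the videobuf2 core; drivers select
 * this allocator by pointing vb2_queue->mem_ops at vb2_dma_sg_memops.
 */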
const struct vb2_mem_ops vb2_dma_sg_memops = {
        .alloc          = vb2_dma_sg_alloc,
        .put            = vb2_dma_sg_put,
        .get_userptr    = vb2_dma_sg_get_userptr,
        .put_userptr    = vb2_dma_sg_put_userptr,
        .prepare        = vb2_dma_sg_prepare,
        .finish         = vb2_dma_sg_finish,
        .vaddr          = vb2_dma_sg_vaddr,
        .mmap           = vb2_dma_sg_mmap,
        .num_users      = vb2_dma_sg_num_users,
        .get_dmabuf     = vb2_dma_sg_get_dmabuf,
        .map_dmabuf     = vb2_dma_sg_map_dmabuf,
        .unmap_dmabuf   = vb2_dma_sg_unmap_dmabuf,
        .attach_dmabuf  = vb2_dma_sg_attach_dmabuf,
        .detach_dmabuf  = vb2_dma_sg_detach_dmabuf,
        .cookie         = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");