Linux 5.16-rc2
[platform/kernel/linux-rpi.git] drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>
#include <linux/module.h>

#include <asm/smp.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

MODULE_IMPORT_NS(DMA_BUF);

I915_SELFTEST_DECLARE(static bool force_different_devices;)

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
        return to_intel_bo(buf->priv);
}

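/*
 * Map the exporter's pages for an importing device. The object's
 * scatterlist is copied so the importer gets an independent table that
 * can be DMA-mapped and later torn down (see i915_gem_unmap_dma_buf()
 * below) without disturbing the object's own pages. CPU cache
 * maintenance at map time is skipped (DMA_ATTR_SKIP_CPU_SYNC); domain
 * and coherency management is done through the begin/end_cpu_access
 * hooks instead.
 */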
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
                ret = -ENOMEM;
                goto err;
        }

        ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;

        src = obj->mm.pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->mm.pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (ret)
                goto err_free_sg;

        return st;

err_free_sg:
        sg_free_table(st);
err_free:
        kfree(st);
err:
        return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
{
        dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sg);
        kfree(sg);
}

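/*
 * Kernel virtual mapping of the exported buffer: pin the object's pages
 * and return a write-back (WB) mapping through the dma_buf_map
 * abstraction. The matching vunmap below flushes and unpins the map.
 */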
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        void *vaddr;

        vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        dma_buf_map_set_vaddr(map, vaddr);

        return 0;
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
}

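/*
 * Userspace mmap of the exported buffer is forwarded to the object's
 * backing shmem file; the requested range must fit within the object,
 * and objects without a backing struct file cannot be mapped this way.
 */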
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int ret;

        if (obj->base.size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->base.filp)
                return -ENODEV;

        ret = call_mmap(obj->base.filp, vma);
        if (ret)
                return ret;

        vma_set_file(vma, obj->base.filp);

        return 0;
}

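/*
 * begin_cpu_access/end_cpu_access bracket CPU access by an importer:
 * the object is moved into the CPU domain before access and back into
 * the GTT domain afterwards. Both take the object lock through a ww
 * context and back off and retry on -EDEADLK.
 */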
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = i915_gem_object_lock(obj, &ww);
        if (!err)
                err = i915_gem_object_pin_pages(obj);
        if (!err) {
                err = i915_gem_object_set_to_cpu_domain(obj, write);
                i915_gem_object_unpin_pages(obj);
        }
        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = i915_gem_object_lock(obj, &ww);
        if (!err)
                err = i915_gem_object_pin_pages(obj);
        if (!err) {
                err = i915_gem_object_set_to_gtt_domain(obj, false);
                i915_gem_object_unpin_pages(obj);
        }
        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}

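/*
 * On attach the object must be migratable to system memory (SMEM); it
 * is migrated there, the migration is waited on, and the pages are
 * pinned so an importer always maps system-memory pages. Detach drops
 * that pin again.
 */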
static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
                                  struct dma_buf_attachment *attach)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
        struct i915_gem_ww_ctx ww;
        int err;

        if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
                return -EOPNOTSUPP;

        for_i915_gem_ww(&ww, err, true) {
                err = i915_gem_object_lock(obj, &ww);
                if (err)
                        continue;

                err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
                if (err)
                        continue;

                err = i915_gem_object_wait_migration(obj, 0);
                if (err)
                        continue;

                err = i915_gem_object_pin_pages(obj);
        }

        return err;
}

static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
                                   struct dma_buf_attachment *attach)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);

        i915_gem_object_unpin_pages(obj);
}

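/* dma-buf operations for buffers exported by i915 */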
static const struct dma_buf_ops i915_dmabuf_ops = {
        .attach = i915_gem_dmabuf_attach,
        .detach = i915_gem_dmabuf_detach,
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
        .end_cpu_access = i915_gem_end_cpu_access,
};

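/*
 * Export a GEM object as a dma-buf, sharing the object's reservation
 * object with the exported buffer. Backends can hook (and fail) the
 * export via the optional dmabuf_export() object op. Userspace
 * typically reaches this path through DRM_IOCTL_PRIME_HANDLE_TO_FD.
 */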
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;
        exp_info.resv = obj->base.resv;

        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);
                if (ret)
                        return ERR_PTR(ret);
        }

        return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

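/*
 * Backing-store hooks for imported dma-bufs: get_pages maps the
 * attachment and adopts the resulting sg_table as the object's pages,
 * flushing CPU caches with wbinvd on platforms without an LLC (or when
 * the object may bypass it); put_pages unmaps the attachment again.
 */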
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct sg_table *pages;
        unsigned int sg_page_sizes;

        assert_object_held(obj);

        pages = dma_buf_map_attachment(obj->base.import_attach,
                                       DMA_BIDIRECTIONAL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /* XXX: consider doing a vmap flush or something */
        if (!HAS_LLC(i915) || i915_gem_object_can_bypass_llc(obj))
                wbinvd_on_all_cpus();

        sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        dma_buf_unmap_attachment(obj->base.import_attach, pages,
                                 DMA_BIDIRECTIONAL);
}

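/* Object ops used for GEM objects wrapping an imported dma-buf */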
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
        .name = "i915_gem_object_dmabuf",
        .get_pages = i915_gem_object_get_pages_dmabuf,
        .put_pages = i915_gem_object_put_pages_dmabuf,
};

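/*
 * Import a dma-buf. Buffers exported by this same device are
 * short-circuited by taking a reference on the original GEM object;
 * foreign buffers are attached to and wrapped in a new object whose
 * pages come from the attachment.
 */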
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        static struct lock_class_key lock_class;
        struct dma_buf_attachment *attach;
        struct drm_i915_gem_object *obj;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf_to_obj(dma_buf);
                /* is it from our device? */
                if (obj->base.dev == dev &&
                    !I915_SELFTEST_ONLY(force_different_devices)) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead
                         * of the f_count of the dmabuf.
                         */
                        return &i915_gem_object_get(obj)->base;
                }
        }

        if (i915_gem_object_size_2big(dma_buf->size))
                return ERR_PTR(-E2BIG);

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        obj = i915_gem_object_alloc();
        if (obj == NULL) {
                ret = -ENOMEM;
                goto fail_detach;
        }

        drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class,
                             I915_BO_ALLOC_USER);
        obj->base.import_attach = attach;
        obj->base.resv = dma_buf->resv;

        /* We use GTT as shorthand for a coherent domain, one that is
         * neither in the GPU cache nor in the CPU cache, where all
         * writes are immediately visible in memory. (That's not strictly
         * true, but it's close! There are internal buffers such as the
         * write-combined buffer or a delay through the chipset for GTT
         * writes that do require us to treat GTT as a separate cache domain.)
         */
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;

        return &obj->base;

fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif