#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#include <linux/dma-buf.h>
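
/*
 * PRIME support: export nouveau buffer objects as dma-bufs for other
 * devices to use, and import foreign dma-bufs as nouveau GEM objects.
 */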
static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					  enum dma_data_direction dir)
{
	struct nouveau_bo *nvbo = attachment->dmabuf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int npages = nvbo->bo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}
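
/*
 * Undo nouveau_gem_map_dma_buf(): unmap the pages from the importing
 * device and free the scatterlist table.
 */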
static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}
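
/*
 * Last reference to the dma-buf is gone: clear the export link and drop
 * the reference the dma-buf held on the GEM object.
 */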
static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;

	if (nvbo->gem->export_dma_buf == dma_buf) {
		nvbo->gem->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}
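
/*
 * Per-page CPU access and userspace mmap of exported buffers are not
 * supported; these callbacks are deliberate stubs.
 */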
static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}
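
/*
 * Kernel-space vmap of the whole object, reference counted so nested
 * calls share a single ttm_bo_kmap() mapping; struct_mutex serializes
 * the count against concurrent vmap/vunmap.
 */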
static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	if (nvbo->vmapping_count) {
		nvbo->vmapping_count++;
		goto out_unlock;
	}

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
			  &nvbo->dma_buf_vmap);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(ret);
	}
	nvbo->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return nvbo->dma_buf_vmap.virtual;
}
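
/* Drop one vmap reference; unmap once the last user is gone. */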
static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct nouveau_bo *nvbo = dma_buf->priv;
	struct drm_device *dev = nvbo->gem->dev;

	mutex_lock(&dev->struct_mutex);
	nvbo->vmapping_count--;
	if (nvbo->vmapping_count == 0) {
		ttm_bo_kunmap(&nvbo->dma_buf_vmap);
	}
	mutex_unlock(&dev->struct_mutex);
}
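
/* dma-buf callbacks for every buffer exported by nouveau. */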
static const struct dma_buf_ops nouveau_dmabuf_ops = {
	.map_dma_buf = nouveau_gem_map_dma_buf,
	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
	.release = nouveau_gem_dmabuf_release,
	.kmap = nouveau_gem_kmap,
	.kmap_atomic = nouveau_gem_kmap_atomic,
	.kunmap = nouveau_gem_kunmap,
	.kunmap_atomic = nouveau_gem_kunmap_atomic,
	.mmap = nouveau_gem_prime_mmap,
	.vmap = nouveau_gem_prime_vmap,
	.vunmap = nouveau_gem_prime_vunmap,
};
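
/*
 * Wrap an imported sg table in a GART-only TTM buffer object and
 * allocate a GEM object for it.
 */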
static int
nouveau_prime_new(struct drm_device *dev,
		  size_t size,
		  struct sg_table *sg,
		  struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	flags = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
			     sg, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time. not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->gem->driver_private = nvbo;
	return 0;
}
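
/*
 * Export: pin the buffer into GART so its backing pages cannot move
 * while other devices access them, then create the dma-buf.
 */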
struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
				struct drm_gem_object *obj, int flags)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret = 0;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
	if (ret)
		return ERR_PTR(-EINVAL);

	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}
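
/*
 * Import: a nouveau-exported dma-buf from the same device just gets a
 * new reference on the existing GEM object; anything else is attached,
 * mapped, and wrapped in a fresh buffer object.
 */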
struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct nouveau_bo *nvbo;
	int ret;

	if (dma_buf->ops == &nouveau_dmabuf_ops) {
		nvbo = dma_buf->priv;
		if (nvbo->gem) {
			if (nvbo->gem->dev == dev) {
				drm_gem_object_reference(nvbo->gem);
				return nvbo->gem;
			}
		}
	}
	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
	if (ret)
		goto fail_unmap;

	nvbo->gem->import_attach = attach;

	return nvbo->gem;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}