udmabuf: implement begin_cpu_access/end_cpu_access hooks
authorGurchetan Singh <gurchetansingh@chromium.org>
Tue, 3 Dec 2019 01:36:27 +0000 (17:36 -0800)
committerGerd Hoffmann <kraxel@redhat.com>
Thu, 5 Dec 2019 07:57:45 +0000 (08:57 +0100)
With the misc device, we should end up using the result of
get_arch_dma_ops(..) or dma-direct ops.

This allows us to have write-combining (WC) mappings in the guest
after synchronization.

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20191203013627.85991-4-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
drivers/dma-buf/udmabuf.c

index 0a610e0..61b0a2c 100644 (file)
@@ -18,6 +18,7 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */
 struct udmabuf {
        pgoff_t pagecount;
        struct page **pages;
+       struct sg_table *sg; /* cached DMA mapping, created on first begin_cpu_access */
        struct miscdevice *device;
 };
 
@@ -98,20 +99,58 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
 static void release_udmabuf(struct dma_buf *buf)
 {
        struct udmabuf *ubuf = buf->priv;
+       struct device *dev = ubuf->device->this_device; /* misc device used for DMA mapping */
        pgoff_t pg;
 
+       if (ubuf->sg)
+               put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL); /* unmap and free cached sg table */
+
        for (pg = 0; pg < ubuf->pagecount; pg++)
                put_page(ubuf->pages[pg]);
        kfree(ubuf->pages);
        kfree(ubuf);
 }
 
+/*
+ * begin_cpu_access hook: prepare the buffer for CPU reads/writes.
+ *
+ * On first use, map the pages and cache the resulting sg table; on
+ * subsequent calls, sync the cached mapping for the CPU.  The matching
+ * sync for the device is done in end_cpu_udmabuf().
+ */
+static int begin_cpu_udmabuf(struct dma_buf *buf,
+                            enum dma_data_direction direction)
+{
+       struct udmabuf *ubuf = buf->priv;
+       struct device *dev = ubuf->device->this_device;
+       int ret;
+
+       if (!ubuf->sg) {
+               ubuf->sg = get_sg_table(dev, buf, direction);
+               if (IS_ERR(ubuf->sg)) {
+                       ret = PTR_ERR(ubuf->sg);
+                       /* don't leave an ERR_PTR behind: release_udmabuf()
+                        * only checks ubuf->sg for non-NULL before calling
+                        * put_sg_table() on it.
+                        */
+                       ubuf->sg = NULL;
+                       return ret;
+               }
+       } else {
+               /* CPU access is starting: sync for the CPU, not the
+                * device (dma_sync_sg_for_device belongs in the
+                * end_cpu_access hook).
+                */
+               dma_sync_sg_for_cpu(dev, ubuf->sg->sgl,
+                                   ubuf->sg->nents,
+                                   direction);
+       }
+
+       return 0;
+}
+
+/*
+ * end_cpu_access hook: CPU is done touching the buffer, so hand it
+ * back to the device by syncing the cached sg table for the device
+ * (mirrors the for-CPU sync performed in begin_cpu_udmabuf()).
+ *
+ * Returns -EINVAL if begin_cpu_access never created a mapping.
+ */
+static int end_cpu_udmabuf(struct dma_buf *buf,
+                          enum dma_data_direction direction)
+{
+       struct udmabuf *ubuf = buf->priv;
+       struct device *dev = ubuf->device->this_device;
+
+       if (!ubuf->sg)
+               return -EINVAL;
+
+       /* sync for the *device* here; the for-CPU sync happens in begin */
+       dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+       return 0;
+}
+
 static const struct dma_buf_ops udmabuf_ops = {
        .cache_sgt_mapping = true,
        .map_dma_buf       = map_udmabuf,
        .unmap_dma_buf     = unmap_udmabuf,
        .release           = release_udmabuf,
        .mmap              = mmap_udmabuf,
+       .begin_cpu_access  = begin_cpu_udmabuf, /* new: CPU-access sync hooks */
+       .end_cpu_access    = end_cpu_udmabuf,
 };
 
 #define SEALS_WANTED (F_SEAL_SHRINK)