Merge tag 'spi-fix-v6.6-merge-window' of git://git.kernel.org/pub/scm/linux/kernel...
[platform/kernel/linux-rpi.git] drivers/virtio/virtio_ring.c
index 8e81b01..51d8f32 100644
@@ -175,6 +175,11 @@ struct vring_virtqueue {
        /* Do DMA mapping by driver */
        bool premapped;
 
+       /* Whether to unmap descriptors on detach: true only when premapped
+        * is false and use_dma_api is true.
+        */
+       bool do_unmap;
+
        /* Head of free buffer list. */
        unsigned int free_head;
        /* Number we've added since last sync. */
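
Taken together with the initialisation and premapped paths further down, the
new flag encodes a single invariant: the ring only unmaps buffers that it
mapped itself. As a sketch of the relationship (this exact assignment does
not appear in the patch):

	vq->do_unmap = vq->use_dma_api && !vq->premapped;
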
@@ -361,6 +366,11 @@ static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
 static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
                            enum dma_data_direction direction, dma_addr_t *addr)
 {
+       if (vq->premapped) {
+               *addr = sg_dma_address(sg);
+               return 0;
+       }
+
        if (!vq->use_dma_api) {
                /*
                 * If DMA is not used, KMSAN doesn't know that the scatterlist
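
In premapped mode the driver owns the mapping and hands the ring a scatterlist
whose DMA address is already filled in. A hedged driver-side sketch, using the
helpers added at the end of this patch (vq, sg, buf and len are placeholders):

	dma_addr_t addr;

	addr = virtqueue_dma_map_single_attrs(vq, buf, len, DMA_TO_DEVICE, 0);
	if (virtqueue_dma_mapping_error(vq, addr))
		return -ENOMEM;

	sg_init_one(sg, buf, len);	/* the ring still reads sg->length */
	sg_dma_address(sg) = addr;	/* consumed by vring_map_one_sg() */
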
@@ -435,7 +445,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
 {
        u16 flags;
 
-       if (!vq->use_dma_api)
+       if (!vq->do_unmap)
                return;
 
        flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
@@ -453,18 +463,21 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
        struct vring_desc_extra *extra = vq->split.desc_extra;
        u16 flags;
 
-       if (!vq->use_dma_api)
-               goto out;
-
        flags = extra[i].flags;
 
        if (flags & VRING_DESC_F_INDIRECT) {
+               if (!vq->use_dma_api)
+                       goto out;
+
                dma_unmap_single(vring_dma_dev(vq),
                                 extra[i].addr,
                                 extra[i].len,
                                 (flags & VRING_DESC_F_WRITE) ?
                                 DMA_FROM_DEVICE : DMA_TO_DEVICE);
        } else {
+               if (!vq->do_unmap)
+                       goto out;
+
                dma_unmap_page(vring_dma_dev(vq),
                               extra[i].addr,
                               extra[i].len,
@@ -630,7 +643,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
        }
        /* Last one doesn't continue. */
        desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
-       if (!indirect && vq->use_dma_api)
+       if (!indirect && vq->do_unmap)
                vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
                        ~VRING_DESC_F_NEXT;
 
@@ -639,8 +652,12 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
                dma_addr_t addr = vring_map_single(
                        vq, desc, total_sg * sizeof(struct vring_desc),
                        DMA_TO_DEVICE);
-               if (vring_mapping_error(vq, addr))
+               if (vring_mapping_error(vq, addr)) {
+                       if (vq->premapped)
+                               goto free_indirect;
+
                        goto unmap_release;
+               }
 
                virtqueue_add_desc_split(_vq, vq->split.vring.desc,
                                         head, addr,
@@ -706,6 +723,7 @@ unmap_release:
                        i = vring_unmap_one_split(vq, i);
        }
 
+free_indirect:
        if (indirect)
                kfree(desc);
 
@@ -784,7 +802,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
                                VRING_DESC_F_INDIRECT));
                BUG_ON(len == 0 || len % sizeof(struct vring_desc));
 
-               if (vq->use_dma_api) {
+               if (vq->do_unmap) {
                        for (j = 0; j < len / sizeof(struct vring_desc); j++)
                                vring_unmap_one_split_indirect(vq, &indir_desc[j]);
                }
@@ -1207,17 +1225,20 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
 {
        u16 flags;
 
-       if (!vq->use_dma_api)
-               return;
-
        flags = extra->flags;
 
        if (flags & VRING_DESC_F_INDIRECT) {
+               if (!vq->use_dma_api)
+                       return;
+
                dma_unmap_single(vring_dma_dev(vq),
                                 extra->addr, extra->len,
                                 (flags & VRING_DESC_F_WRITE) ?
                                 DMA_FROM_DEVICE : DMA_TO_DEVICE);
        } else {
+               if (!vq->do_unmap)
+                       return;
+
                dma_unmap_page(vring_dma_dev(vq),
                               extra->addr, extra->len,
                               (flags & VRING_DESC_F_WRITE) ?
@@ -1230,7 +1251,7 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
 {
        u16 flags;
 
-       if (!vq->use_dma_api)
+       if (!vq->do_unmap)
                return;
 
        flags = le16_to_cpu(desc->flags);
@@ -1307,15 +1328,19 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
        addr = vring_map_single(vq, desc,
                        total_sg * sizeof(struct vring_packed_desc),
                        DMA_TO_DEVICE);
-       if (vring_mapping_error(vq, addr))
+       if (vring_mapping_error(vq, addr)) {
+               if (vq->premapped)
+                       goto free_desc;
+
                goto unmap_release;
+       }
 
        vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
        vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
                                sizeof(struct vring_packed_desc));
        vq->packed.vring.desc[head].id = cpu_to_le16(id);
 
-       if (vq->use_dma_api) {
+       if (vq->do_unmap) {
                vq->packed.desc_extra[id].addr = addr;
                vq->packed.desc_extra[id].len = total_sg *
                                sizeof(struct vring_packed_desc);
@@ -1366,6 +1391,7 @@ unmap_release:
        for (i = 0; i < err_idx; i++)
                vring_unmap_desc_packed(vq, &desc[i]);
 
+free_desc:
        kfree(desc);
 
        END_USE(vq);
@@ -1455,7 +1481,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
                        desc[i].len = cpu_to_le32(sg->length);
                        desc[i].id = cpu_to_le16(id);
 
-                       if (unlikely(vq->use_dma_api)) {
+                       if (unlikely(vq->do_unmap)) {
                                vq->packed.desc_extra[curr].addr = addr;
                                vq->packed.desc_extra[curr].len = sg->length;
                                vq->packed.desc_extra[curr].flags =
@@ -1473,7 +1499,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
                }
        }
 
-       if (i < head)
+       if (i <= head)
                vq->packed.avail_wrap_counter ^= 1;
 
        /* We're using some buffers from the free list. */
@@ -1589,7 +1615,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
        vq->free_head = id;
        vq->vq.num_free += state->num;
 
-       if (unlikely(vq->use_dma_api)) {
+       if (unlikely(vq->do_unmap)) {
                curr = id;
                for (i = 0; i < state->num; i++) {
                        vring_unmap_extra_packed(vq,
@@ -1606,7 +1632,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
                if (!desc)
                        return;
 
-               if (vq->use_dma_api) {
+               if (vq->do_unmap) {
                        len = vq->packed.desc_extra[id].len;
                        for (i = 0; i < len / sizeof(struct vring_packed_desc);
                                        i++)
@@ -2065,6 +2091,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
        vq->dma_dev = dma_dev;
        vq->use_dma_api = vring_use_dma_api(vdev);
        vq->premapped = false;
+       vq->do_unmap = vq->use_dma_api;
 
        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
                !context;
@@ -2125,6 +2152,43 @@ err_ring:
        return -ENOMEM;
 }
 
+static int virtqueue_disable_and_recycle(struct virtqueue *_vq,
+                                        void (*recycle)(struct virtqueue *vq, void *buf))
+{
+       struct vring_virtqueue *vq = to_vvq(_vq);
+       struct virtio_device *vdev = vq->vq.vdev;
+       void *buf;
+       int err;
+
+       if (!vq->we_own_ring)
+               return -EPERM;
+
+       if (!vdev->config->disable_vq_and_reset)
+               return -ENOENT;
+
+       if (!vdev->config->enable_vq_after_reset)
+               return -ENOENT;
+
+       err = vdev->config->disable_vq_and_reset(_vq);
+       if (err)
+               return err;
+
+       while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
+               recycle(_vq, buf);
+
+       return 0;
+}
+
+static int virtqueue_enable_after_reset(struct virtqueue *_vq)
+{
+       struct vring_virtqueue *vq = to_vvq(_vq);
+       struct virtio_device *vdev = vq->vq.vdev;
+
+       if (vdev->config->enable_vq_after_reset(_vq))
+               return -EBUSY;
+
+       return 0;
+}
 
 /*
  * Generic functions and exported symbols.
@@ -2251,6 +2315,23 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
 
 /**
+ * virtqueue_dma_dev - get the DMA device
+ * @_vq: the struct virtqueue we're talking about.
+ *
+ * Returns the DMA device, which can be used with the DMA API.
+ */
+struct device *virtqueue_dma_dev(struct virtqueue *_vq)
+{
+       struct vring_virtqueue *vq = to_vvq(_vq);
+
+       if (vq->use_dma_api)
+               return vring_dma_dev(vq);
+       else
+               return NULL;
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_dev);
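
A driver that wants to manage mappings itself (for example a buffer pool
shared between queues) can use the returned device with the regular DMA API.
A hypothetical sketch:

	struct device *dev = virtqueue_dma_dev(vq);

	if (dev)
		addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);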
+
+/**
  * virtqueue_kick_prepare - first half of split virtqueue_kick call.
  * @_vq: the struct virtqueue
  *
@@ -2555,6 +2636,7 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
        vq->dma_dev = dma_dev;
        vq->use_dma_api = vring_use_dma_api(vdev);
        vq->premapped = false;
+       vq->do_unmap = vq->use_dma_api;
 
        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
                !context;
@@ -2633,7 +2715,7 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
  * virtqueue_resize - resize the vring of vq
  * @_vq: the struct virtqueue we're talking about.
  * @num: new ring num
- * @recycle: callback for recycle the useless buffer
+ * @recycle: callback to recycle unused buffers
  *
 * When it is really necessary to create a new vring, this puts the current vq
 * into the reset state, then calls the passed callback to recycle the buffers
@@ -2657,13 +2739,8 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
                     void (*recycle)(struct virtqueue *vq, void *buf))
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
-       struct virtio_device *vdev = vq->vq.vdev;
-       void *buf;
        int err;
 
-       if (!vq->we_own_ring)
-               return -EPERM;
-
        if (num > vq->vq.num_max)
                return -E2BIG;
 
@@ -2673,28 +2750,16 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
        if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
                return 0;
 
-       if (!vdev->config->disable_vq_and_reset)
-               return -ENOENT;
-
-       if (!vdev->config->enable_vq_after_reset)
-               return -ENOENT;
-
-       err = vdev->config->disable_vq_and_reset(_vq);
+       err = virtqueue_disable_and_recycle(_vq, recycle);
        if (err)
                return err;
 
-       while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
-               recycle(_vq, buf);
-
        if (vq->packed_ring)
                err = virtqueue_resize_packed(_vq, num);
        else
                err = virtqueue_resize_split(_vq, num);
 
-       if (vdev->config->enable_vq_after_reset(_vq))
-               return -EBUSY;
-
-       return err;
+       return virtqueue_enable_after_reset(_vq);
 }
 EXPORT_SYMBOL_GPL(virtqueue_resize);
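
The recycle callback just returns each detached buffer to the driver's
allocator. A hypothetical example for kmalloc'ed buffers:

	static void drv_recycle_buf(struct virtqueue *vq, void *buf)
	{
		kfree(buf);
	}

	err = virtqueue_resize(vq, new_num, drv_recycle_buf);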
 
@@ -2739,6 +2804,7 @@ int virtqueue_set_dma_premapped(struct virtqueue *_vq)
        }
 
        vq->premapped = true;
+       vq->do_unmap = false;
 
        END_USE(vq);
 
@@ -2746,6 +2812,39 @@ int virtqueue_set_dma_premapped(struct virtqueue *_vq)
 }
 EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped);
 
+/**
+ * virtqueue_reset - detach and recycle all unused buffers
+ * @_vq: the struct virtqueue we're talking about.
+ * @recycle: callback to recycle unused buffers
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -EBUSY: Failed to sync with device, vq may not work properly
+ * -ENOENT: Transport or device not supported
+ * -EPERM: Operation not permitted
+ */
+int virtqueue_reset(struct virtqueue *_vq,
+                   void (*recycle)(struct virtqueue *vq, void *buf))
+{
+       struct vring_virtqueue *vq = to_vvq(_vq);
+       int err;
+
+       err = virtqueue_disable_and_recycle(_vq, recycle);
+       if (err)
+               return err;
+
+       if (vq->packed_ring)
+               virtqueue_reinit_packed(vq);
+       else
+               virtqueue_reinit_split(vq);
+
+       return virtqueue_enable_after_reset(_vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_reset);
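
Unlike virtqueue_resize(), this keeps the ring size and only reinitialises the
ring state. Reusing the hypothetical drv_recycle_buf() from the
virtqueue_resize() example above:

	err = virtqueue_reset(vq, drv_recycle_buf);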
+
 /* Only available for split ring */
 struct virtqueue *vring_new_virtqueue(unsigned int index,
                                      unsigned int num,
@@ -3007,4 +3106,149 @@ const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
 }
 EXPORT_SYMBOL_GPL(virtqueue_get_vring);
 
+/**
+ * virtqueue_dma_map_single_attrs - map DMA for _vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @ptr: the pointer to the buffer to map
+ * @size: the size of the buffer to map
+ * @dir: DMA direction
+ * @attrs: DMA Attrs
+ *
+ * The caller does the DMA mapping in advance. The returned DMA address can
+ * then be passed to this _vq when it is in premapped mode.
+ *
+ * Returns the DMA address. The caller should check it with
+ * virtqueue_dma_mapping_error().
+ */
+dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
+                                         size_t size,
+                                         enum dma_data_direction dir,
+                                         unsigned long attrs)
+{
+       struct vring_virtqueue *vq = to_vvq(_vq);
+
+       if (!vq->use_dma_api)
+               return (dma_addr_t)virt_to_phys(ptr);
+
+       return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_map_single_attrs);
+
+/**
+ * virtqueue_dma_unmap_single_attrs - unmap DMA for _vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: the dma address to unmap
+ * @size: the size of the buffer
+ * @dir: DMA direction
+ * @attrs: DMA Attrs
+ *
+ * Unmap an address that was mapped by the virtqueue_dma_map_* APIs.
+ */
+void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
+                                     size_t size, enum dma_data_direction dir,
+                                     unsigned long attrs)
+{
+       struct vring_virtqueue *vq = to_vvq(_vq);
+
+       if (!vq->use_dma_api)
+               return;
+
+       dma_unmap_single_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_unmap_single_attrs);
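
In premapped mode the unmap moves into the driver's completion path. A sketch,
where drv_buf_dma_addr() is a hypothetical helper that recovers the DMA
address the driver stored for the buffer:

	while ((buf = virtqueue_get_buf(vq, &len)) != NULL) {
		virtqueue_dma_unmap_single_attrs(vq, drv_buf_dma_addr(buf),
						 len, DMA_TO_DEVICE, 0);
		/* ... reuse or free buf ... */
	}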
+
+/**
+ * virtqueue_dma_mapping_error - check the DMA address
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ *
+ * Returns 0 if the DMA address is valid; any other value means it is invalid.
+ */
+int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
+{
+       struct vring_virtqueue *vq = to_vvq(_vq);
+
+       if (!vq->use_dma_api)
+               return 0;
+
+       return dma_mapping_error(vring_dma_dev(vq), addr);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);
+
+/**
+ * virtqueue_dma_need_sync - check whether a DMA address needs to be synced
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ *
+ * Checks whether the DMA address mapped by the virtqueue_dma_map_* APIs
+ * needs to be synchronized.
+ *
+ * Returns true if a sync is required, false otherwise.
+ */
+bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
+{
+       struct vring_virtqueue *vq = to_vvq(_vq);
+
+       if (!vq->use_dma_api)
+               return false;
+
+       return dma_need_sync(vring_dma_dev(vq), addr);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);
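
The two sync helpers below are gated on this check. A hedged receive-side
sketch:

	if (virtqueue_dma_need_sync(vq, addr))
		virtqueue_dma_sync_single_range_for_cpu(vq, addr, 0, len,
							DMA_FROM_DEVICE);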
+
+/**
+ * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ * @offset: DMA address offset
+ * @size: buf size for sync
+ * @dir: DMA direction
+ *
+ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
+ * the DMA address really needs to be synchronized.
+ */
+void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
+                                            dma_addr_t addr,
+                                            unsigned long offset, size_t size,
+                                            enum dma_data_direction dir)
+{
+       struct vring_virtqueue *vq = to_vvq(_vq);
+       struct device *dev = vring_dma_dev(vq);
+
+       if (!vq->use_dma_api)
+               return;
+
+       dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
+
+/**
+ * virtqueue_dma_sync_single_range_for_device - dma sync for device
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ * @offset: DMA address offset
+ * @size: buf size for sync
+ * @dir: DMA direction
+ *
+ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
+ * the DMA address really needs to be synchronized.
+ */
+void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
+                                               dma_addr_t addr,
+                                               unsigned long offset, size_t size,
+                                               enum dma_data_direction dir)
+{
+       struct vring_virtqueue *vq = to_vvq(_vq);
+       struct device *dev = vring_dma_dev(vq);
+
+       if (!vq->use_dma_api)
+               return;
+
+       dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);
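
And symmetrically before a buffer is handed back to the device; a sketch:

	if (virtqueue_dma_need_sync(vq, addr))
		virtqueue_dma_sync_single_range_for_device(vq, addr, 0, len,
							   DMA_FROM_DEVICE);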
+
 MODULE_LICENSE("GPL");