/* Per-descriptor state. */
struct vring_desc_state_split *desc_state;
+ struct vring_desc_extra *desc_extra;
/* DMA address and size information */
dma_addr_t queue_dma_addr;
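For reference, a sketch of the vring_desc_extra layout this patch relies on (the per-descriptor metadata struct already used by the packed-ring path; the field comments are descriptive additions, not part of this hunk):

	struct vring_desc_extra {
		dma_addr_t addr;	/* Descriptor DMA addr. */
		u32 len;		/* Descriptor length. */
		u16 flags;		/* Descriptor flags. */
		u16 next;		/* The next desc in the free list/chain. */
	};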
* Split ring specific functions - *_split().
*/
-static void vring_unmap_one_split(const struct vring_virtqueue *vq,
- struct vring_desc *desc)
+static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
+ struct vring_desc *desc)
{
u16 flags;
}
}
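For context, the body elided above is unchanged apart from the rename: it still pulls addr/len/flags out of the descriptor itself through the virtio byte-order helpers, which remains acceptable only for indirect tables. Roughly:

	if (!vq->use_dma_api)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}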
+static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
+ unsigned int i)
+{
+ struct vring_desc_extra *extra = vq->split.desc_extra;
+ u16 flags;
+
+ if (!vq->use_dma_api)
+ goto out;
+
+ flags = extra[i].flags;
+
+ if (flags & VRING_DESC_F_INDIRECT) {
+ dma_unmap_single(vring_dma_dev(vq),
+ extra[i].addr,
+ extra[i].len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(vring_dma_dev(vq),
+ extra[i].addr,
+ extra[i].len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+
+out:
+ return extra[i].next;
+}
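The point of the new helper: unmapping no longer reads the ring descriptor, which lives in memory the device may write to. The addr/len/flags shadows in desc_extra are driver-private and kept in host byte order, so the virtio16_to_cpu()/virtio64_to_cpu() conversions also disappear from this path, and returning extra[i].next lets callers walk a chain without touching device-visible memory.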
+
static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
unsigned int total_sg,
gfp_t gfp)
unsigned int i,
dma_addr_t addr,
unsigned int len,
- u16 flags)
+ u16 flags,
+ bool indirect)
{
+ struct vring_virtqueue *vring = to_vvq(vq);
+ struct vring_desc_extra *extra = vring->split.desc_extra;
+ u16 next;
+
desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
desc[i].len = cpu_to_virtio32(vq->vdev, len);
- return virtio16_to_cpu(vq->vdev, desc[i].next);
+ if (!indirect) {
+ next = extra[i].next;
+ desc[i].next = cpu_to_virtio16(vq->vdev, next);
+
+ extra[i].addr = addr;
+ extra[i].len = len;
+ extra[i].flags = flags;
+ } else {
+ next = virtio16_to_cpu(vq->vdev, desc[i].next);
+ }
+
+ return next;
}
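Design note: for direct descriptors the next link now comes from the desc_extra free list and is mirrored into desc[i].next purely for the device's benefit; the driver never reads it back on this path. Indirect table entries keep the old behaviour, since their in-table next fields are the only linkage they have.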
static inline int virtqueue_add_split(struct virtqueue *_vq,
goto unmap_release;
prev = i;
+ /* Note that we trust the indirect descriptor
+ * table since it uses streaming DMA mapping.
+ */
i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
- VRING_DESC_F_NEXT);
+ VRING_DESC_F_NEXT,
+ indirect);
}
}
for (; n < (out_sgs + in_sgs); n++) {
goto unmap_release;
prev = i;
+ /* Note that we trust the indirect descriptor
+ * table since it uses streaming DMA mapping.
+ */
i = virtqueue_add_desc_split(_vq, desc, i, addr,
sg->length,
VRING_DESC_F_NEXT |
- VRING_DESC_F_WRITE);
+ VRING_DESC_F_WRITE,
+ indirect);
}
}
/* Last one doesn't continue. */
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
+ if (!indirect && vq->use_dma_api)
+ vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
+ ~VRING_DESC_F_NEXT;
if (indirect) {
/* Now that the indirect table is filled in, map it. */
virtqueue_add_desc_split(_vq, vq->split.vring.desc,
head, addr,
total_sg * sizeof(struct vring_desc),
- VRING_DESC_F_INDIRECT);
+ VRING_DESC_F_INDIRECT,
+ false);
}
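The trailing false matters here: because the indirect table is mapped at the head slot of the ring proper, its address, length and VRING_DESC_F_INDIRECT flag are recorded in desc_extra[head]. That is what later steers vring_unmap_one_split() into its dma_unmap_single() branch, matching the streaming mapping used for the table (presumably via vring_map_single(); the mapping call itself is outside this hunk).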
/* We're using some buffers from the free list. */
/* Update free pointer */
if (indirect)
- vq->free_head = virtio16_to_cpu(_vq->vdev,
- vq->split.vring.desc[head].next);
+ vq->free_head = vq->split.desc_extra[head].next;
else
vq->free_head = i;
for (n = 0; n < total_sg; n++) {
if (i == err_idx)
break;
- vring_unmap_one_split(vq, &desc[i]);
- i = virtio16_to_cpu(_vq->vdev, desc[i].next);
+ if (indirect) {
+ vring_unmap_one_split_indirect(vq, &desc[i]);
+ i = virtio16_to_cpu(_vq->vdev, desc[i].next);
+ } else {
+ i = vring_unmap_one_split(vq, i);
+ }
}
if (indirect)
i = head;
while (vq->split.vring.desc[i].flags & nextflag) {
- vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
- i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
+ vring_unmap_one_split(vq, i);
+ i = vq->split.desc_extra[i].next;
vq->vq.num_free++;
}
- vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
- vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
- vq->free_head);
+ vring_unmap_one_split(vq, i);
+ vq->split.desc_extra[i].next = vq->free_head;
vq->free_head = head;
/* Plus final descriptor */
if (!indir_desc)
return;
- len = virtio32_to_cpu(vq->vq.vdev,
- vq->split.vring.desc[head].len);
+ len = vq->split.desc_extra[head].len;
- BUG_ON(!(vq->split.vring.desc[head].flags &
- cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
+ BUG_ON(!(vq->split.desc_extra[head].flags &
+ VRING_DESC_F_INDIRECT));
BUG_ON(len == 0 || len % sizeof(struct vring_desc));
for (j = 0; j < len / sizeof(struct vring_desc); j++)
- vring_unmap_one_split(vq, &indir_desc[j]);
+ vring_unmap_one_split_indirect(vq, &indir_desc[j]);
kfree(indir_desc);
vq->split.desc_state[head].indir_desc = NULL;
void (*callback)(struct virtqueue *),
const char *name)
{
- unsigned int i;
struct vring_virtqueue *vq;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
if (!vq->split.desc_state)
goto err_state;
+ vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
+ if (!vq->split.desc_extra)
+ goto err_extra;
+
/* Put everything in free lists. */
vq->free_head = 0;
- for (i = 0; i < vring.num-1; i++)
- vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
memset(vq->split.desc_state, 0, vring.num *
sizeof(struct vring_desc_state_split));
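vring_alloc_desc_extra() itself is outside this hunk; the desc[i].next initialization loop removed above is subsumed by it, since the allocator pre-links the shadow free list. A minimal sketch of what it is assumed to do:

	static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
								unsigned int num)
	{
		struct vring_desc_extra *desc_extra;
		unsigned int i;

		desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
					   GFP_KERNEL);
		if (!desc_extra)
			return NULL;

		memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));

		/* Chain the free list: entry i points at i + 1; the last stays 0. */
		for (i = 0; i < num - 1; i++)
			desc_extra[i].next = i + 1;

		return desc_extra;
	}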
list_add_tail(&vq->vq.list, &vdev->vqs);
return &vq->vq;
+err_extra:
+ kfree(vq->split.desc_state);
err_state:
kfree(vq);
return NULL;
vq->split.queue_dma_addr);
}
}
- if (!vq->packed_ring)
+ if (!vq->packed_ring) {
kfree(vq->split.desc_state);
+ kfree(vq->split.desc_extra);
+ }
list_del(&_vq->list);
kfree(vq);
}