// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 * Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...) \
	dev_err(&(_vq)->vq.vdev->dev, \
		"%s:"fmt, (_vq)->vq.name, ##args); \
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq) \
		panic("%s:in_use = %i\n", \
		      (_vq)->vq.name, (_vq)->in_use); \
	(_vq)->in_use = __LINE__; \
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq) \
		ktime_t now = ktime_get(); \
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid) \
			WARN_ON(ktime_to_ms(ktime_sub(now, \
				(_vq)->last_add_time)) > 100); \
		(_vq)->last_add_time = now; \
		(_vq)->last_add_time_valid = true; \
#define LAST_ADD_TIME_CHECK(_vq) \
		if ((_vq)->last_add_time_valid) { \
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100); \
#define LAST_ADD_TIME_INVALID(_vq) \
	((_vq)->last_add_time_valid = false)
#define BAD_RING(_vq, fmt, args...) \
	dev_err(&_vq->vq.vdev->dev, \
		"%s:"fmt, (_vq)->vq.name, ##args); \
	(_vq)->broken = true; \
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra {
	dma_addr_t addr;		/* Descriptor DMA addr. */
	u32 len;			/* Descriptor length. */
	u16 flags;			/* Descriptor flags. */
	u16 next;			/* The next desc state in a list. */
};
struct vring_virtqueue_split {
	/* Actual memory layout for this queue. */

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/*
	 * Last written value to avail->idx in
	 */
	u16 avail_idx_shadow;

	/* Per-descriptor state. */
	struct vring_desc_state_split *desc_state;
	struct vring_desc_extra *desc_extra;

	/* DMA address and size information */
	dma_addr_t queue_dma_addr;
	size_t queue_size_in_bytes;

	/*
	 * The parameters for creating vrings are reserved for creating new
	 */
};

struct vring_virtqueue_packed {
	/* Actual memory layout for this queue. */
	struct vring_packed_desc *desc;
	struct vring_packed_desc_event *driver;
	struct vring_packed_desc_event *device;

	/* Driver ring wrap counter. */
	bool avail_wrap_counter;

	/* Avail used flags. */
	u16 avail_used_flags;

	/* Index of the next avail descriptor. */

	/*
	 * Last written value to driver->flags in
	 */
	u16 event_flags_shadow;

	/* Per-descriptor state. */
	struct vring_desc_state_packed *desc_state;
	struct vring_desc_extra *desc_extra;

	/* DMA address and size information */
	dma_addr_t ring_dma_addr;
	dma_addr_t driver_event_dma_addr;
	dma_addr_t device_event_dma_addr;
	size_t ring_size_in_bytes;
	size_t event_size_in_bytes;
};
struct vring_virtqueue {
	/* Is this a packed ring? */

	/* Is DMA API used? */

	/* Can we use weak barriers? */

	/* Other side has made a mess, don't try any more. */

	/* Host supports indirect buffers */

	/* Host publishes avail event idx */

	/* Do DMA mapping by driver */

	/* Head of free buffer list. */
	unsigned int free_head;

	/* Number we've added since last sync. */
	unsigned int num_added;
	/* Last used index we've seen.
	 * for split ring, it just contains last used index
	 * for packed ring:
	 * bits up to VRING_PACKED_EVENT_F_WRAP_CTR include the last used index.
	 * bits from VRING_PACKED_EVENT_F_WRAP_CTR include the used wrap counter.
	 */
	/* Hint for event idx: already triggered no need to disable. */
	bool event_triggered;

	/* Available for split ring */
	struct vring_virtqueue_split split;

	/* Available for packed ring */
	struct vring_virtqueue_packed packed;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */

	/* Device used for doing DMA */
	struct device *dma_dev;

	/* They're supposed to lock for us. */

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring_virtqueue_split *vring_split,
					       struct virtio_device *vdev,
					       bool (*notify)(struct virtqueue *),
					       void (*callback)(struct virtqueue *),
					       struct device *dma_dev);
static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
static void vring_free(struct virtqueue *_vq);

#define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq)

static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
				   unsigned int total_sg)
{
	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}
/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */
static bool vring_use_dma_api(const struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time. On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge. Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
size_t virtio_max_dma_size(const struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);
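/*
 * Illustrative sketch (not part of this file): a driver can use
 * virtio_max_dma_size() to bound the size of any single buffer it queues,
 * the way virtio-blk caps its segment size. "struct my_dev" and its
 * max_seg_size field are hypothetical.
 */
static void my_dev_set_seg_limit(struct virtio_device *vdev,
				 struct my_dev *d)
{
	/* Never build a descriptor larger than the DMA layer can map. */
	d->max_seg_size = min_t(u32, virtio_max_dma_size(vdev), U32_MAX);
}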
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag,
			       struct device *dma_dev)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(dma_dev, size,

	void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

	phys_addr_t phys_addr = virt_to_phys(queue);
	*dma_handle = (dma_addr_t)phys_addr;

	/*
	 * Sanity check: make sure we didn't truncate
	 * the address. The only arches I can find that
	 * have 64-bit phys_addr_t but 32-bit dma_addr_t
	 * are certain non-highmem MIPS and x86
	 * configurations, but these configurations
	 * should never allocate physical pages above 32
	 * bits, so this is fine. Just in case, throw a
	 * warning and abort if we end up with an
	 * unrepresentable address.
	 */
	if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
		free_pages_exact(queue, PAGE_ALIGN(size));
static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle,
			     struct device *dma_dev)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(dma_dev, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->dma_dev;
}
/* Map one sg entry. */
static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
			    enum dma_data_direction direction, dma_addr_t *addr)
{
		*addr = sg_dma_address(sg);

	if (!vq->use_dma_api) {
		/*
		 * If DMA is not used, KMSAN doesn't know that the scatterlist
		 * is initialized by the hardware. Explicitly check/unpoison it
		 * depending on the direction.
		 */
		kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
		*addr = (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	*addr = dma_map_page(vring_dma_dev(vq),
			     sg_page(sg), sg->offset, sg->length,

	if (dma_mapping_error(vring_dma_dev(vq), *addr))

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
	if (!vq->use_dma_api)

	return dma_mapping_error(vring_dma_dev(vq), addr);
static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
{
	vq->vq.num_free = num;

		vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);

		vq->last_used_idx = 0;

	vq->event_triggered = false;

	vq->last_add_time_valid = false;
/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
					   const struct vring_desc *desc)
{
	if (!vq->use_dma_api)

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	dma_unmap_page(vring_dma_dev(vq),
		       virtio64_to_cpu(vq->vq.vdev, desc->addr),
		       virtio32_to_cpu(vq->vq.vdev, desc->len),
		       (flags & VRING_DESC_F_WRITE) ?
		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
}

static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
	struct vring_desc_extra *extra = vq->split.desc_extra;

	if (!vq->use_dma_api)

	flags = extra[i].flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}

	return extra[i].next;
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
	struct vring_desc *desc;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
						    struct vring_desc *desc,
	struct vring_virtqueue *vring = to_vvq(vq);
	struct vring_desc_extra *extra = vring->split.desc_extra;

	desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
	desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
	desc[i].len = cpu_to_virtio32(vq->vdev, len);

		next = extra[i].next;
		desc[i].next = cpu_to_virtio16(vq->vdev, next);

		extra[i].addr = addr;
		extra[i].flags = flags;

		next = virtio16_to_cpu(vq->vdev, desc[i].next);
static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);

		/* Use a single buffer which doesn't continue */
		/* Set up rest to use this indirect table. */

		desc = vq->split.vring.desc;
		descs_used = total_sg;

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer. Presumably the
		 * host should service the ring ASAP. */
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr))

			/* Note that we trust indirect descriptor
			 * table since it uses stream DMA mapping.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,

	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr))

			/* Note that we trust indirect descriptor
			 * table since it uses stream DMA mapping.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr,
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
	if (!indirect && vq->use_dma_api)
		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=

		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
		if (vring_mapping_error(vq, addr)) {

		virtqueue_add_desc_split(_vq, vq->split.vring.desc,
					 total_sg * sizeof(struct vring_desc),
					 VRING_DESC_F_INDIRECT,

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
		vq->free_head = vq->split.desc_extra[head].next;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
		vq->split.desc_state[head].indir_desc = desc;
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						     vq->split.avail_idx_shadow);

	pr_debug("Added buffer head %i to %p\n", head, vq);

	/* This is very unlikely, but theoretically possible. Kick
	if (unlikely(vq->num_added == (1 << 16) - 1))

	for (n = 0; n < total_sg; n++) {
			vring_unmap_one_split_indirect(vq, &desc[i]);
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
			i = vring_unmap_one_split(vq, i);
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* We need to expose available array entries before checking avail
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
		needs_kick = !(vq->split.vring.used->flags &
			       cpu_to_virtio16(_vq->vdev,
					       VRING_USED_F_NO_NOTIFY));
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, i);
		i = vq->split.desc_extra[i].next;

	vring_unmap_one_split(vq, i);
	vq->split.desc_extra[i].next = vq->free_head;
	vq->free_head = head;

	/* Plus final descriptor */

		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;

		/* Free the indirect table, if any, now that it's unmapped. */

		len = vq->split.desc_extra[head].len;

		BUG_ON(!(vq->split.desc_extra[head].flags &
			 VRING_DESC_F_INDIRECT));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		if (vq->use_dma_api) {
			for (j = 0; j < len / sizeof(struct vring_desc); j++)
				vring_unmap_one_split_indirect(vq, &indir_desc[j]);

		vq->split.desc_state[head].indir_desc = NULL;
		*ctx = vq->split.desc_state[head].indir_desc;

static bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}
static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken)) {

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);

	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);
static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;

		/*
		 * If device triggered an event already it won't trigger one again:
		 * no need to disable.
		 */
		if (vq->event_triggered)

			/* TODO: this is a hack. Figure out a cleaner value to write. */
			vring_used_event(&vq->split.vring) = 0x0;
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);

static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* We optimistically turn back on interrupts, then check if there was
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);

	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}
static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* We optimistically turn back on interrupts, then check if there was
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);

	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)

		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);

	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);
static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
				       struct vring_virtqueue *vq)
{
	struct virtio_device *vdev;

	vring_split->avail_flags_shadow = 0;
	vring_split->avail_idx_shadow = 0;

	/* No callback? Tell other side not to bother us. */
	if (!vq->vq.callback) {
		vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
			vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
					vring_split->avail_flags_shadow);

static void virtqueue_reinit_split(struct vring_virtqueue *vq)
{
	num = vq->split.vring.num;

	vq->split.vring.avail->flags = 0;
	vq->split.vring.avail->idx = 0;

	/* reset avail event */
	vq->split.vring.avail->ring[num] = 0;

	vq->split.vring.used->flags = 0;
	vq->split.vring.used->idx = 0;

	/* reset used event */
	*(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;

	virtqueue_init(vq, num);

	virtqueue_vring_init_split(&vq->split, vq);
}

static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
					 struct vring_virtqueue_split *vring_split)
{
	vq->split = *vring_split;

	/* Put everything in free lists. */
static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
{
	struct vring_desc_state_split *state;
	struct vring_desc_extra *extra;
	u32 num = vring_split->vring.num;

	state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL);

	extra = vring_alloc_desc_extra(num);

	memset(state, 0, num * sizeof(struct vring_desc_state_split));

	vring_split->desc_state = state;
	vring_split->desc_extra = extra;

static void vring_free_split(struct vring_virtqueue_split *vring_split,
			     struct virtio_device *vdev, struct device *dma_dev)
{
	vring_free_queue(vdev, vring_split->queue_size_in_bytes,
			 vring_split->vring.desc,
			 vring_split->queue_dma_addr,

	kfree(vring_split->desc_state);
	kfree(vring_split->desc_extra);
}

static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
				   struct virtio_device *vdev,
				   unsigned int vring_align,
				   bool may_reduce_num,
				   struct device *dma_dev)
{
	dma_addr_t dma_addr;

	/* We assume num is a power of 2. */
	if (!is_power_of_2(num)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,

		if (!may_reduce_num)

	/* Try to get a single page. You are my only hope! */
	queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
				  &dma_addr, GFP_KERNEL | __GFP_ZERO,

	vring_init(&vring_split->vring, num, queue, vring_align);

	vring_split->queue_dma_addr = dma_addr;
	vring_split->queue_size_in_bytes = vring_size(num, vring_align);

	vring_split->vring_align = vring_align;
	vring_split->may_reduce_num = may_reduce_num;
static struct virtqueue *vring_create_virtqueue_split(
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool may_reduce_num,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	struct device *dma_dev)
{
	struct vring_virtqueue_split vring_split = {};
	struct virtqueue *vq;

	err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
				      may_reduce_num, dma_dev);

	vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
				   context, notify, callback, name, dma_dev);
		vring_free_split(&vring_split, vdev, dma_dev);

	to_vvq(vq)->we_own_ring = true;
static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
{
	struct vring_virtqueue_split vring_split = {};
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct virtio_device *vdev = _vq->vdev;

	err = vring_alloc_queue_split(&vring_split, vdev, num,
				      vq->split.vring_align,
				      vq->split.may_reduce_num,

	err = vring_alloc_state_extra_split(&vring_split);
	if (err)
		goto err_state_extra;

	vring_free(&vq->vq);

	virtqueue_vring_init_split(&vring_split, vq);

	virtqueue_init(vq, vring_split.vring.num);
	virtqueue_vring_attach_split(vq, &vring_split);

	vring_free_split(&vring_split, vdev, vring_dma_dev(vq));

	virtqueue_reinit_split(vq);
/*
 * Packed ring specific functions - *_packed().
 */

static bool packed_used_wrap_counter(u16 last_used_idx)
{
	return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}

static u16 packed_last_used(u16 last_used_idx)
{
	return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}
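/*
 * Worked example (illustrative only): VRING_PACKED_EVENT_F_WRAP_CTR is 15,
 * so with last_used_idx == 0x8005 the two helpers above decode to a last
 * used index of 0x0005 with the used wrap counter set, and re-encoding is
 * the reverse:
 *
 *	u16 idx   = packed_last_used(0x8005);		// 0x0005
 *	bool wrap = packed_used_wrap_counter(0x8005);	// true
 *	u16 redo  = idx | (wrap << VRING_PACKED_EVENT_F_WRAP_CTR); // 0x8005
 */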
static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
				     const struct vring_desc_extra *extra)
{
	if (!vq->use_dma_api)

	flags = extra->flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 extra->addr, extra->len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       extra->addr, extra->len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
				    const struct vring_packed_desc *desc)
{
	if (!vq->use_dma_api)

	flags = le16_to_cpu(desc->flags);

	dma_unmap_page(vring_dma_dev(vq),
		       le64_to_cpu(desc->addr),
		       le32_to_cpu(desc->len),
		       (flags & VRING_DESC_F_WRITE) ?
		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
}

static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
	struct vring_packed_desc *desc;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
					 struct scatterlist *sgs[],
					 unsigned int total_sg,
					 unsigned int out_sgs,
					 unsigned int in_sgs,
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, err_idx;

	head = vq->packed.next_avail_idx;
	desc = alloc_indirect_packed(total_sg, gfp);

	if (unlikely(vq->vq.num_free < 1)) {
		pr_debug("Can't add buf len 1 - avail = 0\n");

	BUG_ON(id == vq->packed.vring.num);

	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			if (vring_map_one_sg(vq, sg, n < out_sgs ?
					     DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))

			desc[i].flags = cpu_to_le16(n < out_sgs ?
						    0 : VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);

	/* Now that the indirect table is filled in, map it. */
	addr = vring_map_single(vq, desc,
				total_sg * sizeof(struct vring_packed_desc),
	if (vring_mapping_error(vq, addr)) {

	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
				sizeof(struct vring_packed_desc));
	vq->packed.vring.desc[head].id = cpu_to_le16(id);

	if (vq->use_dma_api) {
		vq->packed.desc_extra[id].addr = addr;
		vq->packed.desc_extra[id].len = total_sg *
				sizeof(struct vring_packed_desc);
		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
						  vq->packed.avail_used_flags;

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
							vq->packed.avail_used_flags);

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= 1;

	/* Update free pointer */
	if (n >= vq->packed.vring.num) {
		vq->packed.avail_wrap_counter ^= 1;
		vq->packed.avail_used_flags ^=
				1 << VRING_PACKED_DESC_F_AVAIL |
				1 << VRING_PACKED_DESC_F_USED;
	}
	vq->packed.next_avail_idx = n;
	vq->free_head = vq->packed.desc_extra[id].next;

	/* Store token and indirect buffer state. */
	vq->packed.desc_state[id].num = 1;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = desc;
	vq->packed.desc_state[id].last = id;

	pr_debug("Added buffer head %i to %p\n", head, vq);

	for (i = 0; i < err_idx; i++)
		vring_unmap_desc_packed(vq, &desc[i]);
static inline int virtqueue_add_packed(struct virtqueue *_vq,
				       struct scatterlist *sgs[],
				       unsigned int total_sg,
				       unsigned int out_sgs,
				       unsigned int in_sgs,
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, c, descs_used, err_idx;
	__le16 head_flags, flags;
	u16 head, id, prev, curr, avail_used_flags;

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	if (virtqueue_use_indirect(vq, total_sg)) {
		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
		if (err != -ENOMEM) {

		/* fall back on direct */
	}

	head = vq->packed.next_avail_idx;
	avail_used_flags = vq->packed.avail_used_flags;

	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);

	desc = vq->packed.vring.desc;

	descs_used = total_sg;

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);

	BUG_ON(id == vq->packed.vring.num);

	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {

			if (vring_map_one_sg(vq, sg, n < out_sgs ?
					     DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))

			flags = cpu_to_le16(vq->packed.avail_used_flags |
				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
				desc[i].flags = flags;

			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			desc[i].id = cpu_to_le16(id);

			if (unlikely(vq->use_dma_api)) {
				vq->packed.desc_extra[curr].addr = addr;
				vq->packed.desc_extra[curr].len = sg->length;
				vq->packed.desc_extra[curr].flags =

			curr = vq->packed.desc_extra[curr].next;

			if ((unlikely(++i >= vq->packed.vring.num))) {
				vq->packed.avail_used_flags ^=
					1 << VRING_PACKED_DESC_F_AVAIL |
					1 << VRING_PACKED_DESC_F_USED;

		vq->packed.avail_wrap_counter ^= 1;

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	vq->packed.next_avail_idx = i;
	vq->free_head = curr;

	vq->packed.desc_state[id].num = descs_used;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = ctx;
	vq->packed.desc_state[id].last = prev;

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = head_flags;
	vq->num_added += descs_used;

	pr_debug("Added buffer head %i to %p\n", head, vq);

	curr = vq->free_head;

	vq->packed.avail_used_flags = avail_used_flags;

	for (n = 0; n < total_sg; n++) {
		vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
		curr = vq->packed.desc_extra[curr].next;
		if (i >= vq->packed.vring.num)
static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old, off_wrap, flags, wrap_counter, event_idx;

	/*
	 * We need to expose the new flags value before checking notification
	 */
	virtio_mb(vq->weak_barriers);

	old = vq->packed.next_avail_idx - vq->num_added;
	new = vq->packed.next_avail_idx;

	snapshot.u32 = *(u32 *)vq->packed.vring.device;
	flags = le16_to_cpu(snapshot.flags);

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);

	off_wrap = le16_to_cpu(snapshot.off_wrap);

	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	if (wrap_counter != vq->packed.avail_wrap_counter)
		event_idx -= vq->packed.vring.num;

	needs_kick = vring_need_event(event_idx, new, old);
static void detach_buf_packed(struct vring_virtqueue *vq,
			      unsigned int id, void **ctx)
{
	struct vring_desc_state_packed *state = NULL;
	struct vring_packed_desc *desc;
	unsigned int i, curr;

	state = &vq->packed.desc_state[id];

	/* Clear data ptr. */

	vq->packed.desc_extra[state->last].next = vq->free_head;

	vq->vq.num_free += state->num;

	if (unlikely(vq->use_dma_api)) {
		for (i = 0; i < state->num; i++) {
			vring_unmap_extra_packed(vq,
						 &vq->packed.desc_extra[curr]);
			curr = vq->packed.desc_extra[curr].next;

		/* Free the indirect table, if any, now that it's unmapped. */
		desc = state->indir_desc;

		if (vq->use_dma_api) {
			len = vq->packed.desc_extra[id].len;
			for (i = 0; i < len / sizeof(struct vring_packed_desc);
				vring_unmap_desc_packed(vq, &desc[i]);

		state->indir_desc = NULL;
		*ctx = state->indir_desc;

static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
				       u16 idx, bool used_wrap_counter)
{
	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

	return avail == used && used == used_wrap_counter;
}

static bool more_used_packed(const struct vring_virtqueue *vq)
{
	bool used_wrap_counter;

	last_used_idx = READ_ONCE(vq->last_used_idx);
	last_used = packed_last_used(last_used_idx);
	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
	return is_used_desc_packed(vq, last_used, used_wrap_counter);
}
static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used, id, last_used_idx;
	bool used_wrap_counter;

	if (unlikely(vq->broken)) {

	if (!more_used_packed(vq)) {
		pr_debug("No more buffers in queue\n");

	/* Only get used elements after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used_idx = READ_ONCE(vq->last_used_idx);
	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
	last_used = packed_last_used(last_used_idx);
	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);

	if (unlikely(id >= vq->packed.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", id);
	if (unlikely(!vq->packed.desc_state[id].data)) {
		BAD_RING(vq, "id %u is not a head!\n", id);

	/* detach_buf_packed clears data, so grab it now. */
	ret = vq->packed.desc_state[id].data;
	detach_buf_packed(vq, id, ctx);

	last_used += vq->packed.desc_state[id].num;
	if (unlikely(last_used >= vq->packed.vring.num)) {
		last_used -= vq->packed.vring.num;
		used_wrap_counter ^= 1;
	}

	last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
	WRITE_ONCE(vq->last_used_idx, last_used);

	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
		virtio_store_mb(vq->weak_barriers,
				&vq->packed.vring.driver->off_wrap,
				cpu_to_le16(vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);
static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;

		/*
		 * If device triggered an event already it won't trigger one again:
		 * no need to disable.
		 */
		if (vq->event_triggered)

		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);

static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 */

		vq->packed.vring.driver->off_wrap =
			cpu_to_le16(vq->last_used_idx);

		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);

	return vq->last_used_idx;
}

static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

	return is_used_desc_packed(vq, used_idx, wrap_counter);
}
static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 used_idx, wrap_counter, last_used_idx;

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 */

		/* TODO: tune this threshold */
		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
		last_used_idx = READ_ONCE(vq->last_used_idx);
		wrap_counter = packed_used_wrap_counter(last_used_idx);

		used_idx = packed_last_used(last_used_idx) + bufs;
		if (used_idx >= vq->packed.vring.num) {
			used_idx -= vq->packed.vring.num;

		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));

		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);

	/*
	 * We need to update event suppression structure first
	 * before re-checking for more used buffers.
	 */
	virtio_mb(vq->weak_barriers);

	last_used_idx = READ_ONCE(vq->last_used_idx);
	wrap_counter = packed_used_wrap_counter(last_used_idx);
	used_idx = packed_last_used(last_used_idx);
	if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	for (i = 0; i < vq->packed.vring.num; i++) {
		if (!vq->packed.desc_state[i].data)

		/* detach_buf clears data, so grab it now. */
		buf = vq->packed.desc_state[i].data;
		detach_buf_packed(vq, i, NULL);

	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->packed.vring.num);

static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
{
	struct vring_desc_extra *desc_extra;

	desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),

	memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));

	for (i = 0; i < num - 1; i++)
		desc_extra[i].next = i + 1;
static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
			      struct virtio_device *vdev,
			      struct device *dma_dev)
{
	if (vring_packed->vring.desc)
		vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
				 vring_packed->vring.desc,
				 vring_packed->ring_dma_addr,

	if (vring_packed->vring.driver)
		vring_free_queue(vdev, vring_packed->event_size_in_bytes,
				 vring_packed->vring.driver,
				 vring_packed->driver_event_dma_addr,

	if (vring_packed->vring.device)
		vring_free_queue(vdev, vring_packed->event_size_in_bytes,
				 vring_packed->vring.device,
				 vring_packed->device_event_dma_addr,

	kfree(vring_packed->desc_state);
	kfree(vring_packed->desc_extra);

static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
				    struct virtio_device *vdev,
				    u32 num, struct device *dma_dev)
{
	struct vring_packed_desc *ring;
	struct vring_packed_desc_event *driver, *device;
	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
	size_t ring_size_in_bytes, event_size_in_bytes;

	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);

	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
				 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,

	vring_packed->vring.desc = ring;
	vring_packed->ring_dma_addr = ring_dma_addr;
	vring_packed->ring_size_in_bytes = ring_size_in_bytes;

	event_size_in_bytes = sizeof(struct vring_packed_desc_event);

	driver = vring_alloc_queue(vdev, event_size_in_bytes,
				   &driver_event_dma_addr,
				   GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,

	vring_packed->vring.driver = driver;
	vring_packed->event_size_in_bytes = event_size_in_bytes;
	vring_packed->driver_event_dma_addr = driver_event_dma_addr;

	device = vring_alloc_queue(vdev, event_size_in_bytes,
				   &device_event_dma_addr,
				   GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,

	vring_packed->vring.device = device;
	vring_packed->device_event_dma_addr = device_event_dma_addr;

	vring_packed->vring.num = num;

	vring_free_packed(vring_packed, vdev, dma_dev);
static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed)
{
	struct vring_desc_state_packed *state;
	struct vring_desc_extra *extra;
	u32 num = vring_packed->vring.num;

	state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL);
	if (!state)
		goto err_desc_state;

	memset(state, 0, num * sizeof(struct vring_desc_state_packed));

	extra = vring_alloc_desc_extra(num);
	if (!extra)
		goto err_desc_extra;

	vring_packed->desc_state = state;
	vring_packed->desc_extra = extra;

static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed,
	vring_packed->next_avail_idx = 0;
	vring_packed->avail_wrap_counter = 1;
	vring_packed->event_flags_shadow = 0;
	vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;

	/* No callback? Tell other side not to bother us. */
		vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
		vring_packed->vring.driver->flags =
			cpu_to_le16(vring_packed->event_flags_shadow);

static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq,
					  struct vring_virtqueue_packed *vring_packed)
{
	vq->packed = *vring_packed;

	/* Put everything in free lists. */

static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
{
	memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes);
	memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes);

	/* we need to reset the desc.flags. For more, see is_used_desc_packed() */
	memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes);

	virtqueue_init(vq, vq->packed.vring.num);
	virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
}
static struct virtqueue *vring_create_virtqueue_packed(
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool may_reduce_num,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	struct device *dma_dev)
{
	struct vring_virtqueue_packed vring_packed = {};
	struct vring_virtqueue *vq;

	if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);

	vq->vq.callback = callback;
	vq->vq.index = index;
	vq->vq.reset = false;
	vq->we_own_ring = true;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	vq->packed_ring = true;
	vq->dma_dev = dma_dev;
	vq->use_dma_api = vring_use_dma_api(vdev);
	vq->premapped = false;

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	err = vring_alloc_state_extra_packed(&vring_packed);
	if (err)
		goto err_state_extra;

	virtqueue_vring_init_packed(&vring_packed, !!callback);

	virtqueue_init(vq, num);
	virtqueue_vring_attach_packed(vq, &vring_packed);

	spin_lock(&vdev->vqs_list_lock);
	list_add_tail(&vq->vq.list, &vdev->vqs);
	spin_unlock(&vdev->vqs_list_lock);

	vring_free_packed(&vring_packed, vdev, dma_dev);
static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
{
	struct vring_virtqueue_packed vring_packed = {};
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct virtio_device *vdev = _vq->vdev;

	if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq)))

	err = vring_alloc_state_extra_packed(&vring_packed);
	if (err)
		goto err_state_extra;

	vring_free(&vq->vq);

	virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback);

	virtqueue_init(vq, vring_packed.vring.num);
	virtqueue_vring_attach_packed(vq, &vring_packed);

	vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq));

	virtqueue_reinit_packed(vq);
/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, gfp) :
				 virtqueue_add_split(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, gfp);
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))

	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
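/*
 * Illustrative sketch (not part of this file): queueing one device-readable
 * header and one device-writable status byte with virtqueue_add_sgs(), the
 * way virtio-blk lays out a request. "struct my_req" is hypothetical.
 */
static int my_queue_req(struct virtqueue *vq, struct my_req *req)
{
	struct scatterlist hdr, status, *sgs[2];

	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
	sgs[0] = &hdr;			/* readable by the device */
	sg_init_one(&status, &req->status, sizeof(req->status));
	sgs[1] = &status;		/* writable by the device */

	/* One out sg list, then one in sg list; req doubles as the token. */
	return virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
}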
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
/**
 * virtqueue_dma_dev - get the dma dev
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns the dma dev, which can be used with the dma api.
 */
struct device *virtqueue_dma_dev(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->use_dma_api)
		return vring_dma_dev(vq);
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_dma_dev);
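/*
 * Illustrative sketch (not part of this file): a driver that manages its
 * own DMA mappings maps buffers against the device returned above. Note
 * that virtqueue_dma_dev() returns NULL when the core is not using the
 * DMA API; NULL and mapping-error handling are elided here.
 */
static dma_addr_t my_map_buf(struct virtqueue *vq, void *buf, size_t len)
{
	return dma_map_single(virtqueue_dma_dev(vq), buf, len, DMA_TO_DEVICE);
}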
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
				 virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {

EXPORT_SYMBOL_GPL(virtqueue_notify);
/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
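/*
 * Illustrative sketch (not part of this file): holding a driver lock across
 * the serialized half and notifying outside it, as network drivers do to
 * shorten their critical sections. "struct my_dev" and its vq_lock are
 * hypothetical.
 */
static void my_submit(struct my_dev *d, struct scatterlist *sg, void *tok)
{
	unsigned long flags;
	bool kick;

	spin_lock_irqsave(&d->vq_lock, flags);
	if (virtqueue_add_outbuf(d->vq, sg, 1, tok, GFP_ATOMIC) < 0) {
		spin_unlock_irqrestore(&d->vq_lock, flags);
		return;		/* ring full: caller may retry later */
	}
	kick = virtqueue_kick_prepare(d->vq);
	spin_unlock_irqrestore(&d->vq_lock, flags);

	if (kick)
		virtqueue_notify(d->vq);	/* no lock needed here */
}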
/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written. This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
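/*
 * Illustrative sketch (not part of this file): draining all used buffers
 * from a virtqueue callback. my_complete() is a hypothetical per-driver
 * completion handler for the token.
 */
static void my_vq_callback(struct virtqueue *vq)
{
	unsigned int len;
	void *tok;

	while ((tok = virtqueue_get_buf(vq, &len)) != NULL)
		my_complete(tok, len);
}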
/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->packed_ring)
		virtqueue_disable_cb_packed(_vq);
	else
		virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->event_triggered)
		vq->event_triggered = false;

	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
				 virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))

	virtio_mb(vq->weak_barriers);
	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
				 virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
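/*
 * Illustrative sketch (not part of this file): the race-free re-enable
 * loop that virtqueue_enable_cb_prepare()/virtqueue_poll() exist for.
 * my_process_all() is a hypothetical virtqueue_get_buf() loop.
 */
static void my_drain_and_reenable(struct virtqueue *vq)
{
	unsigned int opaque;

	do {
		virtqueue_disable_cb(vq);
		my_process_all(vq);
		opaque = virtqueue_enable_cb_prepare(vq);
	} while (virtqueue_poll(vq, opaque)); /* raced with a new buffer */
}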
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

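/*
 * Illustrative sketch (not part of the original file, compiled out): the
 * simpler virtqueue_enable_cb() form of the same idiom, as commonly seen
 * in callback handlers.  handle() is hypothetical.
 */
#if 0
static void example_callback_body(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			handle(buf, len);	/* hypothetical helper */
	} while (!virtqueue_enable_cb(vq));	/* false: buffers raced in */
}
#endif
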
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->event_triggered)
		vq->event_triggered = false;

	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
				 virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful for device
 * shutdown or the reset queue.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
				 virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

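/*
 * Illustrative sketch (not part of the original file, compiled out): freeing
 * buffers the device never consumed, e.g. in a remove() path after the
 * device has been reset.  Assumes kmalloc()ed tokens, which is hypothetical.
 */
#if 0
static void example_free_unused(struct virtqueue *vq)
{
	void *buf;

	/* Only legal once the queue is no longer active. */
	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(buf);
}
#endif
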
static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}

/**
 * vring_interrupt - notify a virtqueue on an interrupt
 * @irq: the IRQ number (ignored)
 * @_vq: the struct virtqueue to notify
 *
 * Calls the callback function of @_vq to process the virtqueue
 * notification.
 */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken)) {
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
		dev_warn_once(&vq->vq.vdev->dev,
			      "virtio vring IRQ raised before DRIVER_OK");
		WRITE_ONCE(vq->broken, false);
#else
		return IRQ_HANDLED;
#endif
	}

	/* Just a hint for performance: so it's ok that this can be racy! */
	if (vq->event)
		vq->event_triggered = true;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

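/*
 * Illustrative sketch (not part of the original file, compiled out): how a
 * transport might wire vring_interrupt() up as an IRQ handler, passing the
 * virtqueue as the per-handler cookie.  The irq number is hypothetical.
 */
#if 0
static int example_setup_irq(int irq, struct virtqueue *vq)
{
	/* vring_interrupt() matches irq_handler_t: (int irq, void *dev_id). */
	return request_irq(irq, vring_interrupt, IRQF_SHARED,
			   "example-vq", vq);
}
#endif
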
/* Only available for split ring */
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring_virtqueue_split *vring_split,
					       struct virtio_device *vdev,
					       bool weak_barriers,
					       bool context,
					       bool (*notify)(struct virtqueue *),
					       void (*callback)(struct virtqueue *),
					       const char *name,
					       struct device *dma_dev)
{
	struct vring_virtqueue *vq;
	int err;

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->packed_ring = false;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.index = index;
	vq->vq.reset = false;
	vq->we_own_ring = false;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	vq->broken = true;
#else
	vq->broken = false;
#endif
	vq->dma_dev = dma_dev;
	vq->use_dma_api = vring_use_dma_api(vdev);
	vq->premapped = false;

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	err = vring_alloc_state_extra_split(vring_split);
	if (err) {
		kfree(vq);
		return NULL;
	}

	virtqueue_vring_init_split(vring_split, vq);

	virtqueue_init(vq, vring_split->vring.num);
	virtqueue_vring_attach_split(vq, vring_split);

	spin_lock(&vdev->vqs_list_lock);
	list_add_tail(&vq->vq.list, &vdev->vqs);
	spin_unlock(&vdev->vqs_list_lock);

	return &vq->vq;
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return vring_create_virtqueue_packed(index, num, vring_align,
				vdev, weak_barriers, may_reduce_num,
				context, notify, callback, name, vdev->dev.parent);

	return vring_create_virtqueue_split(index, num, vring_align,
			vdev, weak_barriers, may_reduce_num,
			context, notify, callback, name, vdev->dev.parent);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);

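/*
 * Illustrative sketch (not part of the original file, compiled out): a
 * transport creating a queue with the helper above.  The notify/callback
 * functions and the chosen parameters are hypothetical.
 */
#if 0
static struct virtqueue *example_create_vq(struct virtio_device *vdev,
					   bool (*notify)(struct virtqueue *),
					   void (*callback)(struct virtqueue *))
{
	/* 128 entries, page-aligned ring; allow a smaller ring if a
	 * contiguous allocation fails (may_reduce_num = true). */
	return vring_create_virtqueue(0, 128, PAGE_SIZE, vdev,
				      true /* weak_barriers */,
				      true /* may_reduce_num */,
				      false /* context */,
				      notify, callback, "example");
}
#endif
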
struct virtqueue *vring_create_virtqueue_dma(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name,
	struct device *dma_dev)
{
	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return vring_create_virtqueue_packed(index, num, vring_align,
				vdev, weak_barriers, may_reduce_num,
				context, notify, callback, name, dma_dev);

	return vring_create_virtqueue_split(index, num, vring_align,
			vdev, weak_barriers, may_reduce_num,
			context, notify, callback, name, dma_dev);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);

/**
 * virtqueue_resize - resize the vring of vq
 * @_vq: the struct virtqueue we're talking about.
 * @num: new ring num
 * @recycle: callback to recycle buffers that are no longer used
 *
 * When it is really necessary to create a new vring, the current vq is put
 * into the reset state.  The passed callback is then invoked to recycle each
 * buffer that is no longer used.  Only after the new vring has been created
 * successfully is the old vring released.
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error.
 * 0: success.
 * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size.
 *  vq can still work normally
 * -EBUSY: Failed to sync with device, vq may not work properly
 * -ENOENT: Transport or device not supported
 * -E2BIG/-EINVAL: @num is out of range
 * -EPERM: Operation not permitted
 */
int virtqueue_resize(struct virtqueue *_vq, u32 num,
		     void (*recycle)(struct virtqueue *vq, void *buf))
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct virtio_device *vdev = vq->vq.vdev;
	void *buf;
	int err;

	if (!vq->we_own_ring)
		return -EPERM;

	if (num > vq->vq.num_max)
		return -E2BIG;

	if (!num)
		return -EINVAL;

	if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
		return 0;

	if (!vdev->config->disable_vq_and_reset)
		return -ENOENT;

	if (!vdev->config->enable_vq_after_reset)
		return -ENOENT;

	err = vdev->config->disable_vq_and_reset(_vq);
	if (err)
		return err;

	while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
		recycle(_vq, buf);

	if (vq->packed_ring)
		err = virtqueue_resize_packed(_vq, num);
	else
		err = virtqueue_resize_split(_vq, num);

	if (vdev->config->enable_vq_after_reset(_vq))
		return -EBUSY;

	return err;
}
EXPORT_SYMBOL_GPL(virtqueue_resize);

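/*
 * Illustrative sketch (not part of the original file, compiled out): shrinking
 * a ring the driver owns.  The recycle callback receives each outstanding
 * token; freeing it with kfree() assumes a hypothetical allocation scheme.
 */
#if 0
static void example_recycle(struct virtqueue *vq, void *buf)
{
	kfree(buf);	/* return the buffer to wherever it came from */
}

static int example_shrink_vq(struct virtqueue *vq)
{
	return virtqueue_resize(vq, 64, example_recycle);
}
#endif
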
/**
 * virtqueue_set_dma_premapped - set the vring premapped mode
 * @_vq: the struct virtqueue we're talking about.
 *
 * Enable the premapped mode of the vq.
 *
 * In premapped mode the vring does no DMA mapping internally, so the driver
 * must map buffers in advance and pass the DMA address via the scatterlist's
 * dma_address field.  When the driver gets a used buffer back from the
 * vring, it has to unmap the DMA address itself.
 *
 * This function must be called immediately after creating the vq, or after vq
 * reset, and before adding any buffers to it.
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error.
 * 0: success.
 * -EINVAL: too late to enable, or vring does not use the DMA API.
 */
int virtqueue_set_dma_premapped(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u32 num;

	START_USE(vq);

	num = vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;

	if (num != vq->vq.num_free) {
		END_USE(vq);
		return -EINVAL;
	}

	if (!vq->use_dma_api) {
		END_USE(vq);
		return -EINVAL;
	}

	vq->premapped = true;

	END_USE(vq);

	return 0;
}
EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped);

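/*
 * Illustrative sketch (not part of the original file, compiled out): adding a
 * pre-mapped outgoing buffer once premapped mode is enabled.  Per the comment
 * above, the ring consumes the address from the scatterlist's dma_address
 * instead of mapping the buffer itself; error unwinding is elided and the
 * surrounding device/buffer management is hypothetical.
 */
#if 0
static int example_add_premapped(struct virtqueue *vq, struct device *dma_dev,
				 void *data, size_t len)
{
	struct scatterlist sg;
	dma_addr_t addr;

	addr = dma_map_single(dma_dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, addr))
		return -ENOMEM;

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = addr;	/* consumed by the premapped ring */
	sg.length = len;

	return virtqueue_add_outbuf(vq, &sg, 1, data, GFP_ATOMIC);
}
#endif
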
/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring_virtqueue_split vring_split = {};

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vring_init(&vring_split.vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
				     context, notify, callback, name,
				     vdev->dev.parent);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

static void vring_free(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		if (vq->packed_ring) {
			vring_free_queue(vq->vq.vdev,
					 vq->packed.ring_size_in_bytes,
					 vq->packed.vring.desc,
					 vq->packed.ring_dma_addr,
					 vring_dma_dev(vq));

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.driver,
					 vq->packed.driver_event_dma_addr,
					 vring_dma_dev(vq));

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.device,
					 vq->packed.device_event_dma_addr,
					 vring_dma_dev(vq));

			kfree(vq->packed.desc_state);
			kfree(vq->packed.desc_extra);
		} else {
			vring_free_queue(vq->vq.vdev,
					 vq->split.queue_size_in_bytes,
					 vq->split.vring.desc,
					 vq->split.queue_dma_addr,
					 vring_dma_dev(vq));
		}
	}
	if (!vq->packed_ring) {
		kfree(vq->split.desc_state);
		kfree(vq->split.desc_extra);
	}
}

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	spin_lock(&vq->vq.vdev->vqs_list_lock);
	list_del(&_vq->list);
	spin_unlock(&vq->vq.vdev->vqs_list_lock);

	vring_free(_vq);

	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

u32 vring_notification_data(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 next;

	if (vq->packed_ring)
		next = (vq->packed.next_avail_idx &
				~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) |
			vq->packed.avail_wrap_counter <<
				VRING_PACKED_EVENT_F_WRAP_CTR;
	else
		next = vq->split.avail_idx_shadow;

	return next << 16 | _vq->index;
}
EXPORT_SYMBOL_GPL(vring_notification_data);

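/*
 * Worked example of the encoding above (values hypothetical): a split ring
 * at vq index 3 with avail_idx_shadow == 0x0102 yields
 * 0x0102 << 16 | 3 == 0x01020003.  On a packed ring, bit
 * VRING_PACKED_EVENT_F_WRAP_CTR (15) of the upper half carries the wrap
 * counter instead of being part of the index.
 */
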
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
		case VIRTIO_RING_F_EVENT_IDX:
		case VIRTIO_F_VERSION_1:
		case VIRTIO_F_ACCESS_PLATFORM:
		case VIRTIO_F_RING_PACKED:
		case VIRTIO_F_ORDER_PLATFORM:
		case VIRTIO_F_NOTIFICATION_DATA:
			/* These bits are understood by the ring code. */
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

/*
 * This function should only be called by the core, not directly by the driver.
 */
void __virtqueue_break(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
	WRITE_ONCE(vq->broken, true);
}
EXPORT_SYMBOL_GPL(__virtqueue_break);

/*
 * This function should only be called by the core, not directly by the driver.
 */
void __virtqueue_unbreak(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
	WRITE_ONCE(vq->broken, false);
}
EXPORT_SYMBOL_GPL(__virtqueue_unbreak);

bool virtqueue_is_broken(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	return READ_ONCE(vq->broken);
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	spin_lock(&dev->vqs_list_lock);
	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);

		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
		WRITE_ONCE(vq->broken, true);
	}
	spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(virtio_break_device);

/*
 * This should allow the device to be used by the driver.  You may
 * need to grab appropriate locks to flush the write to
 * vq->broken.  This should only be used in specific cases, e.g.
 * probing and restoring.  This function should only be called by the
 * core, not directly by the driver.
 */
void __virtio_unbreak_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	spin_lock(&dev->vqs_list_lock);
	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);

		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
		WRITE_ONCE(vq->broken, false);
	}
	spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(__virtio_unbreak_device);

dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.ring_dma_addr;

	return vq->split.queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.driver_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.device_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

/* Only available for split ring */
const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");