// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra {
	dma_addr_t addr;		/* Descriptor DMA addr. */
	u32 len;			/* Descriptor length. */
	u16 flags;			/* Descriptor flags. */
	u16 next;			/* The next desc state in a list. */
};

struct vring_virtqueue_split {
	/* Actual memory layout for this queue. */
	struct vring vring;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/*
	 * Last written value to avail->idx in
	 * guest byte order.
	 */
	u16 avail_idx_shadow;

	/* Per-descriptor state. */
	struct vring_desc_state_split *desc_state;
	struct vring_desc_extra *desc_extra;

	/* DMA address and size information */
	dma_addr_t queue_dma_addr;
	size_t queue_size_in_bytes;

	/*
	 * The parameters for creating vrings are reserved for creating new
	 * vring.
	 */
	u32 vring_align;
	bool may_reduce_num;
};

struct vring_virtqueue_packed {
	/* Actual memory layout for this queue. */
	struct {
		unsigned int num;
		struct vring_packed_desc *desc;
		struct vring_packed_desc_event *driver;
		struct vring_packed_desc_event *device;
	} vring;

	/* Driver ring wrap counter. */
	bool avail_wrap_counter;

	/* Avail used flags. */
	u16 avail_used_flags;

	/* Index of the next avail descriptor. */
	u16 next_avail_idx;

	/*
	 * Last written value to driver->flags in
	 * guest byte order.
	 */
	u16 event_flags_shadow;

	/* Per-descriptor state. */
	struct vring_desc_state_packed *desc_state;
	struct vring_desc_extra *desc_extra;

	/* DMA address and size information */
	dma_addr_t ring_dma_addr;
	dma_addr_t driver_event_dma_addr;
	dma_addr_t device_event_dma_addr;
	size_t ring_size_in_bytes;
	size_t event_size_in_bytes;
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;
	/* Is DMA API used? */
	bool use_dma_api;
	/* Can we use weak barriers? */
	bool weak_barriers;
	/* Other side has made a mess, don't try any more. */
	bool broken;
	/* Host supports indirect buffers */
	bool indirect;
	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen.
	 * for split ring, it just contains last used index
	 * for packed ring:
	 * bits up to VRING_PACKED_EVENT_F_WRAP_CTR include the last used index.
	 * bits from VRING_PACKED_EVENT_F_WRAP_CTR include the used wrap counter.
	 */
	u16 last_used_idx;

	/* Hint for event idx: already triggered no need to disable. */
	bool event_triggered;

	union {
		/* Available for split ring */
		struct vring_virtqueue_split split;

		/* Available for packed ring */
		struct vring_virtqueue_packed packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

	/* Device used for doing DMA */
	struct device *dma_dev;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};

static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring_virtqueue_split *vring_split,
					       struct virtio_device *vdev,
					       bool weak_barriers,
					       bool context,
					       bool (*notify)(struct virtqueue *),
					       void (*callback)(struct virtqueue *),
					       const char *name,
					       struct device *dma_dev);
static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
static void vring_free(struct virtqueue *_vq);

#define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq)

static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
				   unsigned int total_sg)
{
	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */
static bool vring_use_dma_api(const struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time. On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge. Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

size_t virtio_max_dma_size(const struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);

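/*
 * A hedged usage sketch (not part of this file): a block-style driver might
 * use virtio_max_dma_size() to cap the segment size it advertises to the
 * block layer; the queue variable and clamping below are illustrative only.
 *
 *	unsigned int max_seg = min_t(size_t, virtio_max_dma_size(vdev),
 *				     U32_MAX);
 *	blk_queue_max_segment_size(q, max_seg);
 */
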
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag,
			       struct device *dma_dev)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(dma_dev, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle,
			     struct device *dma_dev)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(dma_dev, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->dma_dev;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api) {
		/*
		 * If DMA is not used, KMSAN doesn't know that the scatterlist
		 * is initialized by the hardware. Explicitly check/unpoison it
		 * depending on the direction.
		 */
		kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
		return (dma_addr_t)sg_phys(sg);
	}

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
{
	vq->vq.num_free = num;

	if (vq->packed_ring)
		vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	else
		vq->last_used_idx = 0;

	vq->event_triggered = false;
	vq->num_added = 0;

#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif
}

/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
					   const struct vring_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	dma_unmap_page(vring_dma_dev(vq),
		       virtio64_to_cpu(vq->vq.vdev, desc->addr),
		       virtio32_to_cpu(vq->vq.vdev, desc->len),
		       (flags & VRING_DESC_F_WRITE) ?
		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
}

static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
					  unsigned int i)
{
	struct vring_desc_extra *extra = vq->split.desc_extra;
	u16 flags;

	if (!vq->use_dma_api)
		goto out;

	flags = extra[i].flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 extra[i].addr,
				 extra[i].len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       extra[i].addr,
			       extra[i].len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}

out:
	return extra[i].next;
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
						    struct vring_desc *desc,
						    unsigned int i,
						    dma_addr_t addr,
						    unsigned int len,
						    u16 flags,
						    bool indirect)
{
	struct vring_virtqueue *vring = to_vvq(vq);
	struct vring_desc_extra *extra = vring->split.desc_extra;
	u16 next;

	desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
	desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
	desc[i].len = cpu_to_virtio32(vq->vdev, len);

	if (!indirect) {
		next = extra[i].next;
		desc[i].next = cpu_to_virtio16(vq->vdev, next);

		extra[i].addr = addr;
		extra[i].len = len;
		extra[i].flags = flags;
	} else
		next = virtio16_to_cpu(vq->vdev, desc[i].next);

	return next;
}

static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust indirect descriptor
			 * table since it uses stream DMA mapping.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
						     VRING_DESC_F_NEXT,
						     indirect);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust indirect descriptor
			 * table since it uses stream DMA mapping.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr,
						     sg->length,
						     VRING_DESC_F_NEXT |
						     VRING_DESC_F_WRITE,
						     indirect);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
	if (!indirect && vq->use_dma_api)
		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
			~VRING_DESC_F_NEXT;

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		virtqueue_add_desc_split(_vq, vq->split.vring.desc,
					 head, addr,
					 total_sg * sizeof(struct vring_desc),
					 VRING_DESC_F_INDIRECT,
					 false);
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = vq->split.desc_extra[head].next;
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						     vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		if (indirect) {
			vring_unmap_one_split_indirect(vq, &desc[i]);
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		} else
			i = vring_unmap_one_split(vq, i);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}

static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}

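/*
 * Aside (not part of this file): vring_need_event(), from
 * <uapi/linux/virtio_ring.h>, decides whether the device's event index was
 * crossed since the last kick, using modulo-2^16 arithmetic:
 *
 *	(u16)(new_idx - event_idx - 1) < (u16)(new_idx - old_idx)
 *
 * For example, with old = 5, new = 8 and event_idx = 6:
 * (8 - 6 - 1) = 1 < (8 - 5) = 3, so a kick is needed; with event_idx = 9
 * the left side wraps to 0xfffe and no kick is needed.
 */
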
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, i);
		i = vq->split.desc_extra[i].next;
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, i);
	vq->split.desc_extra[i].next = vq->free_head;
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = vq->split.desc_extra[head].len;

		BUG_ON(!(vq->split.desc_extra[head].flags &
				VRING_DESC_F_INDIRECT));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split_indirect(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;

		/*
		 * If device triggered an event already it won't trigger one again:
		 * no need to disable.
		 */
		if (vq->event_triggered)
			return;

		if (vq->event)
			/* TODO: this is a hack. Figure out a cleaner value to write. */
			vring_used_event(&vq->split.vring) = 0x0;
		else
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}

static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
				       struct vring_virtqueue *vq)
{
	struct virtio_device *vdev;

	vdev = vq->vq.vdev;

	vring_split->avail_flags_shadow = 0;
	vring_split->avail_idx_shadow = 0;

	/* No callback?  Tell other side not to bother us. */
	if (!vq->vq.callback) {
		vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
					vring_split->avail_flags_shadow);
	}
}

static void virtqueue_reinit_split(struct vring_virtqueue *vq)
{
	int num;

	num = vq->split.vring.num;

	vq->split.vring.avail->flags = 0;
	vq->split.vring.avail->idx = 0;

	/* reset avail event */
	vq->split.vring.avail->ring[num] = 0;

	vq->split.vring.used->flags = 0;
	vq->split.vring.used->idx = 0;

	/* reset used event */
	*(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;

	virtqueue_init(vq, num);

	virtqueue_vring_init_split(&vq->split, vq);
}

static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
					 struct vring_virtqueue_split *vring_split)
{
	vq->split = *vring_split;

	/* Put everything in free lists. */
	vq->free_head = 0;
}

static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
{
	struct vring_desc_state_split *state;
	struct vring_desc_extra *extra;
	u32 num = vring_split->vring.num;

	state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL);
	if (!state)
		goto err_state;

	extra = vring_alloc_desc_extra(num);
	if (!extra)
		goto err_extra;

	memset(state, 0, num * sizeof(struct vring_desc_state_split));

	vring_split->desc_state = state;
	vring_split->desc_extra = extra;
	return 0;

err_extra:
	kfree(state);
err_state:
	return -ENOMEM;
}

static void vring_free_split(struct vring_virtqueue_split *vring_split,
			     struct virtio_device *vdev, struct device *dma_dev)
{
	vring_free_queue(vdev, vring_split->queue_size_in_bytes,
			 vring_split->vring.desc,
			 vring_split->queue_dma_addr,
			 dma_dev);

	kfree(vring_split->desc_state);
	kfree(vring_split->desc_extra);
}

static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
				   struct virtio_device *vdev,
				   u32 num,
				   unsigned int vring_align,
				   bool may_reduce_num,
				   struct device *dma_dev)
{
	void *queue = NULL;
	dma_addr_t dma_addr;

	/* We assume num is a power of 2. */
	if (!is_power_of_2(num)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return -EINVAL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
					  dma_dev);
		if (queue)
			break;
		if (!may_reduce_num)
			return -ENOMEM;
	}

	if (!num)
		return -ENOMEM;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL | __GFP_ZERO,
					  dma_dev);
	}
	if (!queue)
		return -ENOMEM;

	vring_init(&vring_split->vring, num, queue, vring_align);

	vring_split->queue_dma_addr = dma_addr;
	vring_split->queue_size_in_bytes = vring_size(num, vring_align);

	vring_split->vring_align = vring_align;
	vring_split->may_reduce_num = may_reduce_num;

	return 0;
}

static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name,
	struct device *dma_dev)
{
	struct vring_virtqueue_split vring_split = {};
	struct virtqueue *vq;
	int err;

	err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
				      may_reduce_num, dma_dev);
	if (err)
		return NULL;

	vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
				   context, notify, callback, name, dma_dev);
	if (!vq) {
		vring_free_split(&vring_split, vdev, dma_dev);
		return NULL;
	}

	to_vvq(vq)->we_own_ring = true;

	return vq;
}

static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
{
	struct vring_virtqueue_split vring_split = {};
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct virtio_device *vdev = _vq->vdev;
	int err;

	err = vring_alloc_queue_split(&vring_split, vdev, num,
				      vq->split.vring_align,
				      vq->split.may_reduce_num,
				      vring_dma_dev(vq));
	if (err)
		goto err;

	err = vring_alloc_state_extra_split(&vring_split);
	if (err)
		goto err_state_extra;

	vring_free(&vq->vq);

	virtqueue_vring_init_split(&vring_split, vq);

	virtqueue_init(vq, vring_split.vring.num);
	virtqueue_vring_attach_split(vq, &vring_split);

	return 0;

err_state_extra:
	vring_free_split(&vring_split, vdev, vring_dma_dev(vq));
err:
	virtqueue_reinit_split(vq);
	return -ENOMEM;
}

/*
 * Packed ring specific functions - *_packed().
 */

static bool packed_used_wrap_counter(u16 last_used_idx)
{
	return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}

static u16 packed_last_used(u16 last_used_idx)
{
	return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}

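/*
 * Aside (not part of this file): VRING_PACKED_EVENT_F_WRAP_CTR is 15, so
 * last_used_idx packs the used wrap counter into bit 15 and the ring index
 * into bits 0..14. For example, 0x8003 decodes as wrap counter 1, index 3:
 *
 *	packed_used_wrap_counter(0x8003) == true
 *	packed_last_used(0x8003) == 3
 */
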
static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
				     const struct vring_desc_extra *extra)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = extra->flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 extra->addr, extra->len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       extra->addr, extra->len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
				    const struct vring_packed_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = le16_to_cpu(desc->flags);

	dma_unmap_page(vring_dma_dev(vq),
		       le64_to_cpu(desc->addr),
		       le32_to_cpu(desc->len),
		       (flags & VRING_DESC_F_WRITE) ?
		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
}

static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
						       gfp_t gfp)
{
	struct vring_packed_desc *desc;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);

	return desc;
}

static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
					 struct scatterlist *sgs[],
					 unsigned int total_sg,
					 unsigned int out_sgs,
					 unsigned int in_sgs,
					 void *data,
					 gfp_t gfp)
{
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, err_idx;
	u16 head, id;
	dma_addr_t addr;

	head = vq->packed.next_avail_idx;
	desc = alloc_indirect_packed(total_sg, gfp);
	if (!desc)
		return -ENOMEM;

	if (unlikely(vq->vq.num_free < 1)) {
		pr_debug("Can't add buf len 1 - avail = 0\n");
		kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	i = 0;
	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_le16(n < out_sgs ?
						0 : VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			i++;
		}
	}

	/* Now that the indirect table is filled in, map it. */
	addr = vring_map_single(vq, desc,
			total_sg * sizeof(struct vring_packed_desc),
			DMA_TO_DEVICE);
	if (vring_mapping_error(vq, addr))
		goto unmap_release;

	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
				sizeof(struct vring_packed_desc));
	vq->packed.vring.desc[head].id = cpu_to_le16(id);

	if (vq->use_dma_api) {
		vq->packed.desc_extra[id].addr = addr;
		vq->packed.desc_extra[id].len = total_sg *
				sizeof(struct vring_packed_desc);
		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
						  vq->packed.avail_used_flags;
	}

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
						vq->packed.avail_used_flags);

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= 1;

	/* Update free pointer */
	n = head + 1;
	if (n >= vq->packed.vring.num) {
		n = 0;
		vq->packed.avail_wrap_counter ^= 1;
		vq->packed.avail_used_flags ^=
				1 << VRING_PACKED_DESC_F_AVAIL |
				1 << VRING_PACKED_DESC_F_USED;
	}
	vq->packed.next_avail_idx = n;
	vq->free_head = vq->packed.desc_extra[id].next;

	/* Store token and indirect buffer state. */
	vq->packed.desc_state[id].num = 1;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = desc;
	vq->packed.desc_state[id].last = id;

	vq->num_added += 1;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;

	for (i = 0; i < err_idx; i++)
		vring_unmap_desc_packed(vq, &desc[i]);

	kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}

static inline int virtqueue_add_packed(struct virtqueue *_vq,
				       struct scatterlist *sgs[],
				       unsigned int total_sg,
				       unsigned int out_sgs,
				       unsigned int in_sgs,
				       void *data,
				       void *ctx,
				       gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, c, descs_used, err_idx;
	__le16 head_flags, flags;
	u16 head, id, prev, curr, avail_used_flags;
	int err;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	if (virtqueue_use_indirect(vq, total_sg)) {
		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
						    in_sgs, data, gfp);
		if (err != -ENOMEM) {
			END_USE(vq);
			return err;
		}

		/* fall back on direct */
	}

	head = vq->packed.next_avail_idx;
	avail_used_flags = vq->packed.avail_used_flags;

	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);

	desc = vq->packed.vring.desc;
	i = head;
	descs_used = total_sg;

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		END_USE(vq);
		return -ENOSPC;
	}

	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	curr = id;
	c = 0;
	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			flags = cpu_to_le16(vq->packed.avail_used_flags |
				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
			if (i == head)
				head_flags = flags;
			else
				desc[i].flags = flags;

			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			desc[i].id = cpu_to_le16(id);

			if (unlikely(vq->use_dma_api)) {
				vq->packed.desc_extra[curr].addr = addr;
				vq->packed.desc_extra[curr].len = sg->length;
				vq->packed.desc_extra[curr].flags =
					le16_to_cpu(flags);
			}
			prev = curr;
			curr = vq->packed.desc_extra[curr].next;

			if ((unlikely(++i >= vq->packed.vring.num))) {
				i = 0;
				vq->packed.avail_used_flags ^=
					1 << VRING_PACKED_DESC_F_AVAIL |
					1 << VRING_PACKED_DESC_F_USED;
			}
		}
	}

	if (i <= head)
		vq->packed.avail_wrap_counter ^= 1;

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	vq->packed.next_avail_idx = i;
	vq->free_head = curr;

	/* Store token. */
	vq->packed.desc_state[id].num = descs_used;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = ctx;
	vq->packed.desc_state[id].last = prev;

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = head_flags;
	vq->num_added += descs_used;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;
	curr = vq->free_head;

	vq->packed.avail_used_flags = avail_used_flags;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
		curr = vq->packed.desc_extra[curr].next;
		i++;
		if (i >= vq->packed.vring.num)
			i = 0;
	}

	END_USE(vq);
	return -EIO;
}

static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
	bool needs_kick;
	union {
		struct {
			__le16 off_wrap;
			__le16 flags;
		};
		u32 u32;
	} snapshot;

	START_USE(vq);

	/*
	 * We need to expose the new flags value before checking notification
	 * suppressions.
	 */
	virtio_mb(vq->weak_barriers);

	old = vq->packed.next_avail_idx - vq->num_added;
	new = vq->packed.next_avail_idx;
	vq->num_added = 0;

	snapshot.u32 = *(u32 *)vq->packed.vring.device;
	flags = le16_to_cpu(snapshot.flags);

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
		goto out;
	}

	off_wrap = le16_to_cpu(snapshot.off_wrap);

	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	if (wrap_counter != vq->packed.avail_wrap_counter)
		event_idx -= vq->packed.vring.num;

	needs_kick = vring_need_event(event_idx, new, old);
out:
	END_USE(vq);
	return needs_kick;
}

static void detach_buf_packed(struct vring_virtqueue *vq,
			      unsigned int id, void **ctx)
{
	struct vring_desc_state_packed *state = NULL;
	struct vring_packed_desc *desc;
	unsigned int i, curr;

	state = &vq->packed.desc_state[id];

	/* Clear data ptr. */
	state->data = NULL;

	vq->packed.desc_extra[state->last].next = vq->free_head;
	vq->free_head = id;
	vq->vq.num_free += state->num;

	if (unlikely(vq->use_dma_api)) {
		curr = id;
		for (i = 0; i < state->num; i++) {
			vring_unmap_extra_packed(vq,
						 &vq->packed.desc_extra[curr]);
			curr = vq->packed.desc_extra[curr].next;
		}
	}

	if (vq->indirect) {
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		desc = state->indir_desc;
		if (!desc)
			return;

		if (vq->use_dma_api) {
			len = vq->packed.desc_extra[id].len;
			for (i = 0; i < len / sizeof(struct vring_packed_desc);
					i++)
				vring_unmap_desc_packed(vq, &desc[i]);
		}
		kfree(desc);
		state->indir_desc = NULL;
	} else if (ctx) {
		*ctx = state->indir_desc;
	}
}

static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
				       u16 idx, bool used_wrap_counter)
{
	bool avail, used;
	u16 flags;

	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

	return avail == used && used == used_wrap_counter;
}

static bool more_used_packed(const struct vring_virtqueue *vq)
{
	u16 last_used;
	u16 last_used_idx;
	bool used_wrap_counter;

	last_used_idx = READ_ONCE(vq->last_used_idx);
	last_used = packed_last_used(last_used_idx);
	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
	return is_used_desc_packed(vq, last_used, used_wrap_counter);
}

static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
					  unsigned int *len,
					  void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used, id, last_used_idx;
	bool used_wrap_counter;
	void *ret;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_packed(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used elements after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used_idx = READ_ONCE(vq->last_used_idx);
	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
	last_used = packed_last_used(last_used_idx);
	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);

	if (unlikely(id >= vq->packed.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", id);
		return NULL;
	}
	if (unlikely(!vq->packed.desc_state[id].data)) {
		BAD_RING(vq, "id %u is not a head!\n", id);
		return NULL;
	}

	/* detach_buf_packed clears data, so grab it now. */
	ret = vq->packed.desc_state[id].data;
	detach_buf_packed(vq, id, ctx);

	last_used += vq->packed.desc_state[id].num;
	if (unlikely(last_used >= vq->packed.vring.num)) {
		last_used -= vq->packed.vring.num;
		used_wrap_counter ^= 1;
	}

	last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
	WRITE_ONCE(vq->last_used_idx, last_used);

	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
		virtio_store_mb(vq->weak_barriers,
				&vq->packed.vring.driver->off_wrap,
				cpu_to_le16(vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;

		/*
		 * If device triggered an event already it won't trigger one again:
		 * no need to disable.
		 */
		if (vq->event_triggered)
			return;

		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);
	}
}

static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 * more to do.
	 */

	if (vq->event) {
		vq->packed.vring.driver->off_wrap =
			cpu_to_le16(vq->last_used_idx);
		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);
	}

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
				cpu_to_le16(vq->packed.event_flags_shadow);
	}

	END_USE(vq);
	return vq->last_used_idx;
}

static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	bool wrap_counter;
	u16 used_idx;

	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

	return is_used_desc_packed(vq, used_idx, wrap_counter);
}

static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 used_idx, wrap_counter, last_used_idx;
	u16 bufs;

	START_USE(vq);

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 * more to do.
	 */

	if (vq->event) {
		/* TODO: tune this threshold */
		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
		last_used_idx = READ_ONCE(vq->last_used_idx);
		wrap_counter = packed_used_wrap_counter(last_used_idx);

		used_idx = packed_last_used(last_used_idx) + bufs;
		if (used_idx >= vq->packed.vring.num) {
			used_idx -= vq->packed.vring.num;
			wrap_counter ^= 1;
		}

		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));

		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);
	}

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
				cpu_to_le16(vq->packed.event_flags_shadow);
	}

	/*
	 * We need to update event suppression structure first
	 * before re-checking for more used buffers.
	 */
	virtio_mb(vq->weak_barriers);

	last_used_idx = READ_ONCE(vq->last_used_idx);
	wrap_counter = packed_used_wrap_counter(last_used_idx);
	used_idx = packed_last_used(last_used_idx);
	if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->packed.vring.num; i++) {
		if (!vq->packed.desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->packed.desc_state[i].data;
		detach_buf_packed(vq, i, NULL);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->packed.vring.num);

	END_USE(vq);
	return NULL;
}

static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
{
	struct vring_desc_extra *desc_extra;
	unsigned int i;

	desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
				   GFP_KERNEL);
	if (!desc_extra)
		return NULL;

	memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));

	for (i = 0; i < num - 1; i++)
		desc_extra[i].next = i + 1;

	return desc_extra;
}

static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
			      struct virtio_device *vdev,
			      struct device *dma_dev)
{
	if (vring_packed->vring.desc)
		vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
				 vring_packed->vring.desc,
				 vring_packed->ring_dma_addr,
				 dma_dev);

	if (vring_packed->vring.driver)
		vring_free_queue(vdev, vring_packed->event_size_in_bytes,
				 vring_packed->vring.driver,
				 vring_packed->driver_event_dma_addr,
				 dma_dev);

	if (vring_packed->vring.device)
		vring_free_queue(vdev, vring_packed->event_size_in_bytes,
				 vring_packed->vring.device,
				 vring_packed->device_event_dma_addr,
				 dma_dev);

	kfree(vring_packed->desc_state);
	kfree(vring_packed->desc_extra);
}

static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
				    struct virtio_device *vdev,
				    u32 num, struct device *dma_dev)
{
	struct vring_packed_desc *ring;
	struct vring_packed_desc_event *driver, *device;
	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
	size_t ring_size_in_bytes, event_size_in_bytes;

	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);

	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
				 &ring_dma_addr,
				 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
				 dma_dev);
	if (!ring)
		goto err;

	vring_packed->vring.desc = ring;
	vring_packed->ring_dma_addr = ring_dma_addr;
	vring_packed->ring_size_in_bytes = ring_size_in_bytes;

	event_size_in_bytes = sizeof(struct vring_packed_desc_event);

	driver = vring_alloc_queue(vdev, event_size_in_bytes,
				   &driver_event_dma_addr,
				   GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
				   dma_dev);
	if (!driver)
		goto err;

	vring_packed->vring.driver = driver;
	vring_packed->event_size_in_bytes = event_size_in_bytes;
	vring_packed->driver_event_dma_addr = driver_event_dma_addr;

	device = vring_alloc_queue(vdev, event_size_in_bytes,
				   &device_event_dma_addr,
				   GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
				   dma_dev);
	if (!device)
		goto err;

	vring_packed->vring.device = device;
	vring_packed->device_event_dma_addr = device_event_dma_addr;

	vring_packed->vring.num = num;

	return 0;

err:
	vring_free_packed(vring_packed, vdev, dma_dev);
	return -ENOMEM;
}

static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed)
{
	struct vring_desc_state_packed *state;
	struct vring_desc_extra *extra;
	u32 num = vring_packed->vring.num;

	state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL);
	if (!state)
		goto err_desc_state;

	memset(state, 0, num * sizeof(struct vring_desc_state_packed));

	extra = vring_alloc_desc_extra(num);
	if (!extra)
		goto err_desc_extra;

	vring_packed->desc_state = state;
	vring_packed->desc_extra = extra;

	return 0;

err_desc_extra:
	kfree(state);
err_desc_state:
	return -ENOMEM;
}

static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed,
					bool callback)
{
	vring_packed->next_avail_idx = 0;
	vring_packed->avail_wrap_counter = 1;
	vring_packed->event_flags_shadow = 0;
	vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
		vring_packed->vring.driver->flags =
			cpu_to_le16(vring_packed->event_flags_shadow);
	}
}

static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq,
					  struct vring_virtqueue_packed *vring_packed)
{
	vq->packed = *vring_packed;

	/* Put everything in free lists. */
	vq->free_head = 0;
}

static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
{
	memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes);
	memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes);

	/* we need to reset the desc.flags. For more, see is_used_desc_packed() */
	memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes);

	virtqueue_init(vq, vq->packed.vring.num);
	virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
}

static struct virtqueue *vring_create_virtqueue_packed(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name,
	struct device *dma_dev)
{
	struct vring_virtqueue_packed vring_packed = {};
	struct vring_virtqueue *vq;
	int err;

	if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
		goto err_ring;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		goto err_vq;

	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.index = index;
	vq->vq.reset = false;
	vq->we_own_ring = true;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	vq->broken = true;
#else
	vq->broken = false;
#endif
	vq->packed_ring = true;
	vq->dma_dev = dma_dev;
	vq->use_dma_api = vring_use_dma_api(vdev);

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	err = vring_alloc_state_extra_packed(&vring_packed);
	if (err)
		goto err_state_extra;

	virtqueue_vring_init_packed(&vring_packed, !!callback);

	virtqueue_init(vq, num);
	virtqueue_vring_attach_packed(vq, &vring_packed);

	spin_lock(&vdev->vqs_list_lock);
	list_add_tail(&vq->vq.list, &vdev->vqs);
	spin_unlock(&vdev->vqs_list_lock);
	return &vq->vq;

err_state_extra:
	kfree(vq);
err_vq:
	vring_free_packed(&vring_packed, vdev, dma_dev);
err_ring:
	return NULL;
}

static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
{
	struct vring_virtqueue_packed vring_packed = {};
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct virtio_device *vdev = _vq->vdev;
	int err;

	if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq)))
		goto err_ring;

	err = vring_alloc_state_extra_packed(&vring_packed);
	if (err)
		goto err_state_extra;

	vring_free(&vq->vq);

	virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback);

	virtqueue_init(vq, vring_packed.vring.num);
	virtqueue_vring_attach_packed(vq, &vring_packed);

	return 0;

err_state_extra:
	vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq));
err_ring:
	virtqueue_reinit_packed(vq);
	return -ENOMEM;
}

/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, gfp) :
				 virtqueue_add_split(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, gfp);
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);

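/*
 * A hedged usage sketch (not part of this file): a driver builds the sgs[]
 * array with the device-readable entries first, then the device-writable
 * ones. The req/resp names below are illustrative only.
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, &req, sizeof(req));		// readable by device
 *	sg_init_one(&status, &resp, sizeof(resp));	// writable by device
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, &req, GFP_ATOMIC);
 */
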
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
				 virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

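/*
 * A hedged usage sketch (not part of this file): because the kick is
 * separate from the add, a driver can batch several buffers and notify
 * once; sg[], token[] and n are illustrative only.
 *
 *	for (i = 0; i < n; i++)
 *		virtqueue_add_outbuf(vq, sg[i], 1, token[i], GFP_ATOMIC);
 *	virtqueue_kick(vq);
 */
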
/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->packed_ring)
		virtqueue_disable_cb_packed(_vq);
	else
		virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->event_triggered)
		vq->event_triggered = false;

	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
				 virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	virtio_mb(vq->weak_barriers);
	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
				 virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

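/*
 * A hedged usage sketch (not part of this file): the prepare/poll pair lets
 * a driver re-enable callbacks and then close the race window itself, e.g.
 * from a NAPI-style poll routine:
 *
 *	unsigned int opaque = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, opaque)) {
 *		// A buffer arrived after the check: disable and rerun.
 *		virtqueue_disable_cb(vq);
 *	}
 */
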
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

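/*
 * A hedged usage sketch (not part of this file): the canonical completion
 * loop in a virtqueue callback drains used buffers and retries until
 * callbacks re-enable without a race; process() is illustrative only.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			process(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */
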
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->event_triggered)
		vq->event_triggered = false;

	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
				 virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
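
/*
 * Example (illustrative sketch): transmit completion paths often use
 * the delayed variant so the device coalesces interrupts across many
 * buffers; my_free_tx_buf() is hypothetical.
 *
 *	static void my_reclaim_tx(struct virtqueue *tx_vq)
 *	{
 *		unsigned int len;
 *		void *buf;
 *
 *		while ((buf = virtqueue_get_buf(tx_vq, &len)) != NULL)
 *			my_free_tx_buf(buf);
 *
 *		if (!virtqueue_enable_cb_delayed(tx_vq)) {
 *			// Many used buffers are already pending; reclaim
 *			// them now rather than waiting for an interrupt.
 *			virtqueue_disable_cb(tx_vq);
 *		}
 *	}
 */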
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful for device
 * shutdown or resetting a queue.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
				 virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
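
/*
 * Example (illustrative sketch): during device removal, after the
 * device has been reset, a driver reclaims buffers it queued but the
 * device never consumed; my_free_buf() is hypothetical.
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		my_free_buf(buf);
 */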
static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}
/**
 * vring_interrupt - notify a virtqueue on an interrupt
 * @irq: the IRQ number (ignored)
 * @_vq: the struct virtqueue to notify
 *
 * Calls the callback function of @_vq to process the virtqueue
 * notification.
 */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken)) {
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
		dev_warn_once(&vq->vq.vdev->dev,
			      "virtio vring IRQ raised before DRIVER_OK");
		return IRQ_NONE;
#else
		return IRQ_HANDLED;
#endif
	}

	/* Just a hint for performance: so it's ok that this can be racy! */
	if (vq->event)
		vq->event_triggered = true;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
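
/*
 * Example (illustrative sketch): a transport can hand vring_interrupt()
 * straight to request_irq() when each virtqueue has its own vector,
 * much as virtio-pci does with MSI-X; "irq" and "vq" come from the
 * transport.
 *
 *	err = request_irq(irq, vring_interrupt, 0, "my-virtqueue", vq);
 *	if (err)
 *		goto error;
 */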
/* Only available for split ring */
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring_virtqueue_split *vring_split,
					       struct virtio_device *vdev,
					       bool weak_barriers,
					       bool context,
					       bool (*notify)(struct virtqueue *),
					       void (*callback)(struct virtqueue *),
					       const char *name,
					       struct device *dma_dev)
{
	struct vring_virtqueue *vq;
	int err;

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->packed_ring = false;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.index = index;
	vq->vq.reset = false;
	vq->we_own_ring = false;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	vq->broken = true;
#else
	vq->broken = false;
#endif
	vq->dma_dev = dma_dev;
	vq->use_dma_api = vring_use_dma_api(vdev);

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	err = vring_alloc_state_extra_split(vring_split);
	if (err) {
		kfree(vq);
		return NULL;
	}

	virtqueue_vring_init_split(vring_split, vq);

	virtqueue_init(vq, vring_split->vring.num);
	virtqueue_vring_attach_split(vq, vring_split);

	spin_lock(&vdev->vqs_list_lock);
	list_add_tail(&vq->vq.list, &vdev->vqs);
	spin_unlock(&vdev->vqs_list_lock);
	return &vq->vq;
}
struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return vring_create_virtqueue_packed(index, num, vring_align,
				vdev, weak_barriers, may_reduce_num,
				context, notify, callback, name, vdev->dev.parent);

	return vring_create_virtqueue_split(index, num, vring_align,
			vdev, weak_barriers, may_reduce_num,
			context, notify, callback, name, vdev->dev.parent);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
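
/*
 * Example (illustrative sketch): a transport might create a 256-entry
 * queue like this; my_notify() and my_callback() are hypothetical
 * transport/driver functions.
 *
 *	struct virtqueue *vq;
 *
 *	vq = vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, false,
 *				    my_notify, my_callback, "my-vq");
 *	if (!vq)
 *		return -ENOMEM;
 */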
struct virtqueue *vring_create_virtqueue_dma(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name,
	struct device *dma_dev)
{
	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return vring_create_virtqueue_packed(index, num, vring_align,
				vdev, weak_barriers, may_reduce_num,
				context, notify, callback, name, dma_dev);

	return vring_create_virtqueue_split(index, num, vring_align,
			vdev, weak_barriers, may_reduce_num,
			context, notify, callback, name, dma_dev);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
/**
 * virtqueue_resize - resize the vring of vq
 * @_vq: the struct virtqueue we're talking about.
 * @num: new ring num
 * @recycle: callback to recycle buffers that are no longer used
 *
 * When a new vring really needs to be created, this sets the current vq
 * into the reset state, then calls the passed callback to recycle any
 * buffers that are no longer used. Only after the new vring has been
 * successfully created is the old vring released.
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error.
 * 0: success.
 * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size.
 *  vq can still work normally
 * -EBUSY: Failed to sync with device, vq may not work properly
 * -ENOENT: Transport or device not supported
 * -E2BIG/-EINVAL: num error
 * -EPERM: Operation not permitted
 */
int virtqueue_resize(struct virtqueue *_vq, u32 num,
		     void (*recycle)(struct virtqueue *vq, void *buf))
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct virtio_device *vdev = vq->vq.vdev;
	void *buf;
	int err;

	if (!vq->we_own_ring)
		return -EPERM;

	if (num > vq->vq.num_max)
		return -E2BIG;

	if (!num)
		return -EINVAL;

	if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
		return 0;

	if (!vdev->config->disable_vq_and_reset)
		return -ENOENT;

	if (!vdev->config->enable_vq_after_reset)
		return -ENOENT;

	err = vdev->config->disable_vq_and_reset(_vq);
	if (err)
		return err;

	while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
		recycle(_vq, buf);

	if (vq->packed_ring)
		err = virtqueue_resize_packed(_vq, num);
	else
		err = virtqueue_resize_split(_vq, num);
	if (vdev->config->enable_vq_after_reset(_vq))
		return -EBUSY;

	return err;
}
EXPORT_SYMBOL_GPL(virtqueue_resize);
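
/*
 * Example (illustrative sketch): shrinking a queue to 128 entries and
 * freeing any still-queued buffers through the recycle callback;
 * my_free_buf() is hypothetical.
 *
 *	static void my_recycle(struct virtqueue *vq, void *buf)
 *	{
 *		my_free_buf(buf);
 *	}
 *
 *	err = virtqueue_resize(vq, 128, my_recycle);
 *	if (err == -ENOMEM)
 *		;	// old ring kept, vq still usable at the old size
 *	else if (err)
 *		goto broken;
 */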
/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring_virtqueue_split vring_split = {};

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vring_init(&vring_split.vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
				     context, notify, callback, name,
				     vdev->dev.parent);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
static void vring_free(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		if (vq->packed_ring) {
			vring_free_queue(vq->vq.vdev,
					 vq->packed.ring_size_in_bytes,
					 vq->packed.vring.desc,
					 vq->packed.ring_dma_addr,
					 vring_dma_dev(vq));

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.driver,
					 vq->packed.driver_event_dma_addr,
					 vring_dma_dev(vq));

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.device,
					 vq->packed.device_event_dma_addr,
					 vring_dma_dev(vq));

			kfree(vq->packed.desc_state);
			kfree(vq->packed.desc_extra);
		} else {
			vring_free_queue(vq->vq.vdev,
					 vq->split.queue_size_in_bytes,
					 vq->split.vring.desc,
					 vq->split.queue_dma_addr,
					 vring_dma_dev(vq));
		}
	}
	if (!vq->packed_ring) {
		kfree(vq->split.desc_state);
		kfree(vq->split.desc_extra);
	}
}
void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	spin_lock(&vq->vq.vdev->vqs_list_lock);
	list_del(&_vq->list);
	spin_unlock(&vq->vq.vdev->vqs_list_lock);

	vring_free(_vq);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
u32 vring_notification_data(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 next;

	if (vq->packed_ring)
		next = (vq->packed.next_avail_idx &
				~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) |
			vq->packed.avail_wrap_counter <<
				VRING_PACKED_EVENT_F_WRAP_CTR;
	else
		next = vq->split.avail_idx_shadow;

	return next << 16 | _vq->index;
}
EXPORT_SYMBOL_GPL(vring_notification_data);
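
/*
 * Example (illustrative sketch): with VIRTIO_F_NOTIFICATION_DATA
 * negotiated, a transport's notify callback writes the extra data to
 * its notification register; "base" and MY_QUEUE_NOTIFY are
 * hypothetical.
 *
 *	static bool my_notify_with_data(struct virtqueue *vq)
 *	{
 *		u32 data = vring_notification_data(vq);
 *
 *		writel(data, base + MY_QUEUE_NOTIFY);
 *		return true;
 *	}
 */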
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
		case VIRTIO_RING_F_EVENT_IDX:
		case VIRTIO_F_VERSION_1:
		case VIRTIO_F_ACCESS_PLATFORM:
		case VIRTIO_F_RING_PACKED:
		case VIRTIO_F_ORDER_PLATFORM:
		case VIRTIO_F_NOTIFICATION_DATA:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
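
/*
 * Example (illustrative sketch): a transport's finalize_features hook
 * typically masks ring features it cannot support before the feature
 * set is committed; my_finalize_features() is hypothetical.
 *
 *	static int my_finalize_features(struct virtio_device *vdev)
 *	{
 *		// Drop any transport bit this transport does not implement.
 *		vring_transport_features(vdev);
 *		return 0;
 *	}
 */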
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring. This is mainly used for boasting to
 * userspace. Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
/*
 * This function should only be called by the core, not directly by the driver.
 */
void __virtqueue_break(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
	WRITE_ONCE(vq->broken, true);
}
EXPORT_SYMBOL_GPL(__virtqueue_break);
/*
 * This function should only be called by the core, not directly by the driver.
 */
void __virtqueue_unbreak(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
	WRITE_ONCE(vq->broken, false);
}
EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
bool virtqueue_is_broken(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	return READ_ONCE(vq->broken);
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);
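
/*
 * Example (illustrative sketch): a driver busy-waiting for a completion
 * should bail out if the queue breaks underneath it, in the style of
 * drivers that poll for a synchronous response.
 *
 *	while (!virtqueue_get_buf(vq, &len) && !virtqueue_is_broken(vq))
 *		cpu_relax();
 */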
/*
 * This should prevent the device from being used, allowing drivers to
 * recover. You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	spin_lock(&dev->vqs_list_lock);
	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);

		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
		WRITE_ONCE(vq->broken, true);
	}
	spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(virtio_break_device);
/*
 * This should allow the device to be used by the driver. You may
 * need to grab appropriate locks to flush the write to
 * vq->broken. This should only be used in specific cases, e.g.
 * probing and restoring. This function should only be called by the
 * core, not directly by the driver.
 */
void __virtio_unbreak_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	spin_lock(&dev->vqs_list_lock);
	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);

		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
		WRITE_ONCE(vq->broken, false);
	}
	spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.ring_dma_addr;

	return vq->split.queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.driver_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.device_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
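
/*
 * Example (illustrative sketch): after creating a queue it owns, a
 * transport programs the ring's DMA addresses into device registers;
 * "base" and the register offsets are hypothetical.
 *
 *	writeq(virtqueue_get_desc_addr(vq),  base + MY_QUEUE_DESC);
 *	writeq(virtqueue_get_avail_addr(vq), base + MY_QUEUE_DRIVER);
 *	writeq(virtqueue_get_used_addr(vq),  base + MY_QUEUE_DEVICE);
 */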
/* Only available for split ring */
const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
MODULE_LICENSE("GPL");