return 0;
}
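+
+/*
+ * The flush ring is full when advancing tail would make it equal to
+ * head; one slot is kept unused so a full ring can be told apart from
+ * an empty one.
+ */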
+static inline bool queue_ring_full(struct flush_queue *queue)
+{
+ return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);
+}
+
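+/* Walk all queued entries, from head up to (but not including) tail. */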
+#define queue_ring_for_each(i, q) \
+ for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)
+
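+/*
+ * Give all queued IOVA ranges back to the fast allocator and empty the
+ * ring. The caller must have flushed the domain TLB first, so the
+ * addresses can safely be reused.
+ */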
+static void queue_release(struct dma_ops_domain *dom,
+ struct flush_queue *queue)
+{
+ unsigned i;
+
+ queue_ring_for_each(i, queue)
+ free_iova_fast(&dom->iovad,
+ queue->entries[i].iova_pfn,
+ queue->entries[i].pages);
+
+ queue->head = queue->tail = 0;
+}
+
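+/*
+ * Claim the next free slot and advance tail. Callers must make sure
+ * the ring is not full before adding an entry.
+ */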
+static inline unsigned queue_ring_add(struct flush_queue *queue)
+{
+ unsigned idx = queue->tail;
+
+ queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
+
+ return idx;
+}
+
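+/*
+ * Defer freeing of an IOVA range: queue it in the per-cpu flush ring
+ * and only flush the domain TLB (and release the queued ranges) once
+ * the ring runs full, instead of flushing on every unmap.
+ */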
+static void queue_add(struct dma_ops_domain *dom,
+ unsigned long address, unsigned long pages)
+{
+ struct flush_queue *queue;
+ unsigned int idx;
+
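+ /* Match the power-of-two rounding used when the IOVA was allocated */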
+ pages = __roundup_pow_of_two(pages);
+ address >>= PAGE_SHIFT;
+
+ queue = get_cpu_ptr(dom->flush_queue);
+
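+ /* Ring full: flush the domain TLB, then release all queued ranges */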
+ if (queue_ring_full(queue)) {
+ domain_flush_tlb(&dom->domain);
+ domain_flush_complete(&dom->domain);
+ queue_release(dom, queue);
+ }
+
+ idx = queue_ring_add(queue);
+
+ queue->entries[idx].iova_pfn = address;
+ queue->entries[idx].pages = pages;
+
+ put_cpu_ptr(dom->flush_queue);
+}
+
/*
* Free a domain, only used if something went wrong in the
* allocation path and we need to free an already allocated page table
domain_flush_tlb(&dma_dom->domain);
domain_flush_complete(&dma_dom->domain);
} else {
- /* Keep the if() around, we need it later again */
- dma_ops_free_iova(dma_dom, dma_addr, pages);
- domain_flush_tlb(&dma_dom->domain);
- domain_flush_complete(&dma_dom->domain);
+ queue_add(dma_dom, dma_addr, pages);
}
}