// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen grant DMA-mapping layer - contains special DMA-mapping routines
 * for providing grant references as DMA addresses to be used by frontends
 * (e.g. virtio) in Xen guests
 *
 * Copyright (c) 2021, Juergen Gross <jgross@suse.com>
 */

#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/grant_table.h>

struct xen_grant_dma_data {
	/* The ID of backend domain */
	domid_t backend_domid;
	/* Is device behaving sane? */
	bool broken;
};

static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);

#define XEN_GRANT_DMA_ADDR_OFF	(1ULL << 63)

static inline dma_addr_t grant_to_dma(grant_ref_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
}

static inline grant_ref_t dma_to_grant(dma_addr_t dma)
{
	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
}
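
/*
 * A worked example of the encoding above, assuming a 4 KiB PAGE_SIZE
 * (PAGE_SHIFT == 12): grant reference 0x10 becomes DMA address
 * (1ULL << 63) | (0x10 << 12) = 0x8000000000010000, and dma_to_grant()
 * applied to that address recovers 0x10 again.
 */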

static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
	struct xen_grant_dma_data *data;
	unsigned long flags;

	xa_lock_irqsave(&xen_grant_dma_devices, flags);
	data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);

	return data;
}

static int store_xen_grant_dma_data(struct device *dev,
				    struct xen_grant_dma_data *data)
{
	unsigned long flags;
	int ret;

	xa_lock_irqsave(&xen_grant_dma_devices, flags);
	ret = xa_err(__xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
			GFP_ATOMIC));
	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);

	return ret;
}

/*
 * DMA ops for Xen frontends (e.g. virtio).
 *
 * Used to act as a kind of software IOMMU for Xen guests by using grants as
 * DMA addresses.
 * Such a DMA address is formed by using the grant reference as a frame
 * number and setting the highest address bit (this bit is for the backend
 * to be able to distinguish it from e.g. an MMIO address).
 */
static void *xen_grant_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	unsigned long pfn;
	grant_ref_t grant;
	void *ret;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return NULL;

	if (unlikely(data->broken))
		return NULL;

	ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
	if (!ret)
		return NULL;

	pfn = virt_to_pfn(ret);

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
		free_pages_exact(ret, n_pages * PAGE_SIZE);
		return NULL;
	}

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				pfn_to_gfn(pfn + i), 0);
	}

	*dma_handle = grant_to_dma(grant);

	return ret;
}

static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle, unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);

	free_pages_exact(vaddr, n_pages * PAGE_SIZE);
}

static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
					      dma_addr_t *dma_handle,
					      enum dma_data_direction dir,
					      gfp_t gfp)
{
	void *vaddr;

	vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
	if (!vaddr)
		return NULL;

	return virt_to_page(vaddr);
}

static void xen_grant_dma_free_pages(struct device *dev, size_t size,
				     struct page *vaddr, dma_addr_t dma_handle,
				     enum dma_data_direction dir)
{
	xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}

static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(offset + size);
	grant_ref_t grant;
	dma_addr_t dma_handle;

	if (WARN_ON(dir == DMA_NONE))
		return DMA_MAPPING_ERROR;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return DMA_MAPPING_ERROR;

	if (unlikely(data->broken))
		return DMA_MAPPING_ERROR;

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
		return DMA_MAPPING_ERROR;

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
	}

	dma_handle = grant_to_dma(grant) + offset;

	return dma_handle;
}

static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned long offset = dma_handle & (PAGE_SIZE - 1);
	unsigned int i, n_pages = PFN_UP(offset + size);
	grant_ref_t grant;

	if (WARN_ON(dir == DMA_NONE))
		return;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);
}

static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return;

	for_each_sg(sg, s, nents, i)
		xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
				attrs);
}

static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return -EINVAL;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
				s->length, dir, attrs);
		if (s->dma_address == DMA_MAPPING_ERROR)
			goto out;

		sg_dma_len(s) = s->length;
	}

	return nents;

out:
	xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sg) = 0;

	return -EIO;
}

static int xen_grant_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static const struct dma_map_ops xen_grant_dma_ops = {
	.alloc = xen_grant_dma_alloc,
	.free = xen_grant_dma_free,
	.alloc_pages = xen_grant_dma_alloc_pages,
	.free_pages = xen_grant_dma_free_pages,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.map_page = xen_grant_dma_map_page,
	.unmap_page = xen_grant_dma_unmap_page,
	.map_sg = xen_grant_dma_map_sg,
	.unmap_sg = xen_grant_dma_unmap_sg,
	.dma_supported = xen_grant_dma_supported,
};

static bool xen_is_dt_grant_dma_device(struct device *dev)
{
	struct device_node *iommu_np;
	bool has_iommu;

	iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
	has_iommu = iommu_np &&
		    of_device_is_compatible(iommu_np, "xen,grant-dma");
	of_node_put(iommu_np);

	return has_iommu;
}

bool xen_is_grant_dma_device(struct device *dev)
{
	/* XXX Handle only DT devices for now */
	if (dev->of_node)
		return xen_is_dt_grant_dma_device(dev);

	return false;
}

bool xen_virtio_mem_acc(struct virtio_device *dev)
{
	if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
		return true;

	return xen_is_grant_dma_device(dev->dev.parent);
}

static int xen_dt_grant_init_backend_domid(struct device *dev,
					   struct xen_grant_dma_data *data)
{
	struct of_phandle_args iommu_spec;

	if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
			0, &iommu_spec)) {
		dev_err(dev, "Cannot parse iommus property\n");
		return -ESRCH;
	}

	if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
			iommu_spec.args_count != 1) {
		dev_err(dev, "Incompatible IOMMU node\n");
		of_node_put(iommu_spec.np);
		return -ESRCH;
	}

	of_node_put(iommu_spec.np);

	/*
	 * The endpoint ID here means the ID of the domain where the
	 * corresponding backend is running
	 */
	data->backend_domid = iommu_spec.args[0];

	return 0;
}
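
/*
 * For illustration, a device-tree fragment that the parser above would
 * accept might look roughly like this (node and label names here are made
 * up; the single IOMMU specifier cell carries the backend domain ID, 1 in
 * this example):
 *
 *	xen_grant: xen-iommu {
 *		compatible = "xen,grant-dma";
 *		#iommu-cells = <1>;
 *	};
 *
 *	virtio@2000000 {
 *		compatible = "virtio,mmio";
 *		iommus = <&xen_grant 1>;
 *	};
 */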

void xen_grant_setup_dma_ops(struct device *dev)
{
	struct xen_grant_dma_data *data;

	data = find_xen_grant_dma_data(dev);
	if (data) {
		dev_err(dev, "Xen grant DMA data is already created\n");
		return;
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;

	if (dev->of_node) {
		if (xen_dt_grant_init_backend_domid(dev, data))
			goto err;
	} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
		dev_info(dev, "Using dom0 as backend\n");
		data->backend_domid = 0;
	} else {
		/* XXX ACPI device unsupported for now */
		goto err;
	}

	if (store_xen_grant_dma_data(dev, data)) {
		dev_err(dev, "Cannot store Xen grant DMA data\n");
		goto err;
	}

	dev->dma_ops = &xen_grant_dma_ops;

	return;

err:
	devm_kfree(dev, data);
	dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}

bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
{
	bool ret = xen_virtio_mem_acc(dev);

	if (ret)
		xen_grant_setup_dma_ops(dev->dev.parent);

	return ret;
}

MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");
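
/*
 * A minimal sketch of how a guest is expected to hook this layer up,
 * assuming the virtio_anchor callback mechanism: Xen platform setup code
 * registers the restricted-memory-access callback early during boot, so
 * that the virtio core later calls back into
 * xen_virtio_restricted_mem_acc() and installs xen_grant_dma_ops on
 * matching devices, e.g.:
 *
 *	if (IS_ENABLED(CONFIG_XEN_VIRTIO))
 *		virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
 */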