xen_remap() is used to establish mappings for frames not under direct
control of the kernel: for Xenstore and console ring pages, and for
grant pages of non-PV guests.
Today xen_remap() is defined to use ioremap() on x86 (doing uncached
mappings), and ioremap_cache() on Arm (doing cached mappings).
Uncached mappings for those use cases are bad for performance, so they
should be avoided if possible. As none of the use cases of xen_remap()
requires an uncached mapping (the mapped area is always physical RAM),
a mapping using the standard WB cache mode is fine.
As sparse flags some of the xen_remap() use cases as inappropriate
for I/O memory, because the result is not annotated with the
__iomem modifier, eliminate xen_remap() completely and replace all
use cases with memremap(), specifying the MEMREMAP_WB caching mode.
xen_unmap() can be replaced with memunmap().
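
For illustration, the conversion pattern applied throughout the patch
is the following (a sketch only; "ring" and "gfn" are stand-in names,
not identifiers taken from the patch):

	#include <linux/io.h>	/* memremap(), memunmap(), MEMREMAP_WB */

	/* Before: returns void __iomem *, uncached on x86 */
	ring = xen_remap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE);
	...
	xen_unmap(ring);

	/* After: returns a plain void *, write-back cached on all architectures */
	ring = memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
	...
	memunmap(ring);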
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Acked-by: Stefano Stabellini <sstabellini@kernel.org>
Link: https://lore.kernel.org/r/20220530082634.6339-1-jgross@suse.com
Signed-off-by: Juergen Gross <jgross@suse.com>
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);
-#define xen_remap(cookie, size) ioremap((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
static inline bool xen_arch_need_swiotlb(struct device *dev,
phys_addr_t phys,
dma_addr_t dev_addr)
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
if (r < 0 || v == 0)
goto err;
gfn = v;
- info->intf = xen_remap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE);
+ info->intf = memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
if (info->intf == NULL)
goto err;
info->vtermno = HVC_COOKIE;
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
if (xen_auto_xlat_grant_frames.count)
return -EINVAL;
- vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
+ vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
if (vaddr == NULL) {
pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
&addr);
		return -ENOMEM;
	}
pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
if (!pfn) {
- xen_unmap(vaddr);
+ memunmap(vaddr);
return -ENOMEM;
}
for (i = 0; i < max_nr_gframes; i++)
@@ ... @@ void gnttab_free_auto_xlat_frames(void)
if (!xen_auto_xlat_grant_frames.count)
return;
kfree(xen_auto_xlat_grant_frames.pfn);
- xen_unmap(xen_auto_xlat_grant_frames.vaddr);
+ memunmap(xen_auto_xlat_grant_frames.vaddr);
xen_auto_xlat_grant_frames.pfn = NULL;
xen_auto_xlat_grant_frames.count = 0;
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
xenstored_ready = 1;
if (!xen_store_interface) {
- xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
- XEN_PAGE_SIZE);
+ xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+ XEN_PAGE_SIZE, MEMREMAP_WB);
/*
* Now it is safe to free the IRQ used for xenstore late
* initialization. No need to unbind: it is about to be
@@ ... @@ static int __init xenbus_init(void)
#endif
xen_store_gfn = (unsigned long)v;
xen_store_interface =
- xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
- XEN_PAGE_SIZE);
+ memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+ XEN_PAGE_SIZE, MEMREMAP_WB);
if (xen_store_interface->connection != XENSTORE_CONNECTED)
wait = true;
}
--- a/include/xen/arm/page.h
+++ b/include/xen/arm/page.h
return __set_phys_to_machine(pfn, mfn);
}
-#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
bool xen_arch_need_swiotlb(struct device *dev,
phys_addr_t phys,
dma_addr_t dev_addr);
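
As a usage sketch (illustrative only; example_map_ring(),
example_unmap_ring() and example_ring are made-up names, not code from
the patch), a caller mapping one RAM-backed ring page with the new
interface looks like this:

	#include <linux/io.h>	/* memremap(), memunmap(), MEMREMAP_WB */
	#include <xen/page.h>	/* XEN_PAGE_SHIFT, XEN_PAGE_SIZE */

	static void *example_ring;

	static int example_map_ring(unsigned long gfn)
	{
		/* WB mapping of guest RAM; returns NULL on failure, not an ERR_PTR */
		example_ring = memremap((phys_addr_t)gfn << XEN_PAGE_SHIFT,
					XEN_PAGE_SIZE, MEMREMAP_WB);
		if (!example_ring)
			return -ENOMEM;
		return 0;
	}

	static void example_unmap_ring(void)
	{
		memunmap(example_ring);	/* no-op for a NULL pointer */
		example_ring = NULL;
	}

Unlike the ioremap() result, the returned pointer carries no __iomem
annotation, so it can be dereferenced like any other kernel pointer;
this is what resolves the sparse warnings mentioned above.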