// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/page.h>
#include <xen/xen.h>
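
/*
 * Free pages are kept on a singly linked list threaded through each
 * page's zone_device_data field; page_list is the head and list_count
 * its current length. Both are protected by list_lock.
 */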
static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;

static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	int ret = -ENOMEM;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				alloc_pages * PAGE_SIZE, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap) {
		ret = -ENOMEM;
		goto err_pgmap;
	}

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
		pg->zone_device_data = page_list;
		page_list = pg;
		list_count++;
	}

	return 0;

err_memremap:
	kfree(pgmap);
err_pgmap:
	release_resource(res);
err_resource:
	kfree(res);
	return ret;
}

/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = page_list;

		BUG_ON(!pg);
		page_list = pg->zone_device_data;
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				/* Undo the allocation: push the pages taken
				 * so far back onto the free list. */
				for (j = 0; j <= i; j++) {
					pages[j]->zone_device_data = page_list;
					page_list = pages[j];
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);

/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		pages[i]->zone_device_data = page_list;
		page_list = pages[i];
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);
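
/*
 * Illustrative sketch (not part of the upstream file): how a caller
 * might pair xen_alloc_unpopulated_pages() with
 * xen_free_unpopulated_pages(). The function name and the mapping step
 * are hypothetical; real users map foreign grants into the borrowed
 * frames before handing them back.
 */
static int __maybe_unused example_use_unpopulated_pages(void)
{
	struct page *pages[4];
	int ret;

	/* Take four page structs off the free list, refilling it from a
	 * new IOMEM region via fill_list() if it runs short. */
	ret = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
	if (ret)
		return ret;

	/* ... map foreign/grant memory into these frames here ... */

	/* Return the pages to the free list for later reuse. */
	xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
	return 0;
}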

#ifdef CONFIG_XEN_PV
static int __init init(void)
{
	unsigned int i;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_pv_domain())
		return 0;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		unsigned int j;

		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
			struct page *pg =
				pfn_to_page(xen_extra_mem[i].start_pfn + j);

			pg->zone_device_data = page_list;
			page_list = pg;
			list_count++;
		}
	}

	return 0;
}
subsys_initcall(init);
#endif