/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/radix-tree.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/wait_bit.h>

static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff?
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}

#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
			pgoff += 1UL << order, order = order_at((res), pgoff))
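
/*
 * Worked example (illustrative, not from the original source): for a
 * resource starting at PFN 0x101 spanning 7 pages, order_at() returns
 * the largest naturally aligned power-of-2 chunk at each step, so
 * foreach_order_pgoff() visits:
 *
 *	pgoff 0 (PFN 0x101): order 0, 1 page  (the PFN is odd)
 *	pgoff 1 (PFN 0x102): order 1, 2 pages
 *	pgoff 3 (PFN 0x104): order 2, 4 pages
 *
 * i.e. the range is covered by the minimal set of radix-tree entries
 * whose order satisfies both the PFN alignment and the remaining length.
 */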

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it. This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...). When
	 * such error conditions happen, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do, in include/linux/memremap.h.
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */
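
/*
 * A driver's page_fault() callback following the contract described above
 * might look like this (a hypothetical sketch; the "foo" names are not
 * part of this file, and real implementations live in the device-memory
 * drivers themselves):
 *
 *	static vm_fault_t foo_page_fault(struct vm_area_struct *vma,
 *			unsigned long addr, struct page *page,
 *			unsigned int flags, pmd_t *pmdp)
 *	{
 *		/\* migrate the device page back to system memory *\/
 *		if (foo_migrate_to_ram(page, vma, addr))
 *			return VM_FAULT_SIGBUS;
 *		return 0;
 *	}
 */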

static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff)
{
	unsigned long pgoff, order;

	mutex_lock(&pgmap_lock);
	foreach_order_pgoff(res, order, pgoff) {
		if (pgoff >= end_pgoff)
			break;
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
	}
	mutex_unlock(&pgmap_lock);

	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
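
/*
 * Illustrative note (numbers are hypothetical): with pgmap->altmap_valid
 * set and an altmap of reserve = 2, free = 3, vmem_altmap_offset() is 5,
 * so for a resource starting at PFN 0x100 the device-pfn walk begins at
 * PFN 0x105; the skipped pfns are set aside for the altmap (e.g. to hold
 * the struct page array itself) and are never treated as device pages by
 * this loop.
 */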

static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;

	pgmap->kill(pgmap->ref);
	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	mem_hotplug_begin();
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		pfn = align_start >> PAGE_SHIFT;
		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
				align_size >> PAGE_SHIFT, NULL);
	} else {
		arch_remove_memory(align_start, align_size,
				pgmap->altmap_valid ? &pgmap->altmap : NULL);
		kasan_remove_zero_shadow(__va(align_start), align_size);
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res, -1);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case altmap_valid
 *    must be set to true
 *
 * 3/ pgmap->ref must be 'live' on entry and will be killed at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	struct resource *res = &pgmap->res;
	unsigned long pfn, pgoff, order;
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;
	struct dev_pagemap *conflict_pgmap;

	if (!pgmap->ref || !pgmap->kill)
		return ERR_PTR(-EINVAL);

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	align_end = align_start + align_size - 1;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		return ERR_PTR(-ENXIO);
	}

	pgmap->dev = dev;

	mutex_lock(&pgmap_lock);
	error = 0;

	foreach_order_pgoff(res, order, pgoff) {
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, pgmap);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover, the device memory is inaccessible, so we do not want to
	 * create a linear mapping for it the way arch_add_memory() would.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, NULL, false);
	} else {
		error = kasan_add_zero_shadow(__va(align_start), align_size);
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, align_start, align_size, altmap,
				false);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, altmap);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, pgmap) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(pgmap->ref);
	}

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);

	return __va(res->start);

 err_add_memory:
	kasan_remove_zero_shadow(__va(align_start), align_size);
 err_kasan:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res, pgoff);
	pgmap->kill(pgmap->ref);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
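
/*
 * Example usage (a minimal, hypothetical sketch; not code from this file):
 * a driver hands devm_memremap_pages() a resource plus an already-live
 * percpu_ref and a kill callback, then uses the returned virtual address
 * as the base of the remapped range. The "foo" names are placeholders.
 *
 *	static void foo_pgmap_kill(struct percpu_ref *ref)
 *	{
 *		percpu_ref_kill(ref);
 *	}
 *
 *	...
 *	foo->pgmap.res = *res;			// section-alignable range
 *	foo->pgmap.ref = &foo->ref;		// already percpu_ref_init()'d
 *	foo->pgmap.kill = foo_pgmap_kill;
 *	foo->pgmap.type = MEMORY_DEVICE_FS_DAX;
 *	addr = devm_memremap_pages(dev, &foo->pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */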

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}
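
/*
 * Note: vmem_altmap_free() is pure accounting. Decrementing ->alloc is
 * what lets devm_memremap_pages_release() verify, via the
 * dev_WARN_ONCE() on pgmap->altmap.alloc, that every pfn handed out
 * from the altmap was returned before the device range went away.
 */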

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to lookup page_map
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
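
/*
 * Typical lookup pattern (an illustrative sketch, not code from this
 * file): callers walking a pfn range can feed the previous result back
 * in, so the slow radix-tree lookup only runs when the walk crosses
 * into a different pagemap:
 *
 *	struct dev_pagemap *pgmap = NULL;
 *
 *	for (pfn = start; pfn < end; pfn++)
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 */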

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_enable;

/*
 * Toggle the static key for ->page_free() callbacks when dev_pagemap
 * pages go idle.
 */
void dev_pagemap_get_ops(void)
{
	if (atomic_inc_return(&devmap_enable) == 1)
		static_branch_enable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_get_ops);

void dev_pagemap_put_ops(void)
{
	if (atomic_dec_and_test(&devmap_enable))
		static_branch_disable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_put_ops);
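
/*
 * The atomic counter makes the static key behave like a refcount: the
 * devmap branch in the generic put_page() path is only patched in while
 * at least one dev_pagemap user is registered, so the common page-free
 * path pays nothing when no device pagemaps are in use.
 */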

void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If refcount is 1 then page is freed and refcount is stable as nobody
	 * holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */