mm, devm_memremap_pages: add MEMORY_DEVICE_PRIVATE support
author	Dan Williams <dan.j.williams@intel.com>
Fri, 28 Dec 2018 08:35:01 +0000 (00:35 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 28 Dec 2018 20:11:47 +0000 (12:11 -0800)
In preparation for consolidating all ZONE_DEVICE enabling via
devm_memremap_pages(), teach it how to handle the constraints of
MEMORY_DEVICE_PRIVATE ranges.
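
For illustration only (not part of this patch): a driver with un-addressable
device memory would typically fill in a dev_pagemap roughly as sketched below
and let devm_memremap_pages() take the new add_pages() path.  The helper name
and the res/ref plumbing are hypothetical; only ->type, ->altmap_valid and the
devm_memremap_pages() call itself come from this change, and the exact set of
dev_pagemap fields (e.g. an additional release/kill callback) varies between
kernel versions.

    /*
     * Hypothetical driver-side sketch, not from this patch: request
     * struct pages for a MEMORY_DEVICE_PRIVATE range.  Assumes the
     * caller already owns a struct resource describing the device
     * memory and a live percpu_ref used for page pinning.
     */
    static void *example_map_device_private(struct device *dev,
                                            struct resource *res,
                                            struct percpu_ref *ref)
    {
            struct dev_pagemap *pgmap;

            pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
            if (!pgmap)
                    return ERR_PTR(-ENOMEM);

            pgmap->res = *res;                   /* un-addressable device range */
            pgmap->ref = ref;
            pgmap->type = MEMORY_DEVICE_PRIVATE;
            pgmap->altmap_valid = false;         /* struct pages live in host RAM */

            /* Takes the add_pages() branch added by this patch. */
            return devm_memremap_pages(dev, pgmap);
    }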

[jglisse@redhat.com: call move_pfn_range_to_zone for MEMORY_DEVICE_PRIVATE]
Link: http://lkml.kernel.org/r/154275559036.76910.12434636179931292607.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Reported-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
kernel/memremap.c

index 5e45f0c..3eef989 100644
@@ -98,9 +98,15 @@ static void devm_memremap_pages_release(void *data)
                - align_start;
 
        mem_hotplug_begin();
-       arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
-                       &pgmap->altmap : NULL);
-       kasan_remove_zero_shadow(__va(align_start), align_size);
+       if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+               pfn = align_start >> PAGE_SHIFT;
+               __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
+                               align_size >> PAGE_SHIFT, NULL);
+       } else {
+               arch_remove_memory(align_start, align_size,
+                               pgmap->altmap_valid ? &pgmap->altmap : NULL);
+               kasan_remove_zero_shadow(__va(align_start), align_size);
+       }
        mem_hotplug_done();
 
        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
@@ -187,17 +193,40 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
                goto err_pfn_remap;
 
        mem_hotplug_begin();
-       error = kasan_add_zero_shadow(__va(align_start), align_size);
-       if (error) {
-               mem_hotplug_done();
-               goto err_kasan;
+
+       /*
+        * For device private memory we call add_pages() as we only need to
+        * allocate and initialize struct page for the device memory.
+        * Moreover, the device memory is inaccessible from the CPU, so we
+        * do not want to create a linear mapping for the memory the way
+        * arch_add_memory() would.
+        *
+        * For all other device memory types, which are accessible by
+        * the CPU, we do want the linear mapping and thus use
+        * arch_add_memory().
+        */
+       if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+               error = add_pages(nid, align_start >> PAGE_SHIFT,
+                               align_size >> PAGE_SHIFT, NULL, false);
+       } else {
+               error = kasan_add_zero_shadow(__va(align_start), align_size);
+               if (error) {
+                       mem_hotplug_done();
+                       goto err_kasan;
+               }
+
+               error = arch_add_memory(nid, align_start, align_size, altmap,
+                               false);
+       }
+
+       if (!error) {
+               struct zone *zone;
+
+               zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
+               move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
+                               align_size >> PAGE_SHIFT, altmap);
        }
 
-       error = arch_add_memory(nid, align_start, align_size, altmap, false);
-       if (!error)
-               move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-                                       align_start >> PAGE_SHIFT,
-                                       align_size >> PAGE_SHIFT, altmap);
        mem_hotplug_done();
        if (error)
                goto err_add_memory;