page_alloc: add movablemem_map kernel parameter
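
Add a movablemem_map=nn[KMG]@ss[KMG] boot option. The option sets the
memory range [ss, ss+nn) to be used as movable memory. It may be
specified more than once; overlapping ranges are merged and the map is
kept sorted by start_pfn. The resulting movablemem_map will also be
used by the memblock subsystem.

For example, booting with movablemem_map=4G@8G marks the 4GB of memory
starting at the 8GB physical address as movable.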
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d1107ad..aa1cc5f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -202,6 +202,9 @@ static unsigned long __meminitdata nr_all_pages;
 static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+/* Movable memory ranges, which will also be used by the memblock subsystem. */
+struct movablemem_map movablemem_map;
+
 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 static unsigned long __initdata required_kernelcore;
@@ -5078,6 +5081,143 @@ static int __init cmdline_parse_movablecore(char *p)
 early_param("kernelcore", cmdline_parse_kernelcore);
 early_param("movablecore", cmdline_parse_movablecore);
 
+/**
+ * insert_movablemem_map - Insert a memory range into movablemem_map.map.
+ * @start_pfn: start pfn of the range
+ * @end_pfn:   end pfn of the range
+ *
+ * This function also merges overlapping ranges and keeps the array
+ * sorted by start_pfn in increasing order.
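+ *
+ * For example, inserting [2, 8) into {[1, 3), [7, 12)} merges both
+ * entries into {[1, 12)}.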
+ */
+static void __init insert_movablemem_map(unsigned long start_pfn,
+                                         unsigned long end_pfn)
+{
+       int pos, overlap;
+
+       /*
+        * pos will be at the first overlapping range, or at the
+        * position where the new element should be inserted.
+        */
+       for (pos = 0; pos < movablemem_map.nr_map; pos++)
+               if (start_pfn <= movablemem_map.map[pos].end_pfn)
+                       break;
+
+       /* If there is no overlapping range, just insert the element. */
+       if (pos == movablemem_map.nr_map ||
+           end_pfn < movablemem_map.map[pos].start_pfn) {
+               /*
+                * If pos is not at the end of the array, move the
+                * remaining elements backward to make room.
+                */
+               if (pos < movablemem_map.nr_map)
+                       memmove(&movablemem_map.map[pos+1],
+                               &movablemem_map.map[pos],
+                               sizeof(struct movablemem_entry) *
+                               (movablemem_map.nr_map - pos));
+               movablemem_map.map[pos].start_pfn = start_pfn;
+               movablemem_map.map[pos].end_pfn = end_pfn;
+               movablemem_map.nr_map++;
+               return;
+       }
+
+       /* overlap will be at the last overlapping range */
+       for (overlap = pos + 1; overlap < movablemem_map.nr_map; overlap++)
+               if (end_pfn < movablemem_map.map[overlap].start_pfn)
+                       break;
+
+       /*
+        * If multiple ranges overlap, merge them into map[pos] and
+        * shift the remaining elements forward.
+        */
+       overlap--;
+       movablemem_map.map[pos].start_pfn = min(start_pfn,
+                                       movablemem_map.map[pos].start_pfn);
+       movablemem_map.map[pos].end_pfn = max(end_pfn,
+                                       movablemem_map.map[overlap].end_pfn);
+
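+       /*
+        * Nothing to shift if only one entry overlapped (pos == overlap),
+        * or if the merged entries reach the end of the array.
+        */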
+       if (pos != overlap && overlap + 1 != movablemem_map.nr_map)
+               memmove(&movablemem_map.map[pos+1],
+                       &movablemem_map.map[overlap+1],
+                       sizeof(struct movablemem_entry) *
+                       (movablemem_map.nr_map - overlap - 1));
+
+       movablemem_map.nr_map -= overlap - pos;
+}
+
+/**
+ * movablemem_map_add_region - Add a memory range into movablemem_map.
+ * @start:     physical start address of the range
+ * @size:      size of the range
+ *
+ * This function converts the physical addresses into pfns, and then adds
+ * the range into movablemem_map by calling insert_movablemem_map().
+ */
+static void __init movablemem_map_add_region(u64 start, u64 size)
+{
+       unsigned long start_pfn, end_pfn;
+
+       /* In case size == 0 or start + size overflows */
+       if (start + size <= start)
+               return;
+
+       if (movablemem_map.nr_map >= ARRAY_SIZE(movablemem_map.map)) {
+               pr_err("movablemem_map: too many entries; ignoring [mem %#010llx-%#010llx]\n",
+                       (unsigned long long) start,
+                       (unsigned long long) (start + size - 1));
+               return;
+       }
+
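+       /* Round start down and end up so the whole range is covered. */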
+       start_pfn = PFN_DOWN(start);
+       end_pfn = PFN_UP(start + size);
+       insert_movablemem_map(start_pfn, end_pfn);
+}
+
+/**
+ * cmdline_parse_movablemem_map - Parse boot option movablemem_map.
+ * @p: The boot option of the following format:
+ *     movablemem_map=nn[KMG]@ss[KMG]
+ *
+ * This option sets the memory range [ss, ss+nn) to be used as movable memory.
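+ * For example, movablemem_map=100M@2G sets the range [2G, 2G+100M)
+ * to be used as movable memory.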
+ *
+ * Return: 0 on success or -EINVAL on failure.
+ */
+static int __init cmdline_parse_movablemem_map(char *p)
+{
+       char *oldp;
+       u64 start_at, mem_size;
+
+       if (!p)
+               goto err;
+
+       oldp = p;
+       mem_size = memparse(p, &p);
+       if (p == oldp)
+               goto err;
+
+       if (*p == '@') {
+               oldp = ++p;
+               start_at = memparse(p, &p);
+               if (p == oldp || *p != '\0')
+                       goto err;
+
+               movablemem_map_add_region(start_at, mem_size);
+               return 0;
+       }
+err:
+       return -EINVAL;
+}
+early_param("movablemem_map", cmdline_parse_movablemem_map);
+
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 /**
@@ -5247,13 +5387,10 @@ static void __setup_per_zone_wmarks(void)
                         * deltas controls asynch page reclaim, and so should
                         * not be capped for highmem.
                         */
-                       int min_pages;
+                       unsigned long min_pages;
 
                        min_pages = zone->present_pages / 1024;
-                       if (min_pages < SWAP_CLUSTER_MAX)
-                               min_pages = SWAP_CLUSTER_MAX;
-                       if (min_pages > 128)
-                               min_pages = 128;
+                       min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
                        zone->watermark[WMARK_MIN] = min_pages;
                } else {
                        /*
@@ -5806,9 +5943,11 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
                                    0, false, MIGRATE_SYNC,
                                    MR_CMA);
        }
-
-       putback_movable_pages(&cc->migratepages);
-       return ret > 0 ? 0 : ret;
+       if (ret < 0) {
+               putback_movable_pages(&cc->migratepages);
+               return ret;
+       }
+       return 0;
 }
 
 /**