use the HighMem zone if it exists, and the Normal
zone if it does not.
+ movablecore=nn[KMG] [KNL,IA-32,IA-64,PPC,X86-64] This parameter
+			is similar to kernelcore except that it specifies the
+			amount of memory used for migratable allocations.
+			If both kernelcore and movablecore are specified,
+			then kernelcore will be at *least* the specified
+			value but may be more. If movablecore is specified
+			on its own, the administrator must be careful
+			that the amount of memory usable for all allocations
+			is not too small.
+
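To make the interaction concrete, here is a minimal C sketch of the sizing rule described above (the helper name effective_kernelcore is hypothetical and for illustration only; all quantities are in pages):

	/*
	 * Hypothetical sketch, not part of this patch: how a movablecore
	 * request is converted into an equivalent kernelcore request.
	 */
	static unsigned long effective_kernelcore(unsigned long totalpages,
						  unsigned long kernelcore,
						  unsigned long movablecore)
	{
		/* Pages left over once movablecore is reserved for ZONE_MOVABLE */
		unsigned long corepages = totalpages - movablecore;

		/* The larger request wins: kernelcore ends up at *least* the
		 * value the administrator asked for, possibly more. */
		return kernelcore > corepages ? kernelcore : corepages;
	}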
keepinitrd [HW,ARM]
kstack=N [IA-32,X86-64] Print N words from the kernel stack
static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
unsigned long __initdata required_kernelcore;
+ unsigned long __initdata required_movablecore;
unsigned long __initdata zone_movable_pfn[MAX_NUMNODES];
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
return max_pfn;
}
+/*
+ * Sum the size of all ranges registered in early_node_map to get the
+ * total number of pages spanned by all nodes.
+ */
+unsigned long __init early_calculate_totalpages(void)
+{
+ int i;
+ unsigned long totalpages = 0;
+
+ for (i = 0; i < nr_nodemap_entries; i++)
+ totalpages += early_node_map[i].end_pfn -
+ early_node_map[i].start_pfn;
+
+ return totalpages;
+}
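For illustration, suppose early_node_map held the following made-up ranges (4K pages); the helper simply sums the sizes of all registered ranges:

	/*
	 * Illustrative early_node_map contents (values invented):
	 *   node 0: start_pfn = 0,      end_pfn = 262144  -> 262144 pages (1GB)
	 *   node 1: start_pfn = 262144, end_pfn = 524288  -> 262144 pages (1GB)
	 * early_calculate_totalpages() would return 524288 pages (2GB).
	 */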
+
/*
* Find the PFN the Movable zone begins in each node. Kernel memory
* is spread evenly between nodes as long as the nodes have enough
unsigned long kernelcore_node, kernelcore_remaining;
int usable_nodes = num_online_nodes();
+	/*
+	 * If movablecore was specified, calculate the corresponding size
+	 * of kernelcore so that memory usable for any allocation type is
+	 * evenly spread. If both kernelcore and movablecore are specified,
+	 * then the value of kernelcore will be used for required_kernelcore
+	 * if it's greater than what movablecore would have allowed.
+	 */
+ if (required_movablecore) {
+ unsigned long totalpages = early_calculate_totalpages();
+ unsigned long corepages;
+
+		/*
+		 * Round up so that ZONE_MOVABLE is at least as large as what
+		 * was requested by the user.
+		 */
+ required_movablecore =
+ roundup(required_movablecore, MAX_ORDER_NR_PAGES);
+ corepages = totalpages - required_movablecore;
+
+ required_kernelcore = max(required_kernelcore, corepages);
+ }
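A worked example of the arithmetic above, with illustrative values (4K pages, and assuming MAX_ORDER_NR_PAGES is 1024):

	/*
	 * Illustrative only:
	 *   totalpages              = 1048576 pages (4GB)
	 *   movablecore=1G          -> required_movablecore = 262144 pages
	 *   roundup(262144, 1024)   = 262144 (already aligned)
	 *   corepages               = 1048576 - 262144 = 786432 pages (3GB)
	 *   required_kernelcore     = max(required_kernelcore, 786432)
	 */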
+
/* If kernelcore was not specified, there is no ZONE_MOVABLE */
if (!required_kernelcore)
return;
}
}
-/*
- * kernelcore=size sets the amount of memory for use for allocations that
- * cannot be reclaimed or migrated.
- */
-static int __init cmdline_parse_kernelcore(char *p)
+static int __init cmdline_parse_core(char *p, unsigned long *core)
{
unsigned long long coremem;
if (!p)
return -EINVAL;
coremem = memparse(p, &p);
- required_kernelcore = coremem >> PAGE_SHIFT;
+ *core = coremem >> PAGE_SHIFT;
- /* Paranoid check that UL is enough for required_kernelcore */
+ /* Paranoid check that UL is enough for the coremem value */
WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
return 0;
}
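memparse() accepts an optional K, M, or G suffix and returns a byte count, which the PAGE_SHIFT shift then converts to pages. For example (assuming 4K pages, PAGE_SHIFT = 12):

	/*
	 * Example, assuming PAGE_SHIFT = 12:
	 *   memparse("512M", &p) -> 536870912 bytes
	 *   536870912 >> 12      -> 131072 pages stored in *core
	 */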
+/*
+ * kernelcore=size sets the amount of memory for use by allocations that
+ * cannot be reclaimed or migrated.
+ */
+static int __init cmdline_parse_kernelcore(char *p)
+{
+ return cmdline_parse_core(p, &required_kernelcore);
+}
+
+/*
+ * movablecore=size sets the amount of memory for use by allocations that
+ * can be reclaimed or migrated.
+ */
+static int __init cmdline_parse_movablecore(char *p)
+{
+ return cmdline_parse_core(p, &required_movablecore);
+}
+
early_param("kernelcore", cmdline_parse_kernelcore);
+early_param("movablecore", cmdline_parse_movablecore);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
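With both handlers registered, a boot command line such as the following (sizes purely illustrative) exercises the new code:

	kernelcore=512M movablecore=1G

On a machine with enough memory, movablecore=1G can imply more than 512M of non-movable memory, in which case required_kernelcore grows past the explicit 512M request, matching the "at *least*" wording in the documentation above.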