/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
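
/* Protects the 1:1 mapping and the mem_segs list against concurrent updates. */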
static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);
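
/*
 * Allocate pages for page tables. Early during boot, before the slab
 * allocator is available, the memory is taken from memblock instead.
 */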
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_alloc(size, size);
}
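
/*
 * Region (p4d/pud) and segment (pmd) tables on s390 are four pages (16KB)
 * large, hence the order-2 allocations below and the clear_table() calls
 * over PAGE_SIZE * 4 bytes with the empty entry value of each table type.
 */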
static inline p4d_t *vmem_p4d_alloc(void)
{
	p4d_t *p4d = NULL;

	p4d = vmem_alloc_pages(2);
	if (!p4d)
		return NULL;
	clear_table((unsigned long *) p4d, _REGION2_ENTRY_EMPTY, PAGE_SIZE * 4);
	return p4d;
}

static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}

pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}

pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID, size);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
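/*
 * The largest frame size the machine supports is used: 2GB region-third
 * entries with EDAT2, 1MB segment entries with EDAT1, and 4KB page table
 * entries otherwise or when debug_pagealloc is enabled. A counter per
 * mapping size is kept for update_page_count().
 */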
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pgt_prot, sgt_prot, r3_prot;
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	r3_prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
		r3_prot &= ~_REGION_ENTRY_NOEXEC;
	}
	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_p4d_alloc();
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}
		pu_dir = pud_offset(p4_dir, address);
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | r3_prot;
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | sgt_prot;
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgt_prot;
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
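/*
 * Large mappings are removed by clearing the complete region-third or
 * segment table entry; the TLB is flushed once for the whole range at
 * the end.
 */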
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			address += P4D_SIZE;
			continue;
		}
		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
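/*
 * Backing pages for the virtual memmap are allocated with
 * vmemmap_alloc_block(); 1MB frames are used when EDAT1 is available,
 * single 4KB pages otherwise.
 */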
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long pgt_prot, sgt_prot;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}
	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_p4d_alloc();
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used.
			 * Otherwise we would have also page tables since
			 * vmemmap_populate gets called for each section
			 * separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}
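
/* Freeing of vmemmap backing storage is not implemented; nothing to do. */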
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
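/*
 * Returns -ERANGE if the segment exceeds VMEM_MAX_PHYS or wraps around,
 * and -ENOSPC if it overlaps an existing segment.
 */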
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}
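
/*
 * vmem_remove_mapping() and vmem_add_mapping() are the external interface
 * used when memory segments are added to or removed from the identity
 * mapping at run time; both take vmem_mutex.
 */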
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}
	if (seg->start != start || seg->size != size)
		goto out;
	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;
	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;
	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;
out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
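/*
 * After the identity mapping is set up, kernel text is marked read-only
 * and executable, read-only data (up to _eshared) read-only, and init
 * text read-only and executable.
 */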
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
	__set_memory((unsigned long) _stext,
		     (_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long) _etext,
		     (_eshared - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long) _sinittext,
		     (_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	pr_info("Write protected kernel read-only data: %luk\n",
		(_eshared - _stext) >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
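/*
 * Runs once as a core initcall; afterwards mem_segs mirrors the memory
 * layout known from memblock plus any later hot-plugged segments.
 */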
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);