x86, mm: setup page table in top-down
arch/x86/mm/init_32.c
/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/page_types.h>
#include <asm/init.h>

unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

bool __read_mostly __vmalloc_start_set = false;

static __init void *alloc_low_page(void)
{
        unsigned long pfn;
        void *adr;

        if ((pgt_buf_end + 1) >= pgt_buf_top) {
                unsigned long ret;
                if (min_pfn_mapped >= max_pfn_mapped)
                        panic("alloc_low_page: ran out of memory");
                ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
                                        max_pfn_mapped << PAGE_SHIFT,
                                        PAGE_SIZE, PAGE_SIZE);
                if (!ret)
                        panic("alloc_low_page: can not alloc memory");
                memblock_reserve(ret, PAGE_SIZE);
                pfn = ret >> PAGE_SHIFT;
        } else
                pfn = pgt_buf_end++;

        adr = __va(pfn * PAGE_SIZE);
        clear_page(adr);
        return adr;
}
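
/*
 * Note on the fallback path above: early page-table pages must be
 * written through the direct mapping, so the memblock search is
 * restricted to [min_pfn_mapped, max_pfn_mapped), i.e. memory that
 * is already mapped.  As an illustrative example (made-up numbers,
 * not from a real boot): with min_pfn_mapped == 0x20000 and
 * max_pfn_mapped == 0x38000, the call asks memblock for one
 * PAGE_SIZE-aligned page somewhere in [512MB, 896MB).  The page is
 * reserved immediately so a subsequent allocation cannot hand out
 * the same range, and __va() on the result is safe precisely
 * because the window is already mapped.
 */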

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry.  On non-PAE builds this effectively
 * returns the pgd entry itself, since the middle (pmd) layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                if (after_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));

                return pmd_table;
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}
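
/*
 * With CONFIG_X86_PAE the 32-bit kernel uses three paging levels
 * (pgd -> pmd -> pte) and the pud level is folded, so the
 * pud_offset(pgd, 0) above merely recasts the pgd entry.  Without
 * PAE only two levels exist and the pmd is folded as well, which is
 * why the !PAE path can return pmd_offset(pud, 0) without ever
 * allocating a pmd page.
 */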

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

                if (after_bootmem) {
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                        if (!page_table)
                                page_table =
                                (pte_t *)alloc_bootmem_pages(PAGE_SIZE);
                } else
                        page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}
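
/*
 * Usage sketch (illustrative, not part of this file): mapping a
 * single page only needs the table skeleton plus one pte store,
 * roughly
 *
 *	pte_t *pte = one_page_table_init(pmd) + pte_index(vaddr);
 *	set_pte(pte, pfn_pte(pfn, flags));
 *
 * which is the pattern populate_extra_pte() below builds on.
 */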

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        int pgd_idx = pgd_index(vaddr);
        int pmd_idx = pmd_index(vaddr);

        return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        int pte_idx = pte_index(vaddr);
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return one_page_table_init(pmd) + pte_idx;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                           unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
        /*
         * Something (early fixmap) may already have put a pte
         * page here, which causes the page table allocation
         * to become nonlinear.  Attempt to fix it, and if it
         * is still nonlinear then BUG().
         */
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
            && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start
                || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) {
                pte_t *newpte;
                int i;

                BUG_ON(after_bootmem);
                newpte = alloc_low_page();
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);

                paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
                BUG_ON(newpte != pte_offset_kernel(pmd, 0));
                __flush_tlb_all();

                paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
                pte = newpte;
        }
        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
               && vaddr > fix_to_virt(FIX_KMAP_END)
               && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
        return pte;
}
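
/*
 * Why linearity matters here: page_table_range_init() below walks
 * pmds in order and remembers the previous pte page (lastpte).  The
 * kmap pte pages must come out of the contiguous pgt_buf brick so
 * that lastpte + PTRS_PER_PTE == pte holds from one iteration to
 * the next; an early-fixmap pte page allocated elsewhere would
 * break that invariant, hence the copy into a fresh low page above.
 */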

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical space,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                                                        pmd++, pmd_idx++) {
                        pte = page_table_kmap_check(one_page_table_init(pmd),
                                                    pmd, vaddr, pte);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
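
/*
 * Usage example: permanent_kmaps_init() below creates the pkmap
 * window with a single call,
 *
 *	page_table_range_init(PKMAP_BASE,
 *			      PKMAP_BASE + PAGE_SIZE * LAST_PKMAP,
 *			      swapper_pg_dir);
 *
 * Only the pgd/pmd/pte skeleton is built here; the individual pkmap
 * entries are populated later by the kmap machinery.
 */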

static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{
        int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
        unsigned long last_map_addr = end;
        unsigned long start_pfn, end_pfn;
        pgd_t *pgd_base = swapper_pg_dir;
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

        /*
         * The first iteration will set up the identity mapping using
         * large/small pages based on use_pse, with the other attributes
         * the same as set by the early code in head_32.S.
         *
         * The second iteration will set up the appropriate attributes
         * (NX, GLOBAL, ...) as desired for the kernel identity mapping.
         *
         * This two-pass mechanism conforms to the TLB app note which says:
         *
         *     "Software should not write to a paging-structure entry in a way
         *      that would change, for any linear address, both the page size
         *      and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!cpu_has_pse)
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * The first pass uses the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                pfn &= PMD_MASK >> PAGE_SHIFT;
                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * The first pass uses the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1) {
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                        last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
                                } else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * Update the direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * Do a local, full TLB flush to drop the first-pass
                 * mappings from both the small- and large-page TLBs.
                 */
                __flush_tlb_all();

                /*
                 * The second iteration will set the actual desired PTE
                 * attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
        return last_map_addr;
}
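
/*
 * Worked example for the PSE path (illustrative numbers): on a
 * non-PAE build PTRS_PER_PTE is 1024, so one large page spans 4MB.
 * For pfn == 0x401 the statement pfn &= PMD_MASK >> PAGE_SHIFT
 * rounds down to 0x400 (the 4MB physical boundary), and addr2 then
 * points at the last byte of that large page.  Checking both addr
 * and addr2 against is_kernel_text() makes the whole large page
 * executable if any part of it overlaps the kernel text.
 */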

pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}
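
/*
 * kmap_get_fixmap_pte() is a plain four-level walk collapsed by the
 * folded levels: on non-PAE both pud_offset() and pmd_offset() are
 * no-ops that re-interpret the pgd entry, so the chain above boils
 * down to "find the pte page behind the fixmap's pgd slot and index
 * it with vaddr".
 */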

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

static void __init add_one_highpage_init(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

void __init add_highpages_with_active_regions(int nid,
                         unsigned long start_pfn, unsigned long end_pfn)
{
        phys_addr_t start, end;
        u64 i;

        for_each_free_mem_range(i, nid, &start, &end, NULL) {
                unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
                                            start_pfn, end_pfn);
                unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
                                              start_pfn, end_pfn);
                for ( ; pfn < e_pfn; pfn++)
                        if (pfn_valid(pfn))
                                add_one_highpage_init(pfn_to_page(pfn));
        }
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_init(void)
{
        unsigned long pfn, va;
        pgd_t *pgd, *base = swapper_pg_dir;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
        paging_init();
}
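
/*
 * The loop above stops at the first hole it finds: the boot-time
 * tables built by head_32.S map lowmem contiguously, so once a
 * non-present pgd/pmd/pte is hit there should be nothing mapped
 * beyond it, and breaking out early is safe.
 */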

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_init() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}
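
/*
 * The rounding above widens the fixmap range to whole PMDs: vaddr
 * is the lowest fixmap address (the highest fixmap index maps
 * lowest, since the fixmap grows down) rounded down with & PMD_MASK,
 * and end rounds FIXADDR_TOP up to the next PMD boundary, so
 * page_table_range_init() always sees PMD-sized steps.  E.g.
 * (illustrative) a fixmap starting at 0xfff15000 would round down
 * to 0xffc00000 on a non-PAE build with 4MB PMDs.
 */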

static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
        "highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
        "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if the user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
void __init lowmem_pfn_init(void)
{
        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;

        if (highmem_pages == -1)
                highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
        if (highmem_pages >= max_pfn) {
                printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
                        pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
                highmem_pages = 0;
        }
        if (highmem_pages) {
                if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
                        printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn -= highmem_pages;
        }
#else
        if (highmem_pages)
                printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}
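
/*
 * Arithmetic example for the highmem= path (illustrative): on a box
 * with 1GB of RAM and "highmem=512M", memparse() yields 0x20000000,
 * so highmem_pages = 0x20000000 >> PAGE_SHIFT = 131072 pages.
 * max_low_pfn shrinks by that amount, and the floor check above
 * rejects the request if fewer than 64*1024*1024/PAGE_SIZE = 16384
 * low pages would remain.
 */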

#define MSG_HIGHMEM_TOO_SMALL \
        "only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
        "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
void __init highmem_pfn_init(void)
{
        max_low_pfn = MAXMEM_PFN;

        if (highmem_pages == -1)
                highmem_pages = max_pfn - MAXMEM_PFN;

        if (highmem_pages + MAXMEM_PFN < max_pfn)
                max_pfn = MAXMEM_PFN + highmem_pages;

        if (highmem_pages + MAXMEM_PFN > max_pfn) {
                printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
                        pages_to_mb(max_pfn - MAXMEM_PFN),
                        pages_to_mb(highmem_pages));
                highmem_pages = 0;
        }
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
        if (max_pfn > MAX_NONPAE_PFN)
                printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
        else
                printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
        max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
        if (max_pfn > MAX_NONPAE_PFN) {
                max_pfn = MAX_NONPAE_PFN;
                printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
        }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}
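
/*
 * MAXMEM_PFN is the number of pages the kernel can map directly;
 * with the common 3G/1G split and the default vmalloc reserve this
 * works out to roughly 896MB of lowmem.  Everything between
 * MAXMEM_PFN and max_pfn becomes highmem, and without
 * CONFIG_HIGHMEM64G (i.e. without PAE) anything above the 4GB pfn
 * limit (MAX_NONPAE_PFN) is trimmed with the MSG_HIGHMEM_TRIMMED
 * warning.
 */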

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */

        if (max_pfn <= MAXMEM_PFN)
                lowmem_pfn_init();
        else
                highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
        sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        __vmalloc_start_set = true;

        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void __init setup_bootmem_allocator(void)
{
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                 max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);

        after_bootmem = 1;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        olpc_dt_build_devicetree();
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode.  It isn't supported on 386s
 * and also on some strange 486s.  All 586+ CPUs are OK.  This used to
 * involve black magic jumps to work around some nasty CPU bugs, but
 * fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_KERNEL_RO);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

        pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /*
         * With CONFIG_DEBUG_PAGEALLOC, initialization of highmem pages
         * has to be done before free_all_bootmem().  Memblock uses free
         * low memory for temporary data (see find_range_array()) and for
         * this purpose can use pages that were already passed to the
         * buddy allocator, and hence are marked not accessible in the
         * page tables when compiled with CONFIG_DEBUG_PAGEALLOC.
         * Otherwise the order of initialization is not important here.
         */
        set_highmem_pages_init();

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                totalhigh_pages << (PAGE_SHIFT-10));

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE  > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END                        > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START                      >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE        > FIXADDR_START);
        BUG_ON(VMALLOC_END                              > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START                            >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory               > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}
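
/*
 * How the test above works: flag starts out as 1 (the "2" (1)
 * matching input constraint).  The store at label 1 targets the
 * read-only FIX_WP_TEST page; if the CPU honours WP in supervisor
 * mode the store faults, the exception table entry redirects
 * execution to label 2, the xorl is skipped and flag stays 1.  On a
 * CPU that ignores WP the store succeeds, the xorl clears flag, and
 * the function returns 0.
 */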

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly __read_mostly;

void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, start+size);

        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, start+size);

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

static void mark_nxdata_nx(void)
{
        /*
         * When this is called, init has already been executed and
         * released, so everything past _etext should be NX.
         */
        unsigned long start = PFN_ALIGN(_etext);
        /*
         * This matches the is_kernel_text() upper limit; round up to
         * HPAGE_SIZE since large pages may have been used there:
         */
        unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

        if (__supported_pte_mask & _PAGE_NX)
                printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
        set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}
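
/*
 * Size arithmetic above: PFN_ALIGN(_etext) is the first page past
 * the text, and ((__init_end + HPAGE_SIZE) & HPAGE_MASK) rounds the
 * end of the init sections up to a large-page boundary.  With
 * (illustrative numbers) _etext at 0xc13f8000 and __init_end at
 * 0xc1600000 on a non-PAE build with 4MB large pages, the NX range
 * would cover 0xc13f8000 - 0xc1800000.
 */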

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

        kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
        mark_nxdata_nx();
}
#endif