// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>     /* for node_online_map */
#include <linux/pagemap.h>      /* for release_pages */
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
extern int  data_start;
extern void parisc_kernel_start(void);  /* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE)));

static struct resource data_resource = {
        .name   = "Kernel data",
        .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource code_resource = {
        .name   = "Kernel code",
        .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource pdcdata_resource = {
        .name   = "PDC data (Page Zero)",
        .start  = 0,
        .end    = 0x9ff,
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;

/* The following array is initialized from the firmware-specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
int npmem_ranges __initdata;

#ifdef CONFIG_64BIT
#define MAX_MEM         (1UL << MAX_PHYSMEM_BITS)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
        char *cp, *end;
        unsigned long limit;

        /* We need this before __setup() functions are called */

        limit = MAX_MEM;
        for (cp = boot_command_line; *cp; ) {
                if (memcmp(cp, "mem=", 4) == 0) {
                        cp += 4;
                        limit = memparse(cp, &end);
                        if (end != cp)
                                break;
                        cp = end;
                } else {
                        while (*cp != ' ' && *cp)
                                ++cp;
                        while (*cp == ' ')
                                ++cp;
                }
        }

        if (limit < mem_limit)
                mem_limit = limit;
}
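
/*
 * Example (illustrative): booting with "mem=512M" makes memparse()
 * return 512 << 20, so mem_limit drops to 512 MB and setup_bootmem()
 * below truncates the physical memory ranges accordingly.  memparse()
 * accepts the usual K/M/G suffixes.
 */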

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
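/*
 * 0x40000000 bytes is 1 GiB, so MAX_GAP is the number of pages in a
 * 1 GiB gap: without SPARSEMEM, ranges separated by more than this are
 * dropped rather than modelled as one huge flat mem_map.
 */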

static void __init setup_bootmem(void)
{
        unsigned long mem_max;
#ifndef CONFIG_SPARSEMEM
        physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
        int npmem_holes;
#endif
        int i, sysram_resource_count;

        disable_sr_hashing(); /* Turn off space register hashing */

        /*
         * Sort the ranges. Since the number of ranges is typically
         * small, and performance is not an issue here, just do
         * a simple insertion sort.
         */

        for (i = 1; i < npmem_ranges; i++) {
                int j;

                for (j = i; j > 0; j--) {
                        if (pmem_ranges[j-1].start_pfn <
                            pmem_ranges[j].start_pfn) {
                                break;
                        }
                        swap(pmem_ranges[j-1], pmem_ranges[j]);
                }
        }

#ifndef CONFIG_SPARSEMEM
        /*
         * Throw out ranges that are too far apart (controlled by
         * MAX_GAP).
         */

        for (i = 1; i < npmem_ranges; i++) {
                if (pmem_ranges[i].start_pfn -
                        (pmem_ranges[i-1].start_pfn +
                         pmem_ranges[i-1].pages) > MAX_GAP) {
                        npmem_ranges = i;
                        printk("Large gap in memory detected (%ld pages). "
                               "Consider turning on CONFIG_SPARSEMEM\n",
                               pmem_ranges[i].start_pfn -
                               (pmem_ranges[i-1].start_pfn +
                                pmem_ranges[i-1].pages));
                        break;
                }
        }
#endif

        /* Print the memory ranges */
        pr_info("Memory Ranges:\n");

        for (i = 0; i < npmem_ranges; i++) {
                struct resource *res = &sysram_resources[i];
                unsigned long start;
                unsigned long size;

                size = (pmem_ranges[i].pages << PAGE_SHIFT);
                start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
                pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
                        i, start, start + (size - 1), size >> 20);

                /* request memory resource */
                res->name = "System RAM";
                res->start = start;
                res->end = start + size - 1;
                res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
                request_resource(&iomem_resource, res);
        }

        sysram_resource_count = npmem_ranges;

        /*
         * For 32 bit kernels we limit the amount of memory we can
         * support, in order to preserve enough kernel address space
         * for other purposes. For 64 bit kernels we don't normally
         * limit the memory, but this mechanism can be used to
         * artificially limit the amount of memory (and it is written
         * to work with multiple memory ranges).
         */

        mem_limit_func();       /* check for "mem=" argument */

        mem_max = 0;
        for (i = 0; i < npmem_ranges; i++) {
                unsigned long rsize;

                rsize = pmem_ranges[i].pages << PAGE_SHIFT;
                if ((mem_max + rsize) > mem_limit) {
                        printk(KERN_WARNING "Memory truncated to %ld MB\n",
                               mem_limit >> 20);
                        if (mem_max == mem_limit)
                                npmem_ranges = i;
                        else {
                                pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
                                                       - (mem_max >> PAGE_SHIFT);
                                npmem_ranges = i + 1;
                                mem_max = mem_limit;
                        }
                        break;
                }
                mem_max += rsize;
        }

        printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_SPARSEMEM
        /* Merge the ranges, keeping track of the holes */
        {
                unsigned long end_pfn;
                unsigned long hole_pages;

                npmem_holes = 0;
                end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
                for (i = 1; i < npmem_ranges; i++) {
                        hole_pages = pmem_ranges[i].start_pfn - end_pfn;
                        if (hole_pages) {
                                pmem_holes[npmem_holes].start_pfn = end_pfn;
                                pmem_holes[npmem_holes++].pages = hole_pages;
                                end_pfn += hole_pages;
                        }
                        end_pfn += pmem_ranges[i].pages;
                }

                pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
                npmem_ranges = 1;
        }
#endif
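
/*
 * Worked example (illustrative) of the merge above: pfn ranges
 * {0..0x3ffff} and {0x48000..0x7ffff} become a single range
 * {0..0x7ffff} plus one recorded hole {start_pfn 0x40000, 0x8000
 * pages}; the hole is later passed to memblock_reserve() so the flat
 * mem_map never hands out the non-existent pages.
 */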

        /*
         * Initialize and free the full range of memory in each range.
         */

        max_pfn = 0;
        for (i = 0; i < npmem_ranges; i++) {
                unsigned long start_pfn;
                unsigned long npages;
                unsigned long start;
                unsigned long size;

                start_pfn = pmem_ranges[i].start_pfn;
                npages = pmem_ranges[i].pages;

                start = start_pfn << PAGE_SHIFT;
                size = npages << PAGE_SHIFT;

                /* add system RAM memblock */
                memblock_add(start, size);

                if ((start_pfn + npages) > max_pfn)
                        max_pfn = start_pfn + npages;
        }

        /*
         * We can't use memblock top-down allocations because we only
         * created the initial mapping up to KERNEL_INITIAL_SIZE in
         * the assembly bootup code.
         */
        memblock_set_bottom_up(true);

        /* An IOMMU is always used to access "high mem" on boxes that
         * hold more memory than a PCI device could DMA to directly,
         * so all physical memory can be treated as low memory.
         * ISA DMA support will need to revisit this.
         */
        max_low_pfn = max_pfn;

        /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

        memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
                                PDC_CONSOLE_IO_IODC_SIZE));
        memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
                        (unsigned long)(_end - KERNEL_BINARY_TEXT_START));

#ifndef CONFIG_SPARSEMEM

        /* reserve the holes */

        for (i = 0; i < npmem_holes; i++) {
                memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
                                (pmem_holes[i].pages << PAGE_SHIFT));
        }
#endif

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start) {
                printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
                if (__pa(initrd_start) < mem_max) {
                        unsigned long initrd_reserve;

                        if (__pa(initrd_end) > mem_max) {
                                initrd_reserve = mem_max - __pa(initrd_start);
                        } else {
                                initrd_reserve = initrd_end - initrd_start;
                        }
                        initrd_below_start_ok = 1;
                        printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
                               __pa(initrd_start),
                               __pa(initrd_start) + initrd_reserve, mem_max);

                        memblock_reserve(__pa(initrd_start), initrd_reserve);
                }
        }
#endif

        data_resource.start = virt_to_phys(&data_start);
        data_resource.end = virt_to_phys(_end) - 1;
        code_resource.start = virt_to_phys(_text);
        code_resource.end = virt_to_phys(&data_start) - 1;

        /* We don't know which region the kernel will be in, so try
         * all of them.
         */
        for (i = 0; i < sysram_resource_count; i++) {
                struct resource *res = &sysram_resources[i];
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
        }
        request_resource(&sysram_resources[0], &pdcdata_resource);

        /* Initialize Page Deallocation Table (PDT) and check for bad memory. */
        pdc_pdt_init();

        memblock_allow_resize();
        memblock_dump_all();
}

static bool kernel_set_to_readonly;

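/*
 * map_pages() builds kernel page table entries for the virtual range
 * [start_vaddr, start_vaddr + size) backed by the physical range that
 * starts at start_paddr.  With force == 0 the protection is chosen per
 * page (PAGE_KERNEL, PAGE_KERNEL_EXEC or PAGE_KERNEL_RWX, huge where
 * possible) depending on where the address falls within the kernel
 * image; with force != 0 the given pgprot is applied unconditionally.
 */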
static void __ref map_pages(unsigned long start_vaddr,
                            unsigned long start_paddr, unsigned long size,
                            pgprot_t pgprot, int force)
{
        pmd_t *pmd;
        pte_t *pg_table;
        unsigned long end_paddr;
        unsigned long start_pmd;
        unsigned long start_pte;
        unsigned long tmp1;
        unsigned long tmp2;
        unsigned long address;
        unsigned long vaddr;
        unsigned long ro_start;
        unsigned long ro_end;
        unsigned long kernel_start, kernel_end;

        ro_start = __pa((unsigned long)_text);
        ro_end   = __pa((unsigned long)&data_start);
        kernel_start = __pa((unsigned long)&__init_begin);
        kernel_end  = __pa((unsigned long)&_end);

        end_paddr = start_paddr + size;

        /* for a 2-level configuration PTRS_PER_PMD is 1, so the mask below
         * is 0 and start_pmd will be 0 */
        start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
        start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

        address = start_paddr;
        vaddr = start_vaddr;
        while (address < end_paddr) {
                pgd_t *pgd = pgd_offset_k(vaddr);
                p4d_t *p4d = p4d_offset(pgd, vaddr);
                pud_t *pud = pud_offset(p4d, vaddr);

#if CONFIG_PGTABLE_LEVELS == 3
                if (pud_none(*pud)) {
                        pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
                                             PAGE_SIZE << PMD_TABLE_ORDER);
                        if (!pmd)
                                panic("pmd allocation failed.\n");
                        pud_populate(NULL, pud, pmd);
                }
#endif

                pmd = pmd_offset(pud, vaddr);
                for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
                        if (pmd_none(*pmd)) {
                                pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                                if (!pg_table)
                                        panic("page table allocation failed\n");
                                pmd_populate_kernel(NULL, pmd, pg_table);
                        }

                        pg_table = pte_offset_kernel(pmd, vaddr);
                        for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
                                pte_t pte;
                                pgprot_t prot;
                                bool huge = false;

                                if (force) {
                                        prot = pgprot;
                                } else if (address < kernel_start || address >= kernel_end) {
                                        /* outside kernel memory */
                                        prot = PAGE_KERNEL;
                                } else if (!kernel_set_to_readonly) {
                                        /* still initializing, allow writing to RO memory */
                                        prot = PAGE_KERNEL_RWX;
                                        huge = true;
                                } else if (address >= ro_start) {
                                        /* Code (ro) and Data areas */
                                        prot = (address < ro_end) ?
                                                PAGE_KERNEL_EXEC : PAGE_KERNEL;
                                        huge = true;
                                } else {
                                        prot = PAGE_KERNEL;
                                }

                                pte = __mk_pte(address, prot);
                                if (huge)
                                        pte = pte_mkhuge(pte);

                                if (address >= end_paddr)
                                        break;

                                set_pte(pg_table, pte);

                                address += PAGE_SIZE;
                                vaddr += PAGE_SIZE;
                        }
                        start_pte = 0;

                        if (address >= end_paddr)
                                break;
                }
                start_pmd = 0;
        }
}

void __init set_kernel_text_rw(int enable_read_write)
{
        unsigned long start = (unsigned long) __init_begin;
        unsigned long end   = (unsigned long) &data_start;

        map_pages(start, __pa(start), end - start,
                  PAGE_KERNEL_RWX, enable_read_write ? 1 : 0);

        /* force the kernel to see the new page table entries */
        flush_cache_all();
        flush_tlb_all();
}

void free_initmem(void)
{
        unsigned long init_begin = (unsigned long)__init_begin;
        unsigned long init_end = (unsigned long)__init_end;
        unsigned long kernel_end  = (unsigned long)&_end;

        /* Remap kernel text and data, but do not touch init section yet. */
        kernel_set_to_readonly = true;
        map_pages(init_end, __pa(init_end), kernel_end - init_end,
                  PAGE_KERNEL, 0);

        /* The init text pages are marked R-X.  We have to
         * flush the icache and mark them RW-.
         *
         * Do a dummy remap of the data section first (the data
         * section is already PAGE_KERNEL) to pull in the TLB entries
         * needed by map_pages() itself. */
        map_pages(init_begin, __pa(init_begin), init_end - init_begin,
                  PAGE_KERNEL_RWX, 1);
        /* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
         * map_pages */
        map_pages(init_begin, __pa(init_begin), init_end - init_begin,
                  PAGE_KERNEL, 1);

        /* force the kernel to see the new TLB entries */
        __flush_tlb_range(0, init_begin, kernel_end);

        /* finally dump all the instructions which were cached, since the
         * pages are no longer executable */
        flush_icache_range(init_begin, init_end);

        free_initmem_default(POISON_FREE_INITMEM);

        /* set up a new LED state on systems shipped with an LED state panel */
        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}


#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
        /* rodata memory was already mapped with KERNEL_RO access rights by
         * pagetable_init() and map_pages(). No need to do additional stuff here */
        unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;

        pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between the top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

 /* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
                                     & ~(VM_MAP_OFFSET-1)))
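
/*
 * SET_MAP_OFFSET() rounds an address up to the next 32K boundary,
 * always advancing by at least one byte, which is what guarantees the
 * VM_MAP_OFFSET hole.  E.g. 0x10000 -> 0x18000 and 0x10001 -> 0x18000
 * (illustrative values).
 */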

void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);

void __init mem_init(void)
{
        /* Do sanity checks on IPC (compat) structures */
        BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
        BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
        BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
        BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
        BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
        BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
        BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

        /* Do sanity checks on page table constants */
        BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
        BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
        BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
        BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
                        > BITS_PER_LONG);
#if CONFIG_PGTABLE_LEVELS == 3
        BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD);
#else
        BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD);
#endif

#ifdef CONFIG_64BIT
        /* avoid ldil_%L() asm statements to sign-extend into upper 32-bits */
        BUILD_BUG_ON(__PAGE_OFFSET >= 0x80000000);
        BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000);
#endif

        high_memory = __va((max_pfn << PAGE_SHIFT));
        set_max_mapnr(max_low_pfn);
        memblock_free_all();

#ifdef CONFIG_PA11
        if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
                pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
                parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
                                                + PCXL_DMA_MAP_SIZE);
        } else
#endif
                parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);

#if 0
        /*
         * Do not expose the virtual kernel memory layout to userspace.
         * But keep code for debugging purposes.
         */
        printk("virtual kernel memory layout:\n"
               "     vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
               "     fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
               "     memory  : 0x%px - 0x%px   (%4ld MB)\n"
               "       .init : 0x%px - 0x%px   (%4ld kB)\n"
               "       .data : 0x%px - 0x%px   (%4ld kB)\n"
               "       .text : 0x%px - 0x%px   (%4ld kB)\n",

               (void*)VMALLOC_START, (void*)VMALLOC_END,
               (VMALLOC_END - VMALLOC_START) >> 20,

               (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
               (unsigned long)(FIXMAP_SIZE / 1024),

               __va(0), high_memory,
               ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

               __init_begin, __init_end,
               ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

               _etext, _edata,
               ((unsigned long)_edata - (unsigned long)_etext) >> 10,

               _text, _etext,
               ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __ro_after_init;
EXPORT_SYMBOL(empty_zero_page);

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
        int range;

        /* Map each physical memory range to its kernel vaddr */

        for (range = 0; range < npmem_ranges; range++) {
                unsigned long start_paddr;
                unsigned long size;

                start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
                size = pmem_ranges[range].pages << PAGE_SHIFT;

                map_pages((unsigned long)__va(start_paddr), start_paddr,
                          size, PAGE_KERNEL, 0);
        }

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_end && initrd_end > mem_limit) {
                printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
                map_pages(initrd_start, __pa(initrd_start),
                          initrd_end - initrd_start, PAGE_KERNEL, 0);
        }
#endif

        empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!empty_zero_page)
                panic("zero page allocation failed.\n");
}

static void __init gateway_init(void)
{
        unsigned long linux_gateway_page_addr;
        /* FIXME: This is 'const' in order to trick the compiler
           into not treating it as DP-relative data. */
        extern void * const linux_gateway_page;

        linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

        /*
         * Setup Linux Gateway page.
         *
         * The Linux gateway page will reside in kernel space (on virtual
         * page 0), so it doesn't need to be aliased into user space.
         */

        map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
                  PAGE_SIZE, PAGE_GATEWAY, 1);
}

static void __init fixmap_init(void)
{
        unsigned long addr = FIXMAP_START;
        unsigned long end = FIXMAP_START + FIXMAP_SIZE;
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d = p4d_offset(pgd, addr);
        pud_t *pud = pud_offset(p4d, addr);
        pmd_t *pmd;

        BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE);

#if CONFIG_PGTABLE_LEVELS == 3
        if (pud_none(*pud)) {
                pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
                                     PAGE_SIZE << PMD_TABLE_ORDER);
                if (!pmd)
                        panic("fixmap: pmd allocation failed.\n");
                pud_populate(NULL, pud, pmd);
        }
#endif

        pmd = pmd_offset(pud, addr);
        do {
                pte_t *pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                if (!pte)
                        panic("fixmap: pte allocation failed.\n");

                pmd_populate_kernel(&init_mm, pmd, pte);

                addr += PAGE_SIZE;
        } while (addr < end);
}
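
/*
 * Note: fixmap_init() only populates the page-table levels for the
 * fixmap window; the PTEs themselves are installed later, at the point
 * of use (the kernel text-patching code, for instance, maps its target
 * page through this window).
 */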

static void __init parisc_bootmem_free(void)
{
        unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };

        max_zone_pfn[0] = memblock_end_of_DRAM();

        free_area_init(max_zone_pfn);
}

void __init paging_init(void)
{
        setup_bootmem();
        pagetable_init();
        gateway_init();
        fixmap_init();
        flush_cache_all_local(); /* start with known state */
        flush_tlb_all_local(NULL);

        sparse_init();
        parisc_bootmem_free();
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))
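
/*
 * space_id[] and dirty_space_id[] are bitmaps with one bit per space
 * ID, packed into longs: e.g. with NR_SPACE_IDS == 262144 and 64-bit
 * longs that is 262144 / 64 == 4096 array entries.  A set bit in
 * space_id[] means "in use"; a set bit in dirty_space_id[] means
 * "freed, but possibly still live in some TLB".
 */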

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids;

static DEFINE_SPINLOCK(sid_lock);

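/*
 * Space-ID lifecycle: alloc_sid() hands out a free ID and marks it in
 * space_id[]; free_sid() only marks it dirty, because stale TLB entries
 * tagged with that space may still exist.  Only once the whole TLB has
 * been purged (flush_tlb_all()) are the dirty IDs folded back into the
 * free pool by recycle_sids().
 */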
unsigned long alloc_sid(void)
{
        unsigned long index;

        spin_lock(&sid_lock);

        if (free_space_ids == 0) {
                if (dirty_space_ids != 0) {
                        spin_unlock(&sid_lock);
                        flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
                        spin_lock(&sid_lock);
                }
                BUG_ON(free_space_ids == 0);
        }

        free_space_ids--;

        index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
        space_id[BIT_WORD(index)] |= BIT_MASK(index);
        space_id_index = index;

        spin_unlock(&sid_lock);

        return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
        unsigned long index = spaceid >> SPACEID_SHIFT;
        unsigned long *dirty_space_offset, mask;

        dirty_space_offset = &dirty_space_id[BIT_WORD(index)];
        mask = BIT_MASK(index);

        spin_lock(&sid_lock);

        BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */

        *dirty_space_offset |= mask;
        dirty_space_ids++;

        spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        *ndirtyptr = dirty_space_ids;
        if (dirty_space_ids != 0) {
            for (i = 0; i < SID_ARRAY_SIZE; i++) {
                dirty_array[i] = dirty_space_id[i];
                dirty_space_id[i] = 0;
            }
            dirty_space_ids = 0;
        }
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        if (ndirty != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        space_id[i] ^= dirty_array[i];
                }

                free_space_ids += ndirty;
                space_id_index = 0;
        }
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        if (dirty_space_ids != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        space_id[i] ^= dirty_space_id[i];
                        dirty_space_id[i] = 0;
                }

                free_space_ids += dirty_space_ids;
                dirty_space_ids = 0;
                space_id_index = 0;
        }
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
        int do_recycle;

        do_recycle = 0;
        spin_lock(&sid_lock);
        __inc_irq_stat(irq_tlb_count);
        if (dirty_space_ids > RECYCLE_THRESHOLD) {
            BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
            get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
            recycle_inuse++;
            do_recycle++;
        }
        spin_unlock(&sid_lock);
        on_each_cpu(flush_tlb_all_local, NULL, 1);
        if (do_recycle) {
            spin_lock(&sid_lock);
            recycle_sids(recycle_ndirty, recycle_dirty_array);
            recycle_inuse = 0;
            spin_unlock(&sid_lock);
        }
}
#else
void flush_tlb_all(void)
{
        spin_lock(&sid_lock);
        __inc_irq_stat(irq_tlb_count);
        flush_tlb_all_local(NULL);
        recycle_sids();
        spin_unlock(&sid_lock);
}
#endif

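/*
 * Per-combination page protections for the VM_READ/VM_WRITE/VM_EXEC/
 * VM_SHARED flag bits; DECLARE_VM_GET_PAGE_PROT emits the generic
 * vm_get_page_prot() that looks mmap protection bits up in this table.
 * Note the asymmetry: a private write-only mapping gets PAGE_NONE,
 * while a shared one gets PAGE_WRITEONLY.
 */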
static const pgprot_t protection_map[16] = {
        [VM_NONE]                                       = PAGE_NONE,
        [VM_READ]                                       = PAGE_READONLY,
        [VM_WRITE]                                      = PAGE_NONE,
        [VM_WRITE | VM_READ]                            = PAGE_READONLY,
        [VM_EXEC]                                       = PAGE_EXECREAD,
        [VM_EXEC | VM_READ]                             = PAGE_EXECREAD,
        [VM_EXEC | VM_WRITE]                            = PAGE_EXECREAD,
        [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_EXECREAD,
        [VM_SHARED]                                     = PAGE_NONE,
        [VM_SHARED | VM_READ]                           = PAGE_READONLY,
        [VM_SHARED | VM_WRITE]                          = PAGE_WRITEONLY,
        [VM_SHARED | VM_WRITE | VM_READ]                = PAGE_SHARED,
        [VM_SHARED | VM_EXEC]                           = PAGE_EXECREAD,
        [VM_SHARED | VM_EXEC | VM_READ]                 = PAGE_EXECREAD,
        [VM_SHARED | VM_EXEC | VM_WRITE]                = PAGE_RWX,
        [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT