/*
 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 */
15 #include <asm/global_data.h>
16 #include <linux/compiler.h>
17 #include <linux/sizes.h>
19 DECLARE_GLOBAL_DATA_PTR;
/* Large pages are 2MB. */
#define LARGE_PAGE_SIZE ((1 << 20) * 2)

/*
 * Paging data structures.
 */

/*
 * Page-directory-pointer-table entry (PAE mode).  Each entry points at one
 * 4KB-aligned page directory; only the present bit, the cache controls and
 * the base address are meaningful, the mbz_* fields must be zero.
 */
struct pdpe {
	uint64_t p:1;		/* present */
	uint64_t mbz_0:2;	/* must be zero */
	uint64_t pwt:1;		/* page-level writethrough */
	uint64_t pcd:1;		/* page-level cache disable */
	uint64_t mbz_1:4;	/* must be zero */
	uint64_t avl:3;		/* available to software */
	uint64_t base:40;	/* base address of the PDT, in 4KB units */
	uint64_t mbz_2:12;	/* must be zero */
};

/* A full page of PDPT entries (only the first 4 are used by PAE hardware). */
typedef struct pdpe pdpt_t[512];
/*
 * Page-directory entry describing one 2MB page (PS = 1) in PAE mode.
 * The bitfields partition the 64-bit entry exactly; the must-be-zero
 * fields pad the architecture-reserved bits.
 */
struct pde {
	uint64_t p:1;      /* present */
	uint64_t rw:1;     /* read/write */
	uint64_t us:1;     /* user/supervisor */
	uint64_t pwt:1;    /* page-level writethrough */
	uint64_t pcd:1;    /* page-level cache disable */
	uint64_t a:1;      /* accessed */
	uint64_t d:1;      /* dirty */
	uint64_t ps:1;     /* page size */
	uint64_t g:1;      /* global page */
	uint64_t avl:3;    /* available to software */
	uint64_t pat:1;    /* page-attribute table */
	uint64_t mbz_0:8;  /* must be zero */
	uint64_t base:31;  /* base address, in 2MB units */
	uint64_t mbz_1:12; /* must be zero */
};

/* One page directory: 512 entries covering 1GB with 2MB pages. */
typedef struct pde pdt_t[512];
/* The PDPT must be 4KB-aligned for the hardware to accept it in CR3. */
static pdpt_t pdpt __aligned(4096);
/* Four page directories: enough to identity map the low 4GB with 2MB pages. */
static pdt_t pdts[4] __aligned(4096);
63 * Map a virtual address to a physical address and optionally invalidate any
66 * @param virt The virtual address to use.
67 * @param phys The physical address to use.
68 * @param invlpg Whether to use invlpg to clear any old mappings.
70 static void x86_phys_map_page(uintptr_t virt, phys_addr_t phys, int invlpg)
72 /* Extract the two bit PDPT index and the 9 bit PDT index. */
73 uintptr_t pdpt_idx = (virt >> 30) & 0x3;
74 uintptr_t pdt_idx = (virt >> 21) & 0x1ff;
76 /* Set up a handy pointer to the appropriate PDE. */
77 struct pde *pde = &(pdts[pdpt_idx][pdt_idx]);
79 memset(pde, 0, sizeof(struct pde));
84 pde->base = phys >> 21;
87 /* Flush any stale mapping out of the TLBs. */
91 : "m" (*(uint8_t *)virt)
96 /* Identity map the lower 4GB and turn on paging with PAE. */
97 static void x86_phys_enter_paging(void)
99 phys_addr_t page_addr;
102 /* Zero out the page tables. */
103 memset(pdpt, 0, sizeof(pdpt));
104 memset(pdts, 0, sizeof(pdts));
106 /* Set up the PDPT. */
107 for (i = 0; i < ARRAY_SIZE(pdts); i++) {
109 pdpt[i].base = ((uintptr_t)&pdts[i]) >> 12;
112 /* Identity map everything up to 4GB. */
113 for (page_addr = 0; page_addr < (1ULL << 32);
114 page_addr += LARGE_PAGE_SIZE) {
115 /* There's no reason to invalidate the TLB with paging off. */
116 x86_phys_map_page(page_addr, page_addr, 0);
119 cpu_enable_paging_pae((ulong)pdpt);
/* Disable paging and PAE mode. */
static void x86_phys_exit_paging(void)
{
	cpu_disable_paging_pae();
}
129 * Set physical memory to a particular value when the whole region fits on one
132 * @param map_addr The address that starts the physical page.
133 * @param offset How far into that page to start setting a value.
134 * @param c The value to set memory to.
135 * @param size The size in bytes of the area to set.
137 static void x86_phys_memset_page(phys_addr_t map_addr, uintptr_t offset, int c,
141 * U-Boot should be far away from the beginning of memory, so that's a
142 * good place to map our window on top of.
144 const uintptr_t window = LARGE_PAGE_SIZE;
146 /* Make sure the window is below U-Boot. */
147 assert(window + LARGE_PAGE_SIZE <
148 gd->relocaddr - CONFIG_SYS_MALLOC_LEN - SZ_32K);
149 /* Map the page into the window and then memset the appropriate part. */
150 x86_phys_map_page(window, map_addr, 1);
151 memset((void *)(window + offset), c, size);
155 * A physical memory anologue to memset with matching parameters and return
158 phys_addr_t arch_phys_memset(phys_addr_t start, int c, phys_size_t size)
160 const phys_addr_t max_addr = (phys_addr_t)~(uintptr_t)0;
161 const phys_addr_t orig_start = start;
166 /* Handle memory below 4GB. */
167 if (start <= max_addr) {
168 phys_size_t low_size = min(max_addr + 1 - start, size);
169 void *start_ptr = (void *)(uintptr_t)start;
171 assert(((phys_addr_t)(uintptr_t)start) == start);
172 memset(start_ptr, c, low_size);
177 /* Use paging and PAE to handle memory above 4GB up to 64GB. */
179 phys_addr_t map_addr = start & ~(LARGE_PAGE_SIZE - 1);
180 phys_addr_t offset = start - map_addr;
182 x86_phys_enter_paging();
184 /* Handle the first partial page. */
187 min(map_addr + LARGE_PAGE_SIZE, start + size);
188 phys_size_t cur_size = end - start;
189 x86_phys_memset_page(map_addr, offset, c, cur_size);
191 map_addr += LARGE_PAGE_SIZE;
193 /* Handle the complete pages. */
194 while (size > LARGE_PAGE_SIZE) {
195 x86_phys_memset_page(map_addr, 0, c, LARGE_PAGE_SIZE);
196 size -= LARGE_PAGE_SIZE;
197 map_addr += LARGE_PAGE_SIZE;
199 /* Handle the last partial page. */
201 x86_phys_memset_page(map_addr, 0, c, size);
203 x86_phys_exit_paging();