/*
 * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_uk_types.h"
#include "mali_mmu_page_directory.h"
#include "mali_memory.h"
#include "mali_l2_cache.h"
18 static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
20 u32 mali_allocate_empty_page(mali_io_address *virt_addr)
22 _mali_osk_errcode_t err;
23 mali_io_address mapping;
26 if(_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) {
27 /* Allocation failed */
28 MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to get table page for empty pgdir\n"));
32 MALI_DEBUG_ASSERT_POINTER( mapping );
34 err = fill_page(mapping, 0);
35 if (_MALI_OSK_ERR_OK != err) {
36 mali_mmu_release_table_page(address, mapping);
37 MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to zero page\n"));
45 void mali_free_empty_page(u32 address, mali_io_address virt_addr)
47 if (MALI_INVALID_PAGE != address) {
48 mali_mmu_release_table_page(address, virt_addr);
52 _mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
53 u32 *page_table, mali_io_address *page_table_mapping,
54 u32 *data_page, mali_io_address *data_page_mapping)
56 _mali_osk_errcode_t err;
58 err = mali_mmu_get_table_page(data_page, data_page_mapping);
59 if (_MALI_OSK_ERR_OK == err) {
60 err = mali_mmu_get_table_page(page_table, page_table_mapping);
61 if (_MALI_OSK_ERR_OK == err) {
62 err = mali_mmu_get_table_page(page_directory, page_directory_mapping);
63 if (_MALI_OSK_ERR_OK == err) {
64 fill_page(*data_page_mapping, 0);
65 fill_page(*page_table_mapping, *data_page | MALI_MMU_FLAGS_DEFAULT);
66 fill_page(*page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
69 mali_mmu_release_table_page(*page_table, *page_table_mapping);
70 *page_table = MALI_INVALID_PAGE;
72 mali_mmu_release_table_page(*data_page, *data_page_mapping);
73 *data_page = MALI_INVALID_PAGE;
78 void mali_destroy_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
79 u32 *page_table, mali_io_address *page_table_mapping,
80 u32 *data_page, mali_io_address *data_page_mapping)
82 if (MALI_INVALID_PAGE != *page_directory) {
83 mali_mmu_release_table_page(*page_directory, *page_directory_mapping);
84 *page_directory = MALI_INVALID_PAGE;
85 *page_directory_mapping = NULL;
88 if (MALI_INVALID_PAGE != *page_table) {
89 mali_mmu_release_table_page(*page_table, *page_table_mapping);
90 *page_table = MALI_INVALID_PAGE;
91 *page_table_mapping = NULL;
94 if (MALI_INVALID_PAGE != *data_page) {
95 mali_mmu_release_table_page(*data_page, *data_page_mapping);
96 *data_page = MALI_INVALID_PAGE;
97 *data_page_mapping = NULL;
101 static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
104 MALI_DEBUG_ASSERT_POINTER( mapping );
106 for(i = 0; i < MALI_MMU_PAGE_SIZE/4; i++) {
107 _mali_osk_mem_iowrite32_relaxed( mapping, i * sizeof(u32), data);
109 _mali_osk_mem_barrier();
113 _mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
115 const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
116 const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
117 _mali_osk_errcode_t err;
118 mali_io_address pde_mapping;
122 if (last_pde < first_pde) {
123 MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
126 for(i = first_pde; i <= last_pde; i++) {
127 if(0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
128 /* Page table not present */
129 MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
130 MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
132 err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
133 if(_MALI_OSK_ERR_OK != err) {
134 MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
137 pagedir->page_entries_mapped[i] = pde_mapping;
139 /* Update PDE, mark as present */
140 _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32),
141 pde_phys | MALI_MMU_FLAGS_PRESENT);
143 MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
144 pagedir->page_entries_usage_count[i] = 1;
146 pagedir->page_entries_usage_count[i]++;
149 _mali_osk_write_mem_barrier();
154 MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
157 const int first_pte = MALI_MMU_PTE_ENTRY(mali_address);
158 const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
160 for (i = first_pte; i <= last_pte; i++) {
161 _mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0);
165 _mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
167 const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
168 const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
171 mali_bool pd_changed = MALI_FALSE;
172 u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
173 u32 num_pages_inv = 0;
174 mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */
176 /* For all page directory entries in range. */
177 for (i = first_pde; i <= last_pde; i++) {
178 u32 size_in_pde, offset;
180 MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
181 MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);
183 /* Offset into page table, 0 if mali_address is 4MiB aligned */
184 offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
185 if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
188 size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
191 pagedir->page_entries_usage_count[i]--;
193 /* If entire page table is unused, free it */
194 if (0 == pagedir->page_entries_usage_count[i]) {
197 MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
198 /* last reference removed, no need to zero out each PTE */
200 page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)));
201 page_virt = pagedir->page_entries_mapped[i];
202 pagedir->page_entries_mapped[i] = NULL;
203 _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32), 0);
205 mali_mmu_release_table_page(page_phys, page_virt);
206 pd_changed = MALI_TRUE;
208 MALI_DEBUG_ASSERT(num_pages_inv < 2);
209 if (num_pages_inv < 2) {
210 pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
213 invalidate_all = MALI_TRUE;
216 /* If part of the page table is still in use, zero the relevant PTEs */
217 mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
221 mali_address += size_in_pde;
223 _mali_osk_write_mem_barrier();
225 /* L2 pages invalidation */
226 if (MALI_TRUE == pd_changed) {
227 MALI_DEBUG_ASSERT(num_pages_inv < 3);
228 if (num_pages_inv < 3) {
229 pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
232 invalidate_all = MALI_TRUE;
236 if (invalidate_all) {
237 mali_l2_cache_invalidate_all();
239 mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
245 struct mali_page_directory *mali_mmu_pagedir_alloc(void)
247 struct mali_page_directory *pagedir;
249 pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
250 if(NULL == pagedir) {
254 if(_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&pagedir->page_directory, &pagedir->page_directory_mapped)) {
255 _mali_osk_free(pagedir);
259 /* Zero page directory */
260 fill_page(pagedir->page_directory_mapped, 0);
265 void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
267 const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
270 /* Free referenced page tables and zero PDEs. */
271 for (i = 0; i < num_page_table_entries; i++) {
272 if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
273 u32 phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
274 _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
275 mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
278 _mali_osk_write_mem_barrier();
280 /* Free the page directory page. */
281 mali_mmu_release_table_page(pagedir->page_directory, pagedir->page_directory_mapped);
283 _mali_osk_free(pagedir);
287 void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 permission_bits)
289 u32 end_address = mali_address + size;
291 /* Map physical pages into MMU page tables */
292 for ( ; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE) {
293 MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
294 _mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
295 MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
296 phys_address | permission_bits);
300 u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
302 return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, index*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
305 /* For instrumented */
308 u32 register_writes_size;
309 u32 page_table_dump_size;
313 static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
316 info->register_writes_size += sizeof(u32)*2; /* two 32-bit words */
318 if (NULL != info->buffer) {
319 /* check that we have enough space */
320 if (info->buffer_left < sizeof(u32)*2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
322 *info->buffer = where;
325 *info->buffer = what;
328 info->buffer_left -= sizeof(u32)*2;
335 static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info * info)
338 /* 4096 for the page and 4 bytes for the address */
339 const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
340 const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
341 const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
343 info->page_table_dump_size += dump_size_in_bytes;
345 if (NULL != info->buffer) {
346 if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
348 *info->buffer = phys_addr;
351 _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
352 info->buffer += page_size_in_elements;
354 info->buffer_left -= dump_size_in_bytes;
361 static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info * info)
363 MALI_DEBUG_ASSERT_POINTER(pagedir);
364 MALI_DEBUG_ASSERT_POINTER(info);
366 if (NULL != pagedir->page_directory_mapped) {
370 mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
373 for (i = 0; i < 1024; i++) {
374 if (NULL != pagedir->page_entries_mapped[i]) {
376 mali_mmu_dump_page(pagedir->page_entries_mapped[i],
377 _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
378 i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
387 static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info * info)
389 MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
390 "set the page directory address", info));
391 MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info));
392 MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
396 _mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
398 struct dump_info info = { 0, 0, 0, NULL };
399 struct mali_session_data * session_data;
401 MALI_DEBUG_ASSERT_POINTER(args);
402 MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
404 session_data = (struct mali_session_data *)(args->ctx);
406 MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
407 MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
408 args->size = info.register_writes_size + info.page_table_dump_size;
412 _mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
414 struct dump_info info = { 0, 0, 0, NULL };
415 struct mali_session_data * session_data;
417 MALI_DEBUG_ASSERT_POINTER(args);
418 MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
419 MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
421 session_data = (struct mali_session_data *)(args->ctx);
423 info.buffer_left = args->size;
424 info.buffer = args->buffer;
426 args->register_writes = info.buffer;
427 MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
429 args->page_table_dump = info.buffer;
430 MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
432 args->register_writes_size = info.register_writes_size;
433 args->page_table_dump_size = info.page_table_dump_size;