/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif
uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif
CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif
/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
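
/* Worked example (values illustrative; the real ones depend on the
   target): with TARGET_PAGE_BITS = 12, L2_BITS = 10 and a 64-bit
   L1_MAP_ADDR_SPACE_BITS there are 64 - 12 = 52 index bits to cover,
   so V_L1_BITS_REM = 52 % 10 = 2; since 2 < 4, V_L1_BITS = 2 + 10 = 12,
   giving a 4096-entry top level (V_L1_SIZE) above four 1024-entry
   levels (V_L1_SHIFT = 52 - 12 = 40 = 4 * L2_BITS). */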
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;
/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;
struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};
/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
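    /* e.g. a 4 KiB host page gives qemu_host_page_mask = ~0xfff, so
       'addr & qemu_host_page_mask' rounds an address down to the start
       of its host page. */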
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

    return pd + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
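
/* Usage sketch (illustrative only, not part of the build):

       PageDesc *pd = page_find(addr >> TARGET_PAGE_BITS);
       if (pd && pd->first_tb) {
           ... at least one TB intersects this guest page ...
       }

   page_find() never allocates; paths that may create new entries, such
   as tb_alloc_page(), call page_find_alloc(index, 1) instead. */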
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}
static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
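
/* Illustrative walk (assumed values): with P_L2_LEVELS = 3 and
   L2_BITS = 10, the loop above consumes bits [29:20] of the page index
   at the first level, [19:10] at the second and [9:0] at the third,
   stopping early at the first leaf entry; hitting PHYS_MAP_NODE_NIL on
   the way down leaves s_index at phys_section_unassigned. */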
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
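
/* Sizing note (illustrative): with the 32 MB DEFAULT_CODE_GEN_BUFFER_SIZE,
   code_gen_max_blocks reserves 32 MB / CODE_GEN_AVG_BLOCK_SIZE descriptors,
   and code_gen_buffer_max_size keeps TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes of
   headroom so that a translation already in progress can always finish
   before the buffer-full check in tb_alloc() forces a flush. */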
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}
static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can remove tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* remove this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* remove any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
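
/* Example: set_bits(tab, 3, 7) marks bits 3..9: it ORs 0xf8 (bits 3..7)
   into tab[0] and 0x03 (bits 8..9) into tab[1]; the branches above handle
   the leading partial byte, the run of whole bytes, and the trailing
   partial byte respectively. */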
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
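    /* The expression above is the usual power-of-two round-up: e.g.
       with CODE_GEN_ALIGN = 16 (a typical value, assumed here), a
       code_gen_size of 0x23 advances code_gen_ptr by 0x30, keeping the
       next TB 16-byte aligned. */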

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end).  NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
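
/* Example (hypothetical addresses): with 4 KiB target pages, start =
   0x1ff0 and end = 0x2010 yield two calls above, one covering the range
   on page 0x1000 and one on page 0x2000; each call only removes TBs on
   its own page. */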
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end).  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, code_gen_buffer_max_size
       is used instead of code_gen_ptr for upper boundary checking */
    return (tc_ptr >= (uintptr_t)code_gen_buffer &&
            tc_ptr < (uintptr_t)(code_gen_buffer + code_gen_buffer_max_size));
}
#endif
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* remove the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* remove jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;
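
    /* e.g. len = 4 gives len_mask = ~3, so the 'addr & ~len_mask' check
       below fires exactly when addr is not 4-byte aligned. */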
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}

#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
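
/* Two memsets are needed because a TB can start on the page before
   'addr' and run into it, so its jump-cache slot hashes under the
   preceding page; clearing both TB_JMP_PAGE_SIZE-sized regions covers
   either case. */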
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;

    in_migration = enable;
    return ret;
}
target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
                                                   MemoryRegionSection *section,
                                                   target_ulong vaddr,
                                                   target_phys_addr_t paddr,
                                                   int prot,
                                                   target_ulong *address)
{
    target_phys_addr_t iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#else
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(target_phys_addr_t base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}
static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}
static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}
static void register_subpage(MemoryRegionSection *section)
{
    subpage_t *subpage;
    target_phys_addr_t base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    target_phys_addr_t start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}
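
/* Illustrative case (hypothetical layout): two MMIO regions covering
   0x1000-0x17ff and 0x1800-0x1fff share one 4 KiB target page, so the
   page's phys_map slot points at a single subpage_t whose sub_section[]
   maps each half to its own phys_sections entry. */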
static void register_multipage(MemoryRegionSection *section)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}
void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(&now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(&now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(&now);
    }
}
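
/* The function above splits a section into an unaligned head (subpage),
   a run of whole pages (multipage) and an unaligned tail (subpage):
   e.g. a region at 0x1800 of size 0x2000 (hypothetical) becomes a
   subpage for 0x1800-0x1fff, one full page at 0x2000, and a subpage for
   0x3000-0x37ff. */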
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
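    /* Rounds the request up to a whole number of huge pages: e.g. with
       a 2 MB hpagesize, a 100 MB request stays 100 MB (already a
       multiple), while 100 MB + 1 byte would become 102 MB. */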
    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
2490 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2493 QemuOpts *machine_opts;
2495 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2496 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2498 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2499 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2501 perror("qemu_madvise");
2502 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2503 "but dump_guest_core=off specified\n");
2508 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
2510 RAMBlock *new_block, *block;
2513 QLIST_FOREACH(block, &ram_list.blocks, next) {
2514 if (block->offset == addr) {
2520 assert(!new_block->idstr[0]);
2523 char *id = qdev_get_dev_path(dev);
2525 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2529 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2531 QLIST_FOREACH(block, &ram_list.blocks, next) {
2532 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
2533 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2540 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2543 RAMBlock *new_block;
2545 size = TARGET_PAGE_ALIGN(size);
2546 new_block = g_malloc0(sizeof(*new_block));
2549 new_block->offset = find_ram_offset(size);
2551 new_block->host = host;
2552 new_block->flags |= RAM_PREALLOC_MASK;
2555 #if defined (__linux__) && !defined(TARGET_S390X)
2556 new_block->host = file_ram_alloc(new_block, size, mem_path);
2557 if (!new_block->host) {
2558 new_block->host = qemu_vmalloc(size);
2559 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2562 fprintf(stderr, "-mem-path option unsupported\n");
2566 if (xen_enabled()) {
2567 xen_ram_alloc(new_block->offset, size, mr);
2568 } else if (kvm_enabled()) {
2569 /* some s390/kvm configurations have special constraints */
2570 new_block->host = kvm_vmalloc(size);
2572 new_block->host = qemu_vmalloc(size);
2575 * Under HAX, QEMU allocates the virtual address range and the HAX
2576 * kernel module populates it with physical memory. There is currently
2577 * no paging, so the user should ensure enough free memory in advance
2582 ret = hax_populate_ram((uint64_t)new_block->host, size);
2585 fprintf(stderr, "Hax failed to populate ram\n");
2591 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2594 new_block->length = size;
2596 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2598 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
2599 last_ram_offset() >> TARGET_PAGE_BITS);
2600 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2601 0, size >> TARGET_PAGE_BITS);
2602 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
2604 qemu_ram_setup_dump(new_block->host, size);
2607 kvm_setup_guest_memory(new_block->host, size);
2609 return new_block->offset;
2612 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
2614 return qemu_ram_alloc_from_ptr(size, NULL, mr);
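/*
 * Usage sketch (hypothetical caller, not from this file): a device that
 * already owns a host buffer can register it as guest RAM instead of
 * letting QEMU allocate one; "vram_host" and "vram_mr" are assumed to be
 * prepared by the caller.
 *
 *     ram_addr_t vram_offset;
 *     vram_offset = qemu_ram_alloc_from_ptr(16 * 1024 * 1024,
 *                                           vram_host, vram_mr);
 */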
2617 void qemu_ram_free_from_ptr(ram_addr_t addr)
2621 QLIST_FOREACH(block, &ram_list.blocks, next) {
2622 if (addr == block->offset) {
2623 QLIST_REMOVE(block, next);
2630 void qemu_ram_free(ram_addr_t addr)
2634 QLIST_FOREACH(block, &ram_list.blocks, next) {
2635 if (addr == block->offset) {
2636 QLIST_REMOVE(block, next);
2637 if (block->flags & RAM_PREALLOC_MASK) {
2639 } else if (mem_path) {
2640 #if defined (__linux__) && !defined(TARGET_S390X)
2642 munmap(block->host, block->length);
2645 qemu_vfree(block->host);
2651 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2652 munmap(block->host, block->length);
2654 if (xen_enabled()) {
2655 xen_invalidate_map_cache_entry(block->host);
2657 qemu_vfree(block->host);
2669 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2676 QLIST_FOREACH(block, &ram_list.blocks, next) {
2677 offset = addr - block->offset;
2678 if (offset < block->length) {
2679 vaddr = block->host + offset;
2680 if (block->flags & RAM_PREALLOC_MASK) {
2684 munmap(vaddr, length);
2686 #if defined(__linux__) && !defined(TARGET_S390X)
2689 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2692 flags |= MAP_PRIVATE;
2694 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2695 flags, block->fd, offset);
2697 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2698 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2705 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2706 flags |= MAP_SHARED | MAP_ANONYMOUS;
2707 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2710 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2711 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2715 if (area != vaddr) {
2716 fprintf(stderr, "Could not remap addr: "
2717 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
2721 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2722 qemu_ram_setup_dump(vaddr, length);
2728 #endif /* !_WIN32 */
2730 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2731 With the exception of the softmmu code in this file, this should
2732 only be used for local memory (e.g. video ram) that the device owns,
2733 and knows it isn't going to access beyond the end of the block.
2735 It should not be used for general purpose DMA.
2736 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2738 void *qemu_get_ram_ptr(ram_addr_t addr)
2742 QLIST_FOREACH(block, &ram_list.blocks, next) {
2743 if (addr - block->offset < block->length) {
2744 /* Move this entry to the start of the list. */
2745 if (block != QLIST_FIRST(&ram_list.blocks)) {
2746 QLIST_REMOVE(block, next);
2747 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2749 if (xen_enabled()) {
2750 /* We need to check if the requested address is in the RAM
2751 * because we don't want to map the entire memory in QEMU.
2752 * In that case just map until the end of the page.
2754 if (block->offset == 0) {
2755 return xen_map_cache(addr, 0, 0);
2756 } else if (block->host == NULL) {
2758 xen_map_cache(block->offset, block->length, 1);
2761 return block->host + (addr - block->offset);
2765 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2771 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2772 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2774 void *qemu_safe_ram_ptr(ram_addr_t addr)
2778 QLIST_FOREACH(block, &ram_list.blocks, next) {
2779 if (addr - block->offset < block->length) {
2780 if (xen_enabled()) {
2781 /* We need to check if the requested address is in the RAM
2782 * because we don't want to map the entire memory in QEMU.
2783 * In that case just map until the end of the page.
2785 if (block->offset == 0) {
2786 return xen_map_cache(addr, 0, 0);
2787 } else if (block->host == NULL) {
2789 xen_map_cache(block->offset, block->length, 1);
2792 return block->host + (addr - block->offset);
2796 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2802 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2803 * but takes a size argument */
2804 void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
2809 if (xen_enabled()) {
2810 return xen_map_cache(addr, *size, 1);
2814 QLIST_FOREACH(block, &ram_list.blocks, next) {
2815 if (addr - block->offset < block->length) {
2816 if (addr - block->offset + *size > block->length)
2817 *size = block->length - addr + block->offset;
2818 return block->host + (addr - block->offset);
2822 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2827 void qemu_put_ram_ptr(void *addr)
2829 trace_qemu_put_ram_ptr(addr);
2832 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2835 uint8_t *host = ptr;
2837 if (xen_enabled()) {
2838 *ram_addr = xen_ram_addr_from_mapcache(ptr);
2842 QLIST_FOREACH(block, &ram_list.blocks, next) {
2843 /* This can happen when the block is not mapped. */
2844 if (block->host == NULL) {
2847 if (host - block->host < block->length) {
2848 *ram_addr = block->offset + (host - block->host);
2856 /* Some of the softmmu routines need to translate from a host pointer
2857 (typically a TLB entry) back to a ram offset. */
2858 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2860 ram_addr_t ram_addr;
2862 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2863 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2869 static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2872 #ifdef DEBUG_UNASSIGNED
2873 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2875 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2876 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
2881 static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2882 uint64_t val, unsigned size)
2884 #ifdef DEBUG_UNASSIGNED
2885 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
2887 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2888 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
2892 static const MemoryRegionOps unassigned_mem_ops = {
2893 .read = unassigned_mem_read,
2894 .write = unassigned_mem_write,
2895 .endianness = DEVICE_NATIVE_ENDIAN,
2898 static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2904 static void error_mem_write(void *opaque, target_phys_addr_t addr,
2905 uint64_t value, unsigned size)
2910 static const MemoryRegionOps error_mem_ops = {
2911 .read = error_mem_read,
2912 .write = error_mem_write,
2913 .endianness = DEVICE_NATIVE_ENDIAN,
2916 static const MemoryRegionOps rom_mem_ops = {
2917 .read = error_mem_read,
2918 .write = unassigned_mem_write,
2919 .endianness = DEVICE_NATIVE_ENDIAN,
2922 static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2923 uint64_t val, unsigned size)
2926 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2927 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2928 #if !defined(CONFIG_USER_ONLY)
2929 tb_invalidate_phys_page_fast(ram_addr, size);
2930 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2935 stb_p(qemu_get_ram_ptr(ram_addr), val);
2938 stw_p(qemu_get_ram_ptr(ram_addr), val);
2941 stl_p(qemu_get_ram_ptr(ram_addr), val);
2946 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2947 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
2948 /* we remove the notdirty callback only if the code has been flushed */
2950 if (dirty_flags == 0xff)
2951 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2954 static const MemoryRegionOps notdirty_mem_ops = {
2955 .read = error_mem_read,
2956 .write = notdirty_mem_write,
2957 .endianness = DEVICE_NATIVE_ENDIAN,
2960 /* Generate a debug exception if a watchpoint has been hit. */
2961 static void check_watchpoint(int offset, int len_mask, int flags)
2963 CPUArchState *env = cpu_single_env;
2964 target_ulong pc, cs_base;
2965 TranslationBlock *tb;
2970 if (env->watchpoint_hit) {
2971 /* We re-entered the check after replacing the TB. Now raise
2972 * the debug interrupt so that it will trigger after the
2973 * current instruction. */
2974 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2977 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2978 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2979 if ((vaddr == (wp->vaddr & len_mask) ||
2980 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2981 wp->flags |= BP_WATCHPOINT_HIT;
2982 if (!env->watchpoint_hit) {
2983 env->watchpoint_hit = wp;
2984 tb = tb_find_pc(env->mem_io_pc);
2986 cpu_abort(env, "check_watchpoint: could not find TB for "
2987 "pc=%p", (void *)env->mem_io_pc);
2989 cpu_restore_state(tb, env, env->mem_io_pc);
2990 tb_phys_invalidate(tb, -1);
2991 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2992 env->exception_index = EXCP_DEBUG;
2995 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2996 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2997 cpu_resume_from_signal(env, NULL);
3001 wp->flags &= ~BP_WATCHPOINT_HIT;
3006 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3007 so these check for a hit then pass through to the normal out-of-line phys routines. */
3009 static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3012 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3014 case 1: return ldub_phys(addr);
3015 case 2: return lduw_phys(addr);
3016 case 4: return ldl_phys(addr);
3021 static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3022 uint64_t val, unsigned size)
3024 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3027 stb_phys(addr, val);
3030 stw_phys(addr, val);
3033 stl_phys(addr, val);
3039 static const MemoryRegionOps watch_mem_ops = {
3040 .read = watch_mem_read,
3041 .write = watch_mem_write,
3042 .endianness = DEVICE_NATIVE_ENDIAN,
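/*
 * Usage sketch: the watchpoints consulted above are installed with
 * cpu_watchpoint_insert(); the call below reflects the contemporary
 * env-based API and is an illustration, not code from this file.
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 */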
3045 static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3048 subpage_t *mmio = opaque;
3049 unsigned int idx = SUBPAGE_IDX(addr);
3050 MemoryRegionSection *section;
3051 #if defined(DEBUG_SUBPAGE)
3052 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3053 mmio, len, addr, idx);
3056 section = &phys_sections[mmio->sub_section[idx]];
3058 addr -= section->offset_within_address_space;
3059 addr += section->offset_within_region;
3060 return io_mem_read(section->mr, addr, len);
3063 static void subpage_write(void *opaque, target_phys_addr_t addr,
3064 uint64_t value, unsigned len)
3066 subpage_t *mmio = opaque;
3067 unsigned int idx = SUBPAGE_IDX(addr);
3068 MemoryRegionSection *section;
3069 #if defined(DEBUG_SUBPAGE)
3070 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3071 " idx %d value %"PRIx64"\n",
3072 __func__, mmio, len, addr, idx, value);
3075 section = &phys_sections[mmio->sub_section[idx]];
3077 addr -= section->offset_within_address_space;
3078 addr += section->offset_within_region;
3079 io_mem_write(section->mr, addr, value, len);
3082 static const MemoryRegionOps subpage_ops = {
3083 .read = subpage_read,
3084 .write = subpage_write,
3085 .endianness = DEVICE_NATIVE_ENDIAN,
3088 static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3091 ram_addr_t raddr = addr;
3092 void *ptr = qemu_get_ram_ptr(raddr);
3094 case 1: return ldub_p(ptr);
3095 case 2: return lduw_p(ptr);
3096 case 4: return ldl_p(ptr);
3101 static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3102 uint64_t value, unsigned size)
3104 ram_addr_t raddr = addr;
3105 void *ptr = qemu_get_ram_ptr(raddr);
3107 case 1: return stb_p(ptr, value);
3108 case 2: return stw_p(ptr, value);
3109 case 4: return stl_p(ptr, value);
3114 static const MemoryRegionOps subpage_ram_ops = {
3115 .read = subpage_ram_read,
3116 .write = subpage_ram_write,
3117 .endianness = DEVICE_NATIVE_ENDIAN,
3120 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3125 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3127 idx = SUBPAGE_IDX(start);
3128 eidx = SUBPAGE_IDX(end);
3129 #if defined(DEBUG_SUBPAGE)
3130 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
3131 mmio, start, end, idx, eidx, section);
3133 if (memory_region_is_ram(phys_sections[section].mr)) {
3134 MemoryRegionSection new_section = phys_sections[section];
3135 new_section.mr = &io_mem_subpage_ram;
3136 section = phys_section_add(&new_section);
3138 for (; idx <= eidx; idx++) {
3139 mmio->sub_section[idx] = section;
3145 static subpage_t *subpage_init(target_phys_addr_t base)
3149 mmio = g_malloc0(sizeof(subpage_t));
3152 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3153 "subpage", TARGET_PAGE_SIZE);
3154 mmio->iomem.subpage = true;
3155 #if defined(DEBUG_SUBPAGE)
3156 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3157 mmio, base, TARGET_PAGE_SIZE);
3159 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
3164 static uint16_t dummy_section(MemoryRegion *mr)
3166 MemoryRegionSection section = {
3168 .offset_within_address_space = 0,
3169 .offset_within_region = 0,
3173 return phys_section_add(&section);
3176 MemoryRegion *iotlb_to_region(target_phys_addr_t index)
3178 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
3181 static void io_mem_init(void)
3183 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3184 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3185 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3186 "unassigned", UINT64_MAX);
3187 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3188 "notdirty", UINT64_MAX);
3189 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3190 "subpage-ram", UINT64_MAX);
3191 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3192 "watch", UINT64_MAX);
3195 static void core_begin(MemoryListener *listener)
3197 destroy_all_mappings();
3198 phys_sections_clear();
3199 phys_map.ptr = PHYS_MAP_NODE_NIL;
3200 phys_section_unassigned = dummy_section(&io_mem_unassigned);
3201 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3202 phys_section_rom = dummy_section(&io_mem_rom);
3203 phys_section_watch = dummy_section(&io_mem_watch);
3206 static void core_commit(MemoryListener *listener)
3210 /* since each CPU stores ram addresses in its TLB cache, we must
3211 reset the modified entries */
3213 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3218 static void core_region_add(MemoryListener *listener,
3219 MemoryRegionSection *section)
3221 cpu_register_physical_memory_log(section, section->readonly);
3224 static void core_region_del(MemoryListener *listener,
3225 MemoryRegionSection *section)
3229 static void core_region_nop(MemoryListener *listener,
3230 MemoryRegionSection *section)
3232 cpu_register_physical_memory_log(section, section->readonly);
3235 static void core_log_start(MemoryListener *listener,
3236 MemoryRegionSection *section)
3240 static void core_log_stop(MemoryListener *listener,
3241 MemoryRegionSection *section)
3245 static void core_log_sync(MemoryListener *listener,
3246 MemoryRegionSection *section)
3250 static void core_log_global_start(MemoryListener *listener)
3252 cpu_physical_memory_set_dirty_tracking(1);
3255 static void core_log_global_stop(MemoryListener *listener)
3257 cpu_physical_memory_set_dirty_tracking(0);
3260 static void core_eventfd_add(MemoryListener *listener,
3261 MemoryRegionSection *section,
3262 bool match_data, uint64_t data, EventNotifier *e)
3266 static void core_eventfd_del(MemoryListener *listener,
3267 MemoryRegionSection *section,
3268 bool match_data, uint64_t data, EventNotifier *e)
3272 static void io_begin(MemoryListener *listener)
3276 static void io_commit(MemoryListener *listener)
3280 static void io_region_add(MemoryListener *listener,
3281 MemoryRegionSection *section)
3283 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3285 mrio->mr = section->mr;
3286 mrio->offset = section->offset_within_region;
3287 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
3288 section->offset_within_address_space, section->size);
3289 ioport_register(&mrio->iorange);
3292 static void io_region_del(MemoryListener *listener,
3293 MemoryRegionSection *section)
3295 isa_unassign_ioport(section->offset_within_address_space, section->size);
3298 static void io_region_nop(MemoryListener *listener,
3299 MemoryRegionSection *section)
3303 static void io_log_start(MemoryListener *listener,
3304 MemoryRegionSection *section)
3308 static void io_log_stop(MemoryListener *listener,
3309 MemoryRegionSection *section)
3313 static void io_log_sync(MemoryListener *listener,
3314 MemoryRegionSection *section)
3318 static void io_log_global_start(MemoryListener *listener)
3322 static void io_log_global_stop(MemoryListener *listener)
3326 static void io_eventfd_add(MemoryListener *listener,
3327 MemoryRegionSection *section,
3328 bool match_data, uint64_t data, EventNotifier *e)
3332 static void io_eventfd_del(MemoryListener *listener,
3333 MemoryRegionSection *section,
3334 bool match_data, uint64_t data, EventNotifier *e)
3338 static MemoryListener core_memory_listener = {
3339 .begin = core_begin,
3340 .commit = core_commit,
3341 .region_add = core_region_add,
3342 .region_del = core_region_del,
3343 .region_nop = core_region_nop,
3344 .log_start = core_log_start,
3345 .log_stop = core_log_stop,
3346 .log_sync = core_log_sync,
3347 .log_global_start = core_log_global_start,
3348 .log_global_stop = core_log_global_stop,
3349 .eventfd_add = core_eventfd_add,
3350 .eventfd_del = core_eventfd_del,
3354 static MemoryListener io_memory_listener = {
3356 .commit = io_commit,
3357 .region_add = io_region_add,
3358 .region_del = io_region_del,
3359 .region_nop = io_region_nop,
3360 .log_start = io_log_start,
3361 .log_stop = io_log_stop,
3362 .log_sync = io_log_sync,
3363 .log_global_start = io_log_global_start,
3364 .log_global_stop = io_log_global_stop,
3365 .eventfd_add = io_eventfd_add,
3366 .eventfd_del = io_eventfd_del,
3370 static void memory_map_init(void)
3372 system_memory = g_malloc(sizeof(*system_memory));
3373 memory_region_init(system_memory, "system", INT64_MAX);
3374 set_system_memory_map(system_memory);
3376 system_io = g_malloc(sizeof(*system_io));
3377 memory_region_init(system_io, "io", 65536);
3378 set_system_io_map(system_io);
3380 memory_listener_register(&core_memory_listener, system_memory);
3381 memory_listener_register(&io_memory_listener, system_io);
3384 MemoryRegion *get_system_memory(void)
3386 return system_memory;
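/*
 * Usage sketch (hypothetical board code): devices end up in this address
 * space via memory_region_add_subregion(); "mr" and the base address are
 * assumptions.
 *
 *     memory_region_add_subregion(get_system_memory(), 0x10000000, mr);
 */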
3389 MemoryRegion *get_system_io(void)
3394 #endif /* !defined(CONFIG_USER_ONLY) */
3396 /* physical memory access (slow version, mainly for debug) */
3397 #if defined(CONFIG_USER_ONLY)
3398 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
3399 uint8_t *buf, int len, int is_write)
3406 page = addr & TARGET_PAGE_MASK;
3407 l = (page + TARGET_PAGE_SIZE) - addr;
3410 flags = page_get_flags(page);
3411 if (!(flags & PAGE_VALID))
3414 if (!(flags & PAGE_WRITE))
3416 /* XXX: this code should not depend on lock_user */
3417 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3420 unlock_user(p, addr, l);
3422 if (!(flags & PAGE_READ))
3424 /* XXX: this code should not depend on lock_user */
3425 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3428 unlock_user(p, addr, 0);
3438 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3439 int len, int is_write)
3444 target_phys_addr_t page;
3445 MemoryRegionSection *section;
3448 page = addr & TARGET_PAGE_MASK;
3449 l = (page + TARGET_PAGE_SIZE) - addr;
3452 section = phys_page_find(page >> TARGET_PAGE_BITS);
3455 if (!memory_region_is_ram(section->mr)) {
3456 target_phys_addr_t addr1;
3457 addr1 = memory_region_section_addr(section, addr);
3458 /* XXX: could force cpu_single_env to NULL to avoid potential bugs */
3460 if (l >= 4 && ((addr1 & 3) == 0)) {
3461 /* 32 bit write access */
3463 io_mem_write(section->mr, addr1, val, 4);
3465 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3466 /* 16 bit write access */
3468 io_mem_write(section->mr, addr1, val, 2);
3471 /* 8 bit write access */
3473 io_mem_write(section->mr, addr1, val, 1);
3476 } else if (!section->readonly) {
3478 addr1 = memory_region_get_ram_addr(section->mr)
3479 + memory_region_section_addr(section, addr);
3481 ptr = qemu_get_ram_ptr(addr1);
3482 memcpy(ptr, buf, l);
3483 if (!cpu_physical_memory_is_dirty(addr1)) {
3484 /* invalidate code */
3485 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3487 cpu_physical_memory_set_dirty_flags(
3488 addr1, (0xff & ~CODE_DIRTY_FLAG));
3490 qemu_put_ram_ptr(ptr);
3493 if (!(memory_region_is_ram(section->mr) ||
3494 memory_region_is_romd(section->mr))) {
3495 target_phys_addr_t addr1;
3497 addr1 = memory_region_section_addr(section, addr);
3498 if (l >= 4 && ((addr1 & 3) == 0)) {
3499 /* 32 bit read access */
3500 val = io_mem_read(section->mr, addr1, 4);
3503 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3504 /* 16 bit read access */
3505 val = io_mem_read(section->mr, addr1, 2);
3509 /* 8 bit read access */
3510 val = io_mem_read(section->mr, addr1, 1);
3516 ptr = qemu_get_ram_ptr(section->mr->ram_addr
3517 + memory_region_section_addr(section,
3519 memcpy(buf, ptr, l);
3520 qemu_put_ram_ptr(ptr);
3529 /* used for ROM loading: can write to RAM and ROM */
3530 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3531 const uint8_t *buf, int len)
3535 target_phys_addr_t page;
3536 MemoryRegionSection *section;
3539 page = addr & TARGET_PAGE_MASK;
3540 l = (page + TARGET_PAGE_SIZE) - addr;
3543 section = phys_page_find(page >> TARGET_PAGE_BITS);
3545 if (!(memory_region_is_ram(section->mr) ||
3546 memory_region_is_romd(section->mr))) {
3549 unsigned long addr1;
3550 addr1 = memory_region_get_ram_addr(section->mr)
3551 + memory_region_section_addr(section, addr);
3553 ptr = qemu_get_ram_ptr(addr1);
3554 memcpy(ptr, buf, l);
3555 qemu_put_ram_ptr(ptr);
3565 target_phys_addr_t addr;
3566 target_phys_addr_t len;
3569 static BounceBuffer bounce;
3571 typedef struct MapClient {
3573 void (*callback)(void *opaque);
3574 QLIST_ENTRY(MapClient) link;
3577 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3578 = QLIST_HEAD_INITIALIZER(map_client_list);
3580 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3582 MapClient *client = g_malloc(sizeof(*client));
3584 client->opaque = opaque;
3585 client->callback = callback;
3586 QLIST_INSERT_HEAD(&map_client_list, client, link);
3590 void cpu_unregister_map_client(void *_client)
3592 MapClient *client = (MapClient *)_client;
3594 QLIST_REMOVE(client, link);
3598 static void cpu_notify_map_clients(void)
3602 while (!QLIST_EMPTY(&map_client_list)) {
3603 client = QLIST_FIRST(&map_client_list);
3604 client->callback(client->opaque);
3605 cpu_unregister_map_client(client);
3609 /* Map a physical memory region into a host virtual address.
3610 * May map a subset of the requested range, given by and returned in *plen.
3611 * May return NULL if resources needed to perform the mapping are exhausted.
3612 * Use only for reads OR writes - not for read-modify-write operations.
3613 * Use cpu_register_map_client() to know when retrying the map operation is
3614 * likely to succeed.
3616 void *cpu_physical_memory_map(target_phys_addr_t addr,
3617 target_phys_addr_t *plen,
3620 target_phys_addr_t len = *plen;
3621 target_phys_addr_t todo = 0;
3623 target_phys_addr_t page;
3624 MemoryRegionSection *section;
3625 ram_addr_t raddr = RAM_ADDR_MAX;
3630 page = addr & TARGET_PAGE_MASK;
3631 l = (page + TARGET_PAGE_SIZE) - addr;
3634 section = phys_page_find(page >> TARGET_PAGE_BITS);
3636 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
3637 if (todo || bounce.buffer) {
3640 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3644 cpu_physical_memory_read(addr, bounce.buffer, l);
3648 return bounce.buffer;
3651 raddr = memory_region_get_ram_addr(section->mr)
3652 + memory_region_section_addr(section, addr);
3660 ret = qemu_ram_ptr_length(raddr, &rlen);
3665 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3666 * Will also mark the memory as dirty if is_write == 1. access_len gives
3667 * the amount of memory that was actually read or written by the caller.
3669 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3670 int is_write, target_phys_addr_t access_len)
3672 if (buffer != bounce.buffer) {
3674 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
3675 while (access_len) {
3677 l = TARGET_PAGE_SIZE;
3680 if (!cpu_physical_memory_is_dirty(addr1)) {
3681 /* invalidate code */
3682 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3684 cpu_physical_memory_set_dirty_flags(
3685 addr1, (0xff & ~CODE_DIRTY_FLAG));
3691 if (xen_enabled()) {
3692 xen_invalidate_map_cache_entry(buffer);
3697 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3699 qemu_vfree(bounce.buffer);
3700 bounce.buffer = NULL;
3701 cpu_notify_map_clients();
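/*
 * Usage sketch: the canonical map/modify/unmap pattern for DMA-style
 * access. Everything except the two cpu_physical_memory_* calls and
 * cpu_register_map_client() is a hypothetical name.
 *
 *     target_phys_addr_t len = size;
 *     void *buf = cpu_physical_memory_map(gpa, &len, 1);
 *     if (buf) {
 *         fill_buffer(buf, len);
 *         cpu_physical_memory_unmap(buf, len, 1, len);
 *     } else {
 *         cpu_register_map_client(opaque, retry_dma);
 *     }
 */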
3704 /* warning: addr must be aligned */
3705 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3706 enum device_endian endian)
3710 MemoryRegionSection *section;
3712 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3714 if (!(memory_region_is_ram(section->mr) ||
3715 memory_region_is_romd(section->mr))) {
3717 addr = memory_region_section_addr(section, addr);
3718 val = io_mem_read(section->mr, addr, 4);
3719 #if defined(TARGET_WORDS_BIGENDIAN)
3720 if (endian == DEVICE_LITTLE_ENDIAN) {
3724 if (endian == DEVICE_BIG_ENDIAN) {
3730 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
3732 + memory_region_section_addr(section, addr));
3734 case DEVICE_LITTLE_ENDIAN:
3735 val = ldl_le_p(ptr);
3737 case DEVICE_BIG_ENDIAN:
3738 val = ldl_be_p(ptr);
3748 uint32_t ldl_phys(target_phys_addr_t addr)
3750 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3753 uint32_t ldl_le_phys(target_phys_addr_t addr)
3755 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3758 uint32_t ldl_be_phys(target_phys_addr_t addr)
3760 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3763 /* warning: addr must be aligned */
3764 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3765 enum device_endian endian)
3769 MemoryRegionSection *section;
3771 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3773 if (!(memory_region_is_ram(section->mr) ||
3774 memory_region_is_romd(section->mr))) {
3776 addr = memory_region_section_addr(section, addr);
3778 /* XXX This is broken when device endian != cpu endian.
3779 Fix and add "endian" variable check */
3780 #ifdef TARGET_WORDS_BIGENDIAN
3781 val = io_mem_read(section->mr, addr, 4) << 32;
3782 val |= io_mem_read(section->mr, addr + 4, 4);
3784 val = io_mem_read(section->mr, addr, 4);
3785 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
3789 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
3791 + memory_region_section_addr(section, addr));
3793 case DEVICE_LITTLE_ENDIAN:
3794 val = ldq_le_p(ptr);
3796 case DEVICE_BIG_ENDIAN:
3797 val = ldq_be_p(ptr);
3807 uint64_t ldq_phys(target_phys_addr_t addr)
3809 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3812 uint64_t ldq_le_phys(target_phys_addr_t addr)
3814 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3817 uint64_t ldq_be_phys(target_phys_addr_t addr)
3819 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3823 uint32_t ldub_phys(target_phys_addr_t addr)
3826 cpu_physical_memory_read(addr, &val, 1);
3830 /* warning: addr must be aligned */
3831 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3832 enum device_endian endian)
3836 MemoryRegionSection *section;
3838 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3840 if (!(memory_region_is_ram(section->mr) ||
3841 memory_region_is_romd(section->mr))) {
3843 addr = memory_region_section_addr(section, addr);
3844 val = io_mem_read(section->mr, addr, 2);
3845 #if defined(TARGET_WORDS_BIGENDIAN)
3846 if (endian == DEVICE_LITTLE_ENDIAN) {
3850 if (endian == DEVICE_BIG_ENDIAN) {
3856 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
3858 + memory_region_section_addr(section, addr));
3860 case DEVICE_LITTLE_ENDIAN:
3861 val = lduw_le_p(ptr);
3863 case DEVICE_BIG_ENDIAN:
3864 val = lduw_be_p(ptr);
3874 uint32_t lduw_phys(target_phys_addr_t addr)
3876 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3879 uint32_t lduw_le_phys(target_phys_addr_t addr)
3881 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3884 uint32_t lduw_be_phys(target_phys_addr_t addr)
3886 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3889 /* warning: addr must be aligned. The ram page is not masked as dirty
3890 and the code inside is not invalidated. It is useful if the dirty
3891 bits are used to track modified PTEs */
3892 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3895 MemoryRegionSection *section;
3897 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3899 if (!memory_region_is_ram(section->mr) || section->readonly) {
3900 addr = memory_region_section_addr(section, addr);
3901 if (memory_region_is_ram(section->mr)) {
3902 section = &phys_sections[phys_section_rom];
3904 io_mem_write(section->mr, addr, val, 4);
3906 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
3908 + memory_region_section_addr(section, addr);
3909 ptr = qemu_get_ram_ptr(addr1);
3912 if (unlikely(in_migration)) {
3913 if (!cpu_physical_memory_is_dirty(addr1)) {
3914 /* invalidate code */
3915 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3917 cpu_physical_memory_set_dirty_flags(
3918 addr1, (0xff & ~CODE_DIRTY_FLAG));
3924 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3927 MemoryRegionSection *section;
3929 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3931 if (!memory_region_is_ram(section->mr) || section->readonly) {
3932 addr = memory_region_section_addr(section, addr);
3933 if (memory_region_is_ram(section->mr)) {
3934 section = &phys_sections[phys_section_rom];
3936 #ifdef TARGET_WORDS_BIGENDIAN
3937 io_mem_write(section->mr, addr, val >> 32, 4);
3938 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
3940 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3941 io_mem_write(section->mr, addr + 4, val >> 32, 4);
3944 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
3946 + memory_region_section_addr(section, addr));
3951 /* warning: addr must be aligned */
3952 static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
3953 enum device_endian endian)
3956 MemoryRegionSection *section;
3958 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3960 if (!memory_region_is_ram(section->mr) || section->readonly) {
3961 addr = memory_region_section_addr(section, addr);
3962 if (memory_region_is_ram(section->mr)) {
3963 section = &phys_sections[phys_section_rom];
3965 #if defined(TARGET_WORDS_BIGENDIAN)
3966 if (endian == DEVICE_LITTLE_ENDIAN) {
3970 if (endian == DEVICE_BIG_ENDIAN) {
3974 io_mem_write(section->mr, addr, val, 4);
3976 unsigned long addr1;
3977 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
3978 + memory_region_section_addr(section, addr);
3980 ptr = qemu_get_ram_ptr(addr1);
3982 case DEVICE_LITTLE_ENDIAN:
3985 case DEVICE_BIG_ENDIAN:
3992 if (!cpu_physical_memory_is_dirty(addr1)) {
3993 /* invalidate code */
3994 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3996 cpu_physical_memory_set_dirty_flags(addr1,
3997 (0xff & ~CODE_DIRTY_FLAG));
4002 void stl_phys(target_phys_addr_t addr, uint32_t val)
4004 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4007 void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4009 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4012 void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4014 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4018 void stb_phys(target_phys_addr_t addr, uint32_t val)
4021 cpu_physical_memory_write(addr, &v, 1);
4024 /* warning: addr must be aligned */
4025 static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4026 enum device_endian endian)
4029 MemoryRegionSection *section;
4031 section = phys_page_find(addr >> TARGET_PAGE_BITS);
4033 if (!memory_region_is_ram(section->mr) || section->readonly) {
4034 addr = memory_region_section_addr(section, addr);
4035 if (memory_region_is_ram(section->mr)) {
4036 section = &phys_sections[phys_section_rom];
4038 #if defined(TARGET_WORDS_BIGENDIAN)
4039 if (endian == DEVICE_LITTLE_ENDIAN) {
4043 if (endian == DEVICE_BIG_ENDIAN) {
4047 io_mem_write(section->mr, addr, val, 2);
4049 unsigned long addr1;
4050 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4051 + memory_region_section_addr(section, addr);
4053 ptr = qemu_get_ram_ptr(addr1);
4055 case DEVICE_LITTLE_ENDIAN:
4058 case DEVICE_BIG_ENDIAN:
4065 if (!cpu_physical_memory_is_dirty(addr1)) {
4066 /* invalidate code */
4067 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4069 cpu_physical_memory_set_dirty_flags(addr1,
4070 (0xff & ~CODE_DIRTY_FLAG));
4075 void stw_phys(target_phys_addr_t addr, uint32_t val)
4077 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4080 void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4082 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4085 void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4087 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4091 void stq_phys(target_phys_addr_t addr, uint64_t val)
4094 cpu_physical_memory_write(addr, &val, 8);
4097 void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4099 val = cpu_to_le64(val);
4100 cpu_physical_memory_write(addr, &val, 8);
4103 void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4105 val = cpu_to_be64(val);
4106 cpu_physical_memory_write(addr, &val, 8);
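/*
 * Usage sketch (hypothetical device registers): the _le/_be variants let
 * callers state the device's byte order explicitly instead of relying on
 * the target's native endianness; REG_BASE/REG_CTRL/REG_STAT are assumed
 * constants.
 *
 *     stl_le_phys(REG_BASE + REG_CTRL, 1);
 *     uint32_t status = ldl_le_phys(REG_BASE + REG_STAT);
 */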
4109 /* virtual memory access for debug (includes writing to ROM) */
4110 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
4111 uint8_t *buf, int len, int is_write)
4114 target_phys_addr_t phys_addr;
4118 page = addr & TARGET_PAGE_MASK;
4119 phys_addr = cpu_get_phys_page_debug(env, page);
4120 /* if no physical page is mapped, return an error */
4121 if (phys_addr == -1)
4123 l = (page + TARGET_PAGE_SIZE) - addr;
4126 phys_addr += (addr & ~TARGET_PAGE_MASK);
4128 cpu_physical_memory_write_rom(phys_addr, buf, l);
4130 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
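/*
 * Usage sketch: a debugger front end (e.g. a gdb stub) reads guest
 * virtual memory through this helper; "env" and "gva" are assumptions.
 *
 *     uint8_t word[4];
 *     if (cpu_memory_rw_debug(env, gva, word, sizeof(word), 0) < 0) {
 *         return;
 *     }
 *
 * A negative return means no physical page was mapped at "gva".
 */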
4139 /* in deterministic execution mode, instructions doing device I/Os
4140 must be at the end of the TB */
4141 void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
4143 TranslationBlock *tb;
4145 target_ulong pc, cs_base;
4148 tb = tb_find_pc(retaddr);
4150 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4153 n = env->icount_decr.u16.low + tb->icount;
4154 cpu_restore_state(tb, env, retaddr);
4155 /* Calculate how many instructions had been executed before the fault occurred. */
4157 n = n - env->icount_decr.u16.low;
4158 /* Generate a new TB ending on the I/O insn. */
4160 /* On MIPS and SH, delay slot instructions can only be restarted if
4161 they were already the first instruction in the TB. If this is not
4162 the first instruction in a TB then re-execute the preceding branch. */
4164 #if defined(TARGET_MIPS)
4165 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4166 env->active_tc.PC -= 4;
4167 env->icount_decr.u16.low++;
4168 env->hflags &= ~MIPS_HFLAG_BMASK;
4170 #elif defined(TARGET_SH4)
4171 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4174 env->icount_decr.u16.low++;
4175 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4178 /* This should never happen. */
4179 if (n > CF_COUNT_MASK)
4180 cpu_abort(env, "TB too big during recompile");
4182 cflags = n | CF_LAST_IO;
4184 cs_base = tb->cs_base;
4186 tb_phys_invalidate(tb, -1);
4187 /* FIXME: In theory this could raise an exception. In practice
4188 we have already translated the block once so it's probably ok. */
4189 tb_gen_code(env, pc, cs_base, flags, cflags);
4190 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4191 the first in the TB) then we end up generating a whole new TB and
4192 repeating the fault, which is horribly inefficient.
4193 Better would be to execute just this insn uncached, or generate a second new TB. */
4195 cpu_resume_from_signal(env, NULL);
4198 #if !defined(CONFIG_USER_ONLY)
4200 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4202 int i, target_code_size, max_target_code_size;
4203 int direct_jmp_count, direct_jmp2_count, cross_page;
4204 TranslationBlock *tb;
4206 target_code_size = 0;
4207 max_target_code_size = 0;
4209 direct_jmp_count = 0;
4210 direct_jmp2_count = 0;
4211 for(i = 0; i < nb_tbs; i++) {
4213 target_code_size += tb->size;
4214 if (tb->size > max_target_code_size)
4215 max_target_code_size = tb->size;
4216 if (tb->page_addr[1] != -1)
4218 if (tb->tb_next_offset[0] != 0xffff) {
4220 if (tb->tb_next_offset[1] != 0xffff) {
4221 direct_jmp2_count++;
4225 /* XXX: avoid using doubles ? */
4226 cpu_fprintf(f, "Translation buffer state:\n");
4227 cpu_fprintf(f, "gen code size %td/%ld\n",
4228 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4229 cpu_fprintf(f, "TB count %d/%d\n",
4230 nb_tbs, code_gen_max_blocks);
4231 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4232 nb_tbs ? target_code_size / nb_tbs : 0,
4233 max_target_code_size);
4234 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4235 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4236 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4237 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4239 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4240 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4242 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4244 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4245 cpu_fprintf(f, "\nStatistics:\n");
4246 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4247 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4248 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4249 tcg_dump_info(f, cpu_fprintf);
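/*
 * This report is typically surfaced through the "info jit" monitor
 * command (an assumption about callers outside this file):
 *
 *     (qemu) info jit
 */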
4253 * A helper function for the _utterly broken_ virtio device model to find out if
4254 * it's running on a big endian machine. Don't do this at home kids!
4256 bool virtio_is_big_endian(void);
4257 bool virtio_is_big_endian(void)
4259 #if defined(TARGET_WORDS_BIGENDIAN)
4268 #ifndef CONFIG_USER_ONLY
4269 bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
4271 MemoryRegionSection *section;
4273 section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
4275 return !(memory_region_is_ram(section->mr) ||
4276 memory_region_is_romd(section->mr));