2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
36 #include "qemu-timer.h"
37 #if defined(CONFIG_USER_ONLY)
39 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
40 #include <sys/param.h>
41 #if __FreeBSD_version >= 700104
42 #define HAVE_KINFO_GETVMMAP
43 #define sigqueue sigqueue_freebsd /* avoid redefinition */
46 #include <machine/profile.h>
54 #else /* !CONFIG_USER_ONLY */
55 #include "xen-mapcache.h"
58 //#define DEBUG_TB_INVALIDATE
61 //#define DEBUG_UNASSIGNED
63 /* make various TB consistency checks */
64 //#define DEBUG_TB_CHECK
65 //#define DEBUG_TLB_CHECK
67 //#define DEBUG_IOPORT
68 //#define DEBUG_SUBPAGE
70 #if !defined(CONFIG_USER_ONLY)
71 /* TB consistency checks only implemented for usermode emulation. */
75 #define SMC_BITMAP_USE_THRESHOLD 10
77 static TranslationBlock *tbs;
78 static int code_gen_max_blocks;
79 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
81 /* any access to the tbs or the page table must use this lock */
82 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
84 #if defined(__arm__) || defined(__sparc_v9__)
85 /* The prologue must be reachable with a direct jump. ARM and Sparc64
86 have limited branch ranges (possibly also PPC) so place it in a
87 section close to the code segment. */
88 #define code_gen_section \
89 __attribute__((__section__(".gen_code"))) \
90 __attribute__((aligned (32)))
92 /* Maximum alignment for Win32 is 16. */
93 #define code_gen_section \
94 __attribute__((aligned (16)))
96 #define code_gen_section \
97 __attribute__((aligned (32)))
100 uint8_t code_gen_prologue[1024] code_gen_section;
101 static uint8_t *code_gen_buffer;
102 static unsigned long code_gen_buffer_size;
103 /* threshold to flush the translated code buffer */
104 static unsigned long code_gen_buffer_max_size;
105 static uint8_t *code_gen_ptr;
107 #if !defined(CONFIG_USER_ONLY)
109 static int in_migration;
111 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
115 /* current CPU in the current thread. It is only valid inside cpu_exec(). */
117 CPUState *cpu_single_env;
118 /* 0 = Do not count executed instructions.
119 1 = Precise instruction counting.
120 2 = Adaptive rate instruction counting. */
122 /* Current instruction counter. While executing translated code this may
123 include some instructions that have not yet been executed. */
126 typedef struct PageDesc {
127 /* list of TBs intersecting this ram page */
128 TranslationBlock *first_tb;
129 /* in order to optimize self-modifying code handling, we count the number
130 of code write accesses to a given page so we know when to use a bitmap */
131 unsigned int code_write_count;
132 uint8_t *code_bitmap;
133 #if defined(CONFIG_USER_ONLY)
138 /* In system mode we want L1_MAP to be based on ram offsets,
139 while in user mode we want it to be based on virtual addresses. */
140 #if !defined(CONFIG_USER_ONLY)
141 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
142 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
144 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
147 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
150 /* Size of the L2 (and L3, etc) page tables. */
152 #define L2_SIZE (1 << L2_BITS)
154 /* The bits remaining after N lower levels of page tables. */
155 #define P_L1_BITS_REM \
156 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
157 #define V_L1_BITS_REM \
158 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
160 /* Size of the L1 page table. Avoid silly small sizes. */
161 #if P_L1_BITS_REM < 4
162 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
164 #define P_L1_BITS P_L1_BITS_REM
167 #if V_L1_BITS_REM < 4
168 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
170 #define V_L1_BITS V_L1_BITS_REM
173 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
174 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
176 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
177 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
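/* Worked example (a sketch; L2_BITS is defined on a line not shown here,
   assume it is 10): with TARGET_PAGE_BITS == 12 and a 32-bit
   L1_MAP_ADDR_SPACE_BITS, V_L1_BITS_REM = (32 - 12) % 10 = 0 < 4, so
   V_L1_BITS = 10, V_L1_SIZE = 1024 and V_L1_SHIFT = 32 - 12 - 10 = 10,
   i.e. the low 10 bits of a page index select the entry inside a leaf
   PageDesc array and no intermediate level is needed.  With a 36-bit
   physical address space, P_L1_BITS_REM = 4 is not < 4, so P_L1_BITS = 4,
   P_L1_SIZE = 16 and P_L1_SHIFT = 20: one intermediate pointer table plus
   the leaf PhysPageDesc arrays sit below the 16-entry l1_phys_map. */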
179 unsigned long qemu_real_host_page_size;
180 unsigned long qemu_host_page_bits;
181 unsigned long qemu_host_page_size;
182 unsigned long qemu_host_page_mask;
184 /* This is a multi-level map on the virtual address space.
185 The bottom level has pointers to PageDesc. */
186 static void *l1_map[V_L1_SIZE];
188 #if !defined(CONFIG_USER_ONLY)
189 typedef struct PhysPageDesc {
190 /* offset in host memory of the page + io_index in the low bits */
191 ram_addr_t phys_offset;
192 ram_addr_t region_offset;
195 /* This is a multi-level map on the physical address space.
196 The bottom level has pointers to PhysPageDesc. */
197 static void *l1_phys_map[P_L1_SIZE];
199 static void io_mem_init(void);
201 /* io memory support */
202 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
203 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
204 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
205 static char io_mem_used[IO_MEM_NB_ENTRIES];
206 static int io_mem_watch;
211 static const char *logfilename = "qemu.log";
213 static const char *logfilename = "/tmp/qemu.log";
217 static int log_append = 0;
220 #if !defined(CONFIG_USER_ONLY)
221 static int tlb_flush_count;
223 static int tb_flush_count;
224 static int tb_phys_invalidate_count;
227 static void map_exec(void *addr, long size)
230 VirtualProtect(addr, size,
231 PAGE_EXECUTE_READWRITE, &old_protect);
235 static void map_exec(void *addr, long size)
237 unsigned long start, end, page_size;
239 page_size = getpagesize();
240 start = (unsigned long)addr;
241 start &= ~(page_size - 1);
243 end = (unsigned long)addr + size;
244 end += page_size - 1;
245 end &= ~(page_size - 1);
247 mprotect((void *)start, end - start,
248 PROT_READ | PROT_WRITE | PROT_EXEC);
252 static void page_init(void)
254 /* NOTE: we can always suppose that qemu_host_page_size >= TARGET_PAGE_SIZE */
258 SYSTEM_INFO system_info;
260 GetSystemInfo(&system_info);
261 qemu_real_host_page_size = system_info.dwPageSize;
264 qemu_real_host_page_size = getpagesize();
266 if (qemu_host_page_size == 0)
267 qemu_host_page_size = qemu_real_host_page_size;
268 if (qemu_host_page_size < TARGET_PAGE_SIZE)
269 qemu_host_page_size = TARGET_PAGE_SIZE;
270 qemu_host_page_bits = 0;
271 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
272 qemu_host_page_bits++;
273 qemu_host_page_mask = ~(qemu_host_page_size - 1);
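/* For instance, on a host with 4 KB pages and a TARGET_PAGE_SIZE that is
   not larger, qemu_real_host_page_size = 4096, qemu_host_page_size stays
   4096, qemu_host_page_bits ends up as 12 and qemu_host_page_mask becomes
   ~0xfff (0xfffff000 on a 32-bit host). */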
275 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
277 #ifdef HAVE_KINFO_GETVMMAP
278 struct kinfo_vmentry *freep;
281 freep = kinfo_getvmmap(getpid(), &cnt);
284 for (i = 0; i < cnt; i++) {
285 unsigned long startaddr, endaddr;
287 startaddr = freep[i].kve_start;
288 endaddr = freep[i].kve_end;
289 if (h2g_valid(startaddr)) {
290 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
292 if (h2g_valid(endaddr)) {
293 endaddr = h2g(endaddr);
294 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
296 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
298 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
309 last_brk = (unsigned long)sbrk(0);
311 f = fopen("/compat/linux/proc/self/maps", "r");
316 unsigned long startaddr, endaddr;
319 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
321 if (n == 2 && h2g_valid(startaddr)) {
322 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
324 if (h2g_valid(endaddr)) {
325 endaddr = h2g(endaddr);
329 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
341 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
347 #if defined(CONFIG_USER_ONLY)
348 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
349 # define ALLOC(P, SIZE) \
351 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
352 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
355 # define ALLOC(P, SIZE) \
356 do { P = qemu_mallocz(SIZE); } while (0)
359 /* Level 1. Always allocated. */
360 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
363 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
370 ALLOC(p, sizeof(void *) * L2_SIZE);
374 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
382 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
388 return pd + (index & (L2_SIZE - 1));
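/* Walk sketch, reusing the 32-bit / L2_BITS == 10 assumptions from the
   example above: for page index 0x12345 (virtual address 0x12345000),
   index >> V_L1_SHIFT = 0x48 selects the l1_map slot, the intermediate
   loop runs zero times because V_L1_SHIFT / L2_BITS - 1 == 0, and
   index & (L2_SIZE - 1) = 0x345 indexes the leaf PageDesc array. */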
391 static inline PageDesc *page_find(tb_page_addr_t index)
393 return page_find_alloc(index, 0);
396 #if !defined(CONFIG_USER_ONLY)
397 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
403 /* Level 1. Always allocated. */
404 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
407 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
413 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
415 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
426 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
428 for (i = 0; i < L2_SIZE; i++) {
429 pd[i].phys_offset = IO_MEM_UNASSIGNED;
430 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
434 return pd + (index & (L2_SIZE - 1));
437 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
439 return phys_page_find_alloc(index, 0);
442 static void tlb_protect_code(ram_addr_t ram_addr);
443 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
445 #define mmap_lock() do { } while(0)
446 #define mmap_unlock() do { } while(0)
449 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
451 #if defined(CONFIG_USER_ONLY)
452 /* Currently it is not recommended to allocate big chunks of data in
453 user mode. This will change once a dedicated libc is used */
454 #define USE_STATIC_CODE_GEN_BUFFER
457 #ifdef USE_STATIC_CODE_GEN_BUFFER
458 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
459 __attribute__((aligned (CODE_GEN_ALIGN)));
462 static void code_gen_alloc(unsigned long tb_size)
464 #ifdef USE_STATIC_CODE_GEN_BUFFER
465 code_gen_buffer = static_code_gen_buffer;
466 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
467 map_exec(code_gen_buffer, code_gen_buffer_size);
469 code_gen_buffer_size = tb_size;
470 if (code_gen_buffer_size == 0) {
471 #if defined(CONFIG_USER_ONLY)
472 /* in user mode, phys_ram_size is not meaningful */
473 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
475 /* XXX: needs adjustments */
476 code_gen_buffer_size = (unsigned long)(ram_size / 4);
479 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
480 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
481 /* The code gen buffer location may have constraints depending on
482 the host cpu and OS */
483 #if defined(__linux__)
488 flags = MAP_PRIVATE | MAP_ANONYMOUS;
489 #if defined(__x86_64__)
491 /* Cannot map more than that */
492 if (code_gen_buffer_size > (800 * 1024 * 1024))
493 code_gen_buffer_size = (800 * 1024 * 1024);
494 #elif defined(__sparc_v9__)
495 // Map the buffer below 2G, so we can use direct calls and branches
497 start = (void *) 0x60000000UL;
498 if (code_gen_buffer_size > (512 * 1024 * 1024))
499 code_gen_buffer_size = (512 * 1024 * 1024);
500 #elif defined(__arm__)
501 /* Map the buffer below 32M, so we can use direct calls and branches */
503 start = (void *) 0x01000000UL;
504 if (code_gen_buffer_size > 16 * 1024 * 1024)
505 code_gen_buffer_size = 16 * 1024 * 1024;
506 #elif defined(__s390x__)
507 /* Map the buffer so that we can use direct calls and branches. */
508 /* We have a +- 4GB range on the branches; leave some slop. */
509 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
510 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
512 start = (void *)0x90000000UL;
514 code_gen_buffer = mmap(start, code_gen_buffer_size,
515 PROT_WRITE | PROT_READ | PROT_EXEC,
517 if (code_gen_buffer == MAP_FAILED) {
518 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
522 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
523 || defined(__DragonFly__) || defined(__OpenBSD__)
527 flags = MAP_PRIVATE | MAP_ANONYMOUS;
528 #if defined(__x86_64__)
529 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
530 * 0x40000000 is free */
532 addr = (void *)0x40000000;
533 /* Cannot map more than that */
534 if (code_gen_buffer_size > (800 * 1024 * 1024))
535 code_gen_buffer_size = (800 * 1024 * 1024);
536 #elif defined(__sparc_v9__)
537 // Map the buffer below 2G, so we can use direct calls and branches
539 addr = (void *) 0x60000000UL;
540 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
541 code_gen_buffer_size = (512 * 1024 * 1024);
544 code_gen_buffer = mmap(addr, code_gen_buffer_size,
545 PROT_WRITE | PROT_READ | PROT_EXEC,
547 if (code_gen_buffer == MAP_FAILED) {
548 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
553 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
554 map_exec(code_gen_buffer, code_gen_buffer_size);
556 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
557 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
558 code_gen_buffer_max_size = code_gen_buffer_size -
559 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
560 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
561 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
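/* Sizing sketch: with the 32 MB DEFAULT_CODE_GEN_BUFFER_SIZE and an
   average block size of, say, 128 bytes (CODE_GEN_AVG_BLOCK_SIZE is
   defined elsewhere, so 128 is only an assumption), this allocates
   32 MB / 128 = 262144 TranslationBlock descriptors, while
   code_gen_buffer_max_size leaves TCG_MAX_OP_SIZE * OPC_MAX_SIZE bytes of
   slack so tb_alloc() can stop before a block in progress overruns the
   buffer. */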
564 /* Must be called before using the QEMU cpus. 'tb_size' is the size
565 (in bytes) allocated to the translation buffer. Zero means the default size. */
567 void cpu_exec_init_all(unsigned long tb_size)
570 code_gen_alloc(tb_size);
571 code_gen_ptr = code_gen_buffer;
573 #if !defined(CONFIG_USER_ONLY)
576 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
577 /* There's no guest base to take into account, so go ahead and
578 initialize the prologue now. */
579 tcg_prologue_init(&tcg_ctx);
583 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
585 static int cpu_common_post_load(void *opaque, int version_id)
587 CPUState *env = opaque;
589 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
590 version_id is increased. */
591 env->interrupt_request &= ~0x01;
597 static const VMStateDescription vmstate_cpu_common = {
598 .name = "cpu_common",
600 .minimum_version_id = 1,
601 .minimum_version_id_old = 1,
602 .post_load = cpu_common_post_load,
603 .fields = (VMStateField []) {
604 VMSTATE_UINT32(halted, CPUState),
605 VMSTATE_UINT32(interrupt_request, CPUState),
606 VMSTATE_END_OF_LIST()
611 CPUState *qemu_get_cpu(int cpu)
613 CPUState *env = first_cpu;
616 if (env->cpu_index == cpu)
624 void cpu_exec_init(CPUState *env)
629 #if defined(CONFIG_USER_ONLY)
632 env->next_cpu = NULL;
635 while (*penv != NULL) {
636 penv = &(*penv)->next_cpu;
639 env->cpu_index = cpu_index;
641 QTAILQ_INIT(&env->breakpoints);
642 QTAILQ_INIT(&env->watchpoints);
643 #ifndef CONFIG_USER_ONLY
644 env->thread_id = qemu_get_thread_id();
647 #if defined(CONFIG_USER_ONLY)
650 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
651 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
652 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
653 cpu_save, cpu_load, env);
657 /* Allocate a new translation block. Flush the translation buffer if
658 too many translation blocks or too much generated code. */
659 static TranslationBlock *tb_alloc(target_ulong pc)
661 TranslationBlock *tb;
663 if (nb_tbs >= code_gen_max_blocks ||
664 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
672 void tb_free(TranslationBlock *tb)
674 /* In practice this is mostly used for single-use temporary TBs.
675 Ignore the hard cases and just back up if this TB happens to
676 be the last one generated. */
677 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
678 code_gen_ptr = tb->tc_ptr;
683 static inline void invalidate_page_bitmap(PageDesc *p)
685 if (p->code_bitmap) {
686 qemu_free(p->code_bitmap);
687 p->code_bitmap = NULL;
689 p->code_write_count = 0;
692 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
694 static void page_flush_tb_1 (int level, void **lp)
703 for (i = 0; i < L2_SIZE; ++i) {
704 pd[i].first_tb = NULL;
705 invalidate_page_bitmap(pd + i);
709 for (i = 0; i < L2_SIZE; ++i) {
710 page_flush_tb_1 (level - 1, pp + i);
715 static void page_flush_tb(void)
718 for (i = 0; i < V_L1_SIZE; i++) {
719 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
723 /* flush all the translation blocks */
724 /* XXX: tb_flush is currently not thread safe */
725 void tb_flush(CPUState *env1)
728 #if defined(DEBUG_FLUSH)
729 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
730 (unsigned long)(code_gen_ptr - code_gen_buffer),
732 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
734 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
735 cpu_abort(env1, "Internal error: code buffer overflow\n");
739 for(env = first_cpu; env != NULL; env = env->next_cpu) {
740 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
743 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
746 code_gen_ptr = code_gen_buffer;
747 /* XXX: flush processor icache at this point if cache flush is expensive */
752 #ifdef DEBUG_TB_CHECK
754 static void tb_invalidate_check(target_ulong address)
756 TranslationBlock *tb;
758 address &= TARGET_PAGE_MASK;
759 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
760 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
761 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
762 address >= tb->pc + tb->size)) {
763 printf("ERROR invalidate: address=" TARGET_FMT_lx
764 " PC=%08lx size=%04x\n",
765 address, (long)tb->pc, tb->size);
771 /* verify that all the pages have correct rights for code */
772 static void tb_page_check(void)
774 TranslationBlock *tb;
775 int i, flags1, flags2;
777 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
778 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
779 flags1 = page_get_flags(tb->pc);
780 flags2 = page_get_flags(tb->pc + tb->size - 1);
781 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
782 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
783 (long)tb->pc, tb->size, flags1, flags2);
791 /* invalidate one TB */
792 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
795 TranslationBlock *tb1;
799 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
802 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
806 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
808 TranslationBlock *tb1;
814 tb1 = (TranslationBlock *)((long)tb1 & ~3);
816 *ptb = tb1->page_next[n1];
819 ptb = &tb1->page_next[n1];
823 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
825 TranslationBlock *tb1, **ptb;
828 ptb = &tb->jmp_next[n];
831 /* find tb(n) in circular list */
835 tb1 = (TranslationBlock *)((long)tb1 & ~3);
836 if (n1 == n && tb1 == tb)
839 ptb = &tb1->jmp_first;
841 ptb = &tb1->jmp_next[n1];
844 /* now we can remove tb(n) from the list */
845 *ptb = tb->jmp_next[n];
847 tb->jmp_next[n] = NULL;
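/* The page and jump lists store tagged pointers: a TranslationBlock is at
   least 4-byte aligned, so the low two bits of each link are free and
   record which slot (0 or 1) the link belongs to.  A minimal sketch of
   the encoding used throughout this file:

       stored = (TranslationBlock *)((long)tb | n);     store slot n
       n1 = (long)stored & 3;                           recover the slot
       tb1 = (TranslationBlock *)((long)stored & ~3);   recover the pointer

   The special value 2 in jmp_first marks the head of the circular jump
   list (see the "tb | 2" assignments below). */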
851 /* reset the jump entry 'n' of a TB so that it is not chained to another TB */
853 static inline void tb_reset_jump(TranslationBlock *tb, int n)
855 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
858 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
863 tb_page_addr_t phys_pc;
864 TranslationBlock *tb1, *tb2;
866 /* remove the TB from the hash list */
867 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
868 h = tb_phys_hash_func(phys_pc);
869 tb_remove(&tb_phys_hash[h], tb,
870 offsetof(TranslationBlock, phys_hash_next));
872 /* remove the TB from the page list */
873 if (tb->page_addr[0] != page_addr) {
874 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
875 tb_page_remove(&p->first_tb, tb);
876 invalidate_page_bitmap(p);
878 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
879 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
880 tb_page_remove(&p->first_tb, tb);
881 invalidate_page_bitmap(p);
884 tb_invalidated_flag = 1;
886 /* remove the TB from the hash list */
887 h = tb_jmp_cache_hash_func(tb->pc);
888 for(env = first_cpu; env != NULL; env = env->next_cpu) {
889 if (env->tb_jmp_cache[h] == tb)
890 env->tb_jmp_cache[h] = NULL;
893 /* remove this TB from the two jump lists */
894 tb_jmp_remove(tb, 0);
895 tb_jmp_remove(tb, 1);
897 /* suppress any remaining jumps to this TB */
903 tb1 = (TranslationBlock *)((long)tb1 & ~3);
904 tb2 = tb1->jmp_next[n1];
905 tb_reset_jump(tb1, n1);
906 tb1->jmp_next[n1] = NULL;
909 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
911 tb_phys_invalidate_count++;
914 static inline void set_bits(uint8_t *tab, int start, int len)
920 mask = 0xff << (start & 7);
921 if ((start & ~7) == (end & ~7)) {
923 mask &= ~(0xff << (end & 7));
928 start = (start + 8) & ~7;
930 while (start < end1) {
935 mask = ~(0xff << (end & 7));
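/* Example: set_bits(bitmap, 10, 4) marks bits [10, 14), i.e. it ends up
   ORing 0x3c into bitmap[1] (bit i lives at bit position i & 7 of byte
   i >> 3).  build_page_bitmap() below uses this to record, one bit per
   guest byte, which parts of a page are covered by translated code. */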
941 static void build_page_bitmap(PageDesc *p)
943 int n, tb_start, tb_end;
944 TranslationBlock *tb;
946 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
951 tb = (TranslationBlock *)((long)tb & ~3);
952 /* NOTE: this is subtle as a TB may span two physical pages */
954 /* NOTE: tb_end may be after the end of the page, but
955 it is not a problem */
956 tb_start = tb->pc & ~TARGET_PAGE_MASK;
957 tb_end = tb_start + tb->size;
958 if (tb_end > TARGET_PAGE_SIZE)
959 tb_end = TARGET_PAGE_SIZE;
962 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
964 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
965 tb = tb->page_next[n];
969 TranslationBlock *tb_gen_code(CPUState *env,
970 target_ulong pc, target_ulong cs_base,
971 int flags, int cflags)
973 TranslationBlock *tb;
975 tb_page_addr_t phys_pc, phys_page2;
976 target_ulong virt_page2;
979 phys_pc = get_page_addr_code(env, pc);
982 /* flush must be done */
984 /* cannot fail at this point */
986 /* Don't forget to invalidate previous TB info. */
987 tb_invalidated_flag = 1;
989 tc_ptr = code_gen_ptr;
991 tb->cs_base = cs_base;
994 cpu_gen_code(env, tb, &code_gen_size);
995 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
997 /* check next page if needed */
998 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1000 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1001 phys_page2 = get_page_addr_code(env, virt_page2);
1003 tb_link_page(tb, phys_pc, phys_page2);
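/* Page-crossing sketch: with 4 KB target pages, a block translated from
   pc = 0x00400ff8 with size 0x20 ends at 0x00401017, so virt_page2 ==
   0x00401000 differs from pc & TARGET_PAGE_MASK and the TB gets linked
   into both physical pages; for a block that fits in one page, phys_page2
   keeps its initial -1 (set on an elided line), which tb_link_page()
   below treats as "second page unused". */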
1007 /* invalidate all TBs which intersect with the target physical page
1008 starting in range [start, end). NOTE: start and end must refer to
1009 the same physical page. 'is_cpu_write_access' should be true if called
1010 from a real cpu write access: the virtual CPU will exit the current
1011 TB if code is modified inside this TB. */
1012 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1013 int is_cpu_write_access)
1015 TranslationBlock *tb, *tb_next, *saved_tb;
1016 CPUState *env = cpu_single_env;
1017 tb_page_addr_t tb_start, tb_end;
1020 #ifdef TARGET_HAS_PRECISE_SMC
1021 int current_tb_not_found = is_cpu_write_access;
1022 TranslationBlock *current_tb = NULL;
1023 int current_tb_modified = 0;
1024 target_ulong current_pc = 0;
1025 target_ulong current_cs_base = 0;
1026 int current_flags = 0;
1027 #endif /* TARGET_HAS_PRECISE_SMC */
1029 p = page_find(start >> TARGET_PAGE_BITS);
1032 if (!p->code_bitmap &&
1033 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1034 is_cpu_write_access) {
1035 /* build code bitmap */
1036 build_page_bitmap(p);
1039 /* we remove all the TBs in the range [start, end) */
1040 /* XXX: see if in some cases it could be faster to invalidate all the code */
1042 while (tb != NULL) {
1044 tb = (TranslationBlock *)((long)tb & ~3);
1045 tb_next = tb->page_next[n];
1046 /* NOTE: this is subtle as a TB may span two physical pages */
1048 /* NOTE: tb_end may be after the end of the page, but
1049 it is not a problem */
1050 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1051 tb_end = tb_start + tb->size;
1053 tb_start = tb->page_addr[1];
1054 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1056 if (!(tb_end <= start || tb_start >= end)) {
1057 #ifdef TARGET_HAS_PRECISE_SMC
1058 if (current_tb_not_found) {
1059 current_tb_not_found = 0;
1061 if (env->mem_io_pc) {
1062 /* now we have a real cpu fault */
1063 current_tb = tb_find_pc(env->mem_io_pc);
1066 if (current_tb == tb &&
1067 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1068 /* If we are modifying the current TB, we must stop
1069 its execution. We could be more precise by checking
1070 that the modification is after the current PC, but it
1071 would require a specialized function to partially
1072 restore the CPU state */
1074 current_tb_modified = 1;
1075 cpu_restore_state(current_tb, env, env->mem_io_pc);
1076 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1079 #endif /* TARGET_HAS_PRECISE_SMC */
1080 /* we need to do that to handle the case where a signal
1081 occurs while doing tb_phys_invalidate() */
1084 saved_tb = env->current_tb;
1085 env->current_tb = NULL;
1087 tb_phys_invalidate(tb, -1);
1089 env->current_tb = saved_tb;
1090 if (env->interrupt_request && env->current_tb)
1091 cpu_interrupt(env, env->interrupt_request);
1096 #if !defined(CONFIG_USER_ONLY)
1097 /* if no code remains, there is no need to keep using slow writes */
1099 invalidate_page_bitmap(p);
1100 if (is_cpu_write_access) {
1101 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1105 #ifdef TARGET_HAS_PRECISE_SMC
1106 if (current_tb_modified) {
1107 /* we generate a block containing just the instruction
1108 modifying the memory. It will ensure that it cannot modify itself. */
1110 env->current_tb = NULL;
1111 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1112 cpu_resume_from_signal(env, NULL);
1117 /* len must be <= 8 and start must be a multiple of len */
1118 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1124 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1125 cpu_single_env->mem_io_vaddr, len,
1126 cpu_single_env->eip,
1127 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1130 p = page_find(start >> TARGET_PAGE_BITS);
1133 if (p->code_bitmap) {
1134 offset = start & ~TARGET_PAGE_MASK;
1135 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1136 if (b & ((1 << len) - 1))
1140 tb_invalidate_phys_page_range(start, start + len, 1);
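/* Fast-path sketch: for a 4-byte write at page offset 0x124, offset >> 3
   picks bitmap byte 0x24 and the shift by offset & 7 == 4 puts the bits
   for offsets 0x124..0x127 into the low (1 << 4) - 1 mask; if none of
   them is set, the write touches no translated code and the full
   tb_invalidate_phys_page_range() call above can be skipped (that early
   exit sits on lines elided here). */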
1144 #if !defined(CONFIG_SOFTMMU)
1145 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1146 unsigned long pc, void *puc)
1148 TranslationBlock *tb;
1151 #ifdef TARGET_HAS_PRECISE_SMC
1152 TranslationBlock *current_tb = NULL;
1153 CPUState *env = cpu_single_env;
1154 int current_tb_modified = 0;
1155 target_ulong current_pc = 0;
1156 target_ulong current_cs_base = 0;
1157 int current_flags = 0;
1160 addr &= TARGET_PAGE_MASK;
1161 p = page_find(addr >> TARGET_PAGE_BITS);
1165 #ifdef TARGET_HAS_PRECISE_SMC
1166 if (tb && pc != 0) {
1167 current_tb = tb_find_pc(pc);
1170 while (tb != NULL) {
1172 tb = (TranslationBlock *)((long)tb & ~3);
1173 #ifdef TARGET_HAS_PRECISE_SMC
1174 if (current_tb == tb &&
1175 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1176 /* If we are modifying the current TB, we must stop
1177 its execution. We could be more precise by checking
1178 that the modification is after the current PC, but it
1179 would require a specialized function to partially
1180 restore the CPU state */
1182 current_tb_modified = 1;
1183 cpu_restore_state(current_tb, env, pc);
1184 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1187 #endif /* TARGET_HAS_PRECISE_SMC */
1188 tb_phys_invalidate(tb, addr);
1189 tb = tb->page_next[n];
1192 #ifdef TARGET_HAS_PRECISE_SMC
1193 if (current_tb_modified) {
1194 /* we generate a block containing just the instruction
1195 modifying the memory. It will ensure that it cannot modify itself. */
1197 env->current_tb = NULL;
1198 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1199 cpu_resume_from_signal(env, puc);
1205 /* add the tb to the target page and protect it if necessary */
1206 static inline void tb_alloc_page(TranslationBlock *tb,
1207 unsigned int n, tb_page_addr_t page_addr)
1210 TranslationBlock *last_first_tb;
1212 tb->page_addr[n] = page_addr;
1213 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1214 tb->page_next[n] = p->first_tb;
1215 last_first_tb = p->first_tb;
1216 p->first_tb = (TranslationBlock *)((long)tb | n);
1217 invalidate_page_bitmap(p);
1219 #if defined(TARGET_HAS_SMC) || 1
1221 #if defined(CONFIG_USER_ONLY)
1222 if (p->flags & PAGE_WRITE) {
1227 /* force the host page to be non-writable (writes will have a
1228 page fault + mprotect overhead) */
1229 page_addr &= qemu_host_page_mask;
1231 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1232 addr += TARGET_PAGE_SIZE) {
1234 p2 = page_find (addr >> TARGET_PAGE_BITS);
1238 p2->flags &= ~PAGE_WRITE;
1240 mprotect(g2h(page_addr), qemu_host_page_size,
1241 (prot & PAGE_BITS) & ~PAGE_WRITE);
1242 #ifdef DEBUG_TB_INVALIDATE
1243 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1248 /* if some code is already present, then the pages are already
1249 protected. So we handle the case where only the first TB is
1250 allocated in a physical page */
1251 if (!last_first_tb) {
1252 tlb_protect_code(page_addr);
1256 #endif /* TARGET_HAS_SMC */
1259 /* add a new TB and link it to the physical page tables. phys_page2 is
1260 (-1) to indicate that only one page contains the TB. */
1261 void tb_link_page(TranslationBlock *tb,
1262 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1265 TranslationBlock **ptb;
1267 /* Grab the mmap lock to stop another thread invalidating this TB
1268 before we are done. */
1270 /* add in the physical hash table */
1271 h = tb_phys_hash_func(phys_pc);
1272 ptb = &tb_phys_hash[h];
1273 tb->phys_hash_next = *ptb;
1276 /* add in the page list */
1277 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1278 if (phys_page2 != -1)
1279 tb_alloc_page(tb, 1, phys_page2);
1281 tb->page_addr[1] = -1;
1283 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1284 tb->jmp_next[0] = NULL;
1285 tb->jmp_next[1] = NULL;
1287 /* init original jump addresses */
1288 if (tb->tb_next_offset[0] != 0xffff)
1289 tb_reset_jump(tb, 0);
1290 if (tb->tb_next_offset[1] != 0xffff)
1291 tb_reset_jump(tb, 1);
1293 #ifdef DEBUG_TB_CHECK
1299 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1300 tb[1].tc_ptr. Return NULL if not found */
1301 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1303 int m_min, m_max, m;
1305 TranslationBlock *tb;
1309 if (tc_ptr < (unsigned long)code_gen_buffer ||
1310 tc_ptr >= (unsigned long)code_gen_ptr)
1312 /* binary search (cf Knuth) */
1315 while (m_min <= m_max) {
1316 m = (m_min + m_max) >> 1;
1318 v = (unsigned long)tb->tc_ptr;
1321 else if (tc_ptr < v) {
1330 static void tb_reset_jump_recursive(TranslationBlock *tb);
1332 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1334 TranslationBlock *tb1, *tb_next, **ptb;
1337 tb1 = tb->jmp_next[n];
1339 /* find head of list */
1342 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1345 tb1 = tb1->jmp_next[n1];
1347 /* we are now sure that tb jumps to tb1 */
1350 /* remove tb from the jmp_first list */
1351 ptb = &tb_next->jmp_first;
1355 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1356 if (n1 == n && tb1 == tb)
1358 ptb = &tb1->jmp_next[n1];
1360 *ptb = tb->jmp_next[n];
1361 tb->jmp_next[n] = NULL;
1363 /* suppress the jump to next tb in generated code */
1364 tb_reset_jump(tb, n);
1366 /* suppress jumps in the tb on which we could have jumped */
1367 tb_reset_jump_recursive(tb_next);
1371 static void tb_reset_jump_recursive(TranslationBlock *tb)
1373 tb_reset_jump_recursive2(tb, 0);
1374 tb_reset_jump_recursive2(tb, 1);
1377 #if defined(TARGET_HAS_ICE)
1378 #if defined(CONFIG_USER_ONLY)
1379 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1381 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1384 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1386 target_phys_addr_t addr;
1388 ram_addr_t ram_addr;
1391 addr = cpu_get_phys_page_debug(env, pc);
1392 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1394 pd = IO_MEM_UNASSIGNED;
1396 pd = p->phys_offset;
1398 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1399 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1402 #endif /* TARGET_HAS_ICE */
1404 #if defined(CONFIG_USER_ONLY)
1405 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1410 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1411 int flags, CPUWatchpoint **watchpoint)
1416 /* Add a watchpoint. */
1417 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1418 int flags, CPUWatchpoint **watchpoint)
1420 target_ulong len_mask = ~(len - 1);
1423 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1424 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1425 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1426 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1429 wp = qemu_malloc(sizeof(*wp));
1432 wp->len_mask = len_mask;
1435 /* keep all GDB-injected watchpoints in front */
1437 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1439 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1441 tlb_flush_page(env, addr);
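/* Usage sketch (BP_MEM_WRITE and BP_GDB are the usual flag names defined
   elsewhere, so treat them as assumptions here):

       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
       ...
       cpu_watchpoint_remove_by_ref(env, wp);

   Per the sanity check above, len must be 1, 2, 4 or 8 and addr must be
   aligned to it, otherwise the call fails. */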
1448 /* Remove a specific watchpoint. */
1449 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1452 target_ulong len_mask = ~(len - 1);
1455 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1456 if (addr == wp->vaddr && len_mask == wp->len_mask
1457 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1458 cpu_watchpoint_remove_by_ref(env, wp);
1465 /* Remove a specific watchpoint by reference. */
1466 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1468 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1470 tlb_flush_page(env, watchpoint->vaddr);
1472 qemu_free(watchpoint);
1475 /* Remove all matching watchpoints. */
1476 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1478 CPUWatchpoint *wp, *next;
1480 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1481 if (wp->flags & mask)
1482 cpu_watchpoint_remove_by_ref(env, wp);
1487 /* Add a breakpoint. */
1488 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1489 CPUBreakpoint **breakpoint)
1491 #if defined(TARGET_HAS_ICE)
1494 bp = qemu_malloc(sizeof(*bp));
1499 /* keep all GDB-injected breakpoints in front */
1501 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1503 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1505 breakpoint_invalidate(env, pc);
1515 /* Remove a specific breakpoint. */
1516 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1518 #if defined(TARGET_HAS_ICE)
1521 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1522 if (bp->pc == pc && bp->flags == flags) {
1523 cpu_breakpoint_remove_by_ref(env, bp);
1533 /* Remove a specific breakpoint by reference. */
1534 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1536 #if defined(TARGET_HAS_ICE)
1537 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1539 breakpoint_invalidate(env, breakpoint->pc);
1541 qemu_free(breakpoint);
1545 /* Remove all matching breakpoints. */
1546 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1548 #if defined(TARGET_HAS_ICE)
1549 CPUBreakpoint *bp, *next;
1551 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1552 if (bp->flags & mask)
1553 cpu_breakpoint_remove_by_ref(env, bp);
1558 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1559 CPU loop after each instruction */
1560 void cpu_single_step(CPUState *env, int enabled)
1562 #if defined(TARGET_HAS_ICE)
1563 if (env->singlestep_enabled != enabled) {
1564 env->singlestep_enabled = enabled;
1566 kvm_update_guest_debug(env, 0);
1568 /* must flush all the translated code to avoid inconsistencies */
1569 /* XXX: only flush what is necessary */
1576 /* enable or disable low-level logging */
1577 void cpu_set_log(int log_flags)
1579 loglevel = log_flags;
1580 if (loglevel && !logfile) {
1581 logfile = fopen(logfilename, log_append ? "a" : "w");
1583 perror(logfilename);
1586 #if !defined(CONFIG_SOFTMMU)
1587 /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1589 static char logfile_buf[4096];
1590 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1592 #elif !defined(_WIN32)
1593 /* Win32 doesn't support line-buffering and requires size >= 2 */
1594 setvbuf(logfile, NULL, _IOLBF, 0);
1598 if (!loglevel && logfile) {
1604 void cpu_set_log_filename(const char *filename)
1606 logfilename = strdup(filename);
1611 cpu_set_log(loglevel);
1614 static void cpu_unlink_tb(CPUState *env)
1616 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1617 problem and hope the cpu will stop of its own accord. For userspace
1618 emulation this often isn't actually as bad as it sounds. Often
1619 signals are used primarily to interrupt blocking syscalls. */
1620 TranslationBlock *tb;
1621 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1623 spin_lock(&interrupt_lock);
1624 tb = env->current_tb;
1625 /* if the cpu is currently executing code, we must unlink it and
1626 all the potentially executing TBs */
1628 env->current_tb = NULL;
1629 tb_reset_jump_recursive(tb);
1631 spin_unlock(&interrupt_lock);
1634 #ifndef CONFIG_USER_ONLY
1635 /* mask must never be zero, except for A20 change call */
1636 static void tcg_handle_interrupt(CPUState *env, int mask)
1640 old_mask = env->interrupt_request;
1641 env->interrupt_request |= mask;
1644 * If called from iothread context, wake the target cpu in case it is halted. */
1647 if (!qemu_cpu_is_self(env)) {
1653 env->icount_decr.u16.high = 0xffff;
1655 && (mask & ~old_mask) != 0) {
1656 cpu_abort(env, "Raised interrupt while not in I/O function");
1663 CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1665 #else /* CONFIG_USER_ONLY */
1667 void cpu_interrupt(CPUState *env, int mask)
1669 env->interrupt_request |= mask;
1672 #endif /* CONFIG_USER_ONLY */
1674 void cpu_reset_interrupt(CPUState *env, int mask)
1676 env->interrupt_request &= ~mask;
1679 void cpu_exit(CPUState *env)
1681 env->exit_request = 1;
1685 const CPULogItem cpu_log_items[] = {
1686 { CPU_LOG_TB_OUT_ASM, "out_asm",
1687 "show generated host assembly code for each compiled TB" },
1688 { CPU_LOG_TB_IN_ASM, "in_asm",
1689 "show target assembly code for each compiled TB" },
1690 { CPU_LOG_TB_OP, "op",
1691 "show micro ops for each compiled TB" },
1692 { CPU_LOG_TB_OP_OPT, "op_opt",
1695 "before eflags optimization and "
1697 "after liveness analysis" },
1698 { CPU_LOG_INT, "int",
1699 "show interrupts/exceptions in short format" },
1700 { CPU_LOG_EXEC, "exec",
1701 "show trace before each executed TB (lots of logs)" },
1702 { CPU_LOG_TB_CPU, "cpu",
1703 "show CPU state before block translation" },
1705 { CPU_LOG_PCALL, "pcall",
1706 "show protected mode far calls/returns/exceptions" },
1707 { CPU_LOG_RESET, "cpu_reset",
1708 "show CPU state before CPU resets" },
1711 { CPU_LOG_IOPORT, "ioport",
1712 "show all i/o ports accesses" },
1717 #ifndef CONFIG_USER_ONLY
1718 static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1719 = QLIST_HEAD_INITIALIZER(memory_client_list);
1721 static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1723 ram_addr_t phys_offset,
1726 CPUPhysMemoryClient *client;
1727 QLIST_FOREACH(client, &memory_client_list, list) {
1728 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
1732 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1733 target_phys_addr_t end)
1735 CPUPhysMemoryClient *client;
1736 QLIST_FOREACH(client, &memory_client_list, list) {
1737 int r = client->sync_dirty_bitmap(client, start, end);
1744 static int cpu_notify_migration_log(int enable)
1746 CPUPhysMemoryClient *client;
1747 QLIST_FOREACH(client, &memory_client_list, list) {
1748 int r = client->migration_log(client, enable);
1755 /* The l1_phys_map provides the upper P_L1_BITS of the guest physical
1756 * address. Each intermediate table provides the next L2_BITS of the guest
1757 * physical address space. The number of levels varies based on host and
1758 * guest configuration, making it efficient to build the final guest
1759 * physical address by seeding the L1 offset and shifting and adding in
1760 * each L2 offset as we recurse through them. */
1761 static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1762 int level, void **lp, target_phys_addr_t addr)
1770 PhysPageDesc *pd = *lp;
1771 addr <<= L2_BITS + TARGET_PAGE_BITS;
1772 for (i = 0; i < L2_SIZE; ++i) {
1773 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1774 client->set_memory(client, addr | i << TARGET_PAGE_BITS,
1775 TARGET_PAGE_SIZE, pd[i].phys_offset, false);
1780 for (i = 0; i < L2_SIZE; ++i) {
1781 phys_page_for_each_1(client, level - 1, pp + i,
1782 (addr << L2_BITS) | i);
1787 static void phys_page_for_each(CPUPhysMemoryClient *client)
1790 for (i = 0; i < P_L1_SIZE; ++i) {
1791 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1792 l1_phys_map + i, i);
1796 void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1798 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1799 phys_page_for_each(client);
1802 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1804 QLIST_REMOVE(client, list);
1808 static int cmp1(const char *s1, int n, const char *s2)
1810 if (strlen(s2) != n)
1812 return memcmp(s1, s2, n) == 0;
1815 /* takes a comma-separated list of log masks. Returns 0 on error. */
1816 int cpu_str_to_log_mask(const char *str)
1818 const CPULogItem *item;
1825 p1 = strchr(p, ',');
1828 if(cmp1(p,p1-p,"all")) {
1829 for(item = cpu_log_items; item->mask != 0; item++) {
1833 for(item = cpu_log_items; item->mask != 0; item++) {
1834 if (cmp1(p, p1 - p, item->name))
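/* Example (assuming the elided remainder of the loop ORs item->mask into
   the result, as the cpu_log_items table suggests):
   cpu_str_to_log_mask("in_asm,exec") yields CPU_LOG_TB_IN_ASM |
   CPU_LOG_EXEC, "all" selects every entry of the table, and an unknown
   name makes the function return 0, per the comment above. */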
1848 void cpu_abort(CPUState *env, const char *fmt, ...)
1855 fprintf(stderr, "qemu: fatal: ");
1856 vfprintf(stderr, fmt, ap);
1857 fprintf(stderr, "\n");
1859 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1861 cpu_dump_state(env, stderr, fprintf, 0);
1863 if (qemu_log_enabled()) {
1864 qemu_log("qemu: fatal: ");
1865 qemu_log_vprintf(fmt, ap2);
1868 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1870 log_cpu_state(env, 0);
1877 #if defined(CONFIG_USER_ONLY)
1879 struct sigaction act;
1880 sigfillset(&act.sa_mask);
1881 act.sa_handler = SIG_DFL;
1882 sigaction(SIGABRT, &act, NULL);
1888 CPUState *cpu_copy(CPUState *env)
1890 CPUState *new_env = cpu_init(env->cpu_model_str);
1891 CPUState *next_cpu = new_env->next_cpu;
1892 int cpu_index = new_env->cpu_index;
1893 #if defined(TARGET_HAS_ICE)
1898 memcpy(new_env, env, sizeof(CPUState));
1900 /* Preserve chaining and index. */
1901 new_env->next_cpu = next_cpu;
1902 new_env->cpu_index = cpu_index;
1904 /* Clone all break/watchpoints.
1905 Note: Once we support ptrace with hw-debug register access, make sure
1906 BP_CPU break/watchpoints are handled correctly on clone. */
1907 QTAILQ_INIT(&env->breakpoints);
1908 QTAILQ_INIT(&env->watchpoints);
1909 #if defined(TARGET_HAS_ICE)
1910 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1911 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1913 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1914 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1922 #if !defined(CONFIG_USER_ONLY)
1924 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1928 /* Discard jump cache entries for any tb which might potentially
1929 overlap the flushed page. */
1930 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1931 memset (&env->tb_jmp_cache[i], 0,
1932 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1934 i = tb_jmp_cache_hash_page(addr);
1935 memset (&env->tb_jmp_cache[i], 0,
1936 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1939 static CPUTLBEntry s_cputlb_empty_entry = {
1946 /* NOTE: if flush_global is true, also flush global entries (not implemented yet) */
1948 void tlb_flush(CPUState *env, int flush_global)
1952 #if defined(DEBUG_TLB)
1953 printf("tlb_flush:\n");
1955 /* must reset current TB so that interrupts cannot modify the
1956 links while we are modifying them */
1957 env->current_tb = NULL;
1959 for(i = 0; i < CPU_TLB_SIZE; i++) {
1961 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1962 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1966 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1968 env->tlb_flush_addr = -1;
1969 env->tlb_flush_mask = 0;
1973 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1975 if (addr == (tlb_entry->addr_read &
1976 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1977 addr == (tlb_entry->addr_write &
1978 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1979 addr == (tlb_entry->addr_code &
1980 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1981 *tlb_entry = s_cputlb_empty_entry;
1985 void tlb_flush_page(CPUState *env, target_ulong addr)
1990 #if defined(DEBUG_TLB)
1991 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1993 /* Check if we need to flush due to large pages. */
1994 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1995 #if defined(DEBUG_TLB)
1996 printf("tlb_flush_page: forced full flush ("
1997 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1998 env->tlb_flush_addr, env->tlb_flush_mask);
2003 /* must reset current TB so that interrupts cannot modify the
2004 links while we are modifying them */
2005 env->current_tb = NULL;
2007 addr &= TARGET_PAGE_MASK;
2008 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2009 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2010 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2012 tlb_flush_jmp_cache(env, addr);
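/* Index sketch: with 4 KB target pages and, say, a 256-entry TLB
   (CPU_TLB_SIZE is defined elsewhere, so 256 is an assumption), flushing
   addr 0x7fff1234 clears slot (0x7fff1234 >> 12) & 0xff = 0xf1 in every
   MMU mode, and then drops the matching tb_jmp_cache entries. */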
2015 /* update the TLBs so that writes to code in the virtual page 'addr' can be detected */
2017 static void tlb_protect_code(ram_addr_t ram_addr)
2019 cpu_physical_memory_reset_dirty(ram_addr,
2020 ram_addr + TARGET_PAGE_SIZE,
2024 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2025 tested for self-modifying code */
2026 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2029 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2032 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2033 unsigned long start, unsigned long length)
2036 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2037 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2038 if ((addr - start) < length) {
2039 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2044 /* Note: start and end must be within the same ram block. */
2045 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2049 unsigned long length, start1;
2052 start &= TARGET_PAGE_MASK;
2053 end = TARGET_PAGE_ALIGN(end);
2055 length = end - start;
2058 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2060 /* we modify the TLB cache so that the dirty bit will be set again
2061 when accessing the range */
2062 start1 = (unsigned long)qemu_safe_ram_ptr(start);
2063 /* Check that we don't span multiple blocks - this breaks the
2064 address comparisons below. */
2065 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
2066 != (end - 1) - start) {
2070 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2072 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2073 for(i = 0; i < CPU_TLB_SIZE; i++)
2074 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2080 int cpu_physical_memory_set_dirty_tracking(int enable)
2083 in_migration = enable;
2084 ret = cpu_notify_migration_log(!!enable);
2088 int cpu_physical_memory_get_dirty_tracking(void)
2090 return in_migration;
2093 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2094 target_phys_addr_t end_addr)
2098 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2102 int cpu_physical_log_start(target_phys_addr_t start_addr,
2105 CPUPhysMemoryClient *client;
2106 QLIST_FOREACH(client, &memory_client_list, list) {
2107 if (client->log_start) {
2108 int r = client->log_start(client, start_addr, size);
2117 int cpu_physical_log_stop(target_phys_addr_t start_addr,
2120 CPUPhysMemoryClient *client;
2121 QLIST_FOREACH(client, &memory_client_list, list) {
2122 if (client->log_stop) {
2123 int r = client->log_stop(client, start_addr, size);
2132 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2134 ram_addr_t ram_addr;
2137 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2138 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2139 + tlb_entry->addend);
2140 ram_addr = qemu_ram_addr_from_host_nofail(p);
2141 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2142 tlb_entry->addr_write |= TLB_NOTDIRTY;
2147 /* update the TLB according to the current state of the dirty bits */
2148 void cpu_tlb_update_dirty(CPUState *env)
2152 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2153 for(i = 0; i < CPU_TLB_SIZE; i++)
2154 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2158 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2160 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2161 tlb_entry->addr_write = vaddr;
2164 /* update the TLB corresponding to virtual page vaddr
2165 so that it is no longer dirty */
2166 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2171 vaddr &= TARGET_PAGE_MASK;
2172 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2173 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2174 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2177 /* Our TLB does not support large pages, so remember the area covered by
2178 large pages and trigger a full TLB flush if these are invalidated. */
2179 static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2182 target_ulong mask = ~(size - 1);
2184 if (env->tlb_flush_addr == (target_ulong)-1) {
2185 env->tlb_flush_addr = vaddr & mask;
2186 env->tlb_flush_mask = mask;
2189 /* Extend the existing region to include the new page.
2190 This is a compromise between unnecessary flushes and the cost
2191 of maintaining a full variable size TLB. */
2192 mask &= env->tlb_flush_mask;
2193 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2196 env->tlb_flush_addr &= mask;
2197 env->tlb_flush_mask = mask;
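/* Tracking sketch: the first 2 MB large page at vaddr 0x40000000 records
   tlb_flush_addr = 0x40000000 and tlb_flush_mask = 0xffe00000.  A second
   2 MB page at 0x40400000 then widens the mask (the mask <<= 1 step sits
   on an elided line) until the differing bit falls outside it, leaving
   mask = 0xff800000: a single 8 MB region [0x40000000, 0x40800000) whose
   pages, when flushed, force the full flush in tlb_flush_page() above. */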
2200 /* Add a new TLB entry. At most one entry for a given virtual address
2201 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2202 supplied size is only used by tlb_flush_page. */
2203 void tlb_set_page(CPUState *env, target_ulong vaddr,
2204 target_phys_addr_t paddr, int prot,
2205 int mmu_idx, target_ulong size)
2210 target_ulong address;
2211 target_ulong code_address;
2212 unsigned long addend;
2215 target_phys_addr_t iotlb;
2217 assert(size >= TARGET_PAGE_SIZE);
2218 if (size != TARGET_PAGE_SIZE) {
2219 tlb_add_large_page(env, vaddr, size);
2221 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2223 pd = IO_MEM_UNASSIGNED;
2225 pd = p->phys_offset;
2227 #if defined(DEBUG_TLB)
2228 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2229 " prot=%x idx=%d pd=0x%08lx\n",
2230 vaddr, paddr, prot, mmu_idx, pd);
2234 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2235 /* IO memory case (romd handled later) */
2236 address |= TLB_MMIO;
2238 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2239 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2241 iotlb = pd & TARGET_PAGE_MASK;
2242 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2243 iotlb |= IO_MEM_NOTDIRTY;
2245 iotlb |= IO_MEM_ROM;
2247 /* IO handlers are currently passed a physical address.
2248 It would be nice to pass an offset from the base address
2249 of that region. This would avoid having to special case RAM,
2250 and avoid full address decoding in every device.
2251 We can't use the high bits of pd for this because
2252 IO_MEM_ROMD uses these as a ram address. */
2253 iotlb = (pd & ~TARGET_PAGE_MASK);
2255 iotlb += p->region_offset;
2261 code_address = address;
2262 /* Make accesses to pages with watchpoints go via the
2263 watchpoint trap routines. */
2264 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2265 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2266 /* Avoid trapping reads of pages with a write breakpoint. */
2267 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2268 iotlb = io_mem_watch + paddr;
2269 address |= TLB_MMIO;
2275 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2276 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2277 te = &env->tlb_table[mmu_idx][index];
2278 te->addend = addend - vaddr;
2279 if (prot & PAGE_READ) {
2280 te->addr_read = address;
2285 if (prot & PAGE_EXEC) {
2286 te->addr_code = code_address;
2290 if (prot & PAGE_WRITE) {
2291 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2292 (pd & IO_MEM_ROMD)) {
2293 /* Write access calls the I/O callback. */
2294 te->addr_write = address | TLB_MMIO;
2295 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2296 !cpu_physical_memory_is_dirty(pd)) {
2297 te->addr_write = address | TLB_NOTDIRTY;
2299 te->addr_write = address;
2302 te->addr_write = -1;
2308 void tlb_flush(CPUState *env, int flush_global)
2312 void tlb_flush_page(CPUState *env, target_ulong addr)
2317 * Walks guest process memory "regions" one by one
2318 * and calls callback function 'fn' for each region.
2321 struct walk_memory_regions_data
2323 walk_memory_regions_fn fn;
2325 unsigned long start;
2329 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2330 abi_ulong end, int new_prot)
2332 if (data->start != -1ul) {
2333 int rc = data->fn(data->priv, data->start, end, data->prot);
2339 data->start = (new_prot ? end : -1ul);
2340 data->prot = new_prot;
2345 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2346 abi_ulong base, int level, void **lp)
2352 return walk_memory_regions_end(data, base, 0);
2357 for (i = 0; i < L2_SIZE; ++i) {
2358 int prot = pd[i].flags;
2360 pa = base | (i << TARGET_PAGE_BITS);
2361 if (prot != data->prot) {
2362 rc = walk_memory_regions_end(data, pa, prot);
2370 for (i = 0; i < L2_SIZE; ++i) {
2371 pa = base | ((abi_ulong)i <<
2372 (TARGET_PAGE_BITS + L2_BITS * level));
2373 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2383 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2385 struct walk_memory_regions_data data;
2393 for (i = 0; i < V_L1_SIZE; i++) {
2394 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2395 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2401 return walk_memory_regions_end(&data, 0, 0);
2404 static int dump_region(void *priv, abi_ulong start,
2405 abi_ulong end, unsigned long prot)
2407 FILE *f = (FILE *)priv;
2409 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2410 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2411 start, end, end - start,
2412 ((prot & PAGE_READ) ? 'r' : '-'),
2413 ((prot & PAGE_WRITE) ? 'w' : '-'),
2414 ((prot & PAGE_EXEC) ? 'x' : '-'));
2419 /* dump memory mappings */
2420 void page_dump(FILE *f)
2422 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2423 "start", "end", "size", "prot");
2424 walk_memory_regions(f, dump_region);
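/* Sample output (illustrative addresses only, 32-bit ABI formatting):

       start    end      size     prot
       00400000-00452000 00052000 r-x
       00452000-00458000 00006000 rw-

   Each row comes from dump_region() above, one per maximal run of pages
   sharing the same PAGE_READ/PAGE_WRITE/PAGE_EXEC flags. */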
2427 int page_get_flags(target_ulong address)
2431 p = page_find(address >> TARGET_PAGE_BITS);
2437 /* Modify the flags of a page and invalidate the code if necessary.
2438 The flag PAGE_WRITE_ORG is set automatically depending
2439 on PAGE_WRITE. The mmap_lock should already be held. */
2440 void page_set_flags(target_ulong start, target_ulong end, int flags)
2442 target_ulong addr, len;
2444 /* This function should never be called with addresses outside the
2445 guest address space. If this assert fires, it probably indicates
2446 a missing call to h2g_valid. */
2447 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2448 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2450 assert(start < end);
2452 start = start & TARGET_PAGE_MASK;
2453 end = TARGET_PAGE_ALIGN(end);
2455 if (flags & PAGE_WRITE) {
2456 flags |= PAGE_WRITE_ORG;
2459 for (addr = start, len = end - start;
2461 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2462 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2464 /* If the write protection bit is set, then we invalidate the code inside. */
2466 if (!(p->flags & PAGE_WRITE) &&
2467 (flags & PAGE_WRITE) &&
2469 tb_invalidate_phys_page(addr, 0, NULL);
2475 int page_check_range(target_ulong start, target_ulong len, int flags)
2481 /* This function should never be called with addresses outside the
2482 guest address space. If this assert fires, it probably indicates
2483 a missing call to h2g_valid. */
2484 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2485 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2491 if (start + len - 1 < start) {
2492 /* We've wrapped around. */
2496 end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
2497 start = start & TARGET_PAGE_MASK;
2499 for (addr = start, len = end - start;
2501 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2502 p = page_find(addr >> TARGET_PAGE_BITS);
2505 if (!(p->flags & PAGE_VALID))
2508 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2510 if (flags & PAGE_WRITE) {
2511 if (!(p->flags & PAGE_WRITE_ORG))
2513 /* unprotect the page if it was put read-only because it
2514 contains translated code */
2515 if (!(p->flags & PAGE_WRITE)) {
2516 if (!page_unprotect(addr, 0, NULL))
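/*
 * A minimal sketch of the typical callers of the two functions above: target
 * mmap/mprotect emulation records new protections with page_set_flags()
 * (with the mmap_lock held), and syscall helpers validate guest buffers with
 * page_check_range() before touching them.  The example_* names, addresses
 * and lengths are hypothetical.
 */
#if 0   /* illustrative sketch, not built */
static void example_track_mprotect(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}

static int example_guest_buffer_writable(target_ulong addr, target_ulong len)
{
    /* returns 0 when every page in the range is valid and writable */
    return page_check_range(addr, len, PAGE_WRITE);
}
#endif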
2525 /* called from signal handler: invalidate the code and unprotect the
2526 page. Return TRUE if the fault was successfully handled. */
2527 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2531 target_ulong host_start, host_end, addr;
2533 /* Technically this isn't safe inside a signal handler. However we
2534 know this only ever happens in a synchronous SEGV handler, so in
2535 practice it seems to be ok. */
2538 p = page_find(address >> TARGET_PAGE_BITS);
2544 /* if the page was really writable, then we change its
2545 protection back to writable */
2546 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2547 host_start = address & qemu_host_page_mask;
2548 host_end = host_start + qemu_host_page_size;
2551 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2552 p = page_find(addr >> TARGET_PAGE_BITS);
2553 p->flags |= PAGE_WRITE;
2556 /* and since the content will be modified, we must invalidate
2557 the corresponding translated code. */
2558 tb_invalidate_phys_page(addr, pc, puc);
2559 #ifdef DEBUG_TB_CHECK
2560 tb_invalidate_check(addr);
2563 mprotect((void *)g2h(host_start), qemu_host_page_size,
2573 static inline void tlb_set_dirty(CPUState *env,
2574 unsigned long addr, target_ulong vaddr)
2577 #endif /* defined(CONFIG_USER_ONLY) */
2579 #if !defined(CONFIG_USER_ONLY)
2581 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2582 typedef struct subpage_t {
2583 target_phys_addr_t base;
2584 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2585 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2588 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2589 ram_addr_t memory, ram_addr_t region_offset);
2590 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2591 ram_addr_t orig_memory,
2592 ram_addr_t region_offset);
2593 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2596 if (addr > start_addr) \
2599 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2600 if (start_addr2 > 0) \
2604 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2605 end_addr2 = TARGET_PAGE_SIZE - 1; \
2607 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2608 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2613 /* register physical memory.
2614 For RAM, 'size' must be a multiple of the target page size.
2615 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2616 io memory page. The address used when calling the IO function is
2617 the offset from the start of the region, plus region_offset. Both
2618 start_addr and region_offset are rounded down to a page boundary
2619 before calculating this offset. This should not be a problem unless
2620 the low bits of start_addr and region_offset differ. */
2621 void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
2623 ram_addr_t phys_offset,
2624 ram_addr_t region_offset,
2627 target_phys_addr_t addr, end_addr;
2630 ram_addr_t orig_size = size;
2634 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
2636 if (phys_offset == IO_MEM_UNASSIGNED) {
2637 region_offset = start_addr;
2639 region_offset &= TARGET_PAGE_MASK;
2640 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2641 end_addr = start_addr + (target_phys_addr_t)size;
2645 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2646 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2647 ram_addr_t orig_memory = p->phys_offset;
2648 target_phys_addr_t start_addr2, end_addr2;
2649 int need_subpage = 0;
2651 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2654 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2655 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2656 &p->phys_offset, orig_memory,
2659 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2662 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2664 p->region_offset = 0;
2666 p->phys_offset = phys_offset;
2667 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2668 (phys_offset & IO_MEM_ROMD))
2669 phys_offset += TARGET_PAGE_SIZE;
2672 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2673 p->phys_offset = phys_offset;
2674 p->region_offset = region_offset;
2675 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2676 (phys_offset & IO_MEM_ROMD)) {
2677 phys_offset += TARGET_PAGE_SIZE;
2679 target_phys_addr_t start_addr2, end_addr2;
2680 int need_subpage = 0;
2682 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2683 end_addr2, need_subpage);
2686 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2687 &p->phys_offset, IO_MEM_UNASSIGNED,
2688 addr & TARGET_PAGE_MASK);
2689 subpage_register(subpage, start_addr2, end_addr2,
2690 phys_offset, region_offset);
2691 p->region_offset = 0;
2695 region_offset += TARGET_PAGE_SIZE;
2696 addr += TARGET_PAGE_SIZE;
2697 } while (addr != end_addr);
2699 /* since each CPU stores ram addresses in its TLB cache, we must
2700 reset the modified entries */
2702 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2707 /* XXX: temporary until new memory mapping API */
2708 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2712 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2714 return IO_MEM_UNASSIGNED;
2715 return p->phys_offset;
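/*
 * A minimal sketch of how board code of this vintage wires RAM into the
 * physical address map, assuming the usual cpu_register_physical_memory()
 * wrapper around the _log() variant above; "example.ram" and the addresses
 * are hypothetical.
 */
#if 0   /* illustrative sketch, not built */
static void example_map_ram(void)
{
    ram_addr_t ram_offset = qemu_ram_alloc(NULL, "example.ram", 0x02000000);

    cpu_register_physical_memory(0x00000000, 0x02000000,
                                 ram_offset | IO_MEM_RAM);
    /* cpu_get_physical_page_desc(0) now reports a descriptor whose low
       bits (desc & ~TARGET_PAGE_MASK) equal IO_MEM_RAM. */
}
#endif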
2718 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2721 kvm_coalesce_mmio_region(addr, size);
2724 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2727 kvm_uncoalesce_mmio_region(addr, size);
2730 void qemu_flush_coalesced_mmio_buffer(void)
2733 kvm_flush_coalesced_mmio_buffer();
2736 #if defined(__linux__) && !defined(TARGET_S390X)
2738 #include <sys/vfs.h>
2740 #define HUGETLBFS_MAGIC 0x958458f6
2742 static long gethugepagesize(const char *path)
2748 ret = statfs(path, &fs);
2749 } while (ret != 0 && errno == EINTR);
2756 if (fs.f_type != HUGETLBFS_MAGIC)
2757 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2762 static void *file_ram_alloc(RAMBlock *block,
2772 unsigned long hpagesize;
2774 hpagesize = gethugepagesize(path);
2779 if (memory < hpagesize) {
2783 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2784 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2788 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2792 fd = mkstemp(filename);
2794 perror("unable to create backing store for hugepages");
2801 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2804 * ftruncate is not supported by hugetlbfs in older
2805 * hosts, so don't bother bailing out on errors.
2806 * If anything goes wrong with it under other filesystems, mmap will fail. */
2809 if (ftruncate(fd, memory))
2810 perror("ftruncate");
2813 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2814 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2815 * to sidestep this quirk.
2817 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2818 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2820 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2822 if (area == MAP_FAILED) {
2823 perror("file_ram_alloc: can't mmap RAM pages");
2832 static ram_addr_t find_ram_offset(ram_addr_t size)
2834 RAMBlock *block, *next_block;
2835 ram_addr_t offset = 0, mingap = ULONG_MAX;
2837 if (QLIST_EMPTY(&ram_list.blocks))
2840 QLIST_FOREACH(block, &ram_list.blocks, next) {
2841 ram_addr_t end, next = ULONG_MAX;
2843 end = block->offset + block->length;
2845 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2846 if (next_block->offset >= end) {
2847 next = MIN(next, next_block->offset);
2850 if (next - end >= size && next - end < mingap) {
2852 mingap = next - end;
2858 static ram_addr_t last_ram_offset(void)
2861 ram_addr_t last = 0;
2863 QLIST_FOREACH(block, &ram_list.blocks, next)
2864 last = MAX(last, block->offset + block->length);
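/*
 * Worked example for the best-fit search in find_ram_offset() above: with
 * blocks at [0x0, 0x100000) and [0x500000, 0x600000), a request for 0x80000
 * bytes sees a 0x400000-byte gap at 0x100000 and an unbounded gap at
 * 0x600000; the smaller gap that still fits wins, so 0x100000 is returned.
 */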
2869 ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
2870 ram_addr_t size, void *host)
2872 RAMBlock *new_block, *block;
2874 size = TARGET_PAGE_ALIGN(size);
2875 new_block = qemu_mallocz(sizeof(*new_block));
2877 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2878 char *id = dev->parent_bus->info->get_dev_path(dev);
2880 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2884 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2886 QLIST_FOREACH(block, &ram_list.blocks, next) {
2887 if (!strcmp(block->idstr, new_block->idstr)) {
2888 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2894 new_block->offset = find_ram_offset(size);
2896 new_block->host = host;
2897 new_block->flags |= RAM_PREALLOC_MASK;
2900 #if defined (__linux__) && !defined(TARGET_S390X)
2901 new_block->host = file_ram_alloc(new_block, size, mem_path);
2902 if (!new_block->host) {
2903 new_block->host = qemu_vmalloc(size);
2904 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2907 fprintf(stderr, "-mem-path option unsupported\n");
2911 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2912 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2913 a system-defined value, which is at least 256GB. Larger systems
2914 have larger values. We put the guest between the end of the data
2915 segment (system break) and this value. We use 32GB as a base to
2916 have enough room for the system break to grow. */
2917 new_block->host = mmap((void*)0x800000000, size,
2918 PROT_EXEC|PROT_READ|PROT_WRITE,
2919 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2920 if (new_block->host == MAP_FAILED) {
2921 fprintf(stderr, "Allocating RAM failed\n");
2925 if (xen_mapcache_enabled()) {
2926 xen_ram_alloc(new_block->offset, size);
2928 new_block->host = qemu_vmalloc(size);
2931 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2934 new_block->length = size;
2936 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2938 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2939 last_ram_offset() >> TARGET_PAGE_BITS);
2940 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2941 0xff, size >> TARGET_PAGE_BITS);
2944 kvm_setup_guest_memory(new_block->host, size);
2946 return new_block->offset;
2949 ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2951 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
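/*
 * A minimal sketch contrasting the two allocators above: the _from_ptr()
 * variant registers memory the device has already mapped itself (for example
 * a shared-memory file), while qemu_ram_alloc() lets QEMU allocate it.  The
 * example_* names are hypothetical.
 */
#if 0   /* illustrative sketch, not built */
static ram_addr_t example_register_backing(DeviceState *dev, void *mapped,
                                           ram_addr_t size)
{
    if (mapped) {
        return qemu_ram_alloc_from_ptr(dev, "example.shmem", size, mapped);
    }
    return qemu_ram_alloc(dev, "example.shmem", size);
}
#endif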
2954 void qemu_ram_free_from_ptr(ram_addr_t addr)
2958 QLIST_FOREACH(block, &ram_list.blocks, next) {
2959 if (addr == block->offset) {
2960 QLIST_REMOVE(block, next);
2967 void qemu_ram_free(ram_addr_t addr)
2971 QLIST_FOREACH(block, &ram_list.blocks, next) {
2972 if (addr == block->offset) {
2973 QLIST_REMOVE(block, next);
2974 if (block->flags & RAM_PREALLOC_MASK) {
2976 } else if (mem_path) {
2977 #if defined (__linux__) && !defined(TARGET_S390X)
2979 munmap(block->host, block->length);
2982 qemu_vfree(block->host);
2988 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2989 munmap(block->host, block->length);
2991 if (xen_mapcache_enabled()) {
2992 qemu_invalidate_entry(block->host);
2994 qemu_vfree(block->host);
3006 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3013 QLIST_FOREACH(block, &ram_list.blocks, next) {
3014 offset = addr - block->offset;
3015 if (offset < block->length) {
3016 vaddr = block->host + offset;
3017 if (block->flags & RAM_PREALLOC_MASK) {
3021 munmap(vaddr, length);
3023 #if defined(__linux__) && !defined(TARGET_S390X)
3026 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3029 flags |= MAP_PRIVATE;
3031 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3032 flags, block->fd, offset);
3034 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3035 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3042 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3043 flags |= MAP_SHARED | MAP_ANONYMOUS;
3044 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3047 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3048 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3052 if (area != vaddr) {
3053 fprintf(stderr, "Could not remap addr: %lx@%lx\n",
3057 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3063 #endif /* !_WIN32 */
3065 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3066 With the exception of the softmmu code in this file, this should
3067 only be used for local memory (e.g. video ram) that the device owns,
3068 and knows it isn't going to access beyond the end of the block.
3070 It should not be used for general purpose DMA.
3071 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3073 void *qemu_get_ram_ptr(ram_addr_t addr)
3077 QLIST_FOREACH(block, &ram_list.blocks, next) {
3078 if (addr - block->offset < block->length) {
3079 /* Move this entry to the start of the list. */
3080 if (block != QLIST_FIRST(&ram_list.blocks)) {
3081 QLIST_REMOVE(block, next);
3082 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3084 if (xen_mapcache_enabled()) {
3085 /* We need to check if the requested address is in the RAM
3086 * because we don't want to map the entire memory in QEMU.
3088 if (block->offset == 0) {
3089 return qemu_map_cache(addr, 0, 1);
3090 } else if (block->host == NULL) {
3091 block->host = xen_map_block(block->offset, block->length);
3094 return block->host + (addr - block->offset);
3098 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3104 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3105 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3107 void *qemu_safe_ram_ptr(ram_addr_t addr)
3111 QLIST_FOREACH(block, &ram_list.blocks, next) {
3112 if (addr - block->offset < block->length) {
3113 if (xen_mapcache_enabled()) {
3114 /* We need to check if the requested address is in the RAM
3115 * because we don't want to map the entire memory in QEMU.
3117 if (block->offset == 0) {
3118 return qemu_map_cache(addr, 0, 1);
3119 } else if (block->host == NULL) {
3120 block->host = xen_map_block(block->offset, block->length);
3123 return block->host + (addr - block->offset);
3127 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3133 void qemu_put_ram_ptr(void *addr)
3135 trace_qemu_put_ram_ptr(addr);
3137 if (xen_mapcache_enabled()) {
3140 QLIST_FOREACH(block, &ram_list.blocks, next) {
3141 if (addr == block->host) {
3145 if (block && block->host) {
3146 xen_unmap_block(block->host, block->length);
3149 qemu_map_cache_unlock(addr);
3154 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3157 uint8_t *host = ptr;
3159 QLIST_FOREACH(block, &ram_list.blocks, next) {
3160 /* This case can happen when the block is not mapped. */
3161 if (block->host == NULL) {
3164 if (host - block->host < block->length) {
3165 *ram_addr = block->offset + (host - block->host);
3170 if (xen_mapcache_enabled()) {
3171 *ram_addr = qemu_ram_addr_from_mapcache(ptr);
3178 /* Some of the softmmu routines need to translate from a host pointer
3179 (typically a TLB entry) back to a ram offset. */
3180 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3182 ram_addr_t ram_addr;
3184 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3185 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3191 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
3193 #ifdef DEBUG_UNASSIGNED
3194 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3196 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3197 do_unassigned_access(addr, 0, 0, 0, 1);
3202 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
3204 #ifdef DEBUG_UNASSIGNED
3205 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3207 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3208 do_unassigned_access(addr, 0, 0, 0, 2);
3213 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
3215 #ifdef DEBUG_UNASSIGNED
3216 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3218 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3219 do_unassigned_access(addr, 0, 0, 0, 4);
3224 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
3226 #ifdef DEBUG_UNASSIGNED
3227 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3229 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3230 do_unassigned_access(addr, 1, 0, 0, 1);
3234 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3236 #ifdef DEBUG_UNASSIGNED
3237 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3239 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3240 do_unassigned_access(addr, 1, 0, 0, 2);
3244 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3246 #ifdef DEBUG_UNASSIGNED
3247 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3249 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3250 do_unassigned_access(addr, 1, 0, 0, 4);
3254 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
3255 unassigned_mem_readb,
3256 unassigned_mem_readw,
3257 unassigned_mem_readl,
3260 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
3261 unassigned_mem_writeb,
3262 unassigned_mem_writew,
3263 unassigned_mem_writel,
3266 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3270 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3271 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3272 #if !defined(CONFIG_USER_ONLY)
3273 tb_invalidate_phys_page_fast(ram_addr, 1);
3274 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3277 stb_p(qemu_get_ram_ptr(ram_addr), val);
3278 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3279 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3280 /* we remove the notdirty callback only if the code has been flushed */
3282 if (dirty_flags == 0xff)
3283 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3286 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3290 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3291 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3292 #if !defined(CONFIG_USER_ONLY)
3293 tb_invalidate_phys_page_fast(ram_addr, 2);
3294 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3297 stw_p(qemu_get_ram_ptr(ram_addr), val);
3298 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3299 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3300 /* we remove the notdirty callback only if the code has been flushed */
3302 if (dirty_flags == 0xff)
3303 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3306 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3310 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3311 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3312 #if !defined(CONFIG_USER_ONLY)
3313 tb_invalidate_phys_page_fast(ram_addr, 4);
3314 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3317 stl_p(qemu_get_ram_ptr(ram_addr), val);
3318 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3319 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3320 /* we remove the notdirty callback only if the code has been flushed */
3322 if (dirty_flags == 0xff)
3323 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3326 static CPUReadMemoryFunc * const error_mem_read[3] = {
3327 NULL, /* never used */
3328 NULL, /* never used */
3329 NULL, /* never used */
3332 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3333 notdirty_mem_writeb,
3334 notdirty_mem_writew,
3335 notdirty_mem_writel,
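/*
 * The notdirty handlers above are what the TLB points write accesses at while
 * a RAM page still contains translated code.  Below is a minimal sketch of
 * the equivalent bookkeeping that direct host-pointer writers must do by
 * hand, exactly as cpu_physical_memory_rw() does further down; the example_*
 * name is hypothetical.
 */
#if 0   /* illustrative sketch, not built */
static void example_mark_guest_ram_written(ram_addr_t addr1, int len)
{
    if (!cpu_physical_memory_is_dirty(addr1)) {
        /* invalidate any translated code in the written range */
        tb_invalidate_phys_page_range(addr1, addr1 + len, 0);
    }
    /* set every dirty bit except CODE_DIRTY_FLAG */
    cpu_physical_memory_set_dirty_flags(addr1, 0xff & ~CODE_DIRTY_FLAG);
}
#endif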
3338 /* Generate a debug exception if a watchpoint has been hit. */
3339 static void check_watchpoint(int offset, int len_mask, int flags)
3341 CPUState *env = cpu_single_env;
3342 target_ulong pc, cs_base;
3343 TranslationBlock *tb;
3348 if (env->watchpoint_hit) {
3349 /* We re-entered the check after replacing the TB. Now raise
3350 * the debug interrupt so that it will trigger after the
3351 * current instruction. */
3352 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3355 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3356 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3357 if ((vaddr == (wp->vaddr & len_mask) ||
3358 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3359 wp->flags |= BP_WATCHPOINT_HIT;
3360 if (!env->watchpoint_hit) {
3361 env->watchpoint_hit = wp;
3362 tb = tb_find_pc(env->mem_io_pc);
3364 cpu_abort(env, "check_watchpoint: could not find TB for "
3365 "pc=%p", (void *)env->mem_io_pc);
3367 cpu_restore_state(tb, env, env->mem_io_pc);
3368 tb_phys_invalidate(tb, -1);
3369 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3370 env->exception_index = EXCP_DEBUG;
3372 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3373 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3375 cpu_resume_from_signal(env, NULL);
3378 wp->flags &= ~BP_WATCHPOINT_HIT;
3383 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3384 so these check for a hit, then pass through to the normal out-of-line handlers. */
3386 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3388 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3389 return ldub_phys(addr);
3392 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3394 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3395 return lduw_phys(addr);
3398 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3400 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3401 return ldl_phys(addr);
3404 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3407 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3408 stb_phys(addr, val);
3411 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3414 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3415 stw_phys(addr, val);
3418 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3421 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3422 stl_phys(addr, val);
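/*
 * A minimal sketch of how the watch_mem_* handlers above come into play:
 * inserting a watchpoint (cpu_watchpoint_insert() is assumed to be the helper
 * defined earlier in this file) makes accesses to that page funnel through
 * check_watchpoint() via the TLB tricks mentioned above.  The example_* name
 * and the 4-byte length are hypothetical.
 */
#if 0   /* illustrative sketch, not built */
static void example_watch_guest_word(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* trap writes to the 4-byte word at vaddr */
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp);
}
#endif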
3425 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3431 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3437 static inline uint32_t subpage_readlen (subpage_t *mmio,
3438 target_phys_addr_t addr,
3441 unsigned int idx = SUBPAGE_IDX(addr);
3442 #if defined(DEBUG_SUBPAGE)
3443 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3444 mmio, len, addr, idx);
3447 addr += mmio->region_offset[idx];
3448 idx = mmio->sub_io_index[idx];
3449 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3452 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3453 uint32_t value, unsigned int len)
3455 unsigned int idx = SUBPAGE_IDX(addr);
3456 #if defined(DEBUG_SUBPAGE)
3457 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3458 __func__, mmio, len, addr, idx, value);
3461 addr += mmio->region_offset[idx];
3462 idx = mmio->sub_io_index[idx];
3463 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3466 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3468 return subpage_readlen(opaque, addr, 0);
3471 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3474 subpage_writelen(opaque, addr, value, 0);
3477 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3479 return subpage_readlen(opaque, addr, 1);
3482 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3485 subpage_writelen(opaque, addr, value, 1);
3488 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3490 return subpage_readlen(opaque, addr, 2);
3493 static void subpage_writel (void *opaque, target_phys_addr_t addr,
3496 subpage_writelen(opaque, addr, value, 2);
3499 static CPUReadMemoryFunc * const subpage_read[] = {
3505 static CPUWriteMemoryFunc * const subpage_write[] = {
3511 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3512 ram_addr_t memory, ram_addr_t region_offset)
3516 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3518 idx = SUBPAGE_IDX(start);
3519 eidx = SUBPAGE_IDX(end);
3520 #if defined(DEBUG_SUBPAGE)
3521 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3522 mmio, start, end, idx, eidx, memory);
3524 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3525 memory = IO_MEM_UNASSIGNED;
3526 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3527 for (; idx <= eidx; idx++) {
3528 mmio->sub_io_index[idx] = memory;
3529 mmio->region_offset[idx] = region_offset;
3535 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3536 ram_addr_t orig_memory,
3537 ram_addr_t region_offset)
3542 mmio = qemu_mallocz(sizeof(subpage_t));
3545 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3546 DEVICE_NATIVE_ENDIAN);
3547 #if defined(DEBUG_SUBPAGE)
3548 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3549 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3551 *phys = subpage_memory | IO_MEM_SUBPAGE;
3552 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3557 static int get_free_io_mem_idx(void)
3561 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3562 if (!io_mem_used[i]) {
3566 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3571 * Usually, devices operate in little endian mode. There are devices out
3572 * there that operate in big endian too. Each device gets byte swapped
3573 * mmio if plugged onto a CPU that does the other endianness.
3583 typedef struct SwapEndianContainer {
3584 CPUReadMemoryFunc *read[3];
3585 CPUWriteMemoryFunc *write[3];
3587 } SwapEndianContainer;
3589 static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3592 SwapEndianContainer *c = opaque;
3593 val = c->read[0](c->opaque, addr);
3597 static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3600 SwapEndianContainer *c = opaque;
3601 val = bswap16(c->read[1](c->opaque, addr));
3605 static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3608 SwapEndianContainer *c = opaque;
3609 val = bswap32(c->read[2](c->opaque, addr));
3613 static CPUReadMemoryFunc * const swapendian_readfn[3]={
3614 swapendian_mem_readb,
3615 swapendian_mem_readw,
3616 swapendian_mem_readl
3619 static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3622 SwapEndianContainer *c = opaque;
3623 c->write[0](c->opaque, addr, val);
3626 static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3629 SwapEndianContainer *c = opaque;
3630 c->write[1](c->opaque, addr, bswap16(val));
3633 static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3636 SwapEndianContainer *c = opaque;
3637 c->write[2](c->opaque, addr, bswap32(val));
3640 static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3641 swapendian_mem_writeb,
3642 swapendian_mem_writew,
3643 swapendian_mem_writel
3646 static void swapendian_init(int io_index)
3648 SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3651 /* Swap mmio for big endian targets */
3652 c->opaque = io_mem_opaque[io_index];
3653 for (i = 0; i < 3; i++) {
3654 c->read[i] = io_mem_read[io_index][i];
3655 c->write[i] = io_mem_write[io_index][i];
3657 io_mem_read[io_index][i] = swapendian_readfn[i];
3658 io_mem_write[io_index][i] = swapendian_writefn[i];
3660 io_mem_opaque[io_index] = c;
3663 static void swapendian_del(int io_index)
3665 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3666 qemu_free(io_mem_opaque[io_index]);
3670 /* mem_read and mem_write are arrays of functions containing the
3671 function to access byte (index 0), word (index 1) and dword (index
3672 2). Functions can be omitted with a NULL function pointer.
3673 If io_index is non zero, the corresponding io zone is
3674 modified. If it is zero, a new io zone is allocated. The return
3675 value can be used with cpu_register_physical_memory(). (-1) is
3676 returned on error. */
3677 static int cpu_register_io_memory_fixed(int io_index,
3678 CPUReadMemoryFunc * const *mem_read,
3679 CPUWriteMemoryFunc * const *mem_write,
3680 void *opaque, enum device_endian endian)
3684 if (io_index <= 0) {
3685 io_index = get_free_io_mem_idx();
3689 io_index >>= IO_MEM_SHIFT;
3690 if (io_index >= IO_MEM_NB_ENTRIES)
3694 for (i = 0; i < 3; ++i) {
3695 io_mem_read[io_index][i]
3696 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3698 for (i = 0; i < 3; ++i) {
3699 io_mem_write[io_index][i]
3700 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3702 io_mem_opaque[io_index] = opaque;
3705 case DEVICE_BIG_ENDIAN:
3706 #ifndef TARGET_WORDS_BIGENDIAN
3707 swapendian_init(io_index);
3710 case DEVICE_LITTLE_ENDIAN:
3711 #ifdef TARGET_WORDS_BIGENDIAN
3712 swapendian_init(io_index);
3715 case DEVICE_NATIVE_ENDIAN:
3720 return (io_index << IO_MEM_SHIFT);
3723 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3724 CPUWriteMemoryFunc * const *mem_write,
3725 void *opaque, enum device_endian endian)
3727 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
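/*
 * A minimal sketch of a device using the registration function above: the
 * byte/word accessors are left NULL (so the unassigned handlers are used),
 * and the returned descriptor is then mapped with
 * cpu_register_physical_memory(), assumed to be the usual wrapper around the
 * _log() variant.  All example_* names are hypothetical.
 */
#if 0   /* illustrative sketch, not built */
typedef struct ExampleState {
    uint32_t reg;
} ExampleState;

static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleState *s = opaque;
    return s->reg;
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    ExampleState *s = opaque;
    s->reg = val;
}

static CPUReadMemoryFunc * const example_mmio_read[3] = {
    NULL, NULL, example_mmio_readl,
};

static CPUWriteMemoryFunc * const example_mmio_write[3] = {
    NULL, NULL, example_mmio_writel,
};

static void example_mmio_map(ExampleState *s, target_phys_addr_t base)
{
    int io = cpu_register_io_memory(example_mmio_read, example_mmio_write,
                                    s, DEVICE_LITTLE_ENDIAN);

    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif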
3730 void cpu_unregister_io_memory(int io_table_address)
3733 int io_index = io_table_address >> IO_MEM_SHIFT;
3735 swapendian_del(io_index);
3737 for (i = 0; i < 3; i++) {
3738 io_mem_read[io_index][i] = unassigned_mem_read[i];
3739 io_mem_write[io_index][i] = unassigned_mem_write[i];
3741 io_mem_opaque[io_index] = NULL;
3742 io_mem_used[io_index] = 0;
3745 static void io_mem_init(void)
3749 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3750 unassigned_mem_write, NULL,
3751 DEVICE_NATIVE_ENDIAN);
3752 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3753 unassigned_mem_write, NULL,
3754 DEVICE_NATIVE_ENDIAN);
3755 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3756 notdirty_mem_write, NULL,
3757 DEVICE_NATIVE_ENDIAN);
3761 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3762 watch_mem_write, NULL,
3763 DEVICE_NATIVE_ENDIAN);
3766 #endif /* !defined(CONFIG_USER_ONLY) */
3768 /* physical memory access (slow version, mainly for debug) */
3769 #if defined(CONFIG_USER_ONLY)
3770 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3771 uint8_t *buf, int len, int is_write)
3778 page = addr & TARGET_PAGE_MASK;
3779 l = (page + TARGET_PAGE_SIZE) - addr;
3782 flags = page_get_flags(page);
3783 if (!(flags & PAGE_VALID))
3786 if (!(flags & PAGE_WRITE))
3788 /* XXX: this code should not depend on lock_user */
3789 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3792 unlock_user(p, addr, l);
3794 if (!(flags & PAGE_READ))
3796 /* XXX: this code should not depend on lock_user */
3797 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3800 unlock_user(p, addr, 0);
3810 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3811 int len, int is_write)
3816 target_phys_addr_t page;
3821 page = addr & TARGET_PAGE_MASK;
3822 l = (page + TARGET_PAGE_SIZE) - addr;
3825 p = phys_page_find(page >> TARGET_PAGE_BITS);
3827 pd = IO_MEM_UNASSIGNED;
3829 pd = p->phys_offset;
3833 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3834 target_phys_addr_t addr1 = addr;
3835 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3837 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3838 /* XXX: could force cpu_single_env to NULL to avoid potential bugs */
3840 if (l >= 4 && ((addr1 & 3) == 0)) {
3841 /* 32 bit write access */
3843 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3845 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3846 /* 16 bit write access */
3848 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3851 /* 8 bit write access */
3853 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3857 unsigned long addr1;
3858 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3860 ptr = qemu_get_ram_ptr(addr1);
3861 memcpy(ptr, buf, l);
3862 if (!cpu_physical_memory_is_dirty(addr1)) {
3863 /* invalidate code */
3864 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3866 cpu_physical_memory_set_dirty_flags(
3867 addr1, (0xff & ~CODE_DIRTY_FLAG));
3869 qemu_put_ram_ptr(ptr);
3872 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3873 !(pd & IO_MEM_ROMD)) {
3874 target_phys_addr_t addr1 = addr;
3876 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3878 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3879 if (l >= 4 && ((addr1 & 3) == 0)) {
3880 /* 32 bit read access */
3881 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3884 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3885 /* 16 bit read access */
3886 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3890 /* 8 bit read access */
3891 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3897 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3898 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3899 qemu_put_ram_ptr(ptr);
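/*
 * A minimal sketch of a device model doing DMA through the function above via
 * the cpu_physical_memory_read()/write() wrappers instead of dereferencing
 * host pointers itself.  The descriptor layout and example_* names are
 * hypothetical; a real device would also convert the fields from guest
 * endianness.
 */
#if 0   /* illustrative sketch, not built */
struct example_dma_desc {
    uint32_t src, dst, len;
};

static void example_dma_run(target_phys_addr_t desc_paddr)
{
    struct example_dma_desc d;
    uint8_t byte;

    cpu_physical_memory_read(desc_paddr, (uint8_t *)&d, sizeof(d));
    while (d.len--) {
        cpu_physical_memory_read(d.src++, &byte, 1);
        cpu_physical_memory_write(d.dst++, &byte, 1);
    }
}
#endif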
3908 /* used for ROM loading: can write to RAM and ROM */
3909 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3910 const uint8_t *buf, int len)
3914 target_phys_addr_t page;
3919 page = addr & TARGET_PAGE_MASK;
3920 l = (page + TARGET_PAGE_SIZE) - addr;
3923 p = phys_page_find(page >> TARGET_PAGE_BITS);
3925 pd = IO_MEM_UNASSIGNED;
3927 pd = p->phys_offset;
3930 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3931 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3932 !(pd & IO_MEM_ROMD)) {
3935 unsigned long addr1;
3936 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3938 ptr = qemu_get_ram_ptr(addr1);
3939 memcpy(ptr, buf, l);
3940 qemu_put_ram_ptr(ptr);
3950 target_phys_addr_t addr;
3951 target_phys_addr_t len;
3954 static BounceBuffer bounce;
3956 typedef struct MapClient {
3958 void (*callback)(void *opaque);
3959 QLIST_ENTRY(MapClient) link;
3962 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3963 = QLIST_HEAD_INITIALIZER(map_client_list);
3965 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3967 MapClient *client = qemu_malloc(sizeof(*client));
3969 client->opaque = opaque;
3970 client->callback = callback;
3971 QLIST_INSERT_HEAD(&map_client_list, client, link);
3975 void cpu_unregister_map_client(void *_client)
3977 MapClient *client = (MapClient *)_client;
3979 QLIST_REMOVE(client, link);
3983 static void cpu_notify_map_clients(void)
3987 while (!QLIST_EMPTY(&map_client_list)) {
3988 client = QLIST_FIRST(&map_client_list);
3989 client->callback(client->opaque);
3990 cpu_unregister_map_client(client);
3994 /* Map a physical memory region into a host virtual address.
3995 * May map a subset of the requested range, given by and returned in *plen.
3996 * May return NULL if resources needed to perform the mapping are exhausted.
3997 * Use only for reads OR writes - not for read-modify-write operations.
3998 * Use cpu_register_map_client() to know when retrying the map operation is
3999 * likely to succeed.
4001 void *cpu_physical_memory_map(target_phys_addr_t addr,
4002 target_phys_addr_t *plen,
4005 target_phys_addr_t len = *plen;
4006 target_phys_addr_t done = 0;
4008 uint8_t *ret = NULL;
4010 target_phys_addr_t page;
4013 unsigned long addr1;
4016 page = addr & TARGET_PAGE_MASK;
4017 l = (page + TARGET_PAGE_SIZE) - addr;
4020 p = phys_page_find(page >> TARGET_PAGE_BITS);
4022 pd = IO_MEM_UNASSIGNED;
4024 pd = p->phys_offset;
4027 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4028 if (done || bounce.buffer) {
4031 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4035 cpu_physical_memory_read(addr, bounce.buffer, l);
4037 ptr = bounce.buffer;
4039 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4040 ptr = qemu_get_ram_ptr(addr1);
4044 } else if (ret + done != ptr) {
4056 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4057 * Will also mark the memory as dirty if is_write == 1. access_len gives
4058 * the amount of memory that was actually read or written by the caller.
4060 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4061 int is_write, target_phys_addr_t access_len)
4063 if (buffer != bounce.buffer) {
4065 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
4066 while (access_len) {
4068 l = TARGET_PAGE_SIZE;
4071 if (!cpu_physical_memory_is_dirty(addr1)) {
4072 /* invalidate code */
4073 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4075 cpu_physical_memory_set_dirty_flags(
4076 addr1, (0xff & ~CODE_DIRTY_FLAG));
4082 if (xen_mapcache_enabled()) {
4083 uint8_t *buffer1 = buffer;
4084 uint8_t *end_buffer = buffer + len;
4086 while (buffer1 < end_buffer) {
4087 qemu_put_ram_ptr(buffer1);
4088 buffer1 += TARGET_PAGE_SIZE;
4094 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4096 qemu_vfree(bounce.buffer);
4097 bounce.buffer = NULL;
4098 cpu_notify_map_clients();
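/*
 * A minimal sketch of the map/unmap pattern for the two functions above: map
 * as much of the range as possible, bail out (or register a map client and
 * retry) when the bounce buffer is busy, and always unmap with the length
 * actually accessed.  The example_* name is hypothetical.
 */
#if 0   /* illustrative sketch, not built */
static int example_dma_fill(target_phys_addr_t addr, const uint8_t *data,
                            target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *mem = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!mem) {
        return -1;      /* caller should retry via cpu_register_map_client() */
    }
    if (plen < len) {
        len = plen;     /* only part of the range could be mapped */
    }
    memcpy(mem, data, len);
    cpu_physical_memory_unmap(mem, plen, 1, len);
    return 0;
}
#endif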
4101 /* warning: addr must be aligned */
4102 uint32_t ldl_phys(target_phys_addr_t addr)
4110 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4112 pd = IO_MEM_UNASSIGNED;
4114 pd = p->phys_offset;
4117 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4118 !(pd & IO_MEM_ROMD)) {
4120 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4122 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4123 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4126 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4127 (addr & ~TARGET_PAGE_MASK);
4133 /* warning: addr must be aligned */
4134 uint64_t ldq_phys(target_phys_addr_t addr)
4142 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4144 pd = IO_MEM_UNASSIGNED;
4146 pd = p->phys_offset;
4149 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4150 !(pd & IO_MEM_ROMD)) {
4152 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4154 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4155 #ifdef TARGET_WORDS_BIGENDIAN
4156 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4157 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4159 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4160 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4164 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4165 (addr & ~TARGET_PAGE_MASK);
4172 uint32_t ldub_phys(target_phys_addr_t addr)
4175 cpu_physical_memory_read(addr, &val, 1);
4179 /* warning: addr must be aligned */
4180 uint32_t lduw_phys(target_phys_addr_t addr)
4188 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4190 pd = IO_MEM_UNASSIGNED;
4192 pd = p->phys_offset;
4195 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4196 !(pd & IO_MEM_ROMD)) {
4198 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4200 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4201 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4204 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4205 (addr & ~TARGET_PAGE_MASK);
4211 /* warning: addr must be aligned. The ram page is not masked as dirty
4212 and the code inside is not invalidated. It is useful if the dirty
4213 bits are used to track modified PTEs */
4214 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
4221 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4223 pd = IO_MEM_UNASSIGNED;
4225 pd = p->phys_offset;
4228 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4229 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4231 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4232 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4234 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4235 ptr = qemu_get_ram_ptr(addr1);
4238 if (unlikely(in_migration)) {
4239 if (!cpu_physical_memory_is_dirty(addr1)) {
4240 /* invalidate code */
4241 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4243 cpu_physical_memory_set_dirty_flags(
4244 addr1, (0xff & ~CODE_DIRTY_FLAG));
4250 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4257 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4259 pd = IO_MEM_UNASSIGNED;
4261 pd = p->phys_offset;
4264 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4265 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4267 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4268 #ifdef TARGET_WORDS_BIGENDIAN
4269 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4270 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4272 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4273 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4276 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4277 (addr & ~TARGET_PAGE_MASK);
4282 /* warning: addr must be aligned */
4283 void stl_phys(target_phys_addr_t addr, uint32_t val)
4290 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4292 pd = IO_MEM_UNASSIGNED;
4294 pd = p->phys_offset;
4297 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4298 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4300 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4301 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4303 unsigned long addr1;
4304 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4306 ptr = qemu_get_ram_ptr(addr1);
4308 if (!cpu_physical_memory_is_dirty(addr1)) {
4309 /* invalidate code */
4310 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4312 cpu_physical_memory_set_dirty_flags(addr1,
4313 (0xff & ~CODE_DIRTY_FLAG));
4319 void stb_phys(target_phys_addr_t addr, uint32_t val)
4322 cpu_physical_memory_write(addr, &v, 1);
4325 /* warning: addr must be aligned */
4326 void stw_phys(target_phys_addr_t addr, uint32_t val)
4333 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4335 pd = IO_MEM_UNASSIGNED;
4337 pd = p->phys_offset;
4340 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4341 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4343 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4344 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4346 unsigned long addr1;
4347 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4349 ptr = qemu_get_ram_ptr(addr1);
4351 if (!cpu_physical_memory_is_dirty(addr1)) {
4352 /* invalidate code */
4353 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4355 cpu_physical_memory_set_dirty_flags(addr1,
4356 (0xff & ~CODE_DIRTY_FLAG));
4362 void stq_phys(target_phys_addr_t addr, uint64_t val)
4365 cpu_physical_memory_write(addr, &val, 8);
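/*
 * A minimal sketch of the ld*_phys/st*_phys helpers above, which are the
 * convenient way to touch a single guest-physical word (e.g. board or
 * firmware-setup code patching a value in RAM).  The example_* name and the
 * address are hypothetical.
 */
#if 0   /* illustrative sketch, not built */
static void example_patch_guest_word(target_phys_addr_t paddr, uint32_t val)
{
    uint32_t old = ldl_phys(paddr);     /* paddr must be 4-byte aligned */

    if (old != val) {
        stl_phys(paddr, val);           /* updates dirty flags, invalidates TBs */
    }
}
#endif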
4368 /* virtual memory access for debug (includes writing to ROM) */
4369 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4370 uint8_t *buf, int len, int is_write)
4373 target_phys_addr_t phys_addr;
4377 page = addr & TARGET_PAGE_MASK;
4378 phys_addr = cpu_get_phys_page_debug(env, page);
4379 /* if no physical page mapped, return an error */
4380 if (phys_addr == -1)
4382 l = (page + TARGET_PAGE_SIZE) - addr;
4385 phys_addr += (addr & ~TARGET_PAGE_MASK);
4387 cpu_physical_memory_write_rom(phys_addr, buf, l);
4389 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4398 /* in deterministic execution mode, instructions doing device I/Os
4399 must be at the end of the TB */
4400 void cpu_io_recompile(CPUState *env, void *retaddr)
4402 TranslationBlock *tb;
4404 target_ulong pc, cs_base;
4407 tb = tb_find_pc((unsigned long)retaddr);
4409 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4412 n = env->icount_decr.u16.low + tb->icount;
4413 cpu_restore_state(tb, env, (unsigned long)retaddr);
4414 /* Calculate how many instructions had been executed before the fault occurred. */
4416 n = n - env->icount_decr.u16.low;
4417 /* Generate a new TB ending on the I/O insn. */
4419 /* On MIPS and SH, delay slot instructions can only be restarted if
4420 they were already the first instruction in the TB. If this is not
4421 the first instruction in a TB, then re-execute the preceding branch. */
4423 #if defined(TARGET_MIPS)
4424 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4425 env->active_tc.PC -= 4;
4426 env->icount_decr.u16.low++;
4427 env->hflags &= ~MIPS_HFLAG_BMASK;
4429 #elif defined(TARGET_SH4)
4430 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4433 env->icount_decr.u16.low++;
4434 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4437 /* This should never happen. */
4438 if (n > CF_COUNT_MASK)
4439 cpu_abort(env, "TB too big during recompile");
4441 cflags = n | CF_LAST_IO;
4443 cs_base = tb->cs_base;
4445 tb_phys_invalidate(tb, -1);
4446 /* FIXME: In theory this could raise an exception. In practice
4447 we have already translated the block once so it's probably ok. */
4448 tb_gen_code(env, pc, cs_base, flags, cflags);
4449 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4450 the first in the TB) then we end up generating a whole new TB and
4451 repeating the fault, which is horribly inefficient.
4452 Better would be to execute just this insn uncached, or generate a second new TB. */
4454 cpu_resume_from_signal(env, NULL);
4457 #if !defined(CONFIG_USER_ONLY)
4459 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4461 int i, target_code_size, max_target_code_size;
4462 int direct_jmp_count, direct_jmp2_count, cross_page;
4463 TranslationBlock *tb;
4465 target_code_size = 0;
4466 max_target_code_size = 0;
4468 direct_jmp_count = 0;
4469 direct_jmp2_count = 0;
4470 for(i = 0; i < nb_tbs; i++) {
4472 target_code_size += tb->size;
4473 if (tb->size > max_target_code_size)
4474 max_target_code_size = tb->size;
4475 if (tb->page_addr[1] != -1)
4477 if (tb->tb_next_offset[0] != 0xffff) {
4479 if (tb->tb_next_offset[1] != 0xffff) {
4480 direct_jmp2_count++;
4484 /* XXX: avoid using doubles ? */
4485 cpu_fprintf(f, "Translation buffer state:\n");
4486 cpu_fprintf(f, "gen code size %td/%ld\n",
4487 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4488 cpu_fprintf(f, "TB count %d/%d\n",
4489 nb_tbs, code_gen_max_blocks);
4490 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4491 nb_tbs ? target_code_size / nb_tbs : 0,
4492 max_target_code_size);
4493 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4494 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4495 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4496 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4498 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4499 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4501 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4503 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4504 cpu_fprintf(f, "\nStatistics:\n");
4505 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4506 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4507 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4508 tcg_dump_info(f, cpu_fprintf);
4511 #define MMUSUFFIX _cmmu
4512 #define GETPC() NULL
4513 #define env cpu_single_env
4514 #define SOFTMMU_CODE_ACCESS
4517 #include "softmmu_template.h"
4520 #include "softmmu_template.h"
4523 #include "softmmu_template.h"
4526 #include "softmmu_template.h"