4 /* Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. */
23 #include <sys/types.h>
27 #include "qemu-common.h"
32 #include "qemu/osdep.h"
33 #include "sysemu/kvm.h"
34 #include "hw/xen/xen.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "exec/memory.h"
38 #include "sysemu/dma.h"
39 #include "exec/address-spaces.h"
40 #if defined(CONFIG_USER_ONLY)
42 #else /* !CONFIG_USER_ONLY */
43 #include "sysemu/xen-mapcache.h"
46 #include "exec/cpu-all.h"
48 #include "exec/cputlb.h"
49 #include "translate-all.h"
51 #include "exec/memory-internal.h"
53 //#define DEBUG_SUBPAGE
55 #if !defined(CONFIG_USER_ONLY)
57 static int in_migration;
59 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
61 static MemoryRegion *system_memory;
62 static MemoryRegion *system_io;
64 AddressSpace address_space_io;
65 AddressSpace address_space_memory;
67 MemoryRegion io_mem_rom, io_mem_notdirty;
68 static MemoryRegion io_mem_unassigned;
72 CPUArchState *first_cpu;
73 /* current CPU in the current thread. It is only valid inside cpu_exec() */
75 DEFINE_TLS(CPUArchState *,cpu_single_env);
76 /* 0 = Do not count executed instructions.
77 1 = Precise instruction counting.
78 2 = Adaptive rate instruction counting. */
81 #if !defined(CONFIG_USER_ONLY)
83 typedef struct PhysPageEntry PhysPageEntry;
85 struct PhysPageEntry {
87 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
91 struct AddressSpaceDispatch {
92 /* This is a multi-level map on the physical address space.
93 * The bottom level has pointers to MemoryRegionSections. */
95 PhysPageEntry phys_map;
96 MemoryListener listener;
100 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
101 typedef struct subpage_t {
105 uint16_t sub_section[TARGET_PAGE_SIZE];
108 static MemoryRegionSection *phys_sections;
109 static unsigned phys_sections_nb, phys_sections_nb_alloc;
110 static uint16_t phys_section_unassigned;
111 static uint16_t phys_section_notdirty;
112 static uint16_t phys_section_rom;
113 static uint16_t phys_section_watch;
115 /* Simple allocator for PhysPageEntry nodes */
116 static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
117 static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
119 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
121 static void io_mem_init(void);
122 static void memory_map_init(void);
123 static void *qemu_safe_ram_ptr(ram_addr_t addr);
125 static MemoryRegion io_mem_watch;
128 #if !defined(CONFIG_USER_ONLY)
130 static void phys_map_node_reserve(unsigned nodes)
132 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
133 typedef PhysPageEntry Node[L2_SIZE];
134 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
135 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
136 phys_map_nodes_nb + nodes);
137 phys_map_nodes = g_renew(Node, phys_map_nodes,
138 phys_map_nodes_nb_alloc);
142 static uint16_t phys_map_node_alloc(void)
147 ret = phys_map_nodes_nb++;
148 assert(ret != PHYS_MAP_NODE_NIL);
149 assert(ret != phys_map_nodes_nb_alloc);
150 for (i = 0; i < L2_SIZE; ++i) {
151 phys_map_nodes[ret][i].is_leaf = 0;
152 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
157 static void phys_map_nodes_reset(void)
159 phys_map_nodes_nb = 0;
163 static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
164 hwaddr *nb, uint16_t leaf,
169 hwaddr step = (hwaddr)1 << (level * L2_BITS);
171 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
172 lp->ptr = phys_map_node_alloc();
173 p = phys_map_nodes[lp->ptr];
175 for (i = 0; i < L2_SIZE; i++) {
177 p[i].ptr = phys_section_unassigned;
181 p = phys_map_nodes[lp->ptr];
183 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
185 while (*nb && lp < &p[L2_SIZE]) {
186 if ((*index & (step - 1)) == 0 && *nb >= step) {
192 phys_page_set_level(lp, index, nb, leaf, level - 1);
198 static void phys_page_set(AddressSpaceDispatch *d,
199 hwaddr index, hwaddr nb,
202 /* Wildly overreserve - it doesn't matter much. */
203 phys_map_node_reserve(3 * P_L2_LEVELS);
205 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
208 static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
210 PhysPageEntry lp = d->phys_map;
214 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
215 if (lp.ptr == PHYS_MAP_NODE_NIL) {
216 return &phys_sections[phys_section_unassigned];
218 p = phys_map_nodes[lp.ptr];
219 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
221 return &phys_sections[lp.ptr];
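/*
 * Illustrative sketch (hypothetical helper): registering one guest page and
 * looking it up again exercises the radix tree built by phys_page_set() and
 * phys_page_find() above.
 */
static void __attribute__((unused)) phys_map_example(AddressSpaceDispatch *d,
                                                     uint16_t section_index)
{
    /* point guest physical page 0x1234 at the given section ... */
    phys_page_set(d, 0x1234, 1, section_index);
    /* ... and the lookup walks the same P_L2_LEVELS levels back down to it */
    assert(phys_page_find(d, 0x1234) == &phys_sections[section_index]);
}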
224 bool memory_region_is_unassigned(MemoryRegion *mr)
226 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
227 && mr != &io_mem_watch;
230 static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
232 bool resolve_subpage)
234 MemoryRegionSection *section;
237 section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
238 if (resolve_subpage && section->mr->subpage) {
239 subpage = container_of(section->mr, subpage_t, iomem);
240 section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
245 static MemoryRegionSection *
246 address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
247 hwaddr *plen, bool resolve_subpage)
249 MemoryRegionSection *section;
252 section = address_space_lookup_region(as, addr, resolve_subpage);
253 /* Compute offset within MemoryRegionSection */
254 addr -= section->offset_within_address_space;
256 /* Compute offset within MemoryRegion */
257 *xlat = addr + section->offset_within_region;
259 diff = int128_sub(section->mr->size, int128_make64(addr));
260 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
264 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
265 hwaddr *xlat, hwaddr *plen,
269 MemoryRegionSection *section;
274 section = address_space_translate_internal(as, addr, &addr, plen, true);
277 if (!mr->iommu_ops) {
281 iotlb = mr->iommu_ops->translate(mr, addr);
282 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
283 | (addr & iotlb.addr_mask));
284 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
285 if (!(iotlb.perm & (1 << is_write))) {
286 mr = &io_mem_unassigned;
290 as = iotlb.target_as;
298 MemoryRegionSection *
299 address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
302 MemoryRegionSection *section;
303 section = address_space_translate_internal(as, addr, xlat, plen, false);
305 assert(!section->mr->iommu_ops);
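/*
 * Minimal usage sketch (hypothetical caller): resolve one byte of a guest
 * physical address to the MemoryRegion that backs it, IOMMU hops included.
 */
static MemoryRegion * __attribute__((unused))
example_translate_byte(AddressSpace *as, hwaddr addr, bool is_write)
{
    hwaddr xlat, plen = 1;

    return address_space_translate(as, addr, &xlat, &plen, is_write);
}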
310 void cpu_exec_init_all(void)
312 #if !defined(CONFIG_USER_ONLY)
313 qemu_mutex_init(&ram_list.mutex);
319 #if !defined(CONFIG_USER_ONLY)
321 static int cpu_common_post_load(void *opaque, int version_id)
323 CPUState *cpu = opaque;
325 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
326 version_id is increased. */
327 cpu->interrupt_request &= ~0x01;
328 tlb_flush(cpu->env_ptr, 1);
333 const VMStateDescription vmstate_cpu_common = {
334 .name = "cpu_common",
336 .minimum_version_id = 1,
337 .minimum_version_id_old = 1,
338 .post_load = cpu_common_post_load,
339 .fields = (VMStateField []) {
340 VMSTATE_UINT32(halted, CPUState),
341 VMSTATE_UINT32(interrupt_request, CPUState),
342 VMSTATE_END_OF_LIST()
348 CPUState *qemu_get_cpu(int index)
350 CPUArchState *env = first_cpu;
351 CPUState *cpu = NULL;
354 cpu = ENV_GET_CPU(env);
355 if (cpu->cpu_index == index) {
361 return env ? cpu : NULL;
364 void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
366 CPUArchState *env = first_cpu;
369 func(ENV_GET_CPU(env), data);
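/*
 * Illustrative qemu_for_each_cpu() callback (hypothetical): count the CPUs.
 * A caller would do:  int n = 0; qemu_for_each_cpu(example_count_cpu, &n);
 */
static void __attribute__((unused)) example_count_cpu(CPUState *cpu, void *data)
{
    (*(int *)data)++;
}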
374 void cpu_exec_init(CPUArchState *env)
376 CPUState *cpu = ENV_GET_CPU(env);
377 CPUClass *cc = CPU_GET_CLASS(cpu);
381 #if defined(CONFIG_USER_ONLY)
384 env->next_cpu = NULL;
387 while (*penv != NULL) {
388 penv = &(*penv)->next_cpu;
391 cpu->cpu_index = cpu_index;
393 QTAILQ_INIT(&env->breakpoints);
394 QTAILQ_INIT(&env->watchpoints);
395 #ifndef CONFIG_USER_ONLY
396 cpu->thread_id = qemu_get_thread_id();
399 #if defined(CONFIG_USER_ONLY)
402 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
403 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
404 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
405 cpu_save, cpu_load, env);
406 assert(cc->vmsd == NULL);
408 if (cc->vmsd != NULL) {
409 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
413 #if defined(TARGET_HAS_ICE)
414 #if defined(CONFIG_USER_ONLY)
415 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
417 tb_invalidate_phys_page_range(pc, pc + 1, 0);
420 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
422 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
423 (pc & ~TARGET_PAGE_MASK));
426 #endif /* TARGET_HAS_ICE */
428 #if defined(CONFIG_USER_ONLY)
429 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
434 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
435 int flags, CPUWatchpoint **watchpoint)
440 /* Add a watchpoint. */
441 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
442 int flags, CPUWatchpoint **watchpoint)
444 target_ulong len_mask = ~(len - 1);
447 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
448 if ((len & (len - 1)) || (addr & ~len_mask) ||
449 len == 0 || len > TARGET_PAGE_SIZE) {
450 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
451 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
454 wp = g_malloc(sizeof(*wp));
457 wp->len_mask = len_mask;
460 /* keep all GDB-injected watchpoints in front */
462 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
464 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
466 tlb_flush_page(env, addr);
473 /* Remove a specific watchpoint. */
474 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
477 target_ulong len_mask = ~(len - 1);
480 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
481 if (addr == wp->vaddr && len_mask == wp->len_mask
482 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
483 cpu_watchpoint_remove_by_ref(env, wp);
490 /* Remove a specific watchpoint by reference. */
491 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
493 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
495 tlb_flush_page(env, watchpoint->vaddr);
500 /* Remove all matching watchpoints. */
501 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
503 CPUWatchpoint *wp, *next;
505 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
506 if (wp->flags & mask)
507 cpu_watchpoint_remove_by_ref(env, wp);
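/*
 * Illustrative use of the watchpoint API above (hypothetical caller; the
 * gdbstub normally drives these functions): watch an aligned 4-byte word
 * for writes, then remove the watchpoint again.
 */
static void __attribute__((unused)) example_watch_word(CPUArchState *env,
                                                       target_ulong addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) == 0) {
        /* ... let the guest run for a while ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
}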
512 /* Add a breakpoint. */
513 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
514 CPUBreakpoint **breakpoint)
516 #if defined(TARGET_HAS_ICE)
519 bp = g_malloc(sizeof(*bp));
524 /* keep all GDB-injected breakpoints in front */
526 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
528 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
530 breakpoint_invalidate(env, pc);
540 /* Remove a specific breakpoint. */
541 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
543 #if defined(TARGET_HAS_ICE)
546 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
547 if (bp->pc == pc && bp->flags == flags) {
548 cpu_breakpoint_remove_by_ref(env, bp);
558 /* Remove a specific breakpoint by reference. */
559 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
561 #if defined(TARGET_HAS_ICE)
562 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
564 breakpoint_invalidate(env, breakpoint->pc);
570 /* Remove all matching breakpoints. */
571 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
573 #if defined(TARGET_HAS_ICE)
574 CPUBreakpoint *bp, *next;
576 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
577 if (bp->flags & mask)
578 cpu_breakpoint_remove_by_ref(env, bp);
583 /* enable or disable single step mode. EXCP_DEBUG is returned by the
584 CPU loop after each instruction */
585 void cpu_single_step(CPUArchState *env, int enabled)
587 #if defined(TARGET_HAS_ICE)
588 if (env->singlestep_enabled != enabled) {
589 env->singlestep_enabled = enabled;
591 kvm_update_guest_debug(env, 0);
593 /* must flush all the translated code to avoid inconsistencies */
594 /* XXX: only flush what is necessary */
601 void cpu_exit(CPUArchState *env)
603 CPUState *cpu = ENV_GET_CPU(env);
605 cpu->exit_request = 1;
606 cpu->tcg_exit_req = 1;
609 void cpu_abort(CPUArchState *env, const char *fmt, ...)
616 fprintf(stderr, "qemu: fatal: ");
617 vfprintf(stderr, fmt, ap);
618 fprintf(stderr, "\n");
619 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
620 if (qemu_log_enabled()) {
621 qemu_log("qemu: fatal: ");
622 qemu_log_vprintf(fmt, ap2);
624 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
630 #if defined(CONFIG_USER_ONLY)
632 struct sigaction act;
633 sigfillset(&act.sa_mask);
634 act.sa_handler = SIG_DFL;
635 sigaction(SIGABRT, &act, NULL);
641 CPUArchState *cpu_copy(CPUArchState *env)
643 CPUArchState *new_env = cpu_init(env->cpu_model_str);
644 CPUArchState *next_cpu = new_env->next_cpu;
645 #if defined(TARGET_HAS_ICE)
650 memcpy(new_env, env, sizeof(CPUArchState));
652 /* Preserve chaining. */
653 new_env->next_cpu = next_cpu;
655 /* Clone all break/watchpoints.
656 Note: Once we support ptrace with hw-debug register access, make sure
657 BP_CPU break/watchpoints are handled correctly on clone. */
658 QTAILQ_INIT(&env->breakpoints);
659 QTAILQ_INIT(&env->watchpoints);
660 #if defined(TARGET_HAS_ICE)
661 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
662 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
664 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
665 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
673 #if !defined(CONFIG_USER_ONLY)
674 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
679 /* we modify the TLB cache so that the dirty bit will be set again
680 when accessing the range */
681 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
682 /* Check that we don't span multiple blocks - this breaks the
683 address comparisons below. */
684 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
685 != (end - 1) - start) {
688 cpu_tlb_reset_dirty_all(start1, length);
692 /* Note: start and end must be within the same ram block. */
693 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
698 start &= TARGET_PAGE_MASK;
699 end = TARGET_PAGE_ALIGN(end);
701 length = end - start;
704 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
707 tlb_reset_dirty_range_all(start, end, length);
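/*
 * Usage sketch (hypothetical display-device code, assuming the legacy
 * VGA_DIRTY_FLAG constant): after redrawing one page of video RAM, clear
 * its dirty bit so that subsequent guest writes are tracked again.
 */
static void __attribute__((unused)) example_clear_vga_dirty(ram_addr_t page)
{
    cpu_physical_memory_reset_dirty(page, page + TARGET_PAGE_SIZE,
                                    VGA_DIRTY_FLAG);
}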
711 static int cpu_physical_memory_set_dirty_tracking(int enable)
714 in_migration = enable;
718 hwaddr memory_region_section_get_iotlb(CPUArchState *env,
719 MemoryRegionSection *section,
721 hwaddr paddr, hwaddr xlat,
723 target_ulong *address)
728 if (memory_region_is_ram(section->mr)) {
730 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
732 if (!section->readonly) {
733 iotlb |= phys_section_notdirty;
735 iotlb |= phys_section_rom;
738 iotlb = section - phys_sections;
742 /* Make accesses to pages with watchpoints go via the
743 watchpoint trap routines. */
744 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
745 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
746 /* Avoid trapping reads of pages with a write breakpoint. */
747 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
748 iotlb = phys_section_watch + paddr;
749 *address |= TLB_MMIO;
757 #endif /* defined(CONFIG_USER_ONLY) */
759 #if !defined(CONFIG_USER_ONLY)
761 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
763 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
764 static void destroy_page_desc(uint16_t section_index)
766 MemoryRegionSection *section = &phys_sections[section_index];
767 MemoryRegion *mr = section->mr;
770 subpage_t *subpage = container_of(mr, subpage_t, iomem);
771 memory_region_destroy(&subpage->iomem);
776 static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
781 if (lp->ptr == PHYS_MAP_NODE_NIL) {
785 p = phys_map_nodes[lp->ptr];
786 for (i = 0; i < L2_SIZE; ++i) {
788 destroy_l2_mapping(&p[i], level - 1);
790 destroy_page_desc(p[i].ptr);
794 lp->ptr = PHYS_MAP_NODE_NIL;
797 static void destroy_all_mappings(AddressSpaceDispatch *d)
799 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
800 phys_map_nodes_reset();
803 static uint16_t phys_section_add(MemoryRegionSection *section)
805 /* The physical section number is ORed with a page-aligned
806 * pointer to produce the iotlb entries. Thus it should
807 * never overflow into the page-aligned value. */
809 assert(phys_sections_nb < TARGET_PAGE_SIZE);
811 if (phys_sections_nb == phys_sections_nb_alloc) {
812 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
813 phys_sections = g_renew(MemoryRegionSection, phys_sections,
814 phys_sections_nb_alloc);
816 phys_sections[phys_sections_nb] = *section;
817 return phys_sections_nb++;
820 static void phys_sections_clear(void)
822 phys_sections_nb = 0;
825 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
828 hwaddr base = section->offset_within_address_space
830 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
831 MemoryRegionSection subsection = {
832 .offset_within_address_space = base,
833 .size = int128_make64(TARGET_PAGE_SIZE),
837 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
839 if (!(existing->mr->subpage)) {
840 subpage = subpage_init(d->as, base);
841 subsection.mr = &subpage->iomem;
842 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
843 phys_section_add(&subsection));
845 subpage = container_of(existing->mr, subpage_t, iomem);
847 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
848 end = start + int128_get64(section->size) - 1;
849 subpage_register(subpage, start, end, phys_section_add(section));
853 static void register_multipage(AddressSpaceDispatch *d,
854 MemoryRegionSection *section)
856 hwaddr start_addr = section->offset_within_address_space;
857 uint16_t section_index = phys_section_add(section);
858 uint64_t num_pages = int128_get64(int128_rshift(section->size,
862 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
865 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
867 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
868 MemoryRegionSection now = *section, remain = *section;
869 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
871 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
872 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
873 - now.offset_within_address_space;
875 now.size = int128_min(int128_make64(left), now.size);
876 register_subpage(d, &now);
878 now.size = int128_zero();
880 while (int128_ne(remain.size, now.size)) {
881 remain.size = int128_sub(remain.size, now.size);
882 remain.offset_within_address_space += int128_get64(now.size);
883 remain.offset_within_region += int128_get64(now.size);
885 if (int128_lt(remain.size, page_size)) {
886 register_subpage(d, &now);
887 } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
888 now.size = page_size;
889 register_subpage(d, &now);
891 now.size = int128_and(now.size, int128_neg(page_size));
892 register_multipage(d, &now);
897 void qemu_flush_coalesced_mmio_buffer(void)
900 kvm_flush_coalesced_mmio_buffer();
903 void qemu_mutex_lock_ramlist(void)
905 qemu_mutex_lock(&ram_list.mutex);
908 void qemu_mutex_unlock_ramlist(void)
910 qemu_mutex_unlock(&ram_list.mutex);
913 #if defined(__linux__) && !defined(TARGET_S390X)
917 #define HUGETLBFS_MAGIC 0x958458f6
919 static long gethugepagesize(const char *path)
925 ret = statfs(path, &fs);
926 } while (ret != 0 && errno == EINTR);
933 if (fs.f_type != HUGETLBFS_MAGIC)
934 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
939 static void *file_ram_alloc(RAMBlock *block,
944 char *sanitized_name;
951 unsigned long hpagesize;
953 hpagesize = gethugepagesize(path);
958 if (memory < hpagesize) {
962 if (kvm_enabled() && !kvm_has_sync_mmu()) {
963 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
967 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
968 sanitized_name = g_strdup(block->mr->name);
969 for (c = sanitized_name; *c != '\0'; c++) {
974 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
976 g_free(sanitized_name);
978 fd = mkstemp(filename);
980 perror("unable to create backing store for hugepages");
987 memory = (memory+hpagesize-1) & ~(hpagesize-1);
990 /* ftruncate is not supported by hugetlbfs in older
991 * hosts, so don't bother bailing out on errors.
992 * If anything goes wrong with it under other filesystems, mmap will fail. */
995 if (ftruncate(fd, memory))
999 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
1000 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
1001 * to sidestep this quirk. */
1003 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
1004 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
1006 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1008 if (area == MAP_FAILED) {
1009 perror("file_ram_alloc: can't mmap RAM pages");
1018 static ram_addr_t find_ram_offset(ram_addr_t size)
1020 RAMBlock *block, *next_block;
1021 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1023 assert(size != 0); /* it would hand out same offset multiple times */
1025 if (QTAILQ_EMPTY(&ram_list.blocks))
1028 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1029 ram_addr_t end, next = RAM_ADDR_MAX;
1031 end = block->offset + block->length;
1033 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
1034 if (next_block->offset >= end) {
1035 next = MIN(next, next_block->offset);
1038 if (next - end >= size && next - end < mingap) {
1040 mingap = next - end;
1044 if (offset == RAM_ADDR_MAX) {
1045 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1053 ram_addr_t last_ram_offset(void)
1056 ram_addr_t last = 0;
1058 QTAILQ_FOREACH(block, &ram_list.blocks, next)
1059 last = MAX(last, block->offset + block->length);
1064 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1067 QemuOpts *machine_opts;
1069 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1070 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1072 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
1073 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1075 perror("qemu_madvise");
1076 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1077 "but dump_guest_core=off specified\n");
1082 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1084 RAMBlock *new_block, *block;
1087 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1088 if (block->offset == addr) {
1094 assert(!new_block->idstr[0]);
1097 char *id = qdev_get_dev_path(dev);
1099 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1103 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1105 /* This assumes the iothread lock is taken here too. */
1106 qemu_mutex_lock_ramlist();
1107 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1108 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1109 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1114 qemu_mutex_unlock_ramlist();
1117 static int memory_try_enable_merging(void *addr, size_t len)
1121 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1122 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1123 /* disabled by the user */
1127 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1130 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1133 RAMBlock *block, *new_block;
1135 size = TARGET_PAGE_ALIGN(size);
1136 new_block = g_malloc0(sizeof(*new_block));
1138 /* This assumes the iothread lock is taken here too. */
1139 qemu_mutex_lock_ramlist();
1141 new_block->offset = find_ram_offset(size);
1143 new_block->host = host;
1144 new_block->flags |= RAM_PREALLOC_MASK;
1147 #if defined (__linux__) && !defined(TARGET_S390X)
1148 new_block->host = file_ram_alloc(new_block, size, mem_path);
1149 if (!new_block->host) {
1150 new_block->host = qemu_anon_ram_alloc(size);
1151 memory_try_enable_merging(new_block->host, size);
1154 fprintf(stderr, "-mem-path option unsupported\n");
1158 if (xen_enabled()) {
1159 xen_ram_alloc(new_block->offset, size, mr);
1160 } else if (kvm_enabled()) {
1161 /* some s390/kvm configurations have special constraints */
1162 new_block->host = kvm_ram_alloc(size);
1164 new_block->host = qemu_anon_ram_alloc(size);
1166 memory_try_enable_merging(new_block->host, size);
1169 new_block->length = size;
1171 /* Keep the list sorted from biggest to smallest block. */
1172 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1173 if (block->length < new_block->length) {
1178 QTAILQ_INSERT_BEFORE(block, new_block, next);
1180 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1182 ram_list.mru_block = NULL;
1185 qemu_mutex_unlock_ramlist();
1187 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1188 last_ram_offset() >> TARGET_PAGE_BITS);
1189 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1190 0, size >> TARGET_PAGE_BITS);
1191 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1193 qemu_ram_setup_dump(new_block->host, size);
1194 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1197 kvm_setup_guest_memory(new_block->host, size);
1199 return new_block->offset;
1202 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1204 return qemu_ram_alloc_from_ptr(size, NULL, mr);
1207 void qemu_ram_free_from_ptr(ram_addr_t addr)
1211 /* This assumes the iothread lock is taken here too. */
1212 qemu_mutex_lock_ramlist();
1213 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1214 if (addr == block->offset) {
1215 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1216 ram_list.mru_block = NULL;
1222 qemu_mutex_unlock_ramlist();
1225 void qemu_ram_free(ram_addr_t addr)
1229 /* This assumes the iothread lock is taken here too. */
1230 qemu_mutex_lock_ramlist();
1231 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1232 if (addr == block->offset) {
1233 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1234 ram_list.mru_block = NULL;
1236 if (block->flags & RAM_PREALLOC_MASK) {
1238 } else if (mem_path) {
1239 #if defined (__linux__) && !defined(TARGET_S390X)
1241 munmap(block->host, block->length);
1244 qemu_anon_ram_free(block->host, block->length);
1250 if (xen_enabled()) {
1251 xen_invalidate_map_cache_entry(block->host);
1253 qemu_anon_ram_free(block->host, block->length);
1260 qemu_mutex_unlock_ramlist();
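/*
 * Sketch (hypothetical; 'mr' already initialised by its owner): allocate a
 * 64 KiB scratch RAM block and release it again.  Devices normally reach
 * these functions through the MemoryRegion API rather than calling them
 * directly.
 */
static void __attribute__((unused)) example_ram_alloc_free(MemoryRegion *mr)
{
    ram_addr_t offset = qemu_ram_alloc(64 * 1024, mr);

    qemu_ram_free(offset);
}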
1265 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1272 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1273 offset = addr - block->offset;
1274 if (offset < block->length) {
1275 vaddr = block->host + offset;
1276 if (block->flags & RAM_PREALLOC_MASK) {
1280 munmap(vaddr, length);
1282 #if defined(__linux__) && !defined(TARGET_S390X)
1285 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1288 flags |= MAP_PRIVATE;
1290 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1291 flags, block->fd, offset);
1293 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1294 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1301 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
1302 flags |= MAP_SHARED | MAP_ANONYMOUS;
1303 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1306 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1307 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1311 if (area != vaddr) {
1312 fprintf(stderr, "Could not remap addr: "
1313 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1317 memory_try_enable_merging(vaddr, length);
1318 qemu_ram_setup_dump(vaddr, length);
1324 #endif /* !_WIN32 */
1326 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1327 With the exception of the softmmu code in this file, this should
1328 only be used for local memory (e.g. video ram) that the device owns,
1329 and knows it isn't going to access beyond the end of the block.
1331 It should not be used for general purpose DMA.
1332 Use cpu_physical_memory_map/cpu_physical_memory_rw instead. */
1334 void *qemu_get_ram_ptr(ram_addr_t addr)
1338 /* The list is protected by the iothread lock here. */
1339 block = ram_list.mru_block;
1340 if (block && addr - block->offset < block->length) {
1343 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1344 if (addr - block->offset < block->length) {
1349 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1353 ram_list.mru_block = block;
1354 if (xen_enabled()) {
1355 /* We need to check if the requested address is in RAM
1356 * because we don't want to map the entire memory in QEMU.
1357 * In that case just map until the end of the page. */
1359 if (block->offset == 0) {
1360 return xen_map_cache(addr, 0, 0);
1361 } else if (block->host == NULL) {
1363 xen_map_cache(block->offset, block->length, 1);
1366 return block->host + (addr - block->offset);
1369 /* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1370 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1372 * ??? Is this still necessary? */
1374 static void *qemu_safe_ram_ptr(ram_addr_t addr)
1378 /* The list is protected by the iothread lock here. */
1379 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1380 if (addr - block->offset < block->length) {
1381 if (xen_enabled()) {
1382 /* We need to check if the requested address is in RAM
1383 * because we don't want to map the entire memory in QEMU.
1384 * In that case just map until the end of the page. */
1386 if (block->offset == 0) {
1387 return xen_map_cache(addr, 0, 0);
1388 } else if (block->host == NULL) {
1390 xen_map_cache(block->offset, block->length, 1);
1393 return block->host + (addr - block->offset);
1397 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1403 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1404 * but takes a size argument */
1405 static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
1410 if (xen_enabled()) {
1411 return xen_map_cache(addr, *size, 1);
1415 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1416 if (addr - block->offset < block->length) {
1417 if (addr - block->offset + *size > block->length)
1418 *size = block->length - addr + block->offset;
1419 return block->host + (addr - block->offset);
1423 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1428 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1431 uint8_t *host = ptr;
1433 if (xen_enabled()) {
1434 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1438 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1439 /* This case can happen when the block is not mapped. */
1440 if (block->host == NULL) {
1443 if (host - block->host < block->length) {
1444 *ram_addr = block->offset + (host - block->host);
1452 /* Some of the softmmu routines need to translate from a host pointer
1453 (typically a TLB entry) back to a ram offset. */
1454 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1456 ram_addr_t ram_addr;
1458 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1459 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1465 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1466 uint64_t val, unsigned size)
1469 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1470 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1471 tb_invalidate_phys_page_fast(ram_addr, size);
1472 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1476 stb_p(qemu_get_ram_ptr(ram_addr), val);
1479 stw_p(qemu_get_ram_ptr(ram_addr), val);
1482 stl_p(qemu_get_ram_ptr(ram_addr), val);
1487 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1488 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1489 /* we remove the notdirty callback only if the code has been flushed */
1491 if (dirty_flags == 0xff)
1492 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
1495 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1496 unsigned size, bool is_write)
1501 static const MemoryRegionOps notdirty_mem_ops = {
1502 .write = notdirty_mem_write,
1503 .valid.accepts = notdirty_mem_accepts,
1504 .endianness = DEVICE_NATIVE_ENDIAN,
1507 /* Generate a debug exception if a watchpoint has been hit. */
1508 static void check_watchpoint(int offset, int len_mask, int flags)
1510 CPUArchState *env = cpu_single_env;
1511 target_ulong pc, cs_base;
1516 if (env->watchpoint_hit) {
1517 /* We re-entered the check after replacing the TB. Now raise
1518 * the debug interrupt so that it will trigger after the
1519 * current instruction. */
1520 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1523 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1524 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1525 if ((vaddr == (wp->vaddr & len_mask) ||
1526 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1527 wp->flags |= BP_WATCHPOINT_HIT;
1528 if (!env->watchpoint_hit) {
1529 env->watchpoint_hit = wp;
1530 tb_check_watchpoint(env);
1531 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1532 env->exception_index = EXCP_DEBUG;
1535 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1536 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1537 cpu_resume_from_signal(env, NULL);
1541 wp->flags &= ~BP_WATCHPOINT_HIT;
1546 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1547 so these check for a hit then pass through to the normal out-of-line handlers. */
1549 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1552 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1554 case 1: return ldub_phys(addr);
1555 case 2: return lduw_phys(addr);
1556 case 4: return ldl_phys(addr);
1561 static void watch_mem_write(void *opaque, hwaddr addr,
1562 uint64_t val, unsigned size)
1564 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1567 stb_phys(addr, val);
1570 stw_phys(addr, val);
1573 stl_phys(addr, val);
1579 static const MemoryRegionOps watch_mem_ops = {
1580 .read = watch_mem_read,
1581 .write = watch_mem_write,
1582 .endianness = DEVICE_NATIVE_ENDIAN,
1585 static uint64_t subpage_read(void *opaque, hwaddr addr,
1588 subpage_t *subpage = opaque;
1591 #if defined(DEBUG_SUBPAGE)
1592 printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
1593 subpage, len, addr);
1595 address_space_read(subpage->as, addr + subpage->base, buf, len);
1608 static void subpage_write(void *opaque, hwaddr addr,
1609 uint64_t value, unsigned len)
1611 subpage_t *subpage = opaque;
1614 #if defined(DEBUG_SUBPAGE)
1615 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1616 " value %"PRIx64"\n",
1617 __func__, subpage, len, addr, value);
1632 address_space_write(subpage->as, addr + subpage->base, buf, len);
1635 static bool subpage_accepts(void *opaque, hwaddr addr,
1636 unsigned size, bool is_write)
1638 subpage_t *subpage = opaque;
1639 #if defined(DEBUG_SUBPAGE)
1640 printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
1641 __func__, subpage, is_write ? 'w' : 'r', size, addr);
1644 return address_space_access_valid(subpage->as, addr + subpage->base,
1648 static const MemoryRegionOps subpage_ops = {
1649 .read = subpage_read,
1650 .write = subpage_write,
1651 .valid.accepts = subpage_accepts,
1652 .endianness = DEVICE_NATIVE_ENDIAN,
1655 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1660 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1662 idx = SUBPAGE_IDX(start);
1663 eidx = SUBPAGE_IDX(end);
1664 #if defined(DEBUG_SUBPAGE)
1665 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
1666 mmio, start, end, idx, eidx, section);
1668 for (; idx <= eidx; idx++) {
1669 mmio->sub_section[idx] = section;
1675 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1679 mmio = g_malloc0(sizeof(subpage_t));
1683 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1684 "subpage", TARGET_PAGE_SIZE);
1685 mmio->iomem.subpage = true;
1686 #if defined(DEBUG_SUBPAGE)
1687 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1688 mmio, base, TARGET_PAGE_SIZE);
1690 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
1695 static uint16_t dummy_section(MemoryRegion *mr)
1697 MemoryRegionSection section = {
1699 .offset_within_address_space = 0,
1700 .offset_within_region = 0,
1701 .size = int128_2_64(),
1704 return phys_section_add(&section);
1707 MemoryRegion *iotlb_to_region(hwaddr index)
1709 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
1712 static void io_mem_init(void)
1714 memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1715 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1716 "unassigned", UINT64_MAX);
1717 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1718 "notdirty", UINT64_MAX);
1719 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1720 "watch", UINT64_MAX);
1723 static void mem_begin(MemoryListener *listener)
1725 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1727 destroy_all_mappings(d);
1728 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1731 static void core_begin(MemoryListener *listener)
1733 phys_sections_clear();
1734 phys_section_unassigned = dummy_section(&io_mem_unassigned);
1735 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1736 phys_section_rom = dummy_section(&io_mem_rom);
1737 phys_section_watch = dummy_section(&io_mem_watch);
1740 static void tcg_commit(MemoryListener *listener)
1744 /* since each CPU stores ram addresses in its TLB cache, we must
1745 reset the modified entries */
1747 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1752 static void core_log_global_start(MemoryListener *listener)
1754 cpu_physical_memory_set_dirty_tracking(1);
1757 static void core_log_global_stop(MemoryListener *listener)
1759 cpu_physical_memory_set_dirty_tracking(0);
1762 static void io_region_add(MemoryListener *listener,
1763 MemoryRegionSection *section)
1765 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1767 mrio->mr = section->mr;
1768 mrio->offset = section->offset_within_region;
1769 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
1770 section->offset_within_address_space,
1771 int128_get64(section->size));
1772 ioport_register(&mrio->iorange);
1775 static void io_region_del(MemoryListener *listener,
1776 MemoryRegionSection *section)
1778 isa_unassign_ioport(section->offset_within_address_space,
1779 int128_get64(section->size));
1782 static MemoryListener core_memory_listener = {
1783 .begin = core_begin,
1784 .log_global_start = core_log_global_start,
1785 .log_global_stop = core_log_global_stop,
1789 static MemoryListener io_memory_listener = {
1790 .region_add = io_region_add,
1791 .region_del = io_region_del,
1795 static MemoryListener tcg_memory_listener = {
1796 .commit = tcg_commit,
1799 void address_space_init_dispatch(AddressSpace *as)
1801 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1803 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1804 d->listener = (MemoryListener) {
1806 .region_add = mem_add,
1807 .region_nop = mem_add,
1812 memory_listener_register(&d->listener, as);
1815 void address_space_destroy_dispatch(AddressSpace *as)
1817 AddressSpaceDispatch *d = as->dispatch;
1819 memory_listener_unregister(&d->listener);
1820 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1822 as->dispatch = NULL;
1825 static void memory_map_init(void)
1827 system_memory = g_malloc(sizeof(*system_memory));
1828 memory_region_init(system_memory, "system", INT64_MAX);
1829 address_space_init(&address_space_memory, system_memory, "memory");
1831 system_io = g_malloc(sizeof(*system_io));
1832 memory_region_init(system_io, "io", 65536);
1833 address_space_init(&address_space_io, system_io, "I/O");
1835 memory_listener_register(&core_memory_listener, &address_space_memory);
1836 memory_listener_register(&io_memory_listener, &address_space_io);
1837 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1840 MemoryRegion *get_system_memory(void)
1842 return system_memory;
1845 MemoryRegion *get_system_io(void)
1850 #endif /* !defined(CONFIG_USER_ONLY) */
1852 /* physical memory access (slow version, mainly for debug) */
1853 #if defined(CONFIG_USER_ONLY)
1854 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
1855 uint8_t *buf, int len, int is_write)
1862 page = addr & TARGET_PAGE_MASK;
1863 l = (page + TARGET_PAGE_SIZE) - addr;
1866 flags = page_get_flags(page);
1867 if (!(flags & PAGE_VALID))
1870 if (!(flags & PAGE_WRITE))
1872 /* XXX: this code should not depend on lock_user */
1873 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1876 unlock_user(p, addr, l);
1878 if (!(flags & PAGE_READ))
1880 /* XXX: this code should not depend on lock_user */
1881 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1884 unlock_user(p, addr, 0);
1895 static void invalidate_and_set_dirty(hwaddr addr,
1898 if (!cpu_physical_memory_is_dirty(addr)) {
1899 /* invalidate code */
1900 tb_invalidate_phys_page_range(addr, addr + length, 0);
1902 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1904 xen_modified_memory(addr, length);
1907 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1909 if (memory_region_is_ram(mr)) {
1910 return !(is_write && mr->readonly);
1912 if (memory_region_is_romd(mr)) {
1919 static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
1921 if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
1924 if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
1930 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1931 int len, bool is_write)
1942 mr = address_space_translate(as, addr, &addr1, &l, is_write);
1945 if (!memory_access_is_direct(mr, is_write)) {
1946 l = memory_access_size(mr, l, addr1);
1947 /* XXX: could force cpu_single_env to NULL to avoid potential bugs */
1950 /* 32 bit write access */
1952 error |= io_mem_write(mr, addr1, val, 4);
1953 } else if (l == 2) {
1954 /* 16 bit write access */
1956 error |= io_mem_write(mr, addr1, val, 2);
1958 /* 8 bit write access */
1960 error |= io_mem_write(mr, addr1, val, 1);
1963 addr1 += memory_region_get_ram_addr(mr);
1965 ptr = qemu_get_ram_ptr(addr1);
1966 memcpy(ptr, buf, l);
1967 invalidate_and_set_dirty(addr1, l);
1970 if (!memory_access_is_direct(mr, is_write)) {
1972 l = memory_access_size(mr, l, addr1);
1974 /* 32 bit read access */
1975 error |= io_mem_read(mr, addr1, &val, 4);
1977 } else if (l == 2) {
1978 /* 16 bit read access */
1979 error |= io_mem_read(mr, addr1, &val, 2);
1982 /* 8 bit read access */
1983 error |= io_mem_read(mr, addr1, &val, 1);
1988 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
1989 memcpy(buf, ptr, l);
2000 bool address_space_write(AddressSpace *as, hwaddr addr,
2001 const uint8_t *buf, int len)
2003 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2006 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2008 return address_space_rw(as, addr, buf, len, false);
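/*
 * Minimal DMA-style sketch (hypothetical device code): write a 32-bit status
 * word, in guest-visible little-endian byte order, into guest memory via the
 * system address space.
 */
static void __attribute__((unused)) example_write_status(hwaddr gpa,
                                                         uint32_t status)
{
    uint32_t le = cpu_to_le32(status);

    address_space_write(&address_space_memory, gpa, (uint8_t *)&le, sizeof(le));
}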
2012 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2013 int len, int is_write)
2015 address_space_rw(&address_space_memory, addr, buf, len, is_write);
2018 /* used for ROM loading: can write in RAM and ROM */
2019 void cpu_physical_memory_write_rom(hwaddr addr,
2020 const uint8_t *buf, int len)
2029 mr = address_space_translate(&address_space_memory,
2030 addr, &addr1, &l, true);
2032 if (!(memory_region_is_ram(mr) ||
2033 memory_region_is_romd(mr))) {
2036 addr1 += memory_region_get_ram_addr(mr);
2038 ptr = qemu_get_ram_ptr(addr1);
2039 memcpy(ptr, buf, l);
2040 invalidate_and_set_dirty(addr1, l);
2054 static BounceBuffer bounce;
2056 typedef struct MapClient {
2058 void (*callback)(void *opaque);
2059 QLIST_ENTRY(MapClient) link;
2062 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2063 = QLIST_HEAD_INITIALIZER(map_client_list);
2065 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2067 MapClient *client = g_malloc(sizeof(*client));
2069 client->opaque = opaque;
2070 client->callback = callback;
2071 QLIST_INSERT_HEAD(&map_client_list, client, link);
2075 static void cpu_unregister_map_client(void *_client)
2077 MapClient *client = (MapClient *)_client;
2079 QLIST_REMOVE(client, link);
2083 static void cpu_notify_map_clients(void)
2087 while (!QLIST_EMPTY(&map_client_list)) {
2088 client = QLIST_FIRST(&map_client_list);
2089 client->callback(client->opaque);
2090 cpu_unregister_map_client(client);
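/*
 * Illustrative map-client callback (hypothetical device code): when the
 * single bounce buffer is busy, address_space_map() returns NULL; a caller
 * can register a callback like this with cpu_register_map_client() and
 * retry the mapping once it fires.
 */
static void __attribute__((unused)) example_map_retry(void *opaque)
{
    /* 'opaque' would be the device state that wants to retry its mapping */
}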
2094 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2101 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2102 if (!memory_access_is_direct(mr, is_write)) {
2103 l = memory_access_size(mr, l, addr);
2104 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2115 /* Map a physical memory region into a host virtual address.
2116 * May map a subset of the requested range, given by and returned in *plen.
2117 * May return NULL if resources needed to perform the mapping are exhausted.
2118 * Use only for reads OR writes - not for read-modify-write operations.
2119 * Use cpu_register_map_client() to know when retrying the map operation is
2120 * likely to succeed. */
2122 void *address_space_map(AddressSpace *as,
2131 ram_addr_t raddr = RAM_ADDR_MAX;
2137 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2139 if (!memory_access_is_direct(mr, is_write)) {
2140 if (todo || bounce.buffer) {
2143 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2147 address_space_read(as, addr, bounce.buffer, l);
2151 return bounce.buffer;
2154 raddr = memory_region_get_ram_addr(mr) + xlat;
2156 if (memory_region_get_ram_addr(mr) + xlat != raddr + todo) {
2166 ret = qemu_ram_ptr_length(raddr, &rlen);
2171 /* Unmaps a memory region previously mapped by address_space_map().
2172 * Will also mark the memory as dirty if is_write == 1. access_len gives
2173 * the amount of memory that was actually read or written by the caller. */
2175 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2176 int is_write, hwaddr access_len)
2178 if (buffer != bounce.buffer) {
2180 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
2181 while (access_len) {
2183 l = TARGET_PAGE_SIZE;
2186 invalidate_and_set_dirty(addr1, l);
2191 if (xen_enabled()) {
2192 xen_invalidate_map_cache_entry(buffer);
2197 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2199 qemu_vfree(bounce.buffer);
2200 bounce.buffer = NULL;
2201 cpu_notify_map_clients();
2204 void *cpu_physical_memory_map(hwaddr addr,
2208 return address_space_map(&address_space_memory, addr, plen, is_write);
2211 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2212 int is_write, hwaddr access_len)
2214 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
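/*
 * Typical map/unmap pattern (hypothetical caller; error handling trimmed).
 * A real caller must cope with a NULL return and with *plen being shortened.
 */
static bool __attribute__((unused)) example_map_and_clear(hwaddr gpa, hwaddr len)
{
    hwaddr plen = len;
    void *p = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);

    if (!p) {
        return false;
    }
    memset(p, 0, plen);
    cpu_physical_memory_unmap(p, plen, 1, plen);
    return plen == len;
}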
2217 /* warning: addr must be aligned */
2218 static inline uint32_t ldl_phys_internal(hwaddr addr,
2219 enum device_endian endian)
2227 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2229 if (l < 4 || !memory_access_is_direct(mr, false)) {
2231 io_mem_read(mr, addr1, &val, 4);
2232 #if defined(TARGET_WORDS_BIGENDIAN)
2233 if (endian == DEVICE_LITTLE_ENDIAN) {
2237 if (endian == DEVICE_BIG_ENDIAN) {
2243 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2247 case DEVICE_LITTLE_ENDIAN:
2248 val = ldl_le_p(ptr);
2250 case DEVICE_BIG_ENDIAN:
2251 val = ldl_be_p(ptr);
2261 uint32_t ldl_phys(hwaddr addr)
2263 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2266 uint32_t ldl_le_phys(hwaddr addr)
2268 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2271 uint32_t ldl_be_phys(hwaddr addr)
2273 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2276 /* warning: addr must be aligned */
2277 static inline uint64_t ldq_phys_internal(hwaddr addr,
2278 enum device_endian endian)
2286 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2288 if (l < 8 || !memory_access_is_direct(mr, false)) {
2290 io_mem_read(mr, addr1, &val, 8);
2291 #if defined(TARGET_WORDS_BIGENDIAN)
2292 if (endian == DEVICE_LITTLE_ENDIAN) {
2296 if (endian == DEVICE_BIG_ENDIAN) {
2302 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2306 case DEVICE_LITTLE_ENDIAN:
2307 val = ldq_le_p(ptr);
2309 case DEVICE_BIG_ENDIAN:
2310 val = ldq_be_p(ptr);
2320 uint64_t ldq_phys(hwaddr addr)
2322 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2325 uint64_t ldq_le_phys(hwaddr addr)
2327 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2330 uint64_t ldq_be_phys(hwaddr addr)
2332 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2336 uint32_t ldub_phys(hwaddr addr)
2339 cpu_physical_memory_read(addr, &val, 1);
2343 /* warning: addr must be aligned */
2344 static inline uint32_t lduw_phys_internal(hwaddr addr,
2345 enum device_endian endian)
2353 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2355 if (l < 2 || !memory_access_is_direct(mr, false)) {
2357 io_mem_read(mr, addr1, &val, 2);
2358 #if defined(TARGET_WORDS_BIGENDIAN)
2359 if (endian == DEVICE_LITTLE_ENDIAN) {
2363 if (endian == DEVICE_BIG_ENDIAN) {
2369 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2373 case DEVICE_LITTLE_ENDIAN:
2374 val = lduw_le_p(ptr);
2376 case DEVICE_BIG_ENDIAN:
2377 val = lduw_be_p(ptr);
2387 uint32_t lduw_phys(hwaddr addr)
2389 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2392 uint32_t lduw_le_phys(hwaddr addr)
2394 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2397 uint32_t lduw_be_phys(hwaddr addr)
2399 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2402 /* warning: addr must be aligned. The ram page is not masked as dirty
2403 and the code inside is not invalidated. It is useful if the dirty
2404 bits are used to track modified PTEs */
2405 void stl_phys_notdirty(hwaddr addr, uint32_t val)
2412 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2414 if (l < 4 || !memory_access_is_direct(mr, true)) {
2415 io_mem_write(mr, addr1, val, 4);
2417 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2418 ptr = qemu_get_ram_ptr(addr1);
2421 if (unlikely(in_migration)) {
2422 if (!cpu_physical_memory_is_dirty(addr1)) {
2423 /* invalidate code */
2424 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2426 cpu_physical_memory_set_dirty_flags(
2427 addr1, (0xff & ~CODE_DIRTY_FLAG));
2433 /* warning: addr must be aligned */
2434 static inline void stl_phys_internal(hwaddr addr, uint32_t val,
2435 enum device_endian endian)
2442 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2444 if (l < 4 || !memory_access_is_direct(mr, true)) {
2445 #if defined(TARGET_WORDS_BIGENDIAN)
2446 if (endian == DEVICE_LITTLE_ENDIAN) {
2450 if (endian == DEVICE_BIG_ENDIAN) {
2454 io_mem_write(mr, addr1, val, 4);
2457 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2458 ptr = qemu_get_ram_ptr(addr1);
2460 case DEVICE_LITTLE_ENDIAN:
2463 case DEVICE_BIG_ENDIAN:
2470 invalidate_and_set_dirty(addr1, 4);
2474 void stl_phys(hwaddr addr, uint32_t val)
2476 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2479 void stl_le_phys(hwaddr addr, uint32_t val)
2481 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2484 void stl_be_phys(hwaddr addr, uint32_t val)
2486 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2490 void stb_phys(hwaddr addr, uint32_t val)
2493 cpu_physical_memory_write(addr, &v, 1);
2496 /* warning: addr must be aligned */
2497 static inline void stw_phys_internal(hwaddr addr, uint32_t val,
2498 enum device_endian endian)
2505 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2507 if (l < 2 || !memory_access_is_direct(mr, true)) {
2508 #if defined(TARGET_WORDS_BIGENDIAN)
2509 if (endian == DEVICE_LITTLE_ENDIAN) {
2513 if (endian == DEVICE_BIG_ENDIAN) {
2517 io_mem_write(mr, addr1, val, 2);
2520 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2521 ptr = qemu_get_ram_ptr(addr1);
2523 case DEVICE_LITTLE_ENDIAN:
2526 case DEVICE_BIG_ENDIAN:
2533 invalidate_and_set_dirty(addr1, 2);
2537 void stw_phys(hwaddr addr, uint32_t val)
2539 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2542 void stw_le_phys(hwaddr addr, uint32_t val)
2544 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2547 void stw_be_phys(hwaddr addr, uint32_t val)
2549 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2553 void stq_phys(hwaddr addr, uint64_t val)
2556 cpu_physical_memory_write(addr, &val, 8);
2559 void stq_le_phys(hwaddr addr, uint64_t val)
2561 val = cpu_to_le64(val);
2562 cpu_physical_memory_write(addr, &val, 8);
2565 void stq_be_phys(hwaddr addr, uint64_t val)
2567 val = cpu_to_be64(val);
2568 cpu_physical_memory_write(addr, &val, 8);
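/*
 * Illustration of the endian-specific helpers above (hypothetical round trip,
 * assuming 'gpa' points at guest RAM): the _le/_be variants always store the
 * value in a fixed byte order, independent of the target's native order.
 */
static void __attribute__((unused)) example_endian_helpers(hwaddr gpa)
{
    stl_le_phys(gpa, 0x12345678);
    assert(ldl_le_phys(gpa) == 0x12345678);
}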
2571 /* virtual memory access for debug (includes writing to ROM) */
2572 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
2573 uint8_t *buf, int len, int is_write)
2580 page = addr & TARGET_PAGE_MASK;
2581 phys_addr = cpu_get_phys_page_debug(env, page);
2582 /* if no physical page mapped, return an error */
2583 if (phys_addr == -1)
2585 l = (page + TARGET_PAGE_SIZE) - addr;
2588 phys_addr += (addr & ~TARGET_PAGE_MASK);
2590 cpu_physical_memory_write_rom(phys_addr, buf, l);
2592 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
2601 #if !defined(CONFIG_USER_ONLY)
2604 /* A helper function for the _utterly broken_ virtio device model to find out if
2605 * it's running on a big endian machine. Don't do this at home kids! */
2607 bool virtio_is_big_endian(void);
2608 bool virtio_is_big_endian(void)
2610 #if defined(TARGET_WORDS_BIGENDIAN)
2619 #ifndef CONFIG_USER_ONLY
2620 bool cpu_physical_memory_is_io(hwaddr phys_addr)
2625 mr = address_space_translate(&address_space_memory,
2626 phys_addr, &phys_addr, &l, false);
2628 return !(memory_region_is_ram(mr) ||
2629 memory_region_is_romd(mr));
2632 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2636 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2637 func(block->host, block->offset, block->length, opaque);
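/*
 * Illustrative RAMBlockIterFunc (hypothetical; signature inferred from the
 * call above): add up the size of all RAM blocks.
 * Usage:  ram_addr_t total = 0; qemu_ram_foreach_block(example_sum_ram, &total);
 */
static void __attribute__((unused)) example_sum_ram(void *host_addr,
                                                    ram_addr_t offset,
                                                    ram_addr_t length,
                                                    void *opaque)
{
    *(ram_addr_t *)opaque += length;
}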