4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
32 #include "qemu/osdep.h"
33 #include "sysemu/kvm.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "exec/memory.h"
38 #include "sysemu/dma.h"
39 #include "exec/address-spaces.h"
40 #if defined(CONFIG_USER_ONLY)
42 #else /* !CONFIG_USER_ONLY */
43 #include "sysemu/xen-mapcache.h"
46 #include "exec/cpu-all.h"
48 #include "exec/cputlb.h"
49 #include "translate-all.h"
51 #include "exec/memory-internal.h"
53 //#define DEBUG_UNASSIGNED
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
58 static int in_migration;
60 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62 static MemoryRegion *system_memory;
63 static MemoryRegion *system_io;
65 AddressSpace address_space_io;
66 AddressSpace address_space_memory;
67 DMAContext dma_context_memory;
69 MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
70 static MemoryRegion io_mem_subpage_ram;
74 CPUArchState *first_cpu;
75 /* current CPU in the current thread. It is only valid inside
77 DEFINE_TLS(CPUArchState *,cpu_single_env);
78 /* 0 = Do not count executed instructions.
79 1 = Precise instruction counting.
80 2 = Adaptive rate instruction counting. */
83 #if !defined(CONFIG_USER_ONLY)
85 static MemoryRegionSection *phys_sections;
86 static unsigned phys_sections_nb, phys_sections_nb_alloc;
87 static uint16_t phys_section_unassigned;
88 static uint16_t phys_section_notdirty;
89 static uint16_t phys_section_rom;
90 static uint16_t phys_section_watch;
92 /* Simple allocator for PhysPageEntry nodes */
93 static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
94 static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
96 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
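/* Assuming PhysPageEntry.ptr is a 15-bit bitfield (as in the full source), the
 * >> 1 yields the largest index that fits in it, so this sentinel can never
 * collide with a real node or section number. */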
98 static void io_mem_init(void);
99 static void memory_map_init(void);
100 static void *qemu_safe_ram_ptr(ram_addr_t addr);
102 static MemoryRegion io_mem_watch;
105 #if !defined(CONFIG_USER_ONLY)
107 static void phys_map_node_reserve(unsigned nodes)
109 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
110 typedef PhysPageEntry Node[L2_SIZE];
111 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
112 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
113 phys_map_nodes_nb + nodes);
114 phys_map_nodes = g_renew(Node, phys_map_nodes,
115 phys_map_nodes_nb_alloc);
119 static uint16_t phys_map_node_alloc(void)
124 ret = phys_map_nodes_nb++;
125 assert(ret != PHYS_MAP_NODE_NIL);
126 assert(ret != phys_map_nodes_nb_alloc);
127 for (i = 0; i < L2_SIZE; ++i) {
128 phys_map_nodes[ret][i].is_leaf = 0;
129 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
134 static void phys_map_nodes_reset(void)
136 phys_map_nodes_nb = 0;
140 static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
141 hwaddr *nb, uint16_t leaf,
146 hwaddr step = (hwaddr)1 << (level * L2_BITS);
148 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
149 lp->ptr = phys_map_node_alloc();
150 p = phys_map_nodes[lp->ptr];
152 for (i = 0; i < L2_SIZE; i++) {
154 p[i].ptr = phys_section_unassigned;
158 p = phys_map_nodes[lp->ptr];
160 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
162 while (*nb && lp < &p[L2_SIZE]) {
163 if ((*index & (step - 1)) == 0 && *nb >= step) {
169 phys_page_set_level(lp, index, nb, leaf, level - 1);
175 static void phys_page_set(AddressSpaceDispatch *d,
176 hwaddr index, hwaddr nb,
179 /* Wildly overreserve - it doesn't matter much. */
180 phys_map_node_reserve(3 * P_L2_LEVELS);
182 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
185 MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
187 PhysPageEntry lp = d->phys_map;
190 uint16_t s_index = phys_section_unassigned;
192 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
193 if (lp.ptr == PHYS_MAP_NODE_NIL) {
196 p = phys_map_nodes[lp.ptr];
197 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
202 return &phys_sections[s_index];
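/* Sketch of the lookup above (illustrative, assuming the usual L2_BITS/L2_SIZE
 * definitions): each level consumes L2_BITS of the page index, so for a page
 * index "idx" the slot visited at level i is
 *
 *     slot = (idx >> (i * L2_BITS)) & (L2_SIZE - 1);
 *
 * Interior entries point into phys_map_nodes[]; once is_leaf is set, ptr is an
 * index into phys_sections[] and the walk stops. */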
205 bool memory_region_is_unassigned(MemoryRegion *mr)
207 return mr != &io_mem_ram && mr != &io_mem_rom
208 && mr != &io_mem_notdirty && !mr->rom_device
209 && mr != &io_mem_watch;
213 void cpu_exec_init_all(void)
215 #if !defined(CONFIG_USER_ONLY)
216 qemu_mutex_init(&ram_list.mutex);
222 #if !defined(CONFIG_USER_ONLY)
224 static int cpu_common_post_load(void *opaque, int version_id)
226 CPUArchState *env = opaque;
228 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
229 version_id is increased. */
230 env->interrupt_request &= ~0x01;
236 static const VMStateDescription vmstate_cpu_common = {
237 .name = "cpu_common",
239 .minimum_version_id = 1,
240 .minimum_version_id_old = 1,
241 .post_load = cpu_common_post_load,
242 .fields = (VMStateField []) {
243 VMSTATE_UINT32(halted, CPUArchState),
244 VMSTATE_UINT32(interrupt_request, CPUArchState),
245 VMSTATE_END_OF_LIST()
249 #define vmstate_cpu_common vmstate_dummy
252 CPUState *qemu_get_cpu(int index)
254 CPUArchState *env = first_cpu;
255 CPUState *cpu = NULL;
258 cpu = ENV_GET_CPU(env);
259 if (cpu->cpu_index == index) {
265 return env ? cpu : NULL;
268 void cpu_exec_init(CPUArchState *env)
270 CPUState *cpu = ENV_GET_CPU(env);
271 CPUClass *cc = CPU_GET_CLASS(cpu);
275 #if defined(CONFIG_USER_ONLY)
278 env->next_cpu = NULL;
281 while (*penv != NULL) {
282 penv = &(*penv)->next_cpu;
285 cpu->cpu_index = cpu_index;
287 QTAILQ_INIT(&env->breakpoints);
288 QTAILQ_INIT(&env->watchpoints);
289 #ifndef CONFIG_USER_ONLY
290 cpu->thread_id = qemu_get_thread_id();
293 #if defined(CONFIG_USER_ONLY)
296 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
297 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
298 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
299 cpu_save, cpu_load, env);
300 assert(cc->vmsd == NULL);
302 if (cc->vmsd != NULL) {
303 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
307 #if defined(TARGET_HAS_ICE)
308 #if defined(CONFIG_USER_ONLY)
309 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
311 tb_invalidate_phys_page_range(pc, pc + 1, 0);
314 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
316 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
317 (pc & ~TARGET_PAGE_MASK));
320 #endif /* TARGET_HAS_ICE */
322 #if defined(CONFIG_USER_ONLY)
323 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
328 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
329 int flags, CPUWatchpoint **watchpoint)
334 /* Add a watchpoint. */
335 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
336 int flags, CPUWatchpoint **watchpoint)
338 target_ulong len_mask = ~(len - 1);
341 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
342 if ((len & (len - 1)) || (addr & ~len_mask) ||
343 len == 0 || len > TARGET_PAGE_SIZE) {
344 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
345 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
348 wp = g_malloc(sizeof(*wp));
351 wp->len_mask = len_mask;
354 /* keep all GDB-injected watchpoints in front */
356 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
358 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
360 tlb_flush_page(env, addr);
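/* Usage sketch (illustrative, not from the original source): a GDB-style
 * 4-byte write watchpoint; the length must be a power of two and "addr" must
 * be aligned to it, or the sanity check above rejects it:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp);
 */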
367 /* Remove a specific watchpoint. */
368 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
371 target_ulong len_mask = ~(len - 1);
374 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
375 if (addr == wp->vaddr && len_mask == wp->len_mask
376 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
377 cpu_watchpoint_remove_by_ref(env, wp);
384 /* Remove a specific watchpoint by reference. */
385 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
387 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
389 tlb_flush_page(env, watchpoint->vaddr);
394 /* Remove all matching watchpoints. */
395 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
397 CPUWatchpoint *wp, *next;
399 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
400 if (wp->flags & mask)
401 cpu_watchpoint_remove_by_ref(env, wp);
406 /* Add a breakpoint. */
407 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
408 CPUBreakpoint **breakpoint)
410 #if defined(TARGET_HAS_ICE)
413 bp = g_malloc(sizeof(*bp));
418 /* keep all GDB-injected breakpoints in front */
420 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
422 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
424 breakpoint_invalidate(env, pc);
434 /* Remove a specific breakpoint. */
435 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
437 #if defined(TARGET_HAS_ICE)
440 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
441 if (bp->pc == pc && bp->flags == flags) {
442 cpu_breakpoint_remove_by_ref(env, bp);
452 /* Remove a specific breakpoint by reference. */
453 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
455 #if defined(TARGET_HAS_ICE)
456 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
458 breakpoint_invalidate(env, breakpoint->pc);
464 /* Remove all matching breakpoints. */
465 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
467 #if defined(TARGET_HAS_ICE)
468 CPUBreakpoint *bp, *next;
470 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
471 if (bp->flags & mask)
472 cpu_breakpoint_remove_by_ref(env, bp);
477 /* enable or disable single step mode. EXCP_DEBUG is returned by the
478 CPU loop after each instruction */
479 void cpu_single_step(CPUArchState *env, int enabled)
481 #if defined(TARGET_HAS_ICE)
482 if (env->singlestep_enabled != enabled) {
483 env->singlestep_enabled = enabled;
485 kvm_update_guest_debug(env, 0);
487 /* must flush all the translated code to avoid inconsistencies */
488 /* XXX: only flush what is necessary */
495 void cpu_reset_interrupt(CPUArchState *env, int mask)
497 env->interrupt_request &= ~mask;
500 void cpu_exit(CPUArchState *env)
502 CPUState *cpu = ENV_GET_CPU(env);
504 cpu->exit_request = 1;
505 cpu->tcg_exit_req = 1;
508 void cpu_abort(CPUArchState *env, const char *fmt, ...)
515 fprintf(stderr, "qemu: fatal: ");
516 vfprintf(stderr, fmt, ap);
517 fprintf(stderr, "\n");
518 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
519 if (qemu_log_enabled()) {
520 qemu_log("qemu: fatal: ");
521 qemu_log_vprintf(fmt, ap2);
523 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
529 #if defined(CONFIG_USER_ONLY)
531 struct sigaction act;
532 sigfillset(&act.sa_mask);
533 act.sa_handler = SIG_DFL;
534 sigaction(SIGABRT, &act, NULL);
540 CPUArchState *cpu_copy(CPUArchState *env)
542 CPUArchState *new_env = cpu_init(env->cpu_model_str);
543 CPUArchState *next_cpu = new_env->next_cpu;
544 #if defined(TARGET_HAS_ICE)
549 memcpy(new_env, env, sizeof(CPUArchState));
551 /* Preserve chaining. */
552 new_env->next_cpu = next_cpu;
554 /* Clone all break/watchpoints.
555 Note: Once we support ptrace with hw-debug register access, make sure
556 BP_CPU break/watchpoints are handled correctly on clone. */
557 QTAILQ_INIT(&env->breakpoints);
558 QTAILQ_INIT(&env->watchpoints);
559 #if defined(TARGET_HAS_ICE)
560 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
561 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
563 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
564 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
572 #if !defined(CONFIG_USER_ONLY)
573 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
578 /* we modify the TLB cache so that the dirty bit will be set again
579 when accessing the range */
580 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
581 /* Check that we don't span multiple blocks - this breaks the
582 address comparisons below. */
583 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
584 != (end - 1) - start) {
587 cpu_tlb_reset_dirty_all(start1, length);
591 /* Note: start and end must be within the same ram block. */
592 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
597 start &= TARGET_PAGE_MASK;
598 end = TARGET_PAGE_ALIGN(end);
600 length = end - start;
603 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
606 tlb_reset_dirty_range_all(start, end, length);
610 static int cpu_physical_memory_set_dirty_tracking(int enable)
613 in_migration = enable;
617 hwaddr memory_region_section_get_iotlb(CPUArchState *env,
618 MemoryRegionSection *section,
622 target_ulong *address)
627 if (memory_region_is_ram(section->mr)) {
629 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
630 + memory_region_section_addr(section, paddr);
631 if (!section->readonly) {
632 iotlb |= phys_section_notdirty;
634 iotlb |= phys_section_rom;
637 /* IO handlers are currently passed a physical address.
638 It would be nice to pass an offset from the base address
639 of that region. This would avoid having to special case RAM,
640 and avoid full address decoding in every device.
641 We can't use the high bits of pd for this because
642 IO_MEM_ROMD uses these as a ram address. */
643 iotlb = section - phys_sections;
644 iotlb += memory_region_section_addr(section, paddr);
647 /* Make accesses to pages with watchpoints go via the
648 watchpoint trap routines. */
649 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
650 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
651 /* Avoid trapping reads of pages with a write breakpoint. */
652 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
653 iotlb = phys_section_watch + paddr;
654 *address |= TLB_MMIO;
662 #endif /* defined(CONFIG_USER_ONLY) */
664 #if !defined(CONFIG_USER_ONLY)
666 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
667 typedef struct subpage_t {
670 uint16_t sub_section[TARGET_PAGE_SIZE];
673 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
675 static subpage_t *subpage_init(hwaddr base);
676 static void destroy_page_desc(uint16_t section_index)
678 MemoryRegionSection *section = &phys_sections[section_index];
679 MemoryRegion *mr = section->mr;
682 subpage_t *subpage = container_of(mr, subpage_t, iomem);
683 memory_region_destroy(&subpage->iomem);
688 static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
693 if (lp->ptr == PHYS_MAP_NODE_NIL) {
697 p = phys_map_nodes[lp->ptr];
698 for (i = 0; i < L2_SIZE; ++i) {
700 destroy_l2_mapping(&p[i], level - 1);
702 destroy_page_desc(p[i].ptr);
706 lp->ptr = PHYS_MAP_NODE_NIL;
709 static void destroy_all_mappings(AddressSpaceDispatch *d)
711 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
712 phys_map_nodes_reset();
715 static uint16_t phys_section_add(MemoryRegionSection *section)
717 if (phys_sections_nb == phys_sections_nb_alloc) {
718 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
719 phys_sections = g_renew(MemoryRegionSection, phys_sections,
720 phys_sections_nb_alloc);
722 phys_sections[phys_sections_nb] = *section;
723 return phys_sections_nb++;
726 static void phys_sections_clear(void)
728 phys_sections_nb = 0;
731 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
734 hwaddr base = section->offset_within_address_space
736 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
737 MemoryRegionSection subsection = {
738 .offset_within_address_space = base,
739 .size = TARGET_PAGE_SIZE,
743 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
745 if (!(existing->mr->subpage)) {
746 subpage = subpage_init(base);
747 subsection.mr = &subpage->iomem;
748 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
749 phys_section_add(&subsection));
751 subpage = container_of(existing->mr, subpage_t, iomem);
753 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
754 end = start + section->size - 1;
755 subpage_register(subpage, start, end, phys_section_add(section));
759 static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
761 hwaddr start_addr = section->offset_within_address_space;
762 ram_addr_t size = section->size;
764 uint16_t section_index = phys_section_add(section);
769 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
773 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
775 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
776 MemoryRegionSection now = *section, remain = *section;
778 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
779 || (now.size < TARGET_PAGE_SIZE)) {
780 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
781 - now.offset_within_address_space,
783 register_subpage(d, &now);
784 remain.size -= now.size;
785 remain.offset_within_address_space += now.size;
786 remain.offset_within_region += now.size;
788 while (remain.size >= TARGET_PAGE_SIZE) {
790 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
791 now.size = TARGET_PAGE_SIZE;
792 register_subpage(d, &now);
794 now.size &= TARGET_PAGE_MASK;
795 register_multipage(d, &now);
797 remain.size -= now.size;
798 remain.offset_within_address_space += now.size;
799 remain.offset_within_region += now.size;
803 register_subpage(d, &now);
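/* Summary of the splitting above (illustrative): an unaligned head is
 * registered as a subpage, the page-aligned middle goes through
 * register_multipage(), and any unaligned tail becomes a subpage again, so
 * phys_page_set() only ever deals in whole target pages. */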
807 void qemu_flush_coalesced_mmio_buffer(void)
810 kvm_flush_coalesced_mmio_buffer();
813 void qemu_mutex_lock_ramlist(void)
815 qemu_mutex_lock(&ram_list.mutex);
818 void qemu_mutex_unlock_ramlist(void)
820 qemu_mutex_unlock(&ram_list.mutex);
823 #if defined(__linux__) && !defined(TARGET_S390X)
827 #define HUGETLBFS_MAGIC 0x958458f6
829 static long gethugepagesize(const char *path)
835 ret = statfs(path, &fs);
836 } while (ret != 0 && errno == EINTR);
843 if (fs.f_type != HUGETLBFS_MAGIC)
844 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
849 static void *file_ram_alloc(RAMBlock *block,
859 unsigned long hpagesize;
861 hpagesize = gethugepagesize(path);
866 if (memory < hpagesize) {
870 if (kvm_enabled() && !kvm_has_sync_mmu()) {
871 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
875 filename = g_strdup_printf("%s/qemu_back_mem.XXXXXX", path);
877 fd = mkstemp(filename);
879 perror("unable to create backing store for hugepages");
886 memory = (memory+hpagesize-1) & ~(hpagesize-1);
889 * ftruncate is not supported by hugetlbfs in older
890 * hosts, so don't bother bailing out on errors.
891 * If anything goes wrong with it under other filesystems,
894 if (ftruncate(fd, memory))
898 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
899 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
900 * to sidestep this quirk.
902 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
903 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
905 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
907 if (area == MAP_FAILED) {
908 perror("file_ram_alloc: can't mmap RAM pages");
917 static ram_addr_t find_ram_offset(ram_addr_t size)
919 RAMBlock *block, *next_block;
920 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
922 if (QTAILQ_EMPTY(&ram_list.blocks))
925 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
926 ram_addr_t end, next = RAM_ADDR_MAX;
928 end = block->offset + block->length;
930 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
931 if (next_block->offset >= end) {
932 next = MIN(next, next_block->offset);
935 if (next - end >= size && next - end < mingap) {
941 if (offset == RAM_ADDR_MAX) {
942 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
950 ram_addr_t last_ram_offset(void)
955 QTAILQ_FOREACH(block, &ram_list.blocks, next)
956 last = MAX(last, block->offset + block->length);
961 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
964 QemuOpts *machine_opts;
966 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
967 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
969 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
970 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
972 perror("qemu_madvise");
973 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
974 "but dump_guest_core=off specified\n");
979 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
981 RAMBlock *new_block, *block;
984 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
985 if (block->offset == addr) {
991 assert(!new_block->idstr[0]);
994 char *id = qdev_get_dev_path(dev);
996 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1000 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1002 /* This assumes the iothread lock is taken here too. */
1003 qemu_mutex_lock_ramlist();
1004 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1005 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1006 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1011 qemu_mutex_unlock_ramlist();
1014 static int memory_try_enable_merging(void *addr, size_t len)
1018 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1019 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1020 /* disabled by the user */
1024 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1027 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1030 RAMBlock *block, *new_block;
1032 size = TARGET_PAGE_ALIGN(size);
1033 new_block = g_malloc0(sizeof(*new_block));
1035 /* This assumes the iothread lock is taken here too. */
1036 qemu_mutex_lock_ramlist();
1038 new_block->offset = find_ram_offset(size);
1040 new_block->host = host;
1041 new_block->flags |= RAM_PREALLOC_MASK;
1044 #if defined (__linux__) && !defined(TARGET_S390X)
1045 new_block->host = file_ram_alloc(new_block, size, mem_path);
1046 if (!new_block->host) {
1047 new_block->host = qemu_vmalloc(size);
1048 memory_try_enable_merging(new_block->host, size);
1051 fprintf(stderr, "-mem-path option unsupported\n");
1055 if (xen_enabled()) {
1056 xen_ram_alloc(new_block->offset, size, mr);
1057 } else if (kvm_enabled()) {
1058 /* some s390/kvm configurations have special constraints */
1059 new_block->host = kvm_vmalloc(size);
1061 new_block->host = qemu_vmalloc(size);
1063 memory_try_enable_merging(new_block->host, size);
1066 new_block->length = size;
1068 /* Keep the list sorted from biggest to smallest block. */
1069 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1070 if (block->length < new_block->length) {
1075 QTAILQ_INSERT_BEFORE(block, new_block, next);
1077 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1079 ram_list.mru_block = NULL;
1082 qemu_mutex_unlock_ramlist();
1084 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1085 last_ram_offset() >> TARGET_PAGE_BITS);
1086 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1087 0, size >> TARGET_PAGE_BITS);
1088 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1090 qemu_ram_setup_dump(new_block->host, size);
1091 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1094 kvm_setup_guest_memory(new_block->host, size);
1096 return new_block->offset;
1099 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1101 return qemu_ram_alloc_from_ptr(size, NULL, mr);
1104 void qemu_ram_free_from_ptr(ram_addr_t addr)
1108 /* This assumes the iothread lock is taken here too. */
1109 qemu_mutex_lock_ramlist();
1110 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1111 if (addr == block->offset) {
1112 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1113 ram_list.mru_block = NULL;
1119 qemu_mutex_unlock_ramlist();
1122 void qemu_ram_free(ram_addr_t addr)
1126 /* This assumes the iothread lock is taken here too. */
1127 qemu_mutex_lock_ramlist();
1128 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1129 if (addr == block->offset) {
1130 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1131 ram_list.mru_block = NULL;
1133 if (block->flags & RAM_PREALLOC_MASK) {
1135 } else if (mem_path) {
1136 #if defined (__linux__) && !defined(TARGET_S390X)
1138 munmap(block->host, block->length);
1141 qemu_vfree(block->host);
1147 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
1148 munmap(block->host, block->length);
1150 if (xen_enabled()) {
1151 xen_invalidate_map_cache_entry(block->host);
1153 qemu_vfree(block->host);
1161 qemu_mutex_unlock_ramlist();
1166 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1173 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1174 offset = addr - block->offset;
1175 if (offset < block->length) {
1176 vaddr = block->host + offset;
1177 if (block->flags & RAM_PREALLOC_MASK) {
1181 munmap(vaddr, length);
1183 #if defined(__linux__) && !defined(TARGET_S390X)
1186 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1189 flags |= MAP_PRIVATE;
1191 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1192 flags, block->fd, offset);
1194 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1195 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1202 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
1203 flags |= MAP_SHARED | MAP_ANONYMOUS;
1204 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1207 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1208 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1212 if (area != vaddr) {
1213 fprintf(stderr, "Could not remap addr: "
1214 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1218 memory_try_enable_merging(vaddr, length);
1219 qemu_ram_setup_dump(vaddr, length);
1225 #endif /* !_WIN32 */
1227 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1228 With the exception of the softmmu code in this file, this should
1229 only be used for local memory (e.g. video ram) that the device owns,
1230 and knows it isn't going to access beyond the end of the block.
1232 It should not be used for general purpose DMA.
1233 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1235 void *qemu_get_ram_ptr(ram_addr_t addr)
1239 /* The list is protected by the iothread lock here. */
1240 block = ram_list.mru_block;
1241 if (block && addr - block->offset < block->length) {
1244 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1245 if (addr - block->offset < block->length) {
1250 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1254 ram_list.mru_block = block;
1255 if (xen_enabled()) {
1256 /* We need to check if the requested address is in the RAM
1257 * because we don't want to map the entire memory in QEMU.
1258 * In that case just map until the end of the page.
1260 if (block->offset == 0) {
1261 return xen_map_cache(addr, 0, 0);
1262 } else if (block->host == NULL) {
1264 xen_map_cache(block->offset, block->length, 1);
1267 return block->host + (addr - block->offset);
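/* Usage sketch (illustrative; "vram_size" and "vram_region" are placeholders):
 * a device that owns its RAM block may cache a host pointer for direct access:
 *
 *     ram_addr_t vram_offset = qemu_ram_alloc(vram_size, &vram_region);
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 *     vram[0] = 0xff;    // host-side write, visible to the guest
 */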
1270 /* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1271 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1273 * ??? Is this still necessary?
1275 static void *qemu_safe_ram_ptr(ram_addr_t addr)
1279 /* The list is protected by the iothread lock here. */
1280 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1281 if (addr - block->offset < block->length) {
1282 if (xen_enabled()) {
1283 /* We need to check if the requested address is in the RAM
1284 * because we don't want to map the entire memory in QEMU.
1285 * In that case just map until the end of the page.
1287 if (block->offset == 0) {
1288 return xen_map_cache(addr, 0, 0);
1289 } else if (block->host == NULL) {
1291 xen_map_cache(block->offset, block->length, 1);
1294 return block->host + (addr - block->offset);
1298 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1304 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1305 * but takes a size argument */
1306 static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
1311 if (xen_enabled()) {
1312 return xen_map_cache(addr, *size, 1);
1316 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1317 if (addr - block->offset < block->length) {
1318 if (addr - block->offset + *size > block->length)
1319 *size = block->length - addr + block->offset;
1320 return block->host + (addr - block->offset);
1324 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1329 void qemu_put_ram_ptr(void *addr)
1331 trace_qemu_put_ram_ptr(addr);
1334 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1337 uint8_t *host = ptr;
1339 if (xen_enabled()) {
1340 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1344 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1345 /* This case appears when the block is not mapped. */
1346 if (block->host == NULL) {
1349 if (host - block->host < block->length) {
1350 *ram_addr = block->offset + (host - block->host);
1358 /* Some of the softmmu routines need to translate from a host pointer
1359 (typically a TLB entry) back to a ram offset. */
1360 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1362 ram_addr_t ram_addr;
1364 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1365 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1371 static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1374 #ifdef DEBUG_UNASSIGNED
1375 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1377 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1378 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
1383 static void unassigned_mem_write(void *opaque, hwaddr addr,
1384 uint64_t val, unsigned size)
1386 #ifdef DEBUG_UNASSIGNED
1387 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1389 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1390 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
1394 static const MemoryRegionOps unassigned_mem_ops = {
1395 .read = unassigned_mem_read,
1396 .write = unassigned_mem_write,
1397 .endianness = DEVICE_NATIVE_ENDIAN,
1400 static uint64_t error_mem_read(void *opaque, hwaddr addr,
1406 static void error_mem_write(void *opaque, hwaddr addr,
1407 uint64_t value, unsigned size)
1412 static const MemoryRegionOps error_mem_ops = {
1413 .read = error_mem_read,
1414 .write = error_mem_write,
1415 .endianness = DEVICE_NATIVE_ENDIAN,
1418 static const MemoryRegionOps rom_mem_ops = {
1419 .read = error_mem_read,
1420 .write = unassigned_mem_write,
1421 .endianness = DEVICE_NATIVE_ENDIAN,
1424 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1425 uint64_t val, unsigned size)
1428 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1429 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1430 #if !defined(CONFIG_USER_ONLY)
1431 tb_invalidate_phys_page_fast(ram_addr, size);
1432 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1437 stb_p(qemu_get_ram_ptr(ram_addr), val);
1440 stw_p(qemu_get_ram_ptr(ram_addr), val);
1443 stl_p(qemu_get_ram_ptr(ram_addr), val);
1448 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1449 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1450 /* we remove the notdirty callback only if the code has been
1452 if (dirty_flags == 0xff)
1453 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
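/* Flow recap (illustrative): writes to pages that still contain translated
 * code are steered here by the TLB; once the affected TBs are invalidated and
 * all dirty bits are set again (dirty_flags == 0xff), the TLB entry is marked
 * dirty so later writes to the page take the fast path instead. */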
1456 static const MemoryRegionOps notdirty_mem_ops = {
1457 .read = error_mem_read,
1458 .write = notdirty_mem_write,
1459 .endianness = DEVICE_NATIVE_ENDIAN,
1462 /* Generate a debug exception if a watchpoint has been hit. */
1463 static void check_watchpoint(int offset, int len_mask, int flags)
1465 CPUArchState *env = cpu_single_env;
1466 target_ulong pc, cs_base;
1471 if (env->watchpoint_hit) {
1472 /* We re-entered the check after replacing the TB. Now raise
1473 * the debug interrupt so that it will trigger after the
1474 * current instruction. */
1475 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1478 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1479 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1480 if ((vaddr == (wp->vaddr & len_mask) ||
1481 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1482 wp->flags |= BP_WATCHPOINT_HIT;
1483 if (!env->watchpoint_hit) {
1484 env->watchpoint_hit = wp;
1485 tb_check_watchpoint(env);
1486 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1487 env->exception_index = EXCP_DEBUG;
1490 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1491 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1492 cpu_resume_from_signal(env, NULL);
1496 wp->flags &= ~BP_WATCHPOINT_HIT;
1501 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1502 so these check for a hit then pass through to the normal out-of-line
1504 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1507 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1509 case 1: return ldub_phys(addr);
1510 case 2: return lduw_phys(addr);
1511 case 4: return ldl_phys(addr);
1516 static void watch_mem_write(void *opaque, hwaddr addr,
1517 uint64_t val, unsigned size)
1519 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1522 stb_phys(addr, val);
1525 stw_phys(addr, val);
1528 stl_phys(addr, val);
1534 static const MemoryRegionOps watch_mem_ops = {
1535 .read = watch_mem_read,
1536 .write = watch_mem_write,
1537 .endianness = DEVICE_NATIVE_ENDIAN,
1540 static uint64_t subpage_read(void *opaque, hwaddr addr,
1543 subpage_t *mmio = opaque;
1544 unsigned int idx = SUBPAGE_IDX(addr);
1545 MemoryRegionSection *section;
1546 #if defined(DEBUG_SUBPAGE)
1547 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1548 mmio, len, addr, idx);
1551 section = &phys_sections[mmio->sub_section[idx]];
1553 addr -= section->offset_within_address_space;
1554 addr += section->offset_within_region;
1555 return io_mem_read(section->mr, addr, len);
1558 static void subpage_write(void *opaque, hwaddr addr,
1559 uint64_t value, unsigned len)
1561 subpage_t *mmio = opaque;
1562 unsigned int idx = SUBPAGE_IDX(addr);
1563 MemoryRegionSection *section;
1564 #if defined(DEBUG_SUBPAGE)
1565 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1566 " idx %d value %"PRIx64"\n",
1567 __func__, mmio, len, addr, idx, value);
1570 section = &phys_sections[mmio->sub_section[idx]];
1572 addr -= section->offset_within_address_space;
1573 addr += section->offset_within_region;
1574 io_mem_write(section->mr, addr, value, len);
1577 static const MemoryRegionOps subpage_ops = {
1578 .read = subpage_read,
1579 .write = subpage_write,
1580 .endianness = DEVICE_NATIVE_ENDIAN,
1583 static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
1586 ram_addr_t raddr = addr;
1587 void *ptr = qemu_get_ram_ptr(raddr);
1589 case 1: return ldub_p(ptr);
1590 case 2: return lduw_p(ptr);
1591 case 4: return ldl_p(ptr);
1596 static void subpage_ram_write(void *opaque, hwaddr addr,
1597 uint64_t value, unsigned size)
1599 ram_addr_t raddr = addr;
1600 void *ptr = qemu_get_ram_ptr(raddr);
1602 case 1: return stb_p(ptr, value);
1603 case 2: return stw_p(ptr, value);
1604 case 4: return stl_p(ptr, value);
1609 static const MemoryRegionOps subpage_ram_ops = {
1610 .read = subpage_ram_read,
1611 .write = subpage_ram_write,
1612 .endianness = DEVICE_NATIVE_ENDIAN,
1615 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1620 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1622 idx = SUBPAGE_IDX(start);
1623 eidx = SUBPAGE_IDX(end);
1624 #if defined(DEBUG_SUBPAGE)
1625 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
1626 mmio, start, end, idx, eidx, memory);
1628 if (memory_region_is_ram(phys_sections[section].mr)) {
1629 MemoryRegionSection new_section = phys_sections[section];
1630 new_section.mr = &io_mem_subpage_ram;
1631 section = phys_section_add(&new_section);
1633 for (; idx <= eidx; idx++) {
1634 mmio->sub_section[idx] = section;
1640 static subpage_t *subpage_init(hwaddr base)
1644 mmio = g_malloc0(sizeof(subpage_t));
1647 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1648 "subpage", TARGET_PAGE_SIZE);
1649 mmio->iomem.subpage = true;
1650 #if defined(DEBUG_SUBPAGE)
1651 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1652 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
1654 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
1659 static uint16_t dummy_section(MemoryRegion *mr)
1661 MemoryRegionSection section = {
1663 .offset_within_address_space = 0,
1664 .offset_within_region = 0,
1668 return phys_section_add(&section);
1671 MemoryRegion *iotlb_to_region(hwaddr index)
1673 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
1676 static void io_mem_init(void)
1678 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
1679 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1680 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1681 "unassigned", UINT64_MAX);
1682 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1683 "notdirty", UINT64_MAX);
1684 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1685 "subpage-ram", UINT64_MAX);
1686 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1687 "watch", UINT64_MAX);
1690 static void mem_begin(MemoryListener *listener)
1692 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1694 destroy_all_mappings(d);
1695 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1698 static void core_begin(MemoryListener *listener)
1700 phys_sections_clear();
1701 phys_section_unassigned = dummy_section(&io_mem_unassigned);
1702 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1703 phys_section_rom = dummy_section(&io_mem_rom);
1704 phys_section_watch = dummy_section(&io_mem_watch);
1707 static void tcg_commit(MemoryListener *listener)
1711 /* since each CPU stores ram addresses in its TLB cache, we must
1712 reset the modified entries */
1714 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1719 static void core_log_global_start(MemoryListener *listener)
1721 cpu_physical_memory_set_dirty_tracking(1);
1724 static void core_log_global_stop(MemoryListener *listener)
1726 cpu_physical_memory_set_dirty_tracking(0);
1729 static void io_region_add(MemoryListener *listener,
1730 MemoryRegionSection *section)
1732 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1734 mrio->mr = section->mr;
1735 mrio->offset = section->offset_within_region;
1736 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
1737 section->offset_within_address_space, section->size);
1738 ioport_register(&mrio->iorange);
1741 static void io_region_del(MemoryListener *listener,
1742 MemoryRegionSection *section)
1744 isa_unassign_ioport(section->offset_within_address_space, section->size);
1747 static MemoryListener core_memory_listener = {
1748 .begin = core_begin,
1749 .log_global_start = core_log_global_start,
1750 .log_global_stop = core_log_global_stop,
1754 static MemoryListener io_memory_listener = {
1755 .region_add = io_region_add,
1756 .region_del = io_region_del,
1760 static MemoryListener tcg_memory_listener = {
1761 .commit = tcg_commit,
1764 void address_space_init_dispatch(AddressSpace *as)
1766 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1768 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1769 d->listener = (MemoryListener) {
1771 .region_add = mem_add,
1772 .region_nop = mem_add,
1776 memory_listener_register(&d->listener, as);
1779 void address_space_destroy_dispatch(AddressSpace *as)
1781 AddressSpaceDispatch *d = as->dispatch;
1783 memory_listener_unregister(&d->listener);
1784 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1786 as->dispatch = NULL;
1789 static void memory_map_init(void)
1791 system_memory = g_malloc(sizeof(*system_memory));
1792 memory_region_init(system_memory, "system", INT64_MAX);
1793 address_space_init(&address_space_memory, system_memory);
1794 address_space_memory.name = "memory";
1796 system_io = g_malloc(sizeof(*system_io));
1797 memory_region_init(system_io, "io", 65536);
1798 address_space_init(&address_space_io, system_io);
1799 address_space_io.name = "I/O";
1801 memory_listener_register(&core_memory_listener, &address_space_memory);
1802 memory_listener_register(&io_memory_listener, &address_space_io);
1803 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1805 dma_context_init(&dma_context_memory, &address_space_memory,
1809 MemoryRegion *get_system_memory(void)
1811 return system_memory;
1814 MemoryRegion *get_system_io(void)
1819 #endif /* !defined(CONFIG_USER_ONLY) */
1821 /* physical memory access (slow version, mainly for debug) */
1822 #if defined(CONFIG_USER_ONLY)
1823 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
1824 uint8_t *buf, int len, int is_write)
1831 page = addr & TARGET_PAGE_MASK;
1832 l = (page + TARGET_PAGE_SIZE) - addr;
1835 flags = page_get_flags(page);
1836 if (!(flags & PAGE_VALID))
1839 if (!(flags & PAGE_WRITE))
1841 /* XXX: this code should not depend on lock_user */
1842 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1845 unlock_user(p, addr, l);
1847 if (!(flags & PAGE_READ))
1849 /* XXX: this code should not depend on lock_user */
1850 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1853 unlock_user(p, addr, 0);
1864 static void invalidate_and_set_dirty(hwaddr addr,
1867 if (!cpu_physical_memory_is_dirty(addr)) {
1868 /* invalidate code */
1869 tb_invalidate_phys_page_range(addr, addr + length, 0);
1871 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1873 xen_modified_memory(addr, length);
1876 void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1877 int len, bool is_write)
1879 AddressSpaceDispatch *d = as->dispatch;
1884 MemoryRegionSection *section;
1887 page = addr & TARGET_PAGE_MASK;
1888 l = (page + TARGET_PAGE_SIZE) - addr;
1891 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
1894 if (!memory_region_is_ram(section->mr)) {
1896 addr1 = memory_region_section_addr(section, addr);
1897 /* XXX: could force cpu_single_env to NULL to avoid
1899 if (l >= 4 && ((addr1 & 3) == 0)) {
1900 /* 32 bit write access */
1902 io_mem_write(section->mr, addr1, val, 4);
1904 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1905 /* 16 bit write access */
1907 io_mem_write(section->mr, addr1, val, 2);
1910 /* 8 bit write access */
1912 io_mem_write(section->mr, addr1, val, 1);
1915 } else if (!section->readonly) {
1917 addr1 = memory_region_get_ram_addr(section->mr)
1918 + memory_region_section_addr(section, addr);
1920 ptr = qemu_get_ram_ptr(addr1);
1921 memcpy(ptr, buf, l);
1922 invalidate_and_set_dirty(addr1, l);
1923 qemu_put_ram_ptr(ptr);
1926 if (!(memory_region_is_ram(section->mr) ||
1927 memory_region_is_romd(section->mr))) {
1930 addr1 = memory_region_section_addr(section, addr);
1931 if (l >= 4 && ((addr1 & 3) == 0)) {
1932 /* 32 bit read access */
1933 val = io_mem_read(section->mr, addr1, 4);
1936 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1937 /* 16 bit read access */
1938 val = io_mem_read(section->mr, addr1, 2);
1942 /* 8 bit read access */
1943 val = io_mem_read(section->mr, addr1, 1);
1949 ptr = qemu_get_ram_ptr(section->mr->ram_addr
1950 + memory_region_section_addr(section,
1952 memcpy(buf, ptr, l);
1953 qemu_put_ram_ptr(ptr);
1962 void address_space_write(AddressSpace *as, hwaddr addr,
1963 const uint8_t *buf, int len)
1965 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1969 * address_space_read: read from an address space.
1971 * @as: #AddressSpace to be accessed
1972 * @addr: address within that address space
1973 * @buf: buffer with the data transferred
1975 void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
1977 address_space_rw(as, addr, buf, len, false);
1981 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
1982 int len, int is_write)
1984 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1987 /* used for ROM loading : can write in RAM and ROM */
1988 void cpu_physical_memory_write_rom(hwaddr addr,
1989 const uint8_t *buf, int len)
1991 AddressSpaceDispatch *d = address_space_memory.dispatch;
1995 MemoryRegionSection *section;
1998 page = addr & TARGET_PAGE_MASK;
1999 l = (page + TARGET_PAGE_SIZE) - addr;
2002 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
2004 if (!(memory_region_is_ram(section->mr) ||
2005 memory_region_is_romd(section->mr))) {
2008 unsigned long addr1;
2009 addr1 = memory_region_get_ram_addr(section->mr)
2010 + memory_region_section_addr(section, addr);
2012 ptr = qemu_get_ram_ptr(addr1);
2013 memcpy(ptr, buf, l);
2014 invalidate_and_set_dirty(addr1, l);
2015 qemu_put_ram_ptr(ptr);
2029 static BounceBuffer bounce;
2031 typedef struct MapClient {
2033 void (*callback)(void *opaque);
2034 QLIST_ENTRY(MapClient) link;
2037 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2038 = QLIST_HEAD_INITIALIZER(map_client_list);
2040 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2042 MapClient *client = g_malloc(sizeof(*client));
2044 client->opaque = opaque;
2045 client->callback = callback;
2046 QLIST_INSERT_HEAD(&map_client_list, client, link);
2050 static void cpu_unregister_map_client(void *_client)
2052 MapClient *client = (MapClient *)_client;
2054 QLIST_REMOVE(client, link);
2058 static void cpu_notify_map_clients(void)
2062 while (!QLIST_EMPTY(&map_client_list)) {
2063 client = QLIST_FIRST(&map_client_list);
2064 client->callback(client->opaque);
2065 cpu_unregister_map_client(client);
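/* Usage sketch (illustrative; "retry_dma" and "dev" are placeholders): a
 * caller that gets NULL back from address_space_map() can ask to be notified
 * when the bounce buffer is released:
 *
 *     static void retry_dma(void *opaque) { ... restart the transfer ... }
 *     ...
 *     cpu_register_map_client(dev, retry_dma);
 */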
2069 /* Map a physical memory region into a host virtual address.
2070 * May map a subset of the requested range, given by and returned in *plen.
2071 * May return NULL if resources needed to perform the mapping are exhausted.
2072 * Use only for reads OR writes - not for read-modify-write operations.
2073 * Use cpu_register_map_client() to know when retrying the map operation is
2074 * likely to succeed.
2076 void *address_space_map(AddressSpace *as,
2081 AddressSpaceDispatch *d = as->dispatch;
2086 MemoryRegionSection *section;
2087 ram_addr_t raddr = RAM_ADDR_MAX;
2092 page = addr & TARGET_PAGE_MASK;
2093 l = (page + TARGET_PAGE_SIZE) - addr;
2096 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
2098 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
2099 if (todo || bounce.buffer) {
2102 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2106 address_space_read(as, addr, bounce.buffer, l);
2110 return bounce.buffer;
2113 raddr = memory_region_get_ram_addr(section->mr)
2114 + memory_region_section_addr(section, addr);
2122 ret = qemu_ram_ptr_length(raddr, &rlen);
2127 /* Unmaps a memory region previously mapped by address_space_map().
2128 * Will also mark the memory as dirty if is_write == 1. access_len gives
2129 * the amount of memory that was actually read or written by the caller.
2131 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2132 int is_write, hwaddr access_len)
2134 if (buffer != bounce.buffer) {
2136 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
2137 while (access_len) {
2139 l = TARGET_PAGE_SIZE;
2142 invalidate_and_set_dirty(addr1, l);
2147 if (xen_enabled()) {
2148 xen_invalidate_map_cache_entry(buffer);
2153 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2155 qemu_vfree(bounce.buffer);
2156 bounce.buffer = NULL;
2157 cpu_notify_map_clients();
2160 void *cpu_physical_memory_map(hwaddr addr,
2164 return address_space_map(&address_space_memory, addr, plen, is_write);
2167 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2168 int is_write, hwaddr access_len)
2170 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
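/* Usage sketch (illustrative; "local_buf" is a placeholder): the usual
 * map -> access -> unmap pattern; note that the mapped length may come back
 * shorter than requested:
 *
 *     hwaddr plen = len;
 *     void *p = cpu_physical_memory_map(addr, &plen, 0);
 *     if (p) {
 *         memcpy(local_buf, p, plen);
 *         cpu_physical_memory_unmap(p, plen, 0, plen);
 *     }
 */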
2173 /* warning: addr must be aligned */
2174 static inline uint32_t ldl_phys_internal(hwaddr addr,
2175 enum device_endian endian)
2179 MemoryRegionSection *section;
2181 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
2183 if (!(memory_region_is_ram(section->mr) ||
2184 memory_region_is_romd(section->mr))) {
2186 addr = memory_region_section_addr(section, addr);
2187 val = io_mem_read(section->mr, addr, 4);
2188 #if defined(TARGET_WORDS_BIGENDIAN)
2189 if (endian == DEVICE_LITTLE_ENDIAN) {
2193 if (endian == DEVICE_BIG_ENDIAN) {
2199 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
2201 + memory_region_section_addr(section, addr));
2203 case DEVICE_LITTLE_ENDIAN:
2204 val = ldl_le_p(ptr);
2206 case DEVICE_BIG_ENDIAN:
2207 val = ldl_be_p(ptr);
2217 uint32_t ldl_phys(hwaddr addr)
2219 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2222 uint32_t ldl_le_phys(hwaddr addr)
2224 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2227 uint32_t ldl_be_phys(hwaddr addr)
2229 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2232 /* warning: addr must be aligned */
2233 static inline uint64_t ldq_phys_internal(hwaddr addr,
2234 enum device_endian endian)
2238 MemoryRegionSection *section;
2240 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
2242 if (!(memory_region_is_ram(section->mr) ||
2243 memory_region_is_romd(section->mr))) {
2245 addr = memory_region_section_addr(section, addr);
2247 /* XXX This is broken when device endian != cpu endian.
2248 Fix and add "endian" variable check */
2249 #ifdef TARGET_WORDS_BIGENDIAN
2250 val = io_mem_read(section->mr, addr, 4) << 32;
2251 val |= io_mem_read(section->mr, addr + 4, 4);
2253 val = io_mem_read(section->mr, addr, 4);
2254 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
2258 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
2260 + memory_region_section_addr(section, addr));
2262 case DEVICE_LITTLE_ENDIAN:
2263 val = ldq_le_p(ptr);
2265 case DEVICE_BIG_ENDIAN:
2266 val = ldq_be_p(ptr);
2276 uint64_t ldq_phys(hwaddr addr)
2278 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2281 uint64_t ldq_le_phys(hwaddr addr)
2283 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2286 uint64_t ldq_be_phys(hwaddr addr)
2288 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2292 uint32_t ldub_phys(hwaddr addr)
2295 cpu_physical_memory_read(addr, &val, 1);
2299 /* warning: addr must be aligned */
2300 static inline uint32_t lduw_phys_internal(hwaddr addr,
2301 enum device_endian endian)
2305 MemoryRegionSection *section;
2307 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
2309 if (!(memory_region_is_ram(section->mr) ||
2310 memory_region_is_romd(section->mr))) {
2312 addr = memory_region_section_addr(section, addr);
2313 val = io_mem_read(section->mr, addr, 2);
2314 #if defined(TARGET_WORDS_BIGENDIAN)
2315 if (endian == DEVICE_LITTLE_ENDIAN) {
2319 if (endian == DEVICE_BIG_ENDIAN) {
2325 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
2327 + memory_region_section_addr(section, addr));
2329 case DEVICE_LITTLE_ENDIAN:
2330 val = lduw_le_p(ptr);
2332 case DEVICE_BIG_ENDIAN:
2333 val = lduw_be_p(ptr);
2343 uint32_t lduw_phys(hwaddr addr)
2345 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2348 uint32_t lduw_le_phys(hwaddr addr)
2350 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2353 uint32_t lduw_be_phys(hwaddr addr)
2355 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2358 /* warning: addr must be aligned. The ram page is not masked as dirty
2359 and the code inside is not invalidated. It is useful if the dirty
2360 bits are used to track modified PTEs */
2361 void stl_phys_notdirty(hwaddr addr, uint32_t val)
2364 MemoryRegionSection *section;
2366 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
2368 if (!memory_region_is_ram(section->mr) || section->readonly) {
2369 addr = memory_region_section_addr(section, addr);
2370 if (memory_region_is_ram(section->mr)) {
2371 section = &phys_sections[phys_section_rom];
2373 io_mem_write(section->mr, addr, val, 4);
2375 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
2377 + memory_region_section_addr(section, addr);
2378 ptr = qemu_get_ram_ptr(addr1);
2381 if (unlikely(in_migration)) {
2382 if (!cpu_physical_memory_is_dirty(addr1)) {
2383 /* invalidate code */
2384 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2386 cpu_physical_memory_set_dirty_flags(
2387 addr1, (0xff & ~CODE_DIRTY_FLAG));
2393 void stq_phys_notdirty(hwaddr addr, uint64_t val)
2396 MemoryRegionSection *section;
2398 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
2400 if (!memory_region_is_ram(section->mr) || section->readonly) {
2401 addr = memory_region_section_addr(section, addr);
2402 if (memory_region_is_ram(section->mr)) {
2403 section = &phys_sections[phys_section_rom];
2405 #ifdef TARGET_WORDS_BIGENDIAN
2406 io_mem_write(section->mr, addr, val >> 32, 4);
2407 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
2409 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2410 io_mem_write(section->mr, addr + 4, val >> 32, 4);
2413 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
2415 + memory_region_section_addr(section, addr));
2420 /* warning: addr must be aligned */
2421 static inline void stl_phys_internal(hwaddr addr, uint32_t val,
2422 enum device_endian endian)
2425 MemoryRegionSection *section;
2427 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
2429 if (!memory_region_is_ram(section->mr) || section->readonly) {
2430 addr = memory_region_section_addr(section, addr);
2431 if (memory_region_is_ram(section->mr)) {
2432 section = &phys_sections[phys_section_rom];
2434 #if defined(TARGET_WORDS_BIGENDIAN)
2435 if (endian == DEVICE_LITTLE_ENDIAN) {
2439 if (endian == DEVICE_BIG_ENDIAN) {
2443 io_mem_write(section->mr, addr, val, 4);
2445 unsigned long addr1;
2446 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2447 + memory_region_section_addr(section, addr);
2449 ptr = qemu_get_ram_ptr(addr1);
2451 case DEVICE_LITTLE_ENDIAN:
2454 case DEVICE_BIG_ENDIAN:
2461 invalidate_and_set_dirty(addr1, 4);
2465 void stl_phys(hwaddr addr, uint32_t val)
2467 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2470 void stl_le_phys(hwaddr addr, uint32_t val)
2472 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2475 void stl_be_phys(hwaddr addr, uint32_t val)
2477 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2481 void stb_phys(hwaddr addr, uint32_t val)
2484 cpu_physical_memory_write(addr, &v, 1);
2487 /* warning: addr must be aligned */
2488 static inline void stw_phys_internal(hwaddr addr, uint32_t val,
2489 enum device_endian endian)
2492 MemoryRegionSection *section;
2494 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
2496 if (!memory_region_is_ram(section->mr) || section->readonly) {
2497 addr = memory_region_section_addr(section, addr);
2498 if (memory_region_is_ram(section->mr)) {
2499 section = &phys_sections[phys_section_rom];
2501 #if defined(TARGET_WORDS_BIGENDIAN)
2502 if (endian == DEVICE_LITTLE_ENDIAN) {
2506 if (endian == DEVICE_BIG_ENDIAN) {
2510 io_mem_write(section->mr, addr, val, 2);
2512 unsigned long addr1;
2513 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2514 + memory_region_section_addr(section, addr);
2516 ptr = qemu_get_ram_ptr(addr1);
2518 case DEVICE_LITTLE_ENDIAN:
2521 case DEVICE_BIG_ENDIAN:
2528 invalidate_and_set_dirty(addr1, 2);
2532 void stw_phys(hwaddr addr, uint32_t val)
2534 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2537 void stw_le_phys(hwaddr addr, uint32_t val)
2539 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2542 void stw_be_phys(hwaddr addr, uint32_t val)
2544 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2548 void stq_phys(hwaddr addr, uint64_t val)
2551 cpu_physical_memory_write(addr, &val, 8);
2554 void stq_le_phys(hwaddr addr, uint64_t val)
2556 val = cpu_to_le64(val);
2557 cpu_physical_memory_write(addr, &val, 8);
2560 void stq_be_phys(hwaddr addr, uint64_t val)
2562 val = cpu_to_be64(val);
2563 cpu_physical_memory_write(addr, &val, 8);
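/* Usage sketch (illustrative; "reg_addr" is a placeholder): the fixed-endian
 * helpers store and load guest-physical values independent of host and target
 * byte order:
 *
 *     stl_le_phys(reg_addr, 0x12345678);
 *     uint32_t v = ldl_le_phys(reg_addr);
 */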
2566 /* virtual memory access for debug (includes writing to ROM) */
2567 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
2568 uint8_t *buf, int len, int is_write)
2575 page = addr & TARGET_PAGE_MASK;
2576 phys_addr = cpu_get_phys_page_debug(env, page);
2577 /* if no physical page mapped, return an error */
2578 if (phys_addr == -1)
2580 l = (page + TARGET_PAGE_SIZE) - addr;
2583 phys_addr += (addr & ~TARGET_PAGE_MASK);
2585 cpu_physical_memory_write_rom(phys_addr, buf, l);
2587 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
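/* Usage sketch (illustrative): the gdbstub/monitor pattern for peeking at
 * guest virtual memory; a zero return means the whole range was accessible:
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) == 0) {
 *         ... buf now holds guest memory at vaddr ...
 *     }
 */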
2596 #if !defined(CONFIG_USER_ONLY)
2599 * A helper function for the _utterly broken_ virtio device model to find out if
2600 * it's running on a big endian machine. Don't do this at home kids!
2602 bool virtio_is_big_endian(void);
2603 bool virtio_is_big_endian(void)
2605 #if defined(TARGET_WORDS_BIGENDIAN)
2614 #ifndef CONFIG_USER_ONLY
2615 bool cpu_physical_memory_is_io(hwaddr phys_addr)
2617 MemoryRegionSection *section;
2619 section = phys_page_find(address_space_memory.dispatch,
2620 phys_addr >> TARGET_PAGE_BITS);
2622 return !(memory_region_is_ram(section->mr) ||
2623 memory_region_is_romd(section->mr));