4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <sys/types.h>
25 #include "qemu-common.h"
30 #include "qemu/osdep.h"
31 #include "sysemu/kvm.h"
32 #include "sysemu/hax.h"
33 #include "sysemu/sysemu.h"
34 #include "hw/xen/xen.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "qemu/error-report.h"
38 #include "exec/memory.h"
39 #include "sysemu/dma.h"
40 #include "exec/address-spaces.h"
41 #if defined(CONFIG_USER_ONLY)
43 #else /* !CONFIG_USER_ONLY */
44 #include "sysemu/xen-mapcache.h"
47 #include "exec/cpu-all.h"
49 #include "exec/cputlb.h"
50 #include "translate-all.h"
52 #include "exec/memory-internal.h"
53 #include "exec/ram_addr.h"
55 #include "qemu/range.h"
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 static bool in_migration;
62 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
64 static MemoryRegion *system_memory;
65 static MemoryRegion *system_io;
67 AddressSpace address_space_io;
68 AddressSpace address_space_memory;
70 MemoryRegion io_mem_rom, io_mem_notdirty;
71 static MemoryRegion io_mem_unassigned;
73 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
74 #define RAM_PREALLOC (1 << 0)
76 /* RAM is mmap-ed with MAP_SHARED */
77 #define RAM_SHARED (1 << 1)
81 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
82 /* current CPU in the current thread. It is only valid inside cpu_exec() */
84 DEFINE_TLS(CPUState *, current_cpu);
85 /* 0 = Do not count executed instructions.
86 1 = Precise instruction counting.
87 2 = Adaptive rate instruction counting. */
90 #if !defined(CONFIG_USER_ONLY)
92 typedef struct PhysPageEntry PhysPageEntry;
94 struct PhysPageEntry {
95 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
97 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
101 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
103 /* Size of the L2 (and L3, etc) page tables. */
104 #define ADDR_SPACE_BITS 64
107 #define P_L2_SIZE (1 << P_L2_BITS)
109 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
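/* Worked example (hedged; assumes 4 KiB target pages, i.e. TARGET_PAGE_BITS == 12):
 * the 64-bit address space leaves 64 - 12 = 52 page-index bits, so
 * P_L2_LEVELS = ((52 - 1) / P_L2_BITS) + 1 radix levels, each level consuming
 * P_L2_BITS bits of the index. If P_L2_BITS is 9, that comes to 6 levels. */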
111 typedef PhysPageEntry Node[P_L2_SIZE];
113 typedef struct PhysPageMap {
114 unsigned sections_nb;
115 unsigned sections_nb_alloc;
117 unsigned nodes_nb_alloc;
119 MemoryRegionSection *sections;
122 struct AddressSpaceDispatch {
123 /* This is a multi-level map on the physical address space.
124 * The bottom level has pointers to MemoryRegionSections.
126 PhysPageEntry phys_map;
131 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
132 typedef struct subpage_t {
136 uint16_t sub_section[TARGET_PAGE_SIZE];
139 #define PHYS_SECTION_UNASSIGNED 0
140 #define PHYS_SECTION_NOTDIRTY 1
141 #define PHYS_SECTION_ROM 2
142 #define PHYS_SECTION_WATCH 3
144 static void io_mem_init(void);
145 static void memory_map_init(void);
146 static void tcg_commit(MemoryListener *listener);
148 static MemoryRegion io_mem_watch;
151 #if !defined(CONFIG_USER_ONLY)
153 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
155 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
156 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
157 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
158 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
162 static uint32_t phys_map_node_alloc(PhysPageMap *map)
167 ret = map->nodes_nb++;
168 assert(ret != PHYS_MAP_NODE_NIL);
169 assert(ret != map->nodes_nb_alloc);
170 for (i = 0; i < P_L2_SIZE; ++i) {
171 map->nodes[ret][i].skip = 1;
172 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
177 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
178 hwaddr *index, hwaddr *nb, uint16_t leaf,
183 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
185 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
186 lp->ptr = phys_map_node_alloc(map);
187 p = map->nodes[lp->ptr];
189 for (i = 0; i < P_L2_SIZE; i++) {
191 p[i].ptr = PHYS_SECTION_UNASSIGNED;
195 p = map->nodes[lp->ptr];
197 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
199 while (*nb && lp < &p[P_L2_SIZE]) {
200 if ((*index & (step - 1)) == 0 && *nb >= step) {
206 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
212 static void phys_page_set(AddressSpaceDispatch *d,
213 hwaddr index, hwaddr nb,
216 /* Wildly overreserve - it doesn't matter much. */
217 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
219 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
222 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
223 * and update our entry so we can skip it and go directly to the destination.
225 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
227 unsigned valid_ptr = P_L2_SIZE;
232 if (lp->ptr == PHYS_MAP_NODE_NIL) {
237 for (i = 0; i < P_L2_SIZE; i++) {
238 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
245 phys_page_compact(&p[i], nodes, compacted);
249 /* We can only compress if there's only one child. */
254 assert(valid_ptr < P_L2_SIZE);
256 /* Don't compress if it won't fit in the # of bits we have. */
257 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
261 lp->ptr = p[valid_ptr].ptr;
262 if (!p[valid_ptr].skip) {
263 /* If our only child is a leaf, make this a leaf. */
264 /* By design, we should have made this node a leaf to begin with so we
265 * should never reach here.
266 * But since it's so simple to handle this, let's do it just in case we change this rule.
271 lp->skip += p[valid_ptr].skip;
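/* Hedged illustration: after compaction, a chain of single-child interior
 * nodes collapses into its parent. E.g. a node with skip = 1 whose only
 * valid child also has skip = 1 becomes one entry with skip = 2, so the
 * walk in phys_page_find() descends two levels in a single step. */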
275 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
277 DECLARE_BITMAP(compacted, nodes_nb);
279 if (d->phys_map.skip) {
280 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
284 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
285 Node *nodes, MemoryRegionSection *sections)
288 hwaddr index = addr >> TARGET_PAGE_BITS;
291 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
292 if (lp.ptr == PHYS_MAP_NODE_NIL) {
293 return &sections[PHYS_SECTION_UNASSIGNED];
296 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
299 if (sections[lp.ptr].size.hi ||
300 range_covers_byte(sections[lp.ptr].offset_within_address_space,
301 sections[lp.ptr].size.lo, addr)) {
302 return &sections[lp.ptr];
304 return &sections[PHYS_SECTION_UNASSIGNED];
308 bool memory_region_is_unassigned(MemoryRegion *mr)
310 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
311 && mr != &io_mem_watch;
314 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
316 bool resolve_subpage)
318 MemoryRegionSection *section;
321 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
322 if (resolve_subpage && section->mr->subpage) {
323 subpage = container_of(section->mr, subpage_t, iomem);
324 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
329 static MemoryRegionSection *
330 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
331 hwaddr *plen, bool resolve_subpage)
333 MemoryRegionSection *section;
336 section = address_space_lookup_region(d, addr, resolve_subpage);
337 /* Compute offset within MemoryRegionSection */
338 addr -= section->offset_within_address_space;
340 /* Compute offset within MemoryRegion */
341 *xlat = addr + section->offset_within_region;
343 diff = int128_sub(section->mr->size, int128_make64(addr));
344 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
348 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
350 if (memory_region_is_ram(mr)) {
351 return !(is_write && mr->readonly);
353 if (memory_region_is_romd(mr)) {
360 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
361 hwaddr *xlat, hwaddr *plen,
365 MemoryRegionSection *section;
370 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
373 if (!mr->iommu_ops) {
377 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
378 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
379 | (addr & iotlb.addr_mask));
380 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
381 if (!(iotlb.perm & (1 << is_write))) {
382 mr = &io_mem_unassigned;
386 as = iotlb.target_as;
389 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
390 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
391 len = MIN(page, len);
399 MemoryRegionSection *
400 address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
403 MemoryRegionSection *section;
404 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
406 assert(!section->mr->iommu_ops);
411 void cpu_exec_init_all(void)
413 #if !defined(CONFIG_USER_ONLY)
414 qemu_mutex_init(&ram_list.mutex);
420 #if !defined(CONFIG_USER_ONLY)
422 static int cpu_common_post_load(void *opaque, int version_id)
424 CPUState *cpu = opaque;
426 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
427 version_id is increased. */
428 cpu->interrupt_request &= ~0x01;
434 static int cpu_common_pre_load(void *opaque)
436 CPUState *cpu = opaque;
438 cpu->exception_index = 0;
443 static bool cpu_common_exception_index_needed(void *opaque)
445 CPUState *cpu = opaque;
447 return cpu->exception_index != 0;
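/* The subsection below is emitted only while exception_index is nonzero,
 * so in the common case the migration stream stays compatible with older
 * QEMUs that do not know about the subsection. */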
450 static const VMStateDescription vmstate_cpu_common_exception_index = {
451 .name = "cpu_common/exception_index",
453 .minimum_version_id = 1,
454 .fields = (VMStateField[]) {
455 VMSTATE_INT32(exception_index, CPUState),
456 VMSTATE_END_OF_LIST()
460 const VMStateDescription vmstate_cpu_common = {
461 .name = "cpu_common",
463 .minimum_version_id = 1,
464 .pre_load = cpu_common_pre_load,
465 .post_load = cpu_common_post_load,
466 .fields = (VMStateField[]) {
467 VMSTATE_UINT32(halted, CPUState),
468 VMSTATE_UINT32(interrupt_request, CPUState),
469 VMSTATE_END_OF_LIST()
471 .subsections = (VMStateSubsection[]) {
473 .vmsd = &vmstate_cpu_common_exception_index,
474 .needed = cpu_common_exception_index_needed,
483 CPUState *qemu_get_cpu(int index)
488 if (cpu->cpu_index == index) {
496 #if !defined(CONFIG_USER_ONLY)
497 void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
499 /* We only support one address space per cpu at the moment. */
500 assert(cpu->as == as);
502 if (cpu->tcg_as_listener) {
503 memory_listener_unregister(cpu->tcg_as_listener);
505 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
507 cpu->tcg_as_listener->commit = tcg_commit;
508 memory_listener_register(cpu->tcg_as_listener, as);
512 void cpu_exec_init(CPUArchState *env)
514 CPUState *cpu = ENV_GET_CPU(env);
515 CPUClass *cc = CPU_GET_CLASS(cpu);
519 #if defined(CONFIG_USER_ONLY)
523 CPU_FOREACH(some_cpu) {
526 cpu->cpu_index = cpu_index;
528 QTAILQ_INIT(&cpu->breakpoints);
529 QTAILQ_INIT(&cpu->watchpoints);
530 #ifndef CONFIG_USER_ONLY
531 cpu->as = &address_space_memory;
532 cpu->thread_id = qemu_get_thread_id();
534 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
535 #if defined(CONFIG_USER_ONLY)
538 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
539 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
541 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
542 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
543 cpu_save, cpu_load, env);
544 assert(cc->vmsd == NULL);
545 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
547 if (cc->vmsd != NULL) {
548 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
552 #if defined(TARGET_HAS_ICE)
553 #if defined(CONFIG_USER_ONLY)
554 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
556 tb_invalidate_phys_page_range(pc, pc + 1, 0);
559 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
561 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
563 tb_invalidate_phys_addr(cpu->as,
564 phys | (pc & ~TARGET_PAGE_MASK));
568 #endif /* TARGET_HAS_ICE */
570 #if defined(CONFIG_USER_ONLY)
571 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
576 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
582 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
586 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
587 int flags, CPUWatchpoint **watchpoint)
592 /* Add a watchpoint. */
593 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
594 int flags, CPUWatchpoint **watchpoint)
598 /* forbid ranges which are empty or run off the end of the address space */
599 if (len == 0 || (addr + len - 1) < addr) {
600 error_report("tried to set invalid watchpoint at %"
601 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
604 wp = g_malloc(sizeof(*wp));
610 /* keep all GDB-injected watchpoints in front */
611 if (flags & BP_GDB) {
612 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
614 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
617 tlb_flush_page(cpu, addr);
624 /* Remove a specific watchpoint. */
625 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
630 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
631 if (addr == wp->vaddr && len == wp->len
632 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
633 cpu_watchpoint_remove_by_ref(cpu, wp);
640 /* Remove a specific watchpoint by reference. */
641 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
643 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
645 tlb_flush_page(cpu, watchpoint->vaddr);
650 /* Remove all matching watchpoints. */
651 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
653 CPUWatchpoint *wp, *next;
655 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
656 if (wp->flags & mask) {
657 cpu_watchpoint_remove_by_ref(cpu, wp);
662 /* Return true if this watchpoint address matches the specified
663 * access (i.e. the address range covered by the watchpoint overlaps
664 * partially or completely with the address range covered by the access). */
667 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
671 /* We know the lengths are non-zero, but a little caution is
672 * required to avoid errors in the case where the range ends
673 * exactly at the top of the address space and so addr + len
674 * wraps round to zero.
676 vaddr wpend = wp->vaddr + wp->len - 1;
677 vaddr addrend = addr + len - 1;
679 return !(addr > wpend || wp->vaddr > addrend);
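/* Hedged worked example: a watchpoint at vaddr = 0xfffffffffffff000 with
 * len = 0x1000 has wpend = 0xffffffffffffffff; computing "vaddr + len"
 * directly would wrap to zero, but the inclusive-end comparison above
 * stays correct. Two ranges overlap iff neither starts past the other's
 * last byte. */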
684 /* Add a breakpoint. */
685 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
686 CPUBreakpoint **breakpoint)
688 #if defined(TARGET_HAS_ICE)
691 bp = g_malloc(sizeof(*bp));
696 /* keep all GDB-injected breakpoints in front */
697 if (flags & BP_GDB) {
698 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
700 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
703 breakpoint_invalidate(cpu, pc);
714 /* Remove a specific breakpoint. */
715 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
717 #if defined(TARGET_HAS_ICE)
720 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
721 if (bp->pc == pc && bp->flags == flags) {
722 cpu_breakpoint_remove_by_ref(cpu, bp);
732 /* Remove a specific breakpoint by reference. */
733 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
735 #if defined(TARGET_HAS_ICE)
736 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
738 breakpoint_invalidate(cpu, breakpoint->pc);
744 /* Remove all matching breakpoints. */
745 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
747 #if defined(TARGET_HAS_ICE)
748 CPUBreakpoint *bp, *next;
750 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
751 if (bp->flags & mask) {
752 cpu_breakpoint_remove_by_ref(cpu, bp);
758 /* enable or disable single step mode. EXCP_DEBUG is returned by the
759 CPU loop after each instruction */
760 void cpu_single_step(CPUState *cpu, int enabled)
762 #if defined(TARGET_HAS_ICE)
763 if (cpu->singlestep_enabled != enabled) {
764 cpu->singlestep_enabled = enabled;
766 kvm_update_guest_debug(cpu, 0);
768 /* must flush all the translated code to avoid inconsistencies */
769 /* XXX: only flush what is necessary */
770 CPUArchState *env = cpu->env_ptr;
777 void cpu_abort(CPUState *cpu, const char *fmt, ...)
784 fprintf(stderr, "qemu: fatal: ");
785 vfprintf(stderr, fmt, ap);
786 fprintf(stderr, "\n");
787 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
788 if (qemu_log_enabled()) {
789 qemu_log("qemu: fatal: ");
790 qemu_log_vprintf(fmt, ap2);
792 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
798 #if defined(CONFIG_USER_ONLY)
800 struct sigaction act;
801 sigfillset(&act.sa_mask);
802 act.sa_handler = SIG_DFL;
803 sigaction(SIGABRT, &act, NULL);
809 #if !defined(CONFIG_USER_ONLY)
810 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
814 /* The list is protected by the iothread lock here. */
815 block = ram_list.mru_block;
816 if (block && addr - block->offset < block->length) {
819 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
820 if (addr - block->offset < block->length) {
825 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
829 ram_list.mru_block = block;
833 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
839 end = TARGET_PAGE_ALIGN(start + length);
840 start &= TARGET_PAGE_MASK;
842 block = qemu_get_ram_block(start);
843 assert(block == qemu_get_ram_block(end - 1));
844 start1 = (uintptr_t)block->host + (start - block->offset);
845 cpu_tlb_reset_dirty_all(start1, length);
848 /* Note: start and end must be within the same ram block. */
849 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
854 cpu_physical_memory_clear_dirty_range(start, length, client);
857 tlb_reset_dirty_range_all(start, length);
861 static void cpu_physical_memory_set_dirty_tracking(bool enable)
863 in_migration = enable;
866 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
867 MemoryRegionSection *section,
869 hwaddr paddr, hwaddr xlat,
871 target_ulong *address)
876 if (memory_region_is_ram(section->mr)) {
878 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
880 if (!section->readonly) {
881 iotlb |= PHYS_SECTION_NOTDIRTY;
883 iotlb |= PHYS_SECTION_ROM;
886 iotlb = section - section->address_space->dispatch->map.sections;
890 /* Make accesses to pages with watchpoints go via the
891 watchpoint trap routines. */
892 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
893 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
894 /* Avoid trapping reads of pages with a write breakpoint. */
895 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
896 iotlb = PHYS_SECTION_WATCH + paddr;
897 *address |= TLB_MMIO;
905 #endif /* defined(CONFIG_USER_ONLY) */
907 #if !defined(CONFIG_USER_ONLY)
909 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
911 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
913 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
917 * Set a custom physical guest memory allocator.
918 * Accelerators with unusual needs may need this. Hopefully, we can
919 * get rid of it eventually.
921 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
923 phys_mem_alloc = alloc;
926 static uint16_t phys_section_add(PhysPageMap *map,
927 MemoryRegionSection *section)
929 /* The physical section number is ORed with a page-aligned
930 * pointer to produce the iotlb entries. Thus it should
931 * never overflow into the page-aligned value.
933 assert(map->sections_nb < TARGET_PAGE_SIZE);
935 if (map->sections_nb == map->sections_nb_alloc) {
936 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
937 map->sections = g_renew(MemoryRegionSection, map->sections,
938 map->sections_nb_alloc);
940 map->sections[map->sections_nb] = *section;
941 memory_region_ref(section->mr);
942 return map->sections_nb++;
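/* Hedged example of the encoding described above: with 4 KiB target pages a
 * section number must stay below TARGET_PAGE_SIZE (4096), because
 * iotlb_to_region() later recovers it with "index & ~TARGET_PAGE_MASK",
 * i.e. from the low 12 bits of the iotlb value. */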
945 static void phys_section_destroy(MemoryRegion *mr)
947 memory_region_unref(mr);
950 subpage_t *subpage = container_of(mr, subpage_t, iomem);
951 object_unref(OBJECT(&subpage->iomem));
956 static void phys_sections_free(PhysPageMap *map)
958 while (map->sections_nb > 0) {
959 MemoryRegionSection *section = &map->sections[--map->sections_nb];
960 phys_section_destroy(section->mr);
962 g_free(map->sections);
966 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
969 hwaddr base = section->offset_within_address_space
971 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
972 d->map.nodes, d->map.sections);
973 MemoryRegionSection subsection = {
974 .offset_within_address_space = base,
975 .size = int128_make64(TARGET_PAGE_SIZE),
979 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
981 if (!(existing->mr->subpage)) {
982 subpage = subpage_init(d->as, base);
983 subsection.address_space = d->as;
984 subsection.mr = &subpage->iomem;
985 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
986 phys_section_add(&d->map, &subsection));
988 subpage = container_of(existing->mr, subpage_t, iomem);
990 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
991 end = start + int128_get64(section->size) - 1;
992 subpage_register(subpage, start, end,
993 phys_section_add(&d->map, section));
997 static void register_multipage(AddressSpaceDispatch *d,
998 MemoryRegionSection *section)
1000 hwaddr start_addr = section->offset_within_address_space;
1001 uint16_t section_index = phys_section_add(&d->map, section);
1002 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1006 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1009 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
1011 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1012 AddressSpaceDispatch *d = as->next_dispatch;
1013 MemoryRegionSection now = *section, remain = *section;
1014 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1016 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1017 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1018 - now.offset_within_address_space;
1020 now.size = int128_min(int128_make64(left), now.size);
1021 register_subpage(d, &now);
1023 now.size = int128_zero();
1025 while (int128_ne(remain.size, now.size)) {
1026 remain.size = int128_sub(remain.size, now.size);
1027 remain.offset_within_address_space += int128_get64(now.size);
1028 remain.offset_within_region += int128_get64(now.size);
1030 if (int128_lt(remain.size, page_size)) {
1031 register_subpage(d, &now);
1032 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1033 now.size = page_size;
1034 register_subpage(d, &now);
1036 now.size = int128_and(now.size, int128_neg(page_size));
1037 register_multipage(d, &now);
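/* Hedged illustration of the carving above: with 4 KiB pages, a section
 * spanning [0x1800, 0x4200) is registered as a subpage head
 * [0x1800, 0x2000), full pages [0x2000, 0x4000) via register_multipage(),
 * and a subpage tail [0x4000, 0x4200). */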
1042 void qemu_flush_coalesced_mmio_buffer(void)
1045 kvm_flush_coalesced_mmio_buffer();
1048 void qemu_mutex_lock_ramlist(void)
1050 qemu_mutex_lock(&ram_list.mutex);
1053 void qemu_mutex_unlock_ramlist(void)
1055 qemu_mutex_unlock(&ram_list.mutex);
1060 #include <sys/vfs.h>
1062 #define HUGETLBFS_MAGIC 0x958458f6
1064 static long gethugepagesize(const char *path, Error **errp)
1070 ret = statfs(path, &fs);
1071 } while (ret != 0 && errno == EINTR);
1074 error_setg_errno(errp, errno, "failed to get page size of file %s",
1079 if (fs.f_type != HUGETLBFS_MAGIC)
1080 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
1085 static void *file_ram_alloc(RAMBlock *block,
1091 char *sanitized_name;
1096 Error *local_err = NULL;
1098 hpagesize = gethugepagesize(path, &local_err);
1100 error_propagate(errp, local_err);
1103 block->mr->align = hpagesize;
1105 if (memory < hpagesize) {
1106 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1107 "or larger than huge page size 0x%" PRIx64,
1112 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1114 "host lacks kvm mmu notifiers, -mem-path unsupported");
1118 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1119 sanitized_name = g_strdup(memory_region_name(block->mr));
1120 for (c = sanitized_name; *c != '\0'; c++) {
1125 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1127 g_free(sanitized_name);
1129 fd = mkstemp(filename);
1131 error_setg_errno(errp, errno,
1132 "unable to create backing store for hugepages");
1139 memory = (memory+hpagesize-1) & ~(hpagesize-1);
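/* Round up to a whole number of huge pages. E.g. with a 2 MiB huge page
 * size, a 3 MiB request becomes 4 MiB:
 * (0x300000 + 0x1fffff) & ~0x1fffff == 0x400000. */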
1142 * ftruncate is not supported by hugetlbfs in older
1143 * hosts, so don't bother bailing out on errors.
1144 * If anything goes wrong with it under other filesystems, mmap will fail.
1147 if (ftruncate(fd, memory)) {
1148 perror("ftruncate");
1151 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1152 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1154 if (area == MAP_FAILED) {
1155 error_setg_errno(errp, errno,
1156 "unable to map backing store for hugepages");
1162 os_mem_prealloc(fd, area, memory);
1170 error_report("%s\n", error_get_pretty(*errp));
1177 static ram_addr_t find_ram_offset(ram_addr_t size)
1179 RAMBlock *block, *next_block;
1180 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1182 assert(size != 0); /* it would hand out same offset multiple times */
1184 if (QTAILQ_EMPTY(&ram_list.blocks))
1187 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1188 ram_addr_t end, next = RAM_ADDR_MAX;
1190 end = block->offset + block->length;
1192 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
1193 if (next_block->offset >= end) {
1194 next = MIN(next, next_block->offset);
1197 if (next - end >= size && next - end < mingap) {
1199 mingap = next - end;
1203 if (offset == RAM_ADDR_MAX) {
1204 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1212 ram_addr_t last_ram_offset(void)
1215 ram_addr_t last = 0;
1217 QTAILQ_FOREACH(block, &ram_list.blocks, next)
1218 last = MAX(last, block->offset + block->length);
1223 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1227 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1228 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1229 "dump-guest-core", true)) {
1230 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1232 perror("qemu_madvise");
1233 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1234 "but dump_guest_core=off specified\n");
1239 static RAMBlock *find_ram_block(ram_addr_t addr)
1243 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1244 if (block->offset == addr) {
1252 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1254 RAMBlock *new_block = find_ram_block(addr);
1258 assert(!new_block->idstr[0]);
1261 char *id = qdev_get_dev_path(dev);
1263 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1267 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1269 /* This assumes the iothread lock is taken here too. */
1270 qemu_mutex_lock_ramlist();
1271 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1272 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1273 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1278 qemu_mutex_unlock_ramlist();
1281 void qemu_ram_unset_idstr(ram_addr_t addr)
1283 RAMBlock *block = find_ram_block(addr);
1286 memset(block->idstr, 0, sizeof(block->idstr));
1290 static int memory_try_enable_merging(void *addr, size_t len)
1292 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1293 /* disabled by the user */
1297 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1300 static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
1303 ram_addr_t old_ram_size, new_ram_size;
1305 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1307 /* This assumes the iothread lock is taken here too. */
1308 qemu_mutex_lock_ramlist();
1309 new_block->offset = find_ram_offset(new_block->length);
1311 if (!new_block->host) {
1312 if (xen_enabled()) {
1313 xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
1315 new_block->host = phys_mem_alloc(new_block->length,
1316 &new_block->mr->align);
1317 if (!new_block->host) {
1318 error_setg_errno(errp, errno,
1319 "cannot set up guest memory '%s'",
1320 memory_region_name(new_block->mr));
1321 qemu_mutex_unlock_ramlist();
1324 memory_try_enable_merging(new_block->host, new_block->length);
1328 /* Keep the list sorted from biggest to smallest block. */
1329 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1330 if (block->length < new_block->length) {
1335 QTAILQ_INSERT_BEFORE(block, new_block, next);
1337 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1339 ram_list.mru_block = NULL;
1342 qemu_mutex_unlock_ramlist();
1344 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1346 if (new_ram_size > old_ram_size) {
1348 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1349 ram_list.dirty_memory[i] =
1350 bitmap_zero_extend(ram_list.dirty_memory[i],
1351 old_ram_size, new_ram_size);
1354 cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
1356 qemu_ram_setup_dump(new_block->host, new_block->length);
1357 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
1358 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
1360 if (kvm_enabled()) {
1361 kvm_setup_guest_memory(new_block->host, new_block->length);
1365 * With HAX, QEMU allocates the virtual address space, and the HAX kernel
1366 * module populates it with physical memory. Currently we have no
1367 * paging, so the user should make sure there is enough free memory in advance.
1369 if (hax_enabled()) {
1370 int ret = hax_populate_ram((uint64_t)(uintptr_t)new_block->host,
1373 fprintf(stderr, "HAX failed to populate ram\n");
1379 return new_block->offset;
1383 ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1384 bool share, const char *mem_path,
1387 RAMBlock *new_block;
1389 Error *local_err = NULL;
1391 if (xen_enabled()) {
1392 error_setg(errp, "-mem-path not supported with Xen");
1396 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1398 * file_ram_alloc() needs to allocate just like
1399 * phys_mem_alloc, but we haven't bothered to provide
1403 "-mem-path not supported with this accelerator");
1407 size = TARGET_PAGE_ALIGN(size);
1408 new_block = g_malloc0(sizeof(*new_block));
1410 new_block->length = size;
1411 new_block->flags = share ? RAM_SHARED : 0;
1412 new_block->host = file_ram_alloc(new_block, size,
1414 if (!new_block->host) {
1419 addr = ram_block_add(new_block, &local_err);
1422 error_propagate(errp, local_err);
1429 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1430 MemoryRegion *mr, Error **errp)
1432 RAMBlock *new_block;
1434 Error *local_err = NULL;
1436 size = TARGET_PAGE_ALIGN(size);
1437 new_block = g_malloc0(sizeof(*new_block));
1439 new_block->length = size;
1441 new_block->host = host;
1443 new_block->flags |= RAM_PREALLOC;
1445 addr = ram_block_add(new_block, &local_err);
1448 error_propagate(errp, local_err);
1454 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
1456 return qemu_ram_alloc_from_ptr(size, NULL, mr, errp);
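/* A minimal usage sketch (hedged, not part of the build): allocate one page
 * of guest RAM for a caller-provided MemoryRegion "mr" and zero it through
 * the host mapping. "mr" and the error handling are assumptions made purely
 * for illustration. */
#if 0
Error *local_err = NULL;
ram_addr_t off = qemu_ram_alloc(TARGET_PAGE_SIZE, mr, &local_err);
if (!local_err) {
    void *host = qemu_get_ram_ptr(off);   /* host pointer into the new block */
    memset(host, 0, TARGET_PAGE_SIZE);
}
#endif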
1459 void qemu_ram_free_from_ptr(ram_addr_t addr)
1463 /* This assumes the iothread lock is taken here too. */
1464 qemu_mutex_lock_ramlist();
1465 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1466 if (addr == block->offset) {
1467 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1468 ram_list.mru_block = NULL;
1474 qemu_mutex_unlock_ramlist();
1477 void qemu_ram_free(ram_addr_t addr)
1481 /* This assumes the iothread lock is taken here too. */
1482 qemu_mutex_lock_ramlist();
1483 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1484 if (addr == block->offset) {
1485 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1486 ram_list.mru_block = NULL;
1488 if (block->flags & RAM_PREALLOC) {
1490 } else if (xen_enabled()) {
1491 xen_invalidate_map_cache_entry(block->host);
1493 } else if (block->fd >= 0) {
1494 munmap(block->host, block->length);
1498 qemu_anon_ram_free(block->host, block->length);
1504 qemu_mutex_unlock_ramlist();
1509 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1516 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1517 offset = addr - block->offset;
1518 if (offset < block->length) {
1519 vaddr = block->host + offset;
1520 if (block->flags & RAM_PREALLOC) {
1522 } else if (xen_enabled()) {
1526 munmap(vaddr, length);
1527 if (block->fd >= 0) {
1528 flags |= (block->flags & RAM_SHARED ?
1529 MAP_SHARED : MAP_PRIVATE);
1530 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1531 flags, block->fd, offset);
1534 * Remap needs to match alloc. Accelerators that
1535 * set phys_mem_alloc never remap. If they did,
1536 * we'd need a remap hook here.
1538 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1540 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1541 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1544 if (area != vaddr) {
1545 fprintf(stderr, "Could not remap addr: "
1546 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1550 memory_try_enable_merging(vaddr, length);
1551 qemu_ram_setup_dump(vaddr, length);
1557 #endif /* !_WIN32 */
1559 int qemu_get_ram_fd(ram_addr_t addr)
1561 RAMBlock *block = qemu_get_ram_block(addr);
1566 void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1568 RAMBlock *block = qemu_get_ram_block(addr);
1573 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1574 With the exception of the softmmu code in this file, this should
1575 only be used for local memory (e.g. video ram) that the device owns,
1576 and knows it isn't going to access beyond the end of the block.
1578 It should not be used for general purpose DMA.
1579 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1581 void *qemu_get_ram_ptr(ram_addr_t addr)
1583 RAMBlock *block = qemu_get_ram_block(addr);
1585 if (xen_enabled()) {
1586 /* We need to check if the requested address is in the RAM
1587 * because we don't want to map the entire memory in QEMU.
1588 * In that case just map until the end of the page.
1590 if (block->offset == 0) {
1591 return xen_map_cache(addr, 0, 0);
1592 } else if (block->host == NULL) {
1594 xen_map_cache(block->offset, block->length, 1);
1597 return block->host + (addr - block->offset);
1600 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1601 * but takes a size argument */
1602 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1607 if (xen_enabled()) {
1608 return xen_map_cache(addr, *size, 1);
1612 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1613 if (addr - block->offset < block->length) {
1614 if (addr - block->offset + *size > block->length)
1615 *size = block->length - addr + block->offset;
1616 return block->host + (addr - block->offset);
1620 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1625 /* Some of the softmmu routines need to translate from a host pointer
1626 (typically a TLB entry) back to a ram offset. */
1627 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1630 uint8_t *host = ptr;
1632 if (xen_enabled()) {
1633 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1634 return qemu_get_ram_block(*ram_addr)->mr;
1637 block = ram_list.mru_block;
1638 if (block && block->host && host - block->host < block->length) {
1642 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1643 /* This case occurs when the block is not mapped. */
1644 if (block->host == NULL) {
1647 if (host - block->host < block->length) {
1655 *ram_addr = block->offset + (host - block->host);
1659 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1660 uint64_t val, unsigned size)
1662 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1663 tb_invalidate_phys_page_fast(ram_addr, size);
1667 stb_p(qemu_get_ram_ptr(ram_addr), val);
1670 stw_p(qemu_get_ram_ptr(ram_addr), val);
1673 stl_p(qemu_get_ram_ptr(ram_addr), val);
1678 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
1679 /* we remove the notdirty callback only if the code has been flushed */
1681 if (!cpu_physical_memory_is_clean(ram_addr)) {
1682 CPUArchState *env = current_cpu->env_ptr;
1683 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
1687 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1688 unsigned size, bool is_write)
1693 static const MemoryRegionOps notdirty_mem_ops = {
1694 .write = notdirty_mem_write,
1695 .valid.accepts = notdirty_mem_accepts,
1696 .endianness = DEVICE_NATIVE_ENDIAN,
1699 /* Generate a debug exception if a watchpoint has been hit. */
1700 static void check_watchpoint(int offset, int len, int flags)
1702 CPUState *cpu = current_cpu;
1703 CPUArchState *env = cpu->env_ptr;
1704 target_ulong pc, cs_base;
1709 if (cpu->watchpoint_hit) {
1710 /* We re-entered the check after replacing the TB. Now raise
1711 * the debug interrupt so that it will trigger after the
1712 * current instruction. */
1713 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
1716 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1717 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1718 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1719 && (wp->flags & flags)) {
1720 if (flags == BP_MEM_READ) {
1721 wp->flags |= BP_WATCHPOINT_HIT_READ;
1723 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1725 wp->hitaddr = vaddr;
1726 if (!cpu->watchpoint_hit) {
1727 cpu->watchpoint_hit = wp;
1728 tb_check_watchpoint(cpu);
1729 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1730 cpu->exception_index = EXCP_DEBUG;
1733 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1734 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
1735 cpu_resume_from_signal(cpu, NULL);
1739 wp->flags &= ~BP_WATCHPOINT_HIT;
1744 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1745 so these check for a hit then pass through to the normal out-of-line phys routines. */
1747 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1750 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
1752 case 1: return ldub_phys(&address_space_memory, addr);
1753 case 2: return lduw_phys(&address_space_memory, addr);
1754 case 4: return ldl_phys(&address_space_memory, addr);
1759 static void watch_mem_write(void *opaque, hwaddr addr,
1760 uint64_t val, unsigned size)
1762 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
1765 stb_phys(&address_space_memory, addr, val);
1768 stw_phys(&address_space_memory, addr, val);
1771 stl_phys(&address_space_memory, addr, val);
1777 static const MemoryRegionOps watch_mem_ops = {
1778 .read = watch_mem_read,
1779 .write = watch_mem_write,
1780 .endianness = DEVICE_NATIVE_ENDIAN,
1783 static uint64_t subpage_read(void *opaque, hwaddr addr,
1786 subpage_t *subpage = opaque;
1789 #if defined(DEBUG_SUBPAGE)
1790 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1791 subpage, len, addr);
1793 address_space_read(subpage->as, addr + subpage->base, buf, len);
1806 static void subpage_write(void *opaque, hwaddr addr,
1807 uint64_t value, unsigned len)
1809 subpage_t *subpage = opaque;
1812 #if defined(DEBUG_SUBPAGE)
1813 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1814 " value %"PRIx64"\n",
1815 __func__, subpage, len, addr, value);
1830 address_space_write(subpage->as, addr + subpage->base, buf, len);
1833 static bool subpage_accepts(void *opaque, hwaddr addr,
1834 unsigned len, bool is_write)
1836 subpage_t *subpage = opaque;
1837 #if defined(DEBUG_SUBPAGE)
1838 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1839 __func__, subpage, is_write ? 'w' : 'r', len, addr);
1842 return address_space_access_valid(subpage->as, addr + subpage->base,
1846 static const MemoryRegionOps subpage_ops = {
1847 .read = subpage_read,
1848 .write = subpage_write,
1849 .valid.accepts = subpage_accepts,
1850 .endianness = DEVICE_NATIVE_ENDIAN,
1853 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1858 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1860 idx = SUBPAGE_IDX(start);
1861 eidx = SUBPAGE_IDX(end);
1862 #if defined(DEBUG_SUBPAGE)
1863 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1864 __func__, mmio, start, end, idx, eidx, section);
1866 for (; idx <= eidx; idx++) {
1867 mmio->sub_section[idx] = section;
1873 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1877 mmio = g_malloc0(sizeof(subpage_t));
1881 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1882 NULL, TARGET_PAGE_SIZE);
1883 mmio->iomem.subpage = true;
1884 #if defined(DEBUG_SUBPAGE)
1885 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1886 mmio, base, TARGET_PAGE_SIZE);
1888 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
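/* Hedged illustration: sub_section[] maps every byte offset within the page
 * to a section index, so after init each offset points at
 * PHYS_SECTION_UNASSIGNED; register_subpage() then overwrites just the byte
 * range claimed by a smaller-than-page MemoryRegionSection. */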
1893 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1897 MemoryRegionSection section = {
1898 .address_space = as,
1900 .offset_within_address_space = 0,
1901 .offset_within_region = 0,
1902 .size = int128_2_64(),
1905 return phys_section_add(map, &section);
1908 MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
1910 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
1913 static void io_mem_init(void)
1915 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
1916 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1918 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1920 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1924 static void mem_begin(MemoryListener *listener)
1926 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1927 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1930 n = dummy_section(&d->map, as, &io_mem_unassigned);
1931 assert(n == PHYS_SECTION_UNASSIGNED);
1932 n = dummy_section(&d->map, as, &io_mem_notdirty);
1933 assert(n == PHYS_SECTION_NOTDIRTY);
1934 n = dummy_section(&d->map, as, &io_mem_rom);
1935 assert(n == PHYS_SECTION_ROM);
1936 n = dummy_section(&d->map, as, &io_mem_watch);
1937 assert(n == PHYS_SECTION_WATCH);
1939 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1941 as->next_dispatch = d;
1944 static void mem_commit(MemoryListener *listener)
1946 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1947 AddressSpaceDispatch *cur = as->dispatch;
1948 AddressSpaceDispatch *next = as->next_dispatch;
1950 phys_page_compact_all(next, next->map.nodes_nb);
1952 as->dispatch = next;
1955 phys_sections_free(&cur->map);
1960 static void tcg_commit(MemoryListener *listener)
1964 /* since each CPU stores ram addresses in its TLB cache, we must
1965 reset the modified entries */
1968 /* FIXME: Disentangle the circular cpu.h header dependencies so we can
1969 directly get the right CPU from the listener. */
1970 if (cpu->tcg_as_listener != listener) {
1977 static void core_log_global_start(MemoryListener *listener)
1979 cpu_physical_memory_set_dirty_tracking(true);
1982 static void core_log_global_stop(MemoryListener *listener)
1984 cpu_physical_memory_set_dirty_tracking(false);
1987 static MemoryListener core_memory_listener = {
1988 .log_global_start = core_log_global_start,
1989 .log_global_stop = core_log_global_stop,
1993 void address_space_init_dispatch(AddressSpace *as)
1995 as->dispatch = NULL;
1996 as->dispatch_listener = (MemoryListener) {
1998 .commit = mem_commit,
1999 .region_add = mem_add,
2000 .region_nop = mem_add,
2003 memory_listener_register(&as->dispatch_listener, as);
2006 void address_space_destroy_dispatch(AddressSpace *as)
2008 AddressSpaceDispatch *d = as->dispatch;
2010 memory_listener_unregister(&as->dispatch_listener);
2012 as->dispatch = NULL;
2015 static void memory_map_init(void)
2017 system_memory = g_malloc(sizeof(*system_memory));
2019 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2020 address_space_init(&address_space_memory, system_memory, "memory");
2022 system_io = g_malloc(sizeof(*system_io));
2023 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2025 address_space_init(&address_space_io, system_io, "I/O");
2027 memory_listener_register(&core_memory_listener, &address_space_memory);
2030 MemoryRegion *get_system_memory(void)
2032 return system_memory;
2035 MemoryRegion *get_system_io(void)
2040 #endif /* !defined(CONFIG_USER_ONLY) */
2042 /* physical memory access (slow version, mainly for debug) */
2043 #if defined(CONFIG_USER_ONLY)
2044 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2045 uint8_t *buf, int len, int is_write)
2052 page = addr & TARGET_PAGE_MASK;
2053 l = (page + TARGET_PAGE_SIZE) - addr;
2056 flags = page_get_flags(page);
2057 if (!(flags & PAGE_VALID))
2060 if (!(flags & PAGE_WRITE))
2062 /* XXX: this code should not depend on lock_user */
2063 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2066 unlock_user(p, addr, l);
2068 if (!(flags & PAGE_READ))
2070 /* XXX: this code should not depend on lock_user */
2071 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2074 unlock_user(p, addr, 0);
2085 static void invalidate_and_set_dirty(hwaddr addr,
2088 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2089 tb_invalidate_phys_range(addr, addr + length, 0);
2090 cpu_physical_memory_set_dirty_range_nocode(addr, length);
2092 xen_modified_memory(addr, length);
2095 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2097 unsigned access_size_max = mr->ops->valid.max_access_size;
2099 /* Regions are assumed to support 1-4 byte accesses unless
2100 otherwise specified. */
2101 if (access_size_max == 0) {
2102 access_size_max = 4;
2105 /* Bound the maximum access by the alignment of the address. */
2106 if (!mr->ops->impl.unaligned) {
2107 unsigned align_size_max = addr & -addr;
2108 if (align_size_max != 0 && align_size_max < access_size_max) {
2109 access_size_max = align_size_max;
2113 /* Don't attempt accesses larger than the maximum. */
2114 if (l > access_size_max) {
2115 l = access_size_max;
2118 l = 1 << (qemu_fls(l) - 1);
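/* Reduce the access to the largest power of two that fits. E.g. a 5-byte
 * residue becomes a 4-byte access (qemu_fls(5) == 3, 1 << 2 == 4), and the
 * main loop issues the remaining byte separately. */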
2124 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
2125 int len, bool is_write)
2136 mr = address_space_translate(as, addr, &addr1, &l, is_write);
2139 if (!memory_access_is_direct(mr, is_write)) {
2140 l = memory_access_size(mr, l, addr1);
2141 /* XXX: could force current_cpu to NULL to avoid potential bugs */
2145 /* 64 bit write access */
2147 error |= io_mem_write(mr, addr1, val, 8);
2150 /* 32 bit write access */
2152 error |= io_mem_write(mr, addr1, val, 4);
2155 /* 16 bit write access */
2157 error |= io_mem_write(mr, addr1, val, 2);
2160 /* 8 bit write access */
2162 error |= io_mem_write(mr, addr1, val, 1);
2168 addr1 += memory_region_get_ram_addr(mr);
2170 ptr = qemu_get_ram_ptr(addr1);
2171 memcpy(ptr, buf, l);
2172 invalidate_and_set_dirty(addr1, l);
2175 if (!memory_access_is_direct(mr, is_write)) {
2177 l = memory_access_size(mr, l, addr1);
2180 /* 64 bit read access */
2181 error |= io_mem_read(mr, addr1, &val, 8);
2185 /* 32 bit read access */
2186 error |= io_mem_read(mr, addr1, &val, 4);
2190 /* 16 bit read access */
2191 error |= io_mem_read(mr, addr1, &val, 2);
2195 /* 8 bit read access */
2196 error |= io_mem_read(mr, addr1, &val, 1);
2204 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2205 memcpy(buf, ptr, l);
2216 bool address_space_write(AddressSpace *as, hwaddr addr,
2217 const uint8_t *buf, int len)
2219 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2222 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2224 return address_space_rw(as, addr, buf, len, false);
2228 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2229 int len, int is_write)
2231 address_space_rw(&address_space_memory, addr, buf, len, is_write);
2234 enum write_rom_type {
2239 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2240 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2249 mr = address_space_translate(as, addr, &addr1, &l, true);
2251 if (!(memory_region_is_ram(mr) ||
2252 memory_region_is_romd(mr))) {
2255 addr1 += memory_region_get_ram_addr(mr);
2257 ptr = qemu_get_ram_ptr(addr1);
2260 memcpy(ptr, buf, l);
2261 invalidate_and_set_dirty(addr1, l);
2264 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2274 /* used for ROM loading: can write to RAM and ROM */
2275 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2276 const uint8_t *buf, int len)
2278 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2281 void cpu_flush_icache_range(hwaddr start, int len)
2284 * This function should do the same thing as an icache flush that was
2285 * triggered from within the guest. For TCG we are always cache coherent,
2286 * so there is no need to flush anything. For KVM / Xen we need to flush
2287 * the host's instruction cache at least.
2289 if (tcg_enabled()) {
2293 cpu_physical_memory_write_rom_internal(&address_space_memory,
2294 start, NULL, len, FLUSH_CACHE);
2304 static BounceBuffer bounce;
2306 typedef struct MapClient {
2308 void (*callback)(void *opaque);
2309 QLIST_ENTRY(MapClient) link;
2312 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2313 = QLIST_HEAD_INITIALIZER(map_client_list);
2315 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2317 MapClient *client = g_malloc(sizeof(*client));
2319 client->opaque = opaque;
2320 client->callback = callback;
2321 QLIST_INSERT_HEAD(&map_client_list, client, link);
2325 static void cpu_unregister_map_client(void *_client)
2327 MapClient *client = (MapClient *)_client;
2329 QLIST_REMOVE(client, link);
2333 static void cpu_notify_map_clients(void)
2337 while (!QLIST_EMPTY(&map_client_list)) {
2338 client = QLIST_FIRST(&map_client_list);
2339 client->callback(client->opaque);
2340 cpu_unregister_map_client(client);
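/* Hedged usage sketch: a device whose address_space_map() call fails because
 * the bounce buffer is busy can register a callback and re-issue the mapping
 * when it fires, e.g.
 *     cpu_register_map_client(dev, retry_dma_cb);
 * where "dev" and "retry_dma_cb" are hypothetical names. */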
2344 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2351 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2352 if (!memory_access_is_direct(mr, is_write)) {
2353 l = memory_access_size(mr, l, addr);
2354 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2365 /* Map a physical memory region into a host virtual address.
2366 * May map a subset of the requested range, given by and returned in *plen.
2367 * May return NULL if resources needed to perform the mapping are exhausted.
2368 * Use only for reads OR writes - not for read-modify-write operations.
2369 * Use cpu_register_map_client() to know when retrying the map operation is
2370 * likely to succeed.
2372 void *address_space_map(AddressSpace *as,
2379 hwaddr l, xlat, base;
2380 MemoryRegion *mr, *this_mr;
2388 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2389 if (!memory_access_is_direct(mr, is_write)) {
2390 if (bounce.buffer) {
2393 /* Avoid unbounded allocations */
2394 l = MIN(l, TARGET_PAGE_SIZE);
2395 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2399 memory_region_ref(mr);
2402 address_space_read(as, addr, bounce.buffer, l);
2406 return bounce.buffer;
2410 raddr = memory_region_get_ram_addr(mr);
2421 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2422 if (this_mr != mr || xlat != base + done) {
2427 memory_region_ref(mr);
2429 return qemu_ram_ptr_length(raddr + base, plen);
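/* A minimal sketch of the map/modify/unmap pattern described above (hedged,
 * not part of the build). "as", "gpa" and "len" are assumed inputs; only up
 * to *plen bytes may be touched, since the mapping can come back shorter
 * than requested. */
#if 0
hwaddr plen = len;
uint8_t *p = address_space_map(as, gpa, &plen, true /* is_write */);
if (p) {
    memset(p, 0, plen);                        /* write within the mapping */
    address_space_unmap(as, p, plen, true, plen);
}
#endif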
2432 /* Unmaps a memory region previously mapped by address_space_map().
2433 * Will also mark the memory as dirty if is_write == 1. access_len gives
2434 * the amount of memory that was actually read or written by the caller.
2436 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2437 int is_write, hwaddr access_len)
2439 if (buffer != bounce.buffer) {
2443 mr = qemu_ram_addr_from_host(buffer, &addr1);
2446 invalidate_and_set_dirty(addr1, access_len);
2448 if (xen_enabled()) {
2449 xen_invalidate_map_cache_entry(buffer);
2451 memory_region_unref(mr);
2455 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2457 qemu_vfree(bounce.buffer);
2458 bounce.buffer = NULL;
2459 memory_region_unref(bounce.mr);
2460 cpu_notify_map_clients();
2463 void *cpu_physical_memory_map(hwaddr addr,
2467 return address_space_map(&address_space_memory, addr, plen, is_write);
2470 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2471 int is_write, hwaddr access_len)
2473 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2476 /* warning: addr must be aligned */
2477 static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
2478 enum device_endian endian)
2486 mr = address_space_translate(as, addr, &addr1, &l, false);
2487 if (l < 4 || !memory_access_is_direct(mr, false)) {
2489 io_mem_read(mr, addr1, &val, 4);
2490 #if defined(TARGET_WORDS_BIGENDIAN)
2491 if (endian == DEVICE_LITTLE_ENDIAN) {
2495 if (endian == DEVICE_BIG_ENDIAN) {
2501 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2505 case DEVICE_LITTLE_ENDIAN:
2506 val = ldl_le_p(ptr);
2508 case DEVICE_BIG_ENDIAN:
2509 val = ldl_be_p(ptr);
2519 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
2521 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2524 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
2526 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2529 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
2531 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2534 /* warning: addr must be aligned */
2535 static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
2536 enum device_endian endian)
2544 mr = address_space_translate(as, addr, &addr1, &l,
2546 if (l < 8 || !memory_access_is_direct(mr, false)) {
2548 io_mem_read(mr, addr1, &val, 8);
2549 #if defined(TARGET_WORDS_BIGENDIAN)
2550 if (endian == DEVICE_LITTLE_ENDIAN) {
2554 if (endian == DEVICE_BIG_ENDIAN) {
2560 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2564 case DEVICE_LITTLE_ENDIAN:
2565 val = ldq_le_p(ptr);
2567 case DEVICE_BIG_ENDIAN:
2568 val = ldq_be_p(ptr);
2578 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
2580 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2583 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
2585 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2588 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
2590 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2594 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2597 address_space_rw(as, addr, &val, 1, 0);
2601 /* warning: addr must be aligned */
2602 static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
2603 enum device_endian endian)
2611 mr = address_space_translate(as, addr, &addr1, &l,
2613 if (l < 2 || !memory_access_is_direct(mr, false)) {
2615 io_mem_read(mr, addr1, &val, 2);
2616 #if defined(TARGET_WORDS_BIGENDIAN)
2617 if (endian == DEVICE_LITTLE_ENDIAN) {
2621 if (endian == DEVICE_BIG_ENDIAN) {
2627 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2631 case DEVICE_LITTLE_ENDIAN:
2632 val = lduw_le_p(ptr);
2634 case DEVICE_BIG_ENDIAN:
2635 val = lduw_be_p(ptr);
2645 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
2647 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2650 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
2652 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2655 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
2657 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2660 /* warning: addr must be aligned. The ram page is not masked as dirty
2661 and the code inside is not invalidated. It is useful if the dirty
2662 bits are used to track modified PTEs */
2663 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
2670 mr = address_space_translate(as, addr, &addr1, &l,
2672 if (l < 4 || !memory_access_is_direct(mr, true)) {
2673 io_mem_write(mr, addr1, val, 4);
2675 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2676 ptr = qemu_get_ram_ptr(addr1);
2679 if (unlikely(in_migration)) {
2680 if (cpu_physical_memory_is_clean(addr1)) {
2681 /* invalidate code */
2682 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2684 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
2690 /* warning: addr must be aligned */
2691 static inline void stl_phys_internal(AddressSpace *as,
2692 hwaddr addr, uint32_t val,
2693 enum device_endian endian)
2700 mr = address_space_translate(as, addr, &addr1, &l,
2702 if (l < 4 || !memory_access_is_direct(mr, true)) {
2703 #if defined(TARGET_WORDS_BIGENDIAN)
2704 if (endian == DEVICE_LITTLE_ENDIAN) {
2708 if (endian == DEVICE_BIG_ENDIAN) {
2712 io_mem_write(mr, addr1, val, 4);
2715 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2716 ptr = qemu_get_ram_ptr(addr1);
2718 case DEVICE_LITTLE_ENDIAN:
2721 case DEVICE_BIG_ENDIAN:
2728 invalidate_and_set_dirty(addr1, 4);
2732 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2734 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2737 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2739 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2742 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2744 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2748 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2751 address_space_rw(as, addr, &v, 1, 1);
2754 /* warning: addr must be aligned */
2755 static inline void stw_phys_internal(AddressSpace *as,
2756 hwaddr addr, uint32_t val,
2757 enum device_endian endian)
2764 mr = address_space_translate(as, addr, &addr1, &l, true);
2765 if (l < 2 || !memory_access_is_direct(mr, true)) {
2766 #if defined(TARGET_WORDS_BIGENDIAN)
2767 if (endian == DEVICE_LITTLE_ENDIAN) {
2771 if (endian == DEVICE_BIG_ENDIAN) {
2775 io_mem_write(mr, addr1, val, 2);
2778 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2779 ptr = qemu_get_ram_ptr(addr1);
2781 case DEVICE_LITTLE_ENDIAN:
2784 case DEVICE_BIG_ENDIAN:
2791 invalidate_and_set_dirty(addr1, 2);
2795 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2797 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2800 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2802 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2805 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2807 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2811 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
2814 address_space_rw(as, addr, (void *) &val, 8, 1);
2817 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
2819 val = cpu_to_le64(val);
2820 address_space_rw(as, addr, (void *) &val, 8, 1);
2823 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
2825 val = cpu_to_be64(val);
2826 address_space_rw(as, addr, (void *) &val, 8, 1);
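/* Hedged round-trip sketch: the fixed-endian helpers hide host byte order,
 * e.g.
 *     stl_le_phys(&address_space_memory, a, 0x12345678);
 *     assert(ldl_le_phys(&address_space_memory, a) == 0x12345678);
 * where "a" is a hypothetical guest-physical address backed by RAM. */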
2829 /* virtual memory access for debug (includes writing to ROM) */
2830 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2831 uint8_t *buf, int len, int is_write)
2838 page = addr & TARGET_PAGE_MASK;
2839 phys_addr = cpu_get_phys_page_debug(cpu, page);
2840 /* if no physical page mapped, return an error */
2841 if (phys_addr == -1)
2843 l = (page + TARGET_PAGE_SIZE) - addr;
2846 phys_addr += (addr & ~TARGET_PAGE_MASK);
2848 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2850 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2861 * A helper function for the _utterly broken_ virtio device model to find out if
2862 * it's running on a big endian machine. Don't do this at home kids!
2864 bool target_words_bigendian(void);
2865 bool target_words_bigendian(void)
2867 #if defined(TARGET_WORDS_BIGENDIAN)
2874 #ifndef CONFIG_USER_ONLY
2875 bool cpu_physical_memory_is_io(hwaddr phys_addr)
2880 mr = address_space_translate(&address_space_memory,
2881 phys_addr, &phys_addr, &l, false);
2883 return !(memory_region_is_ram(mr) ||
2884 memory_region_is_romd(mr));
2887 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2891 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2892 func(block->host, block->offset, block->length, opaque);