#include "hw/audiodev.h"
#include "kvm.h"
#include "migration.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "hw/smbios.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "hw/pcspk.h"
#include "qemu/page_cache.h"
#include "qemu-config.h"
*/
#include "monitor.h"
-#include "cpu-common.h"
+#include "exec/cpu-common.h"
#include "kvm.h"
#include "balloon.h"
#include "trace.h"
#include <stdlib.h>
#endif /* DEBUG_REMAP */
-#include "qemu-user-types.h"
+#include "exec/user/abitypes.h"
enum BSDType {
target_freebsd,
#include "syscall_defs.h"
#include "syscall.h"
#include "target_signal.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#if defined(CONFIG_USE_NPTL)
#define THREAD __thread
+++ /dev/null
-/*
- * defines common to all virtual CPUs
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef CPU_ALL_H
-#define CPU_ALL_H
-
-#include "qemu-common.h"
-#include "qemu-tls.h"
-#include "cpu-common.h"
-
-/* some important defines:
- *
- * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
- * memory accesses.
- *
- * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
- * otherwise little endian.
- *
- * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
- *
- * TARGET_WORDS_BIGENDIAN : same for target cpu
- */
-
-#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
-#define BSWAP_NEEDED
-#endif
-
-#ifdef BSWAP_NEEDED
-
-static inline uint16_t tswap16(uint16_t s)
-{
- return bswap16(s);
-}
-
-static inline uint32_t tswap32(uint32_t s)
-{
- return bswap32(s);
-}
-
-static inline uint64_t tswap64(uint64_t s)
-{
- return bswap64(s);
-}
-
-static inline void tswap16s(uint16_t *s)
-{
- *s = bswap16(*s);
-}
-
-static inline void tswap32s(uint32_t *s)
-{
- *s = bswap32(*s);
-}
-
-static inline void tswap64s(uint64_t *s)
-{
- *s = bswap64(*s);
-}
-
-#else
-
-static inline uint16_t tswap16(uint16_t s)
-{
- return s;
-}
-
-static inline uint32_t tswap32(uint32_t s)
-{
- return s;
-}
-
-static inline uint64_t tswap64(uint64_t s)
-{
- return s;
-}
-
-static inline void tswap16s(uint16_t *s)
-{
-}
-
-static inline void tswap32s(uint32_t *s)
-{
-}
-
-static inline void tswap64s(uint64_t *s)
-{
-}
-
-#endif
-
-#if TARGET_LONG_SIZE == 4
-#define tswapl(s) tswap32(s)
-#define tswapls(s) tswap32s((uint32_t *)(s))
-#define bswaptls(s) bswap32s(s)
-#else
-#define tswapl(s) tswap64(s)
-#define tswapls(s) tswap64s((uint64_t *)(s))
-#define bswaptls(s) bswap64s(s)
-#endif
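/* Illustrative sketch (not part of the original header): when host and target
 * endianness differ, tswap32() byte-swaps a value on its way to or from
 * guest-visible memory; otherwise it is a no-op.  Assuming a little-endian
 * host running a big-endian target:
 */
static inline uint32_t example_guest_order_u32(uint32_t host_val)
{
    /* 0x12345678 on the host becomes 0x78563412 in guest byte order */
    return tswap32(host_val);
}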
-
-/* CPU memory access without any memory or io remapping */
-
-/*
- * the generic syntax for the memory accesses is:
- *
- * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
- *
- * store: st{type}{size}{endian}_{access_type}(ptr, val)
- *
- * type is:
- * (empty): integer access
- * f : float access
- *
- * sign is:
- * (empty): for floats or 32 bit size
- * u : unsigned
- * s : signed
- *
- * size is:
- * b: 8 bits
- * w: 16 bits
- * l: 32 bits
- * q: 64 bits
- *
- * endian is:
- * (empty): target cpu endianness or 8 bit access
- * r : reversed target cpu endianness (not implemented yet)
- * be : big endian (not implemented yet)
- * le : little endian (not implemented yet)
- *
- * access_type is:
- * raw : host memory access
- * user : user mode access using soft MMU
- * kernel : kernel mode access using soft MMU
- */
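/* Illustrative examples of the naming scheme above (not part of the original
 * header); the buffer contents are irrelevant here:
 */
static inline void example_ld_st_naming(uint8_t *buf)
{
    uint32_t v = ldl_le_p(buf);        /* ld, integer, l = 32 bit, little endian, host ptr */
    int16_t  s = ldsw_be_p(buf);       /* ld, signed, w = 16 bit, big endian, host ptr     */
    stq_p(buf, 0x1122334455667788ULL); /* st, q = 64 bit, target cpu endianness            */
    (void)v;
    (void)s;
}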
-
-/* target-endianness CPU memory access functions */
-#if defined(TARGET_WORDS_BIGENDIAN)
-#define lduw_p(p) lduw_be_p(p)
-#define ldsw_p(p) ldsw_be_p(p)
-#define ldl_p(p) ldl_be_p(p)
-#define ldq_p(p) ldq_be_p(p)
-#define ldfl_p(p) ldfl_be_p(p)
-#define ldfq_p(p) ldfq_be_p(p)
-#define stw_p(p, v) stw_be_p(p, v)
-#define stl_p(p, v) stl_be_p(p, v)
-#define stq_p(p, v) stq_be_p(p, v)
-#define stfl_p(p, v) stfl_be_p(p, v)
-#define stfq_p(p, v) stfq_be_p(p, v)
-#else
-#define lduw_p(p) lduw_le_p(p)
-#define ldsw_p(p) ldsw_le_p(p)
-#define ldl_p(p) ldl_le_p(p)
-#define ldq_p(p) ldq_le_p(p)
-#define ldfl_p(p) ldfl_le_p(p)
-#define ldfq_p(p) ldfq_le_p(p)
-#define stw_p(p, v) stw_le_p(p, v)
-#define stl_p(p, v) stl_le_p(p, v)
-#define stq_p(p, v) stq_le_p(p, v)
-#define stfl_p(p, v) stfl_le_p(p, v)
-#define stfq_p(p, v) stfq_le_p(p, v)
-#endif
-
-/* MMU memory access macros */
-
-#if defined(CONFIG_USER_ONLY)
-#include <assert.h>
-#include "qemu-user-types.h"
-
-/* On some host systems the guest address space is reserved on the host.
- * This allows the guest address space to be offset to a convenient location.
- */
-#if defined(CONFIG_USE_GUEST_BASE)
-extern unsigned long guest_base;
-extern int have_guest_base;
-extern unsigned long reserved_va;
-#define GUEST_BASE guest_base
-#define RESERVED_VA reserved_va
-#else
-#define GUEST_BASE 0ul
-#define RESERVED_VA 0ul
-#endif
-
-/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
-#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
-
-#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
-#define h2g_valid(x) 1
-#else
-#define h2g_valid(x) ({ \
- unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
- (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
- (!RESERVED_VA || (__guest < RESERVED_VA)); \
-})
-#endif
-
-#define h2g(x) ({ \
- unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
- /* Check if given address fits target address space */ \
- assert(h2g_valid(x)); \
- (abi_ulong)__ret; \
-})
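/* Illustrative sketch (user mode only, not part of the original header):
 * g2h() turns a guest virtual address into the host pointer backing it, and
 * h2g() maps a host pointer back, asserting that it lies inside the guest
 * address space:
 */
static inline int example_guest_poke(abi_ulong guest_addr, uint8_t val)
{
    uint8_t *host_ptr = g2h(guest_addr);   /* host view of the guest page     */
    *host_ptr = val;
    return h2g(host_ptr) == guest_addr;    /* round trip recovers the address */
}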
-
-#define saddr(x) g2h(x)
-#define laddr(x) g2h(x)
-
-#else /* !CONFIG_USER_ONLY */
-/* NOTE: we use double casts if pointers and target_ulong have
- different sizes */
-#define saddr(x) (uint8_t *)(intptr_t)(x)
-#define laddr(x) (uint8_t *)(intptr_t)(x)
-#endif
-
-#define ldub_raw(p) ldub_p(laddr((p)))
-#define ldsb_raw(p) ldsb_p(laddr((p)))
-#define lduw_raw(p) lduw_p(laddr((p)))
-#define ldsw_raw(p) ldsw_p(laddr((p)))
-#define ldl_raw(p) ldl_p(laddr((p)))
-#define ldq_raw(p) ldq_p(laddr((p)))
-#define ldfl_raw(p) ldfl_p(laddr((p)))
-#define ldfq_raw(p) ldfq_p(laddr((p)))
-#define stb_raw(p, v) stb_p(saddr((p)), v)
-#define stw_raw(p, v) stw_p(saddr((p)), v)
-#define stl_raw(p, v) stl_p(saddr((p)), v)
-#define stq_raw(p, v) stq_p(saddr((p)), v)
-#define stfl_raw(p, v) stfl_p(saddr((p)), v)
-#define stfq_raw(p, v) stfq_p(saddr((p)), v)
-
-
-#if defined(CONFIG_USER_ONLY)
-
-/* if user mode, no other memory access functions */
-#define ldub(p) ldub_raw(p)
-#define ldsb(p) ldsb_raw(p)
-#define lduw(p) lduw_raw(p)
-#define ldsw(p) ldsw_raw(p)
-#define ldl(p) ldl_raw(p)
-#define ldq(p) ldq_raw(p)
-#define ldfl(p) ldfl_raw(p)
-#define ldfq(p) ldfq_raw(p)
-#define stb(p, v) stb_raw(p, v)
-#define stw(p, v) stw_raw(p, v)
-#define stl(p, v) stl_raw(p, v)
-#define stq(p, v) stq_raw(p, v)
-#define stfl(p, v) stfl_raw(p, v)
-#define stfq(p, v) stfq_raw(p, v)
-
-#define cpu_ldub_code(env1, p) ldub_raw(p)
-#define cpu_ldsb_code(env1, p) ldsb_raw(p)
-#define cpu_lduw_code(env1, p) lduw_raw(p)
-#define cpu_ldsw_code(env1, p) ldsw_raw(p)
-#define cpu_ldl_code(env1, p) ldl_raw(p)
-#define cpu_ldq_code(env1, p) ldq_raw(p)
-
-#define cpu_ldub_data(env, addr) ldub_raw(addr)
-#define cpu_lduw_data(env, addr) lduw_raw(addr)
-#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
-#define cpu_ldl_data(env, addr) ldl_raw(addr)
-#define cpu_ldq_data(env, addr) ldq_raw(addr)
-
-#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
-#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
-#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
-#define cpu_stq_data(env, addr, data) stq_raw(addr, data)
-
-#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
-#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
-#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
-#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
-#define cpu_ldq_kernel(env, addr) ldq_raw(addr)
-
-#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
-#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
-#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
-#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
-
-#define ldub_kernel(p) ldub_raw(p)
-#define ldsb_kernel(p) ldsb_raw(p)
-#define lduw_kernel(p) lduw_raw(p)
-#define ldsw_kernel(p) ldsw_raw(p)
-#define ldl_kernel(p) ldl_raw(p)
-#define ldq_kernel(p) ldq_raw(p)
-#define ldfl_kernel(p) ldfl_raw(p)
-#define ldfq_kernel(p) ldfq_raw(p)
-#define stb_kernel(p, v) stb_raw(p, v)
-#define stw_kernel(p, v) stw_raw(p, v)
-#define stl_kernel(p, v) stl_raw(p, v)
-#define stq_kernel(p, v) stq_raw(p, v)
-#define stfl_kernel(p, v) stfl_raw(p, v)
-#define stfq_kernel(p, v) stfq_raw(p, v)
-
-#define cpu_ldub_data(env, addr) ldub_raw(addr)
-#define cpu_lduw_data(env, addr) lduw_raw(addr)
-#define cpu_ldl_data(env, addr) ldl_raw(addr)
-
-#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
-#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
-#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
-#endif /* defined(CONFIG_USER_ONLY) */
-
-/* page related stuff */
-
-#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
-#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
-#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
-
-/* ??? These should be the larger of uintptr_t and target_ulong. */
-extern uintptr_t qemu_real_host_page_size;
-extern uintptr_t qemu_host_page_size;
-extern uintptr_t qemu_host_page_mask;
-
-#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
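/* Worked example (not part of the original header): with a 4 KiB host page
 * size, qemu_host_page_mask is ~0xfff, so
 *     HOST_PAGE_ALIGN(0x12345) == 0x13000
 *     HOST_PAGE_ALIGN(0x12000) == 0x12000
 * i.e. addresses are rounded up to the next host page boundary, and
 * TARGET_PAGE_ALIGN() behaves the same way for the target page size.
 */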
-
-/* same as PROT_xxx */
-#define PAGE_READ 0x0001
-#define PAGE_WRITE 0x0002
-#define PAGE_EXEC 0x0004
-#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
-#define PAGE_VALID 0x0008
-/* original state of the write flag (used when tracking self-modifying
-   code) */
-#define PAGE_WRITE_ORG 0x0010
-#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
-/* FIXME: Code that sets/uses this is broken and needs to go away. */
-#define PAGE_RESERVED 0x0020
-#endif
-
-#if defined(CONFIG_USER_ONLY)
-void page_dump(FILE *f);
-
-typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
- abi_ulong, unsigned long);
-int walk_memory_regions(void *, walk_memory_regions_fn);
-
-int page_get_flags(target_ulong address);
-void page_set_flags(target_ulong start, target_ulong end, int flags);
-int page_check_range(target_ulong start, target_ulong len, int flags);
-#endif
-
-CPUArchState *cpu_copy(CPUArchState *env);
-CPUArchState *qemu_get_cpu(int cpu);
-
-#define CPU_DUMP_CODE 0x00010000
-#define CPU_DUMP_FPU 0x00020000 /* dump FPU register state, not just integer */
-/* dump info about TCG QEMU's condition code optimization state */
-#define CPU_DUMP_CCOP 0x00040000
-
-void cpu_dump_state(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
- int flags);
-void cpu_dump_statistics(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
- int flags);
-
-void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
-extern CPUArchState *first_cpu;
-DECLARE_TLS(CPUArchState *,cpu_single_env);
-#define cpu_single_env tls_var(cpu_single_env)
-
-/* Flags for use in ENV->INTERRUPT_PENDING.
-
- The numbers assigned here are non-sequential in order to preserve
- binary compatibility with the vmstate dump. Bit 0 (0x0001) was
- previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
- the vmstate dump. */
-
-/* External hardware interrupt pending. This is typically used for
- interrupts from devices. */
-#define CPU_INTERRUPT_HARD 0x0002
-
-/* Exit the current TB. This is typically used when some system-level device
- makes some change to the memory mapping. E.g. the a20 line change. */
-#define CPU_INTERRUPT_EXITTB 0x0004
-
-/* Halt the CPU. */
-#define CPU_INTERRUPT_HALT 0x0020
-
-/* Debug event pending. */
-#define CPU_INTERRUPT_DEBUG 0x0080
-
-/* Several target-specific external hardware interrupts. Each target/cpu.h
- should define proper names based on these defines. */
-#define CPU_INTERRUPT_TGT_EXT_0 0x0008
-#define CPU_INTERRUPT_TGT_EXT_1 0x0010
-#define CPU_INTERRUPT_TGT_EXT_2 0x0040
-#define CPU_INTERRUPT_TGT_EXT_3 0x0200
-#define CPU_INTERRUPT_TGT_EXT_4 0x1000
-
-/* Several target-specific internal interrupts. These differ from the
- preceding target-specific interrupts in that they are intended to
- originate from within the cpu itself, typically in response to some
- instruction being executed. These, therefore, are not masked while
- single-stepping within the debugger. */
-#define CPU_INTERRUPT_TGT_INT_0 0x0100
-#define CPU_INTERRUPT_TGT_INT_1 0x0400
-#define CPU_INTERRUPT_TGT_INT_2 0x0800
-#define CPU_INTERRUPT_TGT_INT_3 0x2000
-
-/* First unused bit: 0x4000. */
-
-/* The set of all bits that should be masked when single-stepping. */
-#define CPU_INTERRUPT_SSTEP_MASK \
- (CPU_INTERRUPT_HARD \
- | CPU_INTERRUPT_TGT_EXT_0 \
- | CPU_INTERRUPT_TGT_EXT_1 \
- | CPU_INTERRUPT_TGT_EXT_2 \
- | CPU_INTERRUPT_TGT_EXT_3 \
- | CPU_INTERRUPT_TGT_EXT_4)
-
-#ifndef CONFIG_USER_ONLY
-typedef void (*CPUInterruptHandler)(CPUArchState *, int);
-
-extern CPUInterruptHandler cpu_interrupt_handler;
-
-static inline void cpu_interrupt(CPUArchState *s, int mask)
-{
- cpu_interrupt_handler(s, mask);
-}
-#else /* USER_ONLY */
-void cpu_interrupt(CPUArchState *env, int mask);
-#endif /* USER_ONLY */
-
-void cpu_reset_interrupt(CPUArchState *env, int mask);
-
-void cpu_exit(CPUArchState *s);
-
-/* Breakpoint/watchpoint flags */
-#define BP_MEM_READ 0x01
-#define BP_MEM_WRITE 0x02
-#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
-#define BP_STOP_BEFORE_ACCESS 0x04
-#define BP_WATCHPOINT_HIT 0x08
-#define BP_GDB 0x10
-#define BP_CPU 0x20
-
-int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
- CPUBreakpoint **breakpoint);
-int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags);
-void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint);
-void cpu_breakpoint_remove_all(CPUArchState *env, int mask);
-int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
- int flags, CPUWatchpoint **watchpoint);
-int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr,
- target_ulong len, int flags);
-void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint);
-void cpu_watchpoint_remove_all(CPUArchState *env, int mask);
-
-#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
-#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
-#define SSTEP_NOTIMER 0x4 /* Do not use timers while single stepping */
-
-void cpu_single_step(CPUArchState *env, int enabled);
-
-#if !defined(CONFIG_USER_ONLY)
-
-/* Return the physical page corresponding to a virtual one. Use it
- only for debugging because no protection checks are done. Return -1
- if no page found. */
-hwaddr cpu_get_phys_page_debug(CPUArchState *env, target_ulong addr);
-
-/* memory API */
-
-extern int phys_ram_fd;
-extern ram_addr_t ram_size;
-
-/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
-#define RAM_PREALLOC_MASK (1 << 0)
-
-typedef struct RAMBlock {
- struct MemoryRegion *mr;
- uint8_t *host;
- ram_addr_t offset;
- ram_addr_t length;
- uint32_t flags;
- char idstr[256];
- QLIST_ENTRY(RAMBlock) next;
-#if defined(__linux__) && !defined(TARGET_S390X)
- int fd;
-#endif
-} RAMBlock;
-
-typedef struct RAMList {
- uint8_t *phys_dirty;
- QLIST_HEAD(, RAMBlock) blocks;
-} RAMList;
-extern RAMList ram_list;
-
-extern const char *mem_path;
-extern int mem_prealloc;
-
-/* Flags stored in the low bits of the TLB virtual address. These are
- defined so that fast path ram access is all zeros. */
-/* Zero if TLB entry is valid. */
-#define TLB_INVALID_MASK (1 << 3)
-/* Set if TLB entry references a clean RAM page. The iotlb entry will
- contain the page physical address. */
-#define TLB_NOTDIRTY (1 << 4)
-/* Set if TLB entry is an IO callback. */
-#define TLB_MMIO (1 << 5)
-
-void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
-ram_addr_t last_ram_offset(void);
-#endif /* !CONFIG_USER_ONLY */
-
-int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
- uint8_t *buf, int len, int is_write);
-
-#endif /* CPU_ALL_H */
+++ /dev/null
-#ifndef CPU_COMMON_H
-#define CPU_COMMON_H 1
-
-/* CPU interfaces that are target independent. */
-
-#include "hwaddr.h"
-
-#ifndef NEED_CPU_H
-#include "poison.h"
-#endif
-
-#include "bswap.h"
-#include "qemu-queue.h"
-
-#if !defined(CONFIG_USER_ONLY)
-
-enum device_endian {
- DEVICE_NATIVE_ENDIAN,
- DEVICE_BIG_ENDIAN,
- DEVICE_LITTLE_ENDIAN,
-};
-
-/* address in the RAM (different from a physical address) */
-#if defined(CONFIG_XEN_BACKEND)
-typedef uint64_t ram_addr_t;
-# define RAM_ADDR_MAX UINT64_MAX
-# define RAM_ADDR_FMT "%" PRIx64
-#else
-typedef uintptr_t ram_addr_t;
-# define RAM_ADDR_MAX UINTPTR_MAX
-# define RAM_ADDR_FMT "%" PRIxPTR
-#endif
-
-/* memory API */
-
-typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
-typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
-
-void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
-/* This should only be used for ram local to a device. */
-void *qemu_get_ram_ptr(ram_addr_t addr);
-void qemu_put_ram_ptr(void *addr);
-/* This should not be used by devices. */
-int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
-ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
-void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev);
-
-void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
- int len, int is_write);
-static inline void cpu_physical_memory_read(hwaddr addr,
- void *buf, int len)
-{
- cpu_physical_memory_rw(addr, buf, len, 0);
-}
-static inline void cpu_physical_memory_write(hwaddr addr,
- const void *buf, int len)
-{
- cpu_physical_memory_rw(addr, (void *)buf, len, 1);
-}
-void *cpu_physical_memory_map(hwaddr addr,
- hwaddr *plen,
- int is_write);
-void cpu_physical_memory_unmap(void *buffer, hwaddr len,
- int is_write, hwaddr access_len);
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
-
-bool cpu_physical_memory_is_io(hwaddr phys_addr);
-
-/* Coalesced MMIO regions are areas where write operations can be reordered.
- * This usually implies that write operations are side-effect free. This allows
- * batching, which can have a major impact on performance when using
- * virtualization.
- */
-void qemu_flush_coalesced_mmio_buffer(void);
-
-uint32_t ldub_phys(hwaddr addr);
-uint32_t lduw_le_phys(hwaddr addr);
-uint32_t lduw_be_phys(hwaddr addr);
-uint32_t ldl_le_phys(hwaddr addr);
-uint32_t ldl_be_phys(hwaddr addr);
-uint64_t ldq_le_phys(hwaddr addr);
-uint64_t ldq_be_phys(hwaddr addr);
-void stb_phys(hwaddr addr, uint32_t val);
-void stw_le_phys(hwaddr addr, uint32_t val);
-void stw_be_phys(hwaddr addr, uint32_t val);
-void stl_le_phys(hwaddr addr, uint32_t val);
-void stl_be_phys(hwaddr addr, uint32_t val);
-void stq_le_phys(hwaddr addr, uint64_t val);
-void stq_be_phys(hwaddr addr, uint64_t val);
-
-#ifdef NEED_CPU_H
-uint32_t lduw_phys(hwaddr addr);
-uint32_t ldl_phys(hwaddr addr);
-uint64_t ldq_phys(hwaddr addr);
-void stl_phys_notdirty(hwaddr addr, uint32_t val);
-void stq_phys_notdirty(hwaddr addr, uint64_t val);
-void stw_phys(hwaddr addr, uint32_t val);
-void stl_phys(hwaddr addr, uint32_t val);
-void stq_phys(hwaddr addr, uint64_t val);
-#endif
-
-void cpu_physical_memory_write_rom(hwaddr addr,
- const uint8_t *buf, int len);
-
-extern struct MemoryRegion io_mem_ram;
-extern struct MemoryRegion io_mem_rom;
-extern struct MemoryRegion io_mem_unassigned;
-extern struct MemoryRegion io_mem_notdirty;
-
-#endif
-
-#endif /* !CPU_COMMON_H */
+++ /dev/null
-/*
- * common defines for all CPUs
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef CPU_DEFS_H
-#define CPU_DEFS_H
-
-#ifndef NEED_CPU_H
-#error cpu.h included from common code
-#endif
-
-#include "config.h"
-#include <setjmp.h>
-#include <inttypes.h>
-#include <signal.h>
-#include "osdep.h"
-#include "qemu-queue.h"
-#include "hwaddr.h"
-
-#ifndef TARGET_LONG_BITS
-#error TARGET_LONG_BITS must be defined before including this header
-#endif
-
-#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
-
-typedef int16_t target_short __attribute__ ((aligned(TARGET_SHORT_ALIGNMENT)));
-typedef uint16_t target_ushort __attribute__((aligned(TARGET_SHORT_ALIGNMENT)));
-typedef int32_t target_int __attribute__((aligned(TARGET_INT_ALIGNMENT)));
-typedef uint32_t target_uint __attribute__((aligned(TARGET_INT_ALIGNMENT)));
-typedef int64_t target_llong __attribute__((aligned(TARGET_LLONG_ALIGNMENT)));
-typedef uint64_t target_ullong __attribute__((aligned(TARGET_LLONG_ALIGNMENT)));
-/* target_ulong is the type of a virtual address */
-#if TARGET_LONG_SIZE == 4
-typedef int32_t target_long __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
-typedef uint32_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
-#define TARGET_FMT_lx "%08x"
-#define TARGET_FMT_ld "%d"
-#define TARGET_FMT_lu "%u"
-#elif TARGET_LONG_SIZE == 8
-typedef int64_t target_long __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
-typedef uint64_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
-#define TARGET_FMT_lx "%016" PRIx64
-#define TARGET_FMT_ld "%" PRId64
-#define TARGET_FMT_lu "%" PRIu64
-#else
-#error TARGET_LONG_SIZE undefined
-#endif
-
-#define EXCP_INTERRUPT 0x10000 /* async interruption */
-#define EXCP_HLT 0x10001 /* hlt instruction reached */
-#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
-#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
-
-#define TB_JMP_CACHE_BITS 12
-#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
-
-/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
- addresses on the same page. The top bits are the same. This allows
- TLB invalidation to quickly clear a subset of the hash table. */
-#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
-#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
-#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
-#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
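/* Worked example (not part of the original header): with
 * TB_JMP_CACHE_BITS == 12 the jump cache has 4096 entries.
 * TB_JMP_PAGE_BITS == 6, so the 12-bit hash splits into a 6-bit page field
 * (top) and a 6-bit address field (bottom):
 *     TB_JMP_PAGE_SIZE == 64, TB_JMP_ADDR_MASK == 0x3f, TB_JMP_PAGE_MASK == 0xfc0
 * Invalidating one guest page therefore only has to clear the 64 entries
 * sharing that page field.
 */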
-
-#if !defined(CONFIG_USER_ONLY)
-#define CPU_TLB_BITS 8
-#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
-
-#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
-#define CPU_TLB_ENTRY_BITS 4
-#else
-#define CPU_TLB_ENTRY_BITS 5
-#endif
-
-typedef struct CPUTLBEntry {
- /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
- bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
- go directly to ram.
- bit 3 : indicates that the entry is invalid
- bit 2..0 : zero
- */
- target_ulong addr_read;
- target_ulong addr_write;
- target_ulong addr_code;
- /* Addend to virtual address to get host address. IO accesses
- use the corresponding iotlb value. */
- uintptr_t addend;
- /* padding to get a power of two size */
- uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
- (sizeof(target_ulong) * 3 +
- ((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) +
- sizeof(uintptr_t))];
-} CPUTLBEntry;
-
-extern int CPUTLBEntry_wrong_size[sizeof(CPUTLBEntry) == (1 << CPU_TLB_ENTRY_BITS) ? 1 : -1];
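/* The array declaration above is a compile-time assertion: if CPUTLBEntry is
 * not exactly (1 << CPU_TLB_ENTRY_BITS) bytes, the array size evaluates to -1
 * and the build fails.
 */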
-
-#define CPU_COMMON_TLB \
- /* The meaning of the MMU modes is defined in the target code. */ \
- CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
- hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
- target_ulong tlb_flush_addr; \
- target_ulong tlb_flush_mask;
-
-#else
-
-#define CPU_COMMON_TLB
-
-#endif
-
-
-#ifdef HOST_WORDS_BIGENDIAN
-typedef struct icount_decr_u16 {
- uint16_t high;
- uint16_t low;
-} icount_decr_u16;
-#else
-typedef struct icount_decr_u16 {
- uint16_t low;
- uint16_t high;
-} icount_decr_u16;
-#endif
-
-struct kvm_run;
-struct KVMState;
-struct qemu_work_item;
-
-typedef struct CPUBreakpoint {
- target_ulong pc;
- int flags; /* BP_* */
- QTAILQ_ENTRY(CPUBreakpoint) entry;
-} CPUBreakpoint;
-
-typedef struct CPUWatchpoint {
- target_ulong vaddr;
- target_ulong len_mask;
- int flags; /* BP_* */
- QTAILQ_ENTRY(CPUWatchpoint) entry;
-} CPUWatchpoint;
-
-#define CPU_TEMP_BUF_NLONGS 128
-#define CPU_COMMON \
- struct TranslationBlock *current_tb; /* currently executing TB */ \
- /* soft mmu support */ \
- /* in order to avoid passing too many arguments to the MMIO \
- helpers, we store some rarely used information in the CPU \
-       context */ \
- uintptr_t mem_io_pc; /* host pc at which the memory was \
- accessed */ \
- target_ulong mem_io_vaddr; /* target virtual addr at which the \
- memory was accessed */ \
- uint32_t halted; /* Nonzero if the CPU is in suspend state */ \
- uint32_t interrupt_request; \
- volatile sig_atomic_t exit_request; \
- CPU_COMMON_TLB \
- struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
- /* buffer for temporaries in the code generator */ \
- long temp_buf[CPU_TEMP_BUF_NLONGS]; \
- \
- int64_t icount_extra; /* Instructions until next timer event. */ \
- /* Number of cycles left, with interrupt flag in high bit. \
- This allows a single read-compare-cbranch-write sequence to test \
- for both decrementer underflow and exceptions. */ \
- union { \
- uint32_t u32; \
- icount_decr_u16 u16; \
- } icount_decr; \
- uint32_t can_do_io; /* nonzero if memory mapped IO is safe. */ \
- \
- /* from this point: preserved by CPU reset */ \
- /* ice debug support */ \
- QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
- int singlestep_enabled; \
- \
- QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
- CPUWatchpoint *watchpoint_hit; \
- \
- struct GDBRegisterState *gdb_regs; \
- \
- /* Core interrupt code */ \
- jmp_buf jmp_env; \
- int exception_index; \
- \
- CPUArchState *next_cpu; /* next CPU sharing TB cache */ \
- int cpu_index; /* CPU index (informative) */ \
- uint32_t host_tid; /* host thread ID */ \
- int numa_node; /* NUMA node this CPU belongs to */ \
- int nr_cores; /* number of cores within this CPU package */ \
- int nr_threads;/* number of threads within this CPU */ \
- int running; /* Nonzero if cpu is currently running (user mode). */ \
- /* user data */ \
- void *opaque; \
- \
- const char *cpu_model_str; \
- struct KVMState *kvm_state; \
- struct kvm_run *kvm_run; \
- int kvm_fd; \
- int kvm_vcpu_dirty;
-
-#endif
#include "monitor.h"
#include "sysemu.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "qmp-commands.h"
#include "config.h"
#include "cpu.h"
-#include "exec-all.h"
-#include "memory.h"
-#include "exec-memory.h"
+#include "exec/exec-all.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
-#include "cputlb.h"
+#include "exec/cputlb.h"
-#include "memory-internal.h"
+#include "exec/memory-internal.h"
//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK
#define SOFTMMU_CODE_ACCESS
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#undef env
+++ /dev/null
-/*
- * Common CPU TLB handling
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef CPUTLB_H
-#define CPUTLB_H
-
-#if !defined(CONFIG_USER_ONLY)
-/* cputlb.c */
-void tlb_protect_code(ram_addr_t ram_addr);
-void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
- target_ulong vaddr);
-void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
- uintptr_t length);
-MemoryRegionSection *phys_page_find(struct AddressSpaceDispatch *d,
- hwaddr index);
-void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length);
-void tlb_set_dirty(CPUArchState *env, target_ulong vaddr);
-extern int tlb_flush_count;
-
-/* exec.c */
-void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr);
-hwaddr memory_region_section_get_iotlb(CPUArchState *env,
- MemoryRegionSection *section,
- target_ulong vaddr,
- hwaddr paddr,
- int prot,
- target_ulong *address);
-bool memory_region_is_unassigned(MemoryRegion *mr);
-
-#endif
-#endif
+++ /dev/null
-/* Helper file for declaring TCG helper functions.
- Should be included at the start and end of target-foo/helper.h.
-
- Targets should use DEF_HELPER_N and DEF_HELPER_FLAGS_N to declare helper
- functions. Names should be specified without the helper_ prefix, and
- the return and argument types specified. 3 basic types are understood
- (i32, i64 and ptr). Additional aliases are provided for convenience and
- to match the types used by the C helper implementation.
-
- The target helper.h should be included in all files that use/define
- helper functions. This will ensure that function prototypes are
- consistent. In addition it should be included two extra times by
- helper.c, defining:
- GEN_HELPER 1 to produce op generation functions (gen_helper_*)
- GEN_HELPER 2 to do runtime registration of helper functions.
- */
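/* Illustrative sketch of the expansion (the helper name "add_sat" is
 * hypothetical, not from this patch).  A target writes, in its helper.h:
 *
 *     DEF_HELPER_2(add_sat, i32, i32, i32)
 *
 * Included with GEN_HELPER undefined this yields the C prototype
 *
 *     uint32_t helper_add_sat(uint32_t, uint32_t);
 *
 * and included with GEN_HELPER defined to 1 it instead generates
 *
 *     static inline void gen_helper_add_sat(TCGv_i32 retval,
 *                                           TCGv_i32 arg1, TCGv_i32 arg2);
 */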
-
-#ifndef DEF_HELPER_H
-#define DEF_HELPER_H 1
-
-#define HELPER(name) glue(helper_, name)
-
-#define GET_TCGV_i32 GET_TCGV_I32
-#define GET_TCGV_i64 GET_TCGV_I64
-#define GET_TCGV_ptr GET_TCGV_PTR
-
-/* Some types that make sense in C, but not for TCG. */
-#define dh_alias_i32 i32
-#define dh_alias_s32 i32
-#define dh_alias_int i32
-#define dh_alias_i64 i64
-#define dh_alias_s64 i64
-#define dh_alias_f32 i32
-#define dh_alias_f64 i64
-#if TARGET_LONG_BITS == 32
-#define dh_alias_tl i32
-#else
-#define dh_alias_tl i64
-#endif
-#define dh_alias_ptr ptr
-#define dh_alias_void void
-#define dh_alias_noreturn noreturn
-#define dh_alias_env ptr
-#define dh_alias(t) glue(dh_alias_, t)
-
-#define dh_ctype_i32 uint32_t
-#define dh_ctype_s32 int32_t
-#define dh_ctype_int int
-#define dh_ctype_i64 uint64_t
-#define dh_ctype_s64 int64_t
-#define dh_ctype_f32 float32
-#define dh_ctype_f64 float64
-#define dh_ctype_tl target_ulong
-#define dh_ctype_ptr void *
-#define dh_ctype_void void
-#define dh_ctype_noreturn void QEMU_NORETURN
-#define dh_ctype_env CPUArchState *
-#define dh_ctype(t) dh_ctype_##t
-
-/* We can't use glue() here because it falls foul of C preprocessor
- recursive expansion rules. */
-#define dh_retvar_decl0_void void
-#define dh_retvar_decl0_noreturn void
-#define dh_retvar_decl0_i32 TCGv_i32 retval
-#define dh_retvar_decl0_i64 TCGv_i64 retval
-#define dh_retvar_decl0_ptr TCGv_ptr retval
-#define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t))
-
-#define dh_retvar_decl_void
-#define dh_retvar_decl_noreturn
-#define dh_retvar_decl_i32 TCGv_i32 retval,
-#define dh_retvar_decl_i64 TCGv_i64 retval,
-#define dh_retvar_decl_ptr TCGv_ptr retval,
-#define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))
-
-#define dh_retvar_void TCG_CALL_DUMMY_ARG
-#define dh_retvar_noreturn TCG_CALL_DUMMY_ARG
-#define dh_retvar_i32 GET_TCGV_i32(retval)
-#define dh_retvar_i64 GET_TCGV_i64(retval)
-#define dh_retvar_ptr GET_TCGV_ptr(retval)
-#define dh_retvar(t) glue(dh_retvar_, dh_alias(t))
-
-#define dh_is_64bit_void 0
-#define dh_is_64bit_noreturn 0
-#define dh_is_64bit_i32 0
-#define dh_is_64bit_i64 1
-#define dh_is_64bit_ptr (TCG_TARGET_REG_BITS == 64)
-#define dh_is_64bit(t) glue(dh_is_64bit_, dh_alias(t))
-
-#define dh_is_signed_void 0
-#define dh_is_signed_noreturn 0
-#define dh_is_signed_i32 0
-#define dh_is_signed_s32 1
-#define dh_is_signed_i64 0
-#define dh_is_signed_s64 1
-#define dh_is_signed_f32 0
-#define dh_is_signed_f64 0
-#define dh_is_signed_tl 0
-#define dh_is_signed_int 1
-/* ??? This is highly specific to the host cpu. There are even special
- extension instructions that may be required, e.g. ia64's addp4. But
- for now we don't support any 64-bit targets with 32-bit pointers. */
-#define dh_is_signed_ptr 0
-#define dh_is_signed_env dh_is_signed_ptr
-#define dh_is_signed(t) dh_is_signed_##t
-
-#define dh_sizemask(t, n) \
- sizemask |= dh_is_64bit(t) << (n*2); \
- sizemask |= dh_is_signed(t) << (n*2+1)
-
-#define dh_arg(t, n) \
- args[n - 1] = glue(GET_TCGV_, dh_alias(t))(glue(arg, n)); \
- dh_sizemask(t, n)
-
-#define dh_arg_decl(t, n) glue(TCGv_, dh_alias(t)) glue(arg, n)
-
-
-#define DEF_HELPER_0(name, ret) \
- DEF_HELPER_FLAGS_0(name, 0, ret)
-#define DEF_HELPER_1(name, ret, t1) \
- DEF_HELPER_FLAGS_1(name, 0, ret, t1)
-#define DEF_HELPER_2(name, ret, t1, t2) \
- DEF_HELPER_FLAGS_2(name, 0, ret, t1, t2)
-#define DEF_HELPER_3(name, ret, t1, t2, t3) \
- DEF_HELPER_FLAGS_3(name, 0, ret, t1, t2, t3)
-#define DEF_HELPER_4(name, ret, t1, t2, t3, t4) \
- DEF_HELPER_FLAGS_4(name, 0, ret, t1, t2, t3, t4)
-#define DEF_HELPER_5(name, ret, t1, t2, t3, t4, t5) \
- DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5)
-
-/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
-
-#endif /* DEF_HELPER_H */
-
-#ifndef GEN_HELPER
-/* Function prototypes. */
-
-#define DEF_HELPER_FLAGS_0(name, flags, ret) \
-dh_ctype(ret) HELPER(name) (void);
-
-#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1));
-
-#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2));
-
-#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3));
-
-#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
- dh_ctype(t4));
-
-#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
- dh_ctype(t4), dh_ctype(t5));
-
-#undef GEN_HELPER
-#define GEN_HELPER -1
-
-#elif GEN_HELPER == 1
-/* Gen functions. */
-
-#define DEF_HELPER_FLAGS_0(name, flags, ret) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
-{ \
- int sizemask; \
- sizemask = dh_is_64bit(ret); \
- tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 0, NULL); \
-}
-
-#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1)) \
-{ \
- TCGArg args[1]; \
- int sizemask = 0; \
- dh_sizemask(ret, 0); \
- dh_arg(t1, 1); \
- tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 1, args); \
-}
-
-#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1), \
- dh_arg_decl(t2, 2)) \
-{ \
- TCGArg args[2]; \
- int sizemask = 0; \
- dh_sizemask(ret, 0); \
- dh_arg(t1, 1); \
- dh_arg(t2, 2); \
- tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 2, args); \
-}
-
-#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1), \
- dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
-{ \
- TCGArg args[3]; \
- int sizemask = 0; \
- dh_sizemask(ret, 0); \
- dh_arg(t1, 1); \
- dh_arg(t2, 2); \
- dh_arg(t3, 3); \
- tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 3, args); \
-}
-
-#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1), \
- dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
-{ \
- TCGArg args[4]; \
- int sizemask = 0; \
- dh_sizemask(ret, 0); \
- dh_arg(t1, 1); \
- dh_arg(t2, 2); \
- dh_arg(t3, 3); \
- dh_arg(t4, 4); \
- tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 4, args); \
-}
-
-#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
- dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
-{ \
- TCGArg args[5]; \
- int sizemask = 0; \
- dh_sizemask(ret, 0); \
- dh_arg(t1, 1); \
- dh_arg(t2, 2); \
- dh_arg(t3, 3); \
- dh_arg(t4, 4); \
- dh_arg(t5, 5); \
- tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 5, args); \
-}
-
-#undef GEN_HELPER
-#define GEN_HELPER -1
-
-#elif GEN_HELPER == 2
-/* Register helpers. */
-
-#define DEF_HELPER_FLAGS_0(name, flags, ret) \
-tcg_register_helper(HELPER(name), #name);
-
-#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
-DEF_HELPER_FLAGS_0(name, flags, ret)
-
-#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
-DEF_HELPER_FLAGS_0(name, flags, ret)
-
-#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
-DEF_HELPER_FLAGS_0(name, flags, ret)
-
-#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
-DEF_HELPER_FLAGS_0(name, flags, ret)
-
-#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
-DEF_HELPER_FLAGS_0(name, flags, ret)
-
-#undef GEN_HELPER
-#define GEN_HELPER -1
-
-#elif GEN_HELPER == -1
-/* Undefine macros. */
-
-#undef DEF_HELPER_FLAGS_0
-#undef DEF_HELPER_FLAGS_1
-#undef DEF_HELPER_FLAGS_2
-#undef DEF_HELPER_FLAGS_3
-#undef DEF_HELPER_FLAGS_4
-#undef DEF_HELPER_FLAGS_5
-#undef GEN_HELPER
-
-#endif
#define DMA_H
#include <stdio.h>
-#include "memory.h"
+#include "exec/memory.h"
#include "hw/hw.h"
#include "block/block.h"
#include "kvm.h"
#include "qemu-common.h"
#include "elf.h"
#include "cpu.h"
-#include "cpu-all.h"
-#include "hwaddr.h"
+#include "exec/cpu-all.h"
+#include "exec/hwaddr.h"
#include "monitor.h"
#include "kvm.h"
#include "dump.h"
#include "memory_mapping.h"
#include "qapi/error.h"
#include "qmp-commands.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
{
+++ /dev/null
-/*
- * internal execution defines for qemu
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _EXEC_ALL_H_
-#define _EXEC_ALL_H_
-
-#include "qemu-common.h"
-
-/* allow inspection of translation results - the slowdown should be negligible, so we leave it enabled */
-#define DEBUG_DISAS
-
-/* Page tracking code uses ram addresses in system mode, and virtual
- addresses in userspace mode. Define tb_page_addr_t to be an appropriate
- type. */
-#if defined(CONFIG_USER_ONLY)
-typedef abi_ulong tb_page_addr_t;
-#else
-typedef ram_addr_t tb_page_addr_t;
-#endif
-
-/* is_jmp field values */
-#define DISAS_NEXT 0 /* next instruction can be analyzed */
-#define DISAS_JUMP 1 /* only pc was modified dynamically */
-#define DISAS_UPDATE 2 /* cpu state was modified dynamically */
-#define DISAS_TB_JUMP 3 /* only pc was modified statically */
-
-struct TranslationBlock;
-typedef struct TranslationBlock TranslationBlock;
-
-/* XXX: make safe guess about sizes */
-#define MAX_OP_PER_INSTR 208
-
-#if HOST_LONG_BITS == 32
-#define MAX_OPC_PARAM_PER_ARG 2
-#else
-#define MAX_OPC_PARAM_PER_ARG 1
-#endif
-#define MAX_OPC_PARAM_IARGS 5
-#define MAX_OPC_PARAM_OARGS 1
-#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
-
-/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
- * and up to 4 + N parameters on 64-bit archs
- * (N = number of input arguments + output arguments). */
-#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
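/* Worked example (not part of the original header): MAX_OPC_PARAM_ARGS is
 * 5 + 1 == 6, so a call op needs at most 4 + 2 * 6 == 16 parameters when
 * MAX_OPC_PARAM_PER_ARG == 2 (32-bit hosts) and 4 + 1 * 6 == 10 when it is 1
 * (64-bit hosts).
 */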
-#define OPC_BUF_SIZE 640
-#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
-
-/* Maximum size a TCG op can expand to. This is complicated because a
- single op may require several host instructions and register reloads.
- For now take a wild guess at 192 bytes, which should allow at least
- a couple of fixup instructions per argument. */
-#define TCG_MAX_OP_SIZE 192
-
-#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
-
-#include "qemu-log.h"
-
-void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
-void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
-void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
- int pc_pos);
-
-void cpu_gen_init(void);
-int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
- int *gen_code_size_ptr);
-bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);
-
-void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
-void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
-TranslationBlock *tb_gen_code(CPUArchState *env,
- target_ulong pc, target_ulong cs_base, int flags,
- int cflags);
-void cpu_exec_init(CPUArchState *env);
-void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
-int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
- int is_cpu_write_access);
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
- int is_cpu_write_access);
-#if !defined(CONFIG_USER_ONLY)
-/* cputlb.c */
-void tlb_flush_page(CPUArchState *env, target_ulong addr);
-void tlb_flush(CPUArchState *env, int flush_global);
-void tlb_set_page(CPUArchState *env, target_ulong vaddr,
- hwaddr paddr, int prot,
- int mmu_idx, target_ulong size);
-void tb_invalidate_phys_addr(hwaddr addr);
-#else
-static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
-{
-}
-
-static inline void tlb_flush(CPUArchState *env, int flush_global)
-{
-}
-#endif
-
-#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */
-
-#define CODE_GEN_PHYS_HASH_BITS 15
-#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)
-
-/* estimated block size for TB allocation */
-/* XXX: use a per code average code fragment size and modulate it
- according to the host CPU */
-#if defined(CONFIG_SOFTMMU)
-#define CODE_GEN_AVG_BLOCK_SIZE 128
-#else
-#define CODE_GEN_AVG_BLOCK_SIZE 64
-#endif
-
-#if defined(__arm__) || defined(_ARCH_PPC) \
- || defined(__x86_64__) || defined(__i386__) \
- || defined(__sparc__) \
- || defined(CONFIG_TCG_INTERPRETER)
-#define USE_DIRECT_JUMP
-#endif
-
-struct TranslationBlock {
- target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
- target_ulong cs_base; /* CS base for this block */
- uint64_t flags; /* flags defining in which context the code was generated */
- uint16_t size; /* size of target code for this block (1 <=
- size <= TARGET_PAGE_SIZE) */
- uint16_t cflags; /* compile flags */
-#define CF_COUNT_MASK 0x7fff
-#define CF_LAST_IO 0x8000 /* Last insn may be an IO access. */
-
- uint8_t *tc_ptr; /* pointer to the translated code */
- /* next matching tb for physical address. */
- struct TranslationBlock *phys_hash_next;
- /* first and second physical page containing code. The lower bit
- of the pointer tells the index in page_next[] */
- struct TranslationBlock *page_next[2];
- tb_page_addr_t page_addr[2];
-
- /* the following data are used to directly call another TB from
- the code of this one. */
- uint16_t tb_next_offset[2]; /* offset of original jump target */
-#ifdef USE_DIRECT_JUMP
- uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
-#else
- uintptr_t tb_next[2]; /* address of jump generated code */
-#endif
- /* list of TBs jumping to this one. This is a circular list using
- the two least significant bits of the pointers to tell what is
- the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
- jmp_first */
- struct TranslationBlock *jmp_next[2];
- struct TranslationBlock *jmp_first;
- uint32_t icount;
-};
-
-static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
-{
- target_ulong tmp;
- tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
- return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
-}
-
-static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
-{
- target_ulong tmp;
- tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
- return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
- | (tmp & TB_JMP_ADDR_MASK));
-}
-
-static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
-{
- return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
-}
-
-void tb_free(TranslationBlock *tb);
-void tb_flush(CPUArchState *env);
-void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
-
-extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
-
-#if defined(USE_DIRECT_JUMP)
-
-#if defined(CONFIG_TCG_INTERPRETER)
-static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
-{
- /* patch the branch destination */
- *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
- /* no need to flush icache explicitly */
-}
-#elif defined(_ARCH_PPC)
-void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
-#define tb_set_jmp_target1 ppc_tb_set_jmp_target
-#elif defined(__i386__) || defined(__x86_64__)
-static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
-{
- /* patch the branch destination */
- *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
- /* no need to flush icache explicitly */
-}
-#elif defined(__arm__)
-static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
-{
-#if !QEMU_GNUC_PREREQ(4, 1)
- register unsigned long _beg __asm ("a1");
- register unsigned long _end __asm ("a2");
- register unsigned long _flg __asm ("a3");
-#endif
-
- /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
- *(uint32_t *)jmp_addr =
- (*(uint32_t *)jmp_addr & ~0xffffff)
- | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);
-
-#if QEMU_GNUC_PREREQ(4, 1)
- __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
-#else
- /* flush icache */
- _beg = jmp_addr;
- _end = jmp_addr + 4;
- _flg = 0;
- __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
-#endif
-}
-#elif defined(__sparc__)
-void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
-#else
-#error tb_set_jmp_target1 is missing
-#endif
-
-static inline void tb_set_jmp_target(TranslationBlock *tb,
- int n, uintptr_t addr)
-{
- uint16_t offset = tb->tb_jmp_offset[n];
- tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
-}
-
-#else
-
-/* set the jump target */
-static inline void tb_set_jmp_target(TranslationBlock *tb,
- int n, uintptr_t addr)
-{
- tb->tb_next[n] = addr;
-}
-
-#endif
-
-static inline void tb_add_jump(TranslationBlock *tb, int n,
- TranslationBlock *tb_next)
-{
- /* NOTE: this test is only needed for thread safety */
- if (!tb->jmp_next[n]) {
- /* patch the native jump address */
- tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
-
- /* add in TB jmp circular list */
- tb->jmp_next[n] = tb_next->jmp_first;
- tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
- }
-}
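/* Illustrative example (not part of the original header): after
 * tb_add_jump(tb, 1, next), next->jmp_first == (TranslationBlock *)((uintptr_t)tb | 1),
 * i.e. the two low bits of each list pointer record which outgoing jump slot
 * of the referring TB points at this one, as described in the struct above.
 */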
-
-#include "qemu-lock.h"
-
-extern spinlock_t tb_lock;
-
-extern int tb_invalidated_flag;
-
-/* The return address may point to the start of the next instruction.
- Subtracting one gets us the call instruction itself. */
-#if defined(CONFIG_TCG_INTERPRETER)
-/* Softmmu, Alpha, MIPS, SH4 and SPARC user mode emulations call GETPC().
- For all others, GETPC remains undefined (which makes TCI a little faster). */
-# if defined(CONFIG_SOFTMMU) || \
- defined(TARGET_ALPHA) || defined(TARGET_MIPS) || \
- defined(TARGET_SH4) || defined(TARGET_SPARC)
-extern uintptr_t tci_tb_ptr;
-# define GETPC() tci_tb_ptr
-# endif
-#elif defined(__s390__) && !defined(__s390x__)
-# define GETPC() \
- (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
-#elif defined(__arm__)
-/* Thumb return addresses have the low bit set, so we need to subtract two.
- This is still safe in ARM mode because instructions are 4 bytes. */
-# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
-#else
-# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
-#endif
-
-#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
-/* The qemu_ld/st optimization splits code generation into a fast and a slow
- path; an MMU helper called from the slow path therefore needs special
- handling to recover the fast path's pc without any additional argument.
- The trick is to embed the fast path pc into the slow path.
-
- Code flow in slow path:
- (1) pre-process
- (2) call MMU helper
- (3) jump to (5)
- (4) fast path information (implementation specific)
- (5) post-process (e.g. stack adjust)
- (6) jump to corresponding code of the next of fast path
- */
-# if defined(__i386__) || defined(__x86_64__)
-/* To avoid confusing the disassembler, a long jmp is used to embed the fast
- path pc; its destination is the code that follows the fast path, although
- this jmp is never executed.
-
- call MMU helper
- jmp POST_PROC (2byte) <- GETRA()
- jmp NEXT_CODE (5byte)
- POST_PROCESS ... <- GETRA() + 7
- */
-# define GETRA() ((uintptr_t)__builtin_return_address(0))
-# define GETPC_LDST() ((uintptr_t)(GETRA() + 7 + \
- *(int32_t *)((void *)GETRA() + 3) - 1))
-# elif defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
-# define GETRA() ((uintptr_t)__builtin_return_address(0))
-# define GETPC_LDST() ((uintptr_t) ((*(int32_t *)(GETRA() - 4)) - 1))
-# else
-# error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
-# endif
-bool is_tcg_gen_code(uintptr_t pc_ptr);
-# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
-#else
-# define GETPC_EXT() GETPC()
-#endif
-
-#if !defined(CONFIG_USER_ONLY)
-
-struct MemoryRegion *iotlb_to_region(hwaddr index);
-uint64_t io_mem_read(struct MemoryRegion *mr, hwaddr addr,
- unsigned size);
-void io_mem_write(struct MemoryRegion *mr, hwaddr addr,
- uint64_t value, unsigned size);
-
-void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
- uintptr_t retaddr);
-
-#include "softmmu_defs.h"
-
-#define ACCESS_TYPE (NB_MMU_MODES + 1)
-#define MEMSUFFIX _code
-
-#define DATA_SIZE 1
-#include "softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "softmmu_header.h"
-
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-
-#endif
-
-#if defined(CONFIG_USER_ONLY)
-static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
-{
- return addr;
-}
-#else
-/* cputlb.c */
-tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
-#endif
-
-typedef void (CPUDebugExcpHandler)(CPUArchState *env);
-
-void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
-
-/* vl.c */
-extern int singlestep;
-
-/* cpu-exec.c */
-extern volatile sig_atomic_t exit_request;
-
-/* Deterministic execution requires that IO only be performed on the last
- instruction of a TB so that interrupts take effect immediately. */
-static inline int can_do_io(CPUArchState *env)
-{
- if (!use_icount) {
- return 1;
- }
- /* If not executing code then assume we are ok. */
- if (!env->current_tb) {
- return 1;
- }
- return env->can_do_io != 0;
-}
-
-#endif
+++ /dev/null
-/*
- * Internal memory management interfaces
- *
- * Copyright 2011 Red Hat, Inc. and/or its affiliates
- *
- * Authors:
- * Avi Kivity <avi@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- */
-
-#ifndef EXEC_MEMORY_H
-#define EXEC_MEMORY_H
-
-/*
- * Internal interfaces between memory.c/exec.c/vl.c. Do not #include unless
- * you're one of them.
- */
-
-#include "memory.h"
-
-#ifndef CONFIG_USER_ONLY
-
-/* Get the root memory region. This interface should only be used temporarily
- * until a proper bus interface is available.
- */
-MemoryRegion *get_system_memory(void);
-
-/* Get the root I/O port region. This interface should only be used
- * temporarily until a proper bus interface is available.
- */
-MemoryRegion *get_system_io(void);
-
-extern AddressSpace address_space_memory;
-extern AddressSpace address_space_io;
-
-#endif
-
-#endif
#include "hw/xen.h"
#include "qemu-timer.h"
#include "qemu-config.h"
-#include "memory.h"
+#include "exec/memory.h"
#include "dma.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "trace.h"
#endif
-#include "cputlb.h"
+#include "exec/cputlb.h"
#include "translate-all.h"
-#include "memory-internal.h"
+#include "exec/memory-internal.h"
//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE
#include "monitor.h"
#include "qemu-char.h"
#include "sysemu.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#endif
#define MAX_PACKET_LENGTH 4096
+++ /dev/null
-#ifndef GDBSTUB_H
-#define GDBSTUB_H
-
-#define DEFAULT_GDBSTUB_PORT "1234"
-
-/* GDB breakpoint/watchpoint types */
-#define GDB_BREAKPOINT_SW 0
-#define GDB_BREAKPOINT_HW 1
-#define GDB_WATCHPOINT_WRITE 2
-#define GDB_WATCHPOINT_READ 3
-#define GDB_WATCHPOINT_ACCESS 4
-
-#ifdef NEED_CPU_H
-typedef void (*gdb_syscall_complete_cb)(CPUArchState *env,
- target_ulong ret, target_ulong err);
-
-void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
-int use_gdb_syscalls(void);
-void gdb_set_stop_cpu(CPUArchState *env);
-void gdb_exit(CPUArchState *, int);
-#ifdef CONFIG_USER_ONLY
-int gdb_queuesig (void);
-int gdb_handlesig (CPUArchState *, int);
-void gdb_signalled(CPUArchState *, int);
-void gdbserver_fork(CPUArchState *);
-#endif
-/* Get or set a register. Returns the size of the register. */
-typedef int (*gdb_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);
-void gdb_register_coprocessor(CPUArchState *env,
- gdb_reg_cb get_reg, gdb_reg_cb set_reg,
- int num_regs, const char *xml, int g_pos);
-
-static inline int cpu_index(CPUArchState *env)
-{
-#if defined(CONFIG_USER_ONLY) && defined(CONFIG_USE_NPTL)
- return env->host_tid;
-#else
- return env->cpu_index + 1;
-#endif
-}
-
-#endif
-
-#ifdef CONFIG_USER_ONLY
-int gdbserver_start(int);
-#else
-int gdbserver_start(const char *port);
-#endif
-
-/* in gdbstub-xml.c, generated by scripts/feature_to_c.sh */
-extern const char *const xml_builtin[][2];
-
-#endif
+++ /dev/null
-#ifndef GEN_ICOUNT_H
-#define GEN_ICOUNT_H 1
-
-#include "qemu-timer.h"
-
-/* Helpers for instruction counting code generation. */
-
-static TCGArg *icount_arg;
-static int icount_label;
-
-static inline void gen_icount_start(void)
-{
- TCGv_i32 count;
-
- if (!use_icount)
- return;
-
- icount_label = gen_new_label();
- count = tcg_temp_local_new_i32();
- tcg_gen_ld_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u32));
- /* This is a horrid hack to allow fixing up the value later. */
- icount_arg = tcg_ctx.gen_opparam_ptr + 1;
- tcg_gen_subi_i32(count, count, 0xdeadbeef);
-
- tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
- tcg_gen_st16_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u16.low));
- tcg_temp_free_i32(count);
-}
-
-static void gen_icount_end(TranslationBlock *tb, int num_insns)
-{
- if (use_icount) {
- *icount_arg = num_insns;
- gen_set_label(icount_label);
- tcg_gen_exit_tb((tcg_target_long)tb + 2);
- }
-}
-
-static inline void gen_io_start(void)
-{
- TCGv_i32 tmp = tcg_const_i32(1);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io));
- tcg_temp_free_i32(tmp);
-}
-
-static inline void gen_io_end(void)
-{
- TCGv_i32 tmp = tcg_const_i32(0);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io));
- tcg_temp_free_i32(tmp);
-}
-
-#endif
#include "sysemu.h"
#include "acpi.h"
#include "kvm.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "ich9.h"
#include "acpi.h"
#include "sysemu.h"
#include "range.h"
-#include "ioport.h"
+#include "exec/ioport.h"
#include "fw_cfg.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
//#define DEBUG
*/
#include "cpu.h"
-#include "exec-all.h"
+#include "exec/exec-all.h"
#include "hw.h"
#include "devices.h"
#include "sysemu.h"
#include "alpha_sys.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define TYPE_TYPHOON_PCI_HOST_BRIDGE "typhoon-pcihost"
#include "boards.h"
#include "loader.h"
#include "elf.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define KERNEL_LOAD_ADDR 0x10000
#define AN5206_MBAR_ADDR 0x10000000
#include "pci/pci_bus.h"
#include "apb_pci.h"
#include "sysemu.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
/* debug APB */
//#define DEBUG_APB
#ifndef QEMU_APIC_INTERNAL_H
#define QEMU_APIC_INTERNAL_H
-#include "memory.h"
+#include "exec/memory.h"
#include "sysbus.h"
#include "qemu-timer.h"
#include <stdint.h>
#include "qemu-common.h"
#include "hw.h"
-#include "memory.h"
+#include "exec/memory.h"
typedef void (*apm_ctrl_changed_t)(uint32_t val, void *arg);
#ifndef ARM_MISC_H
#define ARM_MISC_H 1
-#include "memory.h"
+#include "exec/memory.h"
#include "hw/irq.h"
/* The CPU is also modeled as an interrupt controller. */
#include "sysbus.h"
#include "qemu-timer.h"
#include "arm-misc.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "arm_gic_internal.h"
typedef struct {
#include "elf.h"
#include "cris-boot.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define D(x)
#define DNAND(x)
#include "mips.h"
#include "pci/pci_host.h"
#include "sysemu.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
//#define DEBUG_BONITO
#include "arm-misc.h"
#include "flash.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
static struct arm_boot_info collie_binfo = {
.loader_start = SA_SDCS0,
#include "boards.h"
#include "loader.h"
#include "elf.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define KERNEL_LOAD_ADDR 0x10000
#include <stdio.h>
#include <sys/time.h>
#include "hw.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "qemu-common.h"
#include "sysemu.h"
#define EXYNOS4210_H_
#include "qemu-common.h"
-#include "memory.h"
+#include "exec/memory.h"
#define EXYNOS4210_NCPUS 2
*/
#include "qemu-common.h"
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#include "sysbus.h"
#include "ui/console.h"
#include "ui/pixel_ops.h"
#include "sysbus.h"
#include "net/net.h"
#include "arm-misc.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "exynos4210.h"
#include "boards.h"
/* NOR flash devices */
-#include "memory.h"
+#include "exec/memory.h"
typedef struct pflash_t pflash_t;
#ifndef QEMU_FRAMEBUFFER_H
#define QEMU_FRAMEBUFFER_H
-#include "memory.h"
+#include "exec/memory.h"
/* Framebuffer device helper routines. */
#include "pci/pci.h"
#include "pci/pci_host.h"
#include "pc.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
//#define DEBUG
#include "devices.h"
#include "boards.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
static const int sector_len = 128 * 1024;
#include "boards.h"
#include "sysbus.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define SMP_BOOT_ADDR 0x100
#define SMP_BOOT_REG 0x40
#include "qemu-common.h"
#if !defined(CONFIG_USER_ONLY) && !defined(NEED_CPU_H)
-#include "cpu-common.h"
+#include "exec/cpu-common.h"
#endif
-#include "ioport.h"
+#include "exec/ioport.h"
#include "irq.h"
#include "block/aio.h"
#include "qemu-file.h"
#include "isa.h"
#include "pci/pci.h"
-#include "memory.h"
+#include "exec/memory.h"
#define MAX_IDE_DEVS 2
#include "monitor.h"
#include "dma.h"
-#include "cpu-common.h"
+#include "exec/cpu-common.h"
#include "internal.h"
#include <hw/ide/pci.h>
#include <hw/ide/ahci.h>
*/
#include <hw/ide.h>
#include <hw/isa.h>
-#include "iorange.h"
+#include "exec/iorange.h"
#include "dma.h"
#include "sysemu.h"
#include "hw/block-common.h"
#include "boards.h"
#include "arm-misc.h"
#include "net/net.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "sysemu.h"
typedef struct {
#define QEMU_IOAPIC_INTERNAL_H
#include "hw.h"
-#include "memory.h"
+#include "exec/memory.h"
#include "sysbus.h"
#define MAX_IOAPICS 1
#include "sysbus.h"
#include "sysemu.h"
#include "isa.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
static ISABus *isabus;
hwaddr isa_mem_base = 0;
/* ISA bus */
-#include "ioport.h"
-#include "memory.h"
+#include "exec/ioport.h"
+#include "exec/memory.h"
#include "qdev.h"
#define ISA_NUM_IRQS 16
#include "hw.h"
#include "isa.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
static void isa_mmio_writeb (void *opaque, hwaddr addr,
uint32_t val)
*/
#include "sysbus.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "hw.h"
#include "arm-misc.h"
#include "devices.h"
#include "loader.h"
#include "elf.h"
#include "trace.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "grlib.h"
#include "elf.h"
#include "lm32_hwsetup.h"
#include "lm32.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
typedef struct {
LM32CPU *cpu;
#include "uboot_image.h"
#include "loader.h"
#include "fw_cfg.h"
-#include "memory.h"
-#include "exec-memory.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
#include <zlib.h>
#include "acpi_ich9.h"
#include "pam.h"
#include "pci/pci_bus.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "sysemu.h"
static int ich9_lpc_sci_irq(ICH9LPCState *lpc);
#include "sysemu.h"
#include "sysbus.h"
#include "isa.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
//#define DEBUG_NVRAM
#ifndef HW_MAC_DBDMA_H
#define HW_MAC_DBDMA_H 1
-#include "memory.h"
+#include "exec/memory.h"
typedef struct DBDMA_io DBDMA_io;
#include "flash.h"
#include "blockdev.h"
#include "sysbus.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
/* Device addresses */
#define MST_FPGA_PHYS 0x08000000
#include "qemu-timer.h"
#include "ptimer.h"
#include "sysemu.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
/* General purpose timer module. */
typedef struct {
#include "boards.h"
#include "loader.h"
#include "elf.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define SYS_FREQ 66000000
#include "mcf.h"
/* For crc32 */
#include <zlib.h>
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
//#define DEBUG_FEC 1
*/
#include "hw.h"
#include "mcf.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
typedef struct {
MemoryRegion iomem;
#include "hw.h"
#include "mcf.h"
#include "qemu-char.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
typedef struct {
MemoryRegion iomem;
#include "blockdev.h"
#include "milkymist-hw.h"
#include "lm32.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define BIOS_FILENAME "mmone-bios.bin"
#define BIOS_OFFSET 0x00860000
#define HW_MIPS_H
/* Definitions for mips board emulation. */
-#include "memory.h"
+#include "exec/memory.h"
/* gt64xxx.c */
PCIBus *gt64120_register(qemu_irq *pic);
#include "mc146818rtc.h"
#include "i8254.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define DEBUG_FULONG2E_INIT
#include "pcspk.h"
#include "blockdev.h"
#include "sysbus.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
enum jazz_model_e
{
#include "mc146818rtc.h"
#include "i8254.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "sysbus.h" /* SysBusDevice */
//#define DEBUG_BOARD_INIT
#include "loader.h"
#include "elf.h"
#include "sysbus.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
static struct _loaderparams {
int ram_size;
#include "mc146818rtc.h"
#include "i8254.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define MAX_IDE_BUS 2
#include "ui/console.h"
#include "i2c.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "ui/pixel_ops.h"
#define MP_MISC_BASE 0x80002000
#include "qdev.h"
#include "net/net.h"
#include "ne2000.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
typedef struct ISANE2000State {
ISADevice dev;
#include "loader.h"
#include "blockdev.h"
#include "sysbus.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
/* Nokia N8x0 support */
struct n800_s {
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef hw_omap_h
-#include "memory.h"
+#include "exec/memory.h"
# define hw_omap_h "omap.h"
#include "hw/irq.h"
#include "hw.h"
#include "flash.h"
#include "omap.h"
-#include "memory.h"
-#include "exec-memory.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
/* General-Purpose Memory Controller */
struct omap_gpmc_s {
#include "arm-misc.h"
#include "flash.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
/*****************************************************************************/
/* Siemens SX1 Cellphone V1 */
#include "hw.h"
#include "omap.h"
#include "serial.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
/* UARTs */
struct omap_uart_s {
#include "flash.h"
#include "irq.h"
#include "blockdev.h"
-#include "memory.h"
-#include "exec-memory.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
#include "sysbus.h"
#include "qemu-error.h"
#include "serial.h"
#include "net/net.h"
#include "loader.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "sysemu.h"
#include "sysbus.h"
#include "qtest.h"
#include "arm-misc.h"
#include "devices.h"
#include "loader.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
static uint32_t static_readb(void *opaque, hwaddr offset)
{
*/
#include "qemu-common.h"
-#include "memory.h"
+#include "exec/memory.h"
#define SMRAM_C_BASE 0xa0000
#define SMRAM_C_END 0xc0000
#include "blockdev.h"
#include "hw/block-common.h"
#include "ui/qemu-spice.h"
-#include "memory.h"
-#include "exec-memory.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
#include "arch_init.h"
#include "bitmap.h"
#define HW_PC_H
#include "qemu-common.h"
-#include "memory.h"
-#include "ioport.h"
+#include "exec/memory.h"
+#include "exec/ioport.h"
#include "isa.h"
#include "fdc.h"
#include "net/net.h"
-#include "memory.h"
+#include "exec/memory.h"
#include "ioapic.h"
/* PC-style peripherals (also used by other machines). */
#include "blockdev.h"
#include "smbus.h"
#include "xen.h"
-#include "memory.h"
-#include "exec-memory.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
#include "cpu.h"
#ifdef CONFIG_XEN
# include <xen/hvm/hvm_info_table.h>
#include "kvm.h"
#include "kvm/clock.h"
#include "q35.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "ich9.h"
#include "hw/ide/pci.h"
#include "hw/ide/ahci.h"
#include "qmp-commands.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
//#define DEBUG_PCI
#ifdef DEBUG_PCI
#include "qemu-common.h"
#include "hw/qdev.h"
-#include "memory.h"
+#include "exec/memory.h"
#include "dma.h"
/* PCI includes legacy ISA access. */
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "hw/pci/pcie_host.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
/*
 * PCI express mmcfg address
#define PCIE_HOST_H
#include "hw/pci/pci_host.h"
-#include "memory.h"
+#include "exec/memory.h"
#define TYPE_PCIE_HOST_BRIDGE "pcie-host-bridge"
#define PCIE_HOST_BRIDGE(obj) \
#define SHPC_H
#include "qemu-common.h"
-#include "memory.h"
+#include "exec/memory.h"
#include "vmstate.h"
struct SHPCDevice {
#include "pci/msi.h"
#include "pci/shpc.h"
#include "pci/slotid_cap.h"
-#include "memory.h"
+#include "exec/memory.h"
#include "pci/pci_bus.h"
#define REDHAT_PCI_VENDOR_ID 0x1b36
#define PCNET_LOOPTEST_CRC 1
#define PCNET_LOOPTEST_NOCRC 2
-#include "memory.h"
+#include "exec/memory.h"
/* BUS CONFIGURATION REGISTERS */
#define BCR_MSRDA 0
#include "xilinx.h"
#include "blockdev.h"
#include "serial.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "ssi.h"
#include "microblaze_boot.h"
#include "boards.h"
#include "xilinx.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "microblaze_boot.h"
#include "microblaze_pic_cpu.h"
#include "flash.h"
#include "block/block.h"
#include "qemu-timer.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "host-utils.h"
#include "sysbus.h"
#include "flash.h"
#include "qemu-timer.h"
#include "block/block.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "host-utils.h"
#include "sysbus.h"
#include "hw/loader.h"
#include "elf.h"
#include "hw/sysbus.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "host-utils.h"
#include "hw/ppce500_pci.h"
#include "qemu-log.h"
#include "loader.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define BIOS_FILENAME "ppc405_rom.bin"
#define BIOS_SIZE (2048 * 1024)
#include "qemu-timer.h"
#include "sysemu.h"
#include "qemu-log.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define DEBUG_OPBA
#define DEBUG_SDRAM
#include "device_tree.h"
#include "loader.h"
#include "elf.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "serial.h"
#include "ppc.h"
#include "ppc405.h"
#include "ppc.h"
#include "ppc4xx.h"
#include "qemu-log.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
//#define DEBUG_MMIO
//#define DEBUG_UNASSIGNED
#include "ppc4xx.h"
#include "pci/pci.h"
#include "pci/pci_host.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#undef DEBUG
#ifdef DEBUG
#if !defined(__PPC_MAC_H__)
#define __PPC_MAC_H__
-#include "memory.h"
+#include "exec/memory.h"
/* SMP is not enabled, for now */
#define MAX_CPUS 1
#include "kvm_ppc.h"
#include "hw/usb.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "sysbus.h"
#define MAX_IDE_BUS 2
#include "kvm.h"
#include "kvm_ppc.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define MAX_IDE_BUS 2
#define CFG_ADDR 0xf0000510
#include "mc146818rtc.h"
#include "blockdev.h"
#include "arch_init.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
//#define HARD_DEBUG_PPC_IO
//#define DEBUG_PPC_IO
}
};
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
static int e500_pcihost_bridge_initfn(PCIDevice *d)
{
#include "pci/pci.h"
#include "pci/pci_host.h"
#include "pc.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define TYPE_RAVEN_PCI_HOST_BRIDGE "raven-pcihost"
#include "qemu-common.h"
#include "ui/console.h"
#include "elf.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "sysbus.h"
#include "boards.h"
#include "loader.h"
#ifndef PXA_H
# define PXA_H "pxa.h"
-#include "memory.h"
+#include "exec/memory.h"
/* Interrupt numbers */
# define PXA2XX_PIC_SSP3 0
#include "qdev.h"
#include "qdev-addr.h"
-#include "hwaddr.h"
+#include "exec/hwaddr.h"
#include "qapi/visitor.h"
/* --- target physical address --- */
#include "usb.h"
#include "flash.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define FLASH_BASE 0x00000000
#define FLASH_SIZE 0x02000000
#include "boards.h"
#include "i2c.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define SMP_BOOT_ADDR 0xe0000000
#define SMP_BOOTREG_ADDR 0x10000030
#include "hw/virtio.h"
#include "hw/sysbus.h"
#include "kvm.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "hw/s390-virtio-bus.h"
#include "hw/s390x/sclp.h"
#include "cpu.h"
#include "kvm.h"
-#include "memory.h"
+#include "exec/memory.h"
#include "sclp.h"
#include "serial.h"
#include "qemu-char.h"
#include "qemu-timer.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
//#define DEBUG_SERIAL
#include "hw.h"
#include "sysemu.h"
-#include "memory.h"
+#include "exec/memory.h"
#define UART_FIFO_LENGTH 16 /* 16550A Fifo Length */
#include "sh7750_regnames.h"
#include "sh_intc.h"
#include "cpu.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define NB_DEVICES 4
#include "qemu-common.h"
#include "irq.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
typedef unsigned char intc_enum;
#include "pci/pci.h"
#include "pci/pci_host.h"
#include "bswap.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
typedef struct SHPCIState {
SysBusDevice busdev;
#include "hw.h"
#include "sh.h"
#include "qemu-char.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
//#define DEBUG_SERIAL
#include "hw.h"
#include "sh.h"
#include "qemu-timer.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "ptimer.h"
//#define DEBUG_TIMER
#include "sysemu.h"
#include "boards.h"
#include "loader.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define BIOS_FILENAME "shix_bios.bin"
#define BIOS_ADDRESS 0xA0000000
#define HW_SOC_DMA_H 1
-#include "memory.h"
+#include "exec/memory.h"
#include "hw/irq.h"
struct soc_dma_s;
#include "kvm_ppc.h"
#include "pci/pci.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "hw/usb.h"
#include "qemu-config.h"
#include "qdev.h"
#include "kvm_ppc.h"
#include "dma.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "hw/spapr.h"
#include "pci/pci_host.h"
#include "hw/spapr.h"
#include "hw/spapr_pci.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include <libfdt.h>
#include "trace.h"
#include "boards.h"
#include "blockdev.h"
#include "sysbus.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#undef REG_FMT
#define REG_FMT "0x%02lx"
#include "i2c.h"
#include "net/net.h"
#include "boards.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define GPIO_A 0
#define GPIO_B 1
#ifndef _STRONGARM_H
#define _STRONGARM_H
-#include "memory.h"
+#include "exec/memory.h"
#define SA_CS0 0x00000000
#define SA_CS1 0x08000000
#include "loader.h"
#include "elf.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
//#define DEBUG_IRQ
//#define DEBUG_EBUS
#include "sysbus.h"
#include "monitor.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
static void sysbus_dev_print(Monitor *mon, DeviceState *dev, int indent);
static char *sysbus_get_fw_dev_path(DeviceState *dev);
/* Devices attached directly to the main system bus. */
#include "qdev.h"
-#include "memory.h"
+#include "exec/memory.h"
#define QDEV_MAX_MMIO 32
#define QDEV_MAX_PIO 32
#include "ssi.h"
#include "blockdev.h"
#include "sysbus.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define TOSA_RAM 0x04000000
#define TOSA_ROM 0x00800000
* THE SOFTWARE.
*/
#include "qemu-common.h"
-#include "cpu-common.h"
+#include "exec/cpu-common.h"
#include "hw/usb.h"
#include "dma.h"
#include "sysbus.h"
#include "pci/pci.h"
#include "pci/pci_host.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
typedef struct {
SysBusDevice busdev;
#include "i2c.h"
#include "boards.h"
#include "blockdev.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "flash.h"
#define VERSATILE_FLASH_ADDR 0x34000000
#include "net/net.h"
#include "sysemu.h"
#include "boards.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "blockdev.h"
#include "flash.h"
#include "config.h"
#include "event_notifier.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "kvm.h"
-#include "memory.h"
+#include "exec/memory.h"
#include "pci/msi.h"
#include "pci/msix.h"
#include "pci/pci.h"
#include <hw/hw.h>
#include "qapi/error.h"
-#include "memory.h"
+#include "exec/memory.h"
#define ST01_V_RETRACE 0x08
#define ST01_DISP_ENABLE 0x01
#include "hw/hw.h"
#include "range.h"
#include <linux/vhost.h>
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
static void vhost_dev_sync_region(struct vhost_dev *dev,
MemoryRegionSection *section,
#include "hw/hw.h"
#include "hw/virtio.h"
-#include "memory.h"
+#include "exec/memory.h"
/* Generic structures common for any vhost based device. */
struct vhost_virtqueue {
#include "loader.h"
#include "elf.h"
#include "qemu-log.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "ppc.h"
#include "ppc4xx.h"
#include "balloon.h"
#include "virtio-balloon.h"
#include "kvm.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#if defined(__linux__)
#include <sys/mman.h>
#include "pm_smbus.h"
#include "sysemu.h"
#include "qemu-timer.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
typedef uint32_t pci_addr_t;
#include "pci/pci_host.h"
#include "xen_common.h"
#include "xen_backend.h"
#include "trace.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include <xenguest.h>
#include "xen_backend.h"
#include "xen_pt.h"
#include "range.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#define XEN_PT_NR_IRQS (256)
static uint8_t xen_pt_mapped_machine_irq[XEN_PT_NR_IRQS] = {0};
#include "sysbus.h"
#include "arm-misc.h"
#include "net/net.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include "sysemu.h"
#include "boards.h"
#include "flash.h"
#include "boards.h"
#include "loader.h"
#include "elf.h"
-#include "memory.h"
-#include "exec-memory.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
#include "serial.h"
#include "net/net.h"
#include "sysbus.h"
#include "boards.h"
#include "loader.h"
#include "elf.h"
-#include "memory.h"
-#include "exec-memory.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
static uint64_t translate_phys_addr(void *env, uint64_t addr)
{
#include "blockdev.h"
#include "ui/console.h"
#include "audio/audio.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#ifdef DEBUG_Z2
#define DPRINTF(fmt, ...) \
+++ /dev/null
-/* Define hwaddr if it exists. */
-
-#ifndef HWADDR_H
-#define HWADDR_H
-
-#ifndef CONFIG_USER_ONLY
-
-#define HWADDR_BITS 64
-/* hwaddr is the type of a physical address (its size can
- be different from 'target_ulong'). */
-
-typedef uint64_t hwaddr;
-#define HWADDR_MAX UINT64_MAX
-#define TARGET_FMT_plx "%016" PRIx64
-#define HWADDR_PRId PRId64
-#define HWADDR_PRIi PRIi64
-#define HWADDR_PRIo PRIo64
-#define HWADDR_PRIu PRIu64
-#define HWADDR_PRIx PRIx64
-#define HWADDR_PRIX PRIX64
-
-#endif
-
-#endif
--- /dev/null
+/*
+ * Internal memory management interfaces
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef EXEC_MEMORY_H
+#define EXEC_MEMORY_H
+
+/*
+ * Internal interfaces between memory.c/exec.c/vl.c. Do not #include unless
+ * you're one of them.
+ */
+
+#include "exec/memory.h"
+
+#ifndef CONFIG_USER_ONLY
+
+/* Get the root memory region. This interface should only be used temporarily
+ * until a proper bus interface is available.
+ */
+MemoryRegion *get_system_memory(void);
+
+/* Get the root I/O port region. This interface should only be used
+ * temporarily until a proper bus interface is available.
+ */
+MemoryRegion *get_system_io(void);
+
+extern AddressSpace address_space_memory;
+extern AddressSpace address_space_io;
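+/* A minimal usage sketch (the names base_addr and s->iomem are made up):
+ * board or device setup code typically obtains the root region and maps its
+ * own MemoryRegion into it, e.g.
+ *
+ *   memory_region_add_subregion(get_system_memory(), base_addr, &s->iomem);
+ *
+ * See exec/memory.h for the authoritative memory API declarations.
+ */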
+
+#endif
+
+#endif
--- /dev/null
+/*
+ * defines common to all virtual CPUs
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPU_ALL_H
+#define CPU_ALL_H
+
+#include "qemu-common.h"
+#include "qemu-tls.h"
+#include "exec/cpu-common.h"
+
+/* some important defines:
+ *
+ * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
+ * memory accesses.
+ *
+ * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
+ * otherwise little endian.
+ *
+ * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
+ *
+ * TARGET_WORDS_BIGENDIAN : same for target cpu
+ */
+
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+#define BSWAP_NEEDED
+#endif
+
+#ifdef BSWAP_NEEDED
+
+static inline uint16_t tswap16(uint16_t s)
+{
+ return bswap16(s);
+}
+
+static inline uint32_t tswap32(uint32_t s)
+{
+ return bswap32(s);
+}
+
+static inline uint64_t tswap64(uint64_t s)
+{
+ return bswap64(s);
+}
+
+static inline void tswap16s(uint16_t *s)
+{
+ *s = bswap16(*s);
+}
+
+static inline void tswap32s(uint32_t *s)
+{
+ *s = bswap32(*s);
+}
+
+static inline void tswap64s(uint64_t *s)
+{
+ *s = bswap64(*s);
+}
+
+#else
+
+static inline uint16_t tswap16(uint16_t s)
+{
+ return s;
+}
+
+static inline uint32_t tswap32(uint32_t s)
+{
+ return s;
+}
+
+static inline uint64_t tswap64(uint64_t s)
+{
+ return s;
+}
+
+static inline void tswap16s(uint16_t *s)
+{
+}
+
+static inline void tswap32s(uint32_t *s)
+{
+}
+
+static inline void tswap64s(uint64_t *s)
+{
+}
+
+#endif
+
+#if TARGET_LONG_SIZE == 4
+#define tswapl(s) tswap32(s)
+#define tswapls(s) tswap32s((uint32_t *)(s))
+#define bswaptls(s) bswap32s(s)
+#else
+#define tswapl(s) tswap64(s)
+#define tswapls(s) tswap64s((uint64_t *)(s))
+#define bswaptls(s) bswap64s(s)
+#endif
+
+/* CPU memory access without any memory or io remapping */
+
+/*
+ * the generic syntax for the memory accesses is:
+ *
+ * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
+ *
+ * store: st{type}{size}{endian}_{access_type}(ptr, val)
+ *
+ * type is:
+ * (empty): integer access
+ * f : float access
+ *
+ * sign is:
+ * (empty): for floats or 32 bit size
+ * u : unsigned
+ * s : signed
+ *
+ * size is:
+ * b: 8 bits
+ * w: 16 bits
+ * l: 32 bits
+ * q: 64 bits
+ *
+ * endian is:
+ * (empty): target cpu endianness or 8 bit access
+ * r : reversed target cpu endianness (not implemented yet)
+ * be : big endian (not implemented yet)
+ * le : little endian (not implemented yet)
+ *
+ * access_type is:
+ * raw : host memory access
+ * user : user mode access using soft MMU
+ * kernel : kernel mode access using soft MMU
+ */
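+/* For example, under the naming scheme above (these are just instances of
+ * the pattern, not new interfaces):
+ *   ldub_raw(p)     - load an unsigned 8 bit integer through a host pointer
+ *   ldsw_kernel(p)  - load a signed 16 bit value via the soft MMU in
+ *                     kernel mode
+ *   stfq_raw(p, v)  - store a 64 bit float to host memory
+ * all in target cpu byte order unless an explicit le/be variant is used.
+ */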
+
+/* target-endianness CPU memory access functions */
+#if defined(TARGET_WORDS_BIGENDIAN)
+#define lduw_p(p) lduw_be_p(p)
+#define ldsw_p(p) ldsw_be_p(p)
+#define ldl_p(p) ldl_be_p(p)
+#define ldq_p(p) ldq_be_p(p)
+#define ldfl_p(p) ldfl_be_p(p)
+#define ldfq_p(p) ldfq_be_p(p)
+#define stw_p(p, v) stw_be_p(p, v)
+#define stl_p(p, v) stl_be_p(p, v)
+#define stq_p(p, v) stq_be_p(p, v)
+#define stfl_p(p, v) stfl_be_p(p, v)
+#define stfq_p(p, v) stfq_be_p(p, v)
+#else
+#define lduw_p(p) lduw_le_p(p)
+#define ldsw_p(p) ldsw_le_p(p)
+#define ldl_p(p) ldl_le_p(p)
+#define ldq_p(p) ldq_le_p(p)
+#define ldfl_p(p) ldfl_le_p(p)
+#define ldfq_p(p) ldfq_le_p(p)
+#define stw_p(p, v) stw_le_p(p, v)
+#define stl_p(p, v) stl_le_p(p, v)
+#define stq_p(p, v) stq_le_p(p, v)
+#define stfl_p(p, v) stfl_le_p(p, v)
+#define stfq_p(p, v) stfq_le_p(p, v)
+#endif
+
+/* MMU memory access macros */
+
+#if defined(CONFIG_USER_ONLY)
+#include <assert.h>
+#include "exec/user/abitypes.h"
+
+/* On some host systems the guest address space is reserved on the host.
+ * This allows the guest address space to be offset to a convenient location.
+ */
+#if defined(CONFIG_USE_GUEST_BASE)
+extern unsigned long guest_base;
+extern int have_guest_base;
+extern unsigned long reserved_va;
+#define GUEST_BASE guest_base
+#define RESERVED_VA reserved_va
+#else
+#define GUEST_BASE 0ul
+#define RESERVED_VA 0ul
+#endif
+
+/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
+#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
+
+#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
+#define h2g_valid(x) 1
+#else
+#define h2g_valid(x) ({ \
+ unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
+ (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
+ (!RESERVED_VA || (__guest < RESERVED_VA)); \
+})
+#endif
+
+#define h2g(x) ({ \
+ unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
+ /* Check if given address fits target address space */ \
+ assert(h2g_valid(x)); \
+ (abi_ulong)__ret; \
+})
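+/* Illustrative round trip (user-mode only, guest_addr is a placeholder):
+ * convert a guest virtual address to a host pointer and back; h2g()
+ * asserts that the host pointer maps into the guest address space.
+ *
+ *   void *host = g2h(guest_addr);
+ *   abi_ulong guest_again = h2g(host);
+ */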
+
+#define saddr(x) g2h(x)
+#define laddr(x) g2h(x)
+
+#else /* !CONFIG_USER_ONLY */
+/* NOTE: we use double casts if pointers and target_ulong have
+ different sizes */
+#define saddr(x) (uint8_t *)(intptr_t)(x)
+#define laddr(x) (uint8_t *)(intptr_t)(x)
+#endif
+
+#define ldub_raw(p) ldub_p(laddr((p)))
+#define ldsb_raw(p) ldsb_p(laddr((p)))
+#define lduw_raw(p) lduw_p(laddr((p)))
+#define ldsw_raw(p) ldsw_p(laddr((p)))
+#define ldl_raw(p) ldl_p(laddr((p)))
+#define ldq_raw(p) ldq_p(laddr((p)))
+#define ldfl_raw(p) ldfl_p(laddr((p)))
+#define ldfq_raw(p) ldfq_p(laddr((p)))
+#define stb_raw(p, v) stb_p(saddr((p)), v)
+#define stw_raw(p, v) stw_p(saddr((p)), v)
+#define stl_raw(p, v) stl_p(saddr((p)), v)
+#define stq_raw(p, v) stq_p(saddr((p)), v)
+#define stfl_raw(p, v) stfl_p(saddr((p)), v)
+#define stfq_raw(p, v) stfq_p(saddr((p)), v)
+
+
+#if defined(CONFIG_USER_ONLY)
+
+/* if user mode, no other memory access functions */
+#define ldub(p) ldub_raw(p)
+#define ldsb(p) ldsb_raw(p)
+#define lduw(p) lduw_raw(p)
+#define ldsw(p) ldsw_raw(p)
+#define ldl(p) ldl_raw(p)
+#define ldq(p) ldq_raw(p)
+#define ldfl(p) ldfl_raw(p)
+#define ldfq(p) ldfq_raw(p)
+#define stb(p, v) stb_raw(p, v)
+#define stw(p, v) stw_raw(p, v)
+#define stl(p, v) stl_raw(p, v)
+#define stq(p, v) stq_raw(p, v)
+#define stfl(p, v) stfl_raw(p, v)
+#define stfq(p, v) stfq_raw(p, v)
+
+#define cpu_ldub_code(env1, p) ldub_raw(p)
+#define cpu_ldsb_code(env1, p) ldsb_raw(p)
+#define cpu_lduw_code(env1, p) lduw_raw(p)
+#define cpu_ldsw_code(env1, p) ldsw_raw(p)
+#define cpu_ldl_code(env1, p) ldl_raw(p)
+#define cpu_ldq_code(env1, p) ldq_raw(p)
+
+#define cpu_ldub_data(env, addr) ldub_raw(addr)
+#define cpu_lduw_data(env, addr) lduw_raw(addr)
+#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
+#define cpu_ldl_data(env, addr) ldl_raw(addr)
+#define cpu_ldq_data(env, addr) ldq_raw(addr)
+
+#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
+#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
+#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
+#define cpu_stq_data(env, addr, data) stq_raw(addr, data)
+
+#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
+#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
+#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
+#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
+#define cpu_ldq_kernel(env, addr) ldq_raw(addr)
+
+#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
+#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
+#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
+#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
+
+#define ldub_kernel(p) ldub_raw(p)
+#define ldsb_kernel(p) ldsb_raw(p)
+#define lduw_kernel(p) lduw_raw(p)
+#define ldsw_kernel(p) ldsw_raw(p)
+#define ldl_kernel(p) ldl_raw(p)
+#define ldq_kernel(p) ldq_raw(p)
+#define ldfl_kernel(p) ldfl_raw(p)
+#define ldfq_kernel(p) ldfq_raw(p)
+#define stb_kernel(p, v) stb_raw(p, v)
+#define stw_kernel(p, v) stw_raw(p, v)
+#define stl_kernel(p, v) stl_raw(p, v)
+#define stq_kernel(p, v) stq_raw(p, v)
+#define stfl_kernel(p, v) stfl_raw(p, v)
+#define stfq_kernel(p, v) stfq_raw(p, v)
+
+#define cpu_ldub_data(env, addr) ldub_raw(addr)
+#define cpu_lduw_data(env, addr) lduw_raw(addr)
+#define cpu_ldl_data(env, addr) ldl_raw(addr)
+
+#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
+#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
+#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
+#endif /* defined(CONFIG_USER_ONLY) */
+
+/* page related stuff */
+
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
+#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
+
+/* ??? These should be the larger of uintptr_t and target_ulong. */
+extern uintptr_t qemu_real_host_page_size;
+extern uintptr_t qemu_host_page_size;
+extern uintptr_t qemu_host_page_mask;
+
+#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
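+/* Worked example of the alignment macros above, assuming a 4 KiB target
+ * page (TARGET_PAGE_BITS == 12): TARGET_PAGE_SIZE is 0x1000,
+ * TARGET_PAGE_MASK is ~0xfff, and TARGET_PAGE_ALIGN(0x1234) rounds up to
+ * 0x2000 while 0x1000 stays at 0x1000. HOST_PAGE_ALIGN does the same using
+ * the host's runtime page size.
+ */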
+
+/* same as PROT_xxx */
+#define PAGE_READ 0x0001
+#define PAGE_WRITE 0x0002
+#define PAGE_EXEC 0x0004
+#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
+#define PAGE_VALID 0x0008
+/* original state of the write flag (used when tracking self-modifying
+   code) */
+#define PAGE_WRITE_ORG 0x0010
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
+/* FIXME: Code that sets/uses this is broken and needs to go away. */
+#define PAGE_RESERVED 0x0020
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+void page_dump(FILE *f);
+
+typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
+ abi_ulong, unsigned long);
+int walk_memory_regions(void *, walk_memory_regions_fn);
+
+int page_get_flags(target_ulong address);
+void page_set_flags(target_ulong start, target_ulong end, int flags);
+int page_check_range(target_ulong start, target_ulong len, int flags);
+#endif
+
+CPUArchState *cpu_copy(CPUArchState *env);
+CPUArchState *qemu_get_cpu(int cpu);
+
+#define CPU_DUMP_CODE 0x00010000
+#define CPU_DUMP_FPU 0x00020000 /* dump FPU register state, not just integer */
+/* dump info about TCG QEMU's condition code optimization state */
+#define CPU_DUMP_CCOP 0x00040000
+
+void cpu_dump_state(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+void cpu_dump_statistics(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+
+void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
+ GCC_FMT_ATTR(2, 3);
+extern CPUArchState *first_cpu;
+DECLARE_TLS(CPUArchState *,cpu_single_env);
+#define cpu_single_env tls_var(cpu_single_env)
+
+/* Flags for use in ENV->INTERRUPT_PENDING.
+
+ The numbers assigned here are non-sequential in order to preserve
+ binary compatibility with the vmstate dump. Bit 0 (0x0001) was
+ previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
+ the vmstate dump. */
+
+/* External hardware interrupt pending. This is typically used for
+ interrupts from devices. */
+#define CPU_INTERRUPT_HARD 0x0002
+
+/* Exit the current TB. This is typically used when some system-level device
+ makes some change to the memory mapping. E.g. the a20 line change. */
+#define CPU_INTERRUPT_EXITTB 0x0004
+
+/* Halt the CPU. */
+#define CPU_INTERRUPT_HALT 0x0020
+
+/* Debug event pending. */
+#define CPU_INTERRUPT_DEBUG 0x0080
+
+/* Several target-specific external hardware interrupts. Each target/cpu.h
+ should define proper names based on these defines. */
+#define CPU_INTERRUPT_TGT_EXT_0 0x0008
+#define CPU_INTERRUPT_TGT_EXT_1 0x0010
+#define CPU_INTERRUPT_TGT_EXT_2 0x0040
+#define CPU_INTERRUPT_TGT_EXT_3 0x0200
+#define CPU_INTERRUPT_TGT_EXT_4 0x1000
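+/* As a target-specific, illustrative example of the mapping described
+ * above, an ARM-style target would typically alias these in its own cpu.h
+ * along the lines of:
+ *   #define CPU_INTERRUPT_FIQ  CPU_INTERRUPT_TGT_EXT_1
+ * The names defined here deliberately carry no meaning of their own.
+ */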
+
+/* Several target-specific internal interrupts. These differ from the
+ preceding target-specific interrupts in that they are intended to
+ originate from within the cpu itself, typically in response to some
+ instruction being executed. These, therefore, are not masked while
+ single-stepping within the debugger. */
+#define CPU_INTERRUPT_TGT_INT_0 0x0100
+#define CPU_INTERRUPT_TGT_INT_1 0x0400
+#define CPU_INTERRUPT_TGT_INT_2 0x0800
+#define CPU_INTERRUPT_TGT_INT_3 0x2000
+
+/* First unused bit: 0x4000. */
+
+/* The set of all bits that should be masked when single-stepping. */
+#define CPU_INTERRUPT_SSTEP_MASK \
+ (CPU_INTERRUPT_HARD \
+ | CPU_INTERRUPT_TGT_EXT_0 \
+ | CPU_INTERRUPT_TGT_EXT_1 \
+ | CPU_INTERRUPT_TGT_EXT_2 \
+ | CPU_INTERRUPT_TGT_EXT_3 \
+ | CPU_INTERRUPT_TGT_EXT_4)
+
+#ifndef CONFIG_USER_ONLY
+typedef void (*CPUInterruptHandler)(CPUArchState *, int);
+
+extern CPUInterruptHandler cpu_interrupt_handler;
+
+static inline void cpu_interrupt(CPUArchState *s, int mask)
+{
+ cpu_interrupt_handler(s, mask);
+}
+#else /* USER_ONLY */
+void cpu_interrupt(CPUArchState *env, int mask);
+#endif /* USER_ONLY */
+
+void cpu_reset_interrupt(CPUArchState *env, int mask);
+
+void cpu_exit(CPUArchState *s);
+
+/* Breakpoint/watchpoint flags */
+#define BP_MEM_READ 0x01
+#define BP_MEM_WRITE 0x02
+#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
+#define BP_STOP_BEFORE_ACCESS 0x04
+#define BP_WATCHPOINT_HIT 0x08
+#define BP_GDB 0x10
+#define BP_CPU 0x20
+
+int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
+ CPUBreakpoint **breakpoint);
+int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags);
+void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint);
+void cpu_breakpoint_remove_all(CPUArchState *env, int mask);
+int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
+ int flags, CPUWatchpoint **watchpoint);
+int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr,
+ target_ulong len, int flags);
+void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint);
+void cpu_watchpoint_remove_all(CPUArchState *env, int mask);
+
+#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
+#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
+#define SSTEP_NOTIMER 0x4 /* Do not use timers while single stepping */
+
+void cpu_single_step(CPUArchState *env, int enabled);
+
+#if !defined(CONFIG_USER_ONLY)
+
+/* Return the physical page corresponding to a virtual one. Use it
+ only for debugging because no protection checks are done. Return -1
+ if no page found. */
+hwaddr cpu_get_phys_page_debug(CPUArchState *env, target_ulong addr);
+
+/* memory API */
+
+extern int phys_ram_fd;
+extern ram_addr_t ram_size;
+
+/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
+#define RAM_PREALLOC_MASK (1 << 0)
+
+typedef struct RAMBlock {
+ struct MemoryRegion *mr;
+ uint8_t *host;
+ ram_addr_t offset;
+ ram_addr_t length;
+ uint32_t flags;
+ char idstr[256];
+ QLIST_ENTRY(RAMBlock) next;
+#if defined(__linux__) && !defined(TARGET_S390X)
+ int fd;
+#endif
+} RAMBlock;
+
+typedef struct RAMList {
+ uint8_t *phys_dirty;
+ QLIST_HEAD(, RAMBlock) blocks;
+} RAMList;
+extern RAMList ram_list;
+
+extern const char *mem_path;
+extern int mem_prealloc;
+
+/* Flags stored in the low bits of the TLB virtual address. These are
+ defined so that fast path ram access is all zeros. */
+/* Zero if TLB entry is valid. */
+#define TLB_INVALID_MASK (1 << 3)
+/* Set if TLB entry references a clean RAM page. The iotlb entry will
+ contain the page physical address. */
+#define TLB_NOTDIRTY (1 << 4)
+/* Set if TLB entry is an IO callback. */
+#define TLB_MMIO (1 << 5)
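+/* For instance, a TLB entry whose addr_write field has TLB_NOTDIRTY set
+ * forces stores through the slow path so that dirty-page tracking can run,
+ * while TLB_MMIO routes the access to an I/O callback instead of RAM. */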
+
+void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
+ram_addr_t last_ram_offset(void);
+#endif /* !CONFIG_USER_ONLY */
+
+int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
+ uint8_t *buf, int len, int is_write);
+
+#endif /* CPU_ALL_H */
--- /dev/null
+#ifndef CPU_COMMON_H
+#define CPU_COMMON_H 1
+
+/* CPU interfaces that are target independent. */
+
+#include "exec/hwaddr.h"
+
+#ifndef NEED_CPU_H
+#include "exec/poison.h"
+#endif
+
+#include "bswap.h"
+#include "qemu-queue.h"
+
+#if !defined(CONFIG_USER_ONLY)
+
+enum device_endian {
+ DEVICE_NATIVE_ENDIAN,
+ DEVICE_BIG_ENDIAN,
+ DEVICE_LITTLE_ENDIAN,
+};
+
+/* address in the RAM (different from a physical address) */
+#if defined(CONFIG_XEN_BACKEND)
+typedef uint64_t ram_addr_t;
+# define RAM_ADDR_MAX UINT64_MAX
+# define RAM_ADDR_FMT "%" PRIx64
+#else
+typedef uintptr_t ram_addr_t;
+# define RAM_ADDR_MAX UINTPTR_MAX
+# define RAM_ADDR_FMT "%" PRIxPTR
+#endif
+
+/* memory API */
+
+typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
+typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
+
+void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
+/* This should only be used for ram local to a device. */
+void *qemu_get_ram_ptr(ram_addr_t addr);
+void qemu_put_ram_ptr(void *addr);
+/* This should not be used by devices. */
+int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
+void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev);
+
+void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
+ int len, int is_write);
+static inline void cpu_physical_memory_read(hwaddr addr,
+ void *buf, int len)
+{
+ cpu_physical_memory_rw(addr, buf, len, 0);
+}
+static inline void cpu_physical_memory_write(hwaddr addr,
+ const void *buf, int len)
+{
+ cpu_physical_memory_rw(addr, (void *)buf, len, 1);
+}
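+/* Example (desc_addr and the buffer size are illustrative): reading a
+ * descriptor out of guest physical memory through the wrapper above:
+ *
+ *   uint8_t desc[16];
+ *   cpu_physical_memory_read(desc_addr, desc, sizeof(desc));
+ */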
+void *cpu_physical_memory_map(hwaddr addr,
+ hwaddr *plen,
+ int is_write);
+void cpu_physical_memory_unmap(void *buffer, hwaddr len,
+ int is_write, hwaddr access_len);
+void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
+
+bool cpu_physical_memory_is_io(hwaddr phys_addr);
+
+/* Coalesced MMIO regions are areas where write operations can be reordered.
+ * This usually implies that write operations are side-effect free. This allows
+ * batching which can make a major impact on performance when using
+ * virtualization.
+ */
+void qemu_flush_coalesced_mmio_buffer(void);
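+/* Sketch of how a device that tolerates reordered writes opts in (assuming
+ * the coalescing helpers in exec/memory.h):
+ *   memory_region_set_coalescing(&s->iomem);
+ * The buffered writes are later drained by
+ * qemu_flush_coalesced_mmio_buffer().
+ */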
+
+uint32_t ldub_phys(hwaddr addr);
+uint32_t lduw_le_phys(hwaddr addr);
+uint32_t lduw_be_phys(hwaddr addr);
+uint32_t ldl_le_phys(hwaddr addr);
+uint32_t ldl_be_phys(hwaddr addr);
+uint64_t ldq_le_phys(hwaddr addr);
+uint64_t ldq_be_phys(hwaddr addr);
+void stb_phys(hwaddr addr, uint32_t val);
+void stw_le_phys(hwaddr addr, uint32_t val);
+void stw_be_phys(hwaddr addr, uint32_t val);
+void stl_le_phys(hwaddr addr, uint32_t val);
+void stl_be_phys(hwaddr addr, uint32_t val);
+void stq_le_phys(hwaddr addr, uint64_t val);
+void stq_be_phys(hwaddr addr, uint64_t val);
+
+#ifdef NEED_CPU_H
+uint32_t lduw_phys(hwaddr addr);
+uint32_t ldl_phys(hwaddr addr);
+uint64_t ldq_phys(hwaddr addr);
+void stl_phys_notdirty(hwaddr addr, uint32_t val);
+void stq_phys_notdirty(hwaddr addr, uint64_t val);
+void stw_phys(hwaddr addr, uint32_t val);
+void stl_phys(hwaddr addr, uint32_t val);
+void stq_phys(hwaddr addr, uint64_t val);
+#endif
+
+void cpu_physical_memory_write_rom(hwaddr addr,
+ const uint8_t *buf, int len);
+
+extern struct MemoryRegion io_mem_ram;
+extern struct MemoryRegion io_mem_rom;
+extern struct MemoryRegion io_mem_unassigned;
+extern struct MemoryRegion io_mem_notdirty;
+
+#endif
+
+#endif /* !CPU_COMMON_H */
--- /dev/null
+/*
+ * common defines for all CPUs
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPU_DEFS_H
+#define CPU_DEFS_H
+
+#ifndef NEED_CPU_H
+#error cpu.h included from common code
+#endif
+
+#include "config.h"
+#include <setjmp.h>
+#include <inttypes.h>
+#include <signal.h>
+#include "osdep.h"
+#include "qemu-queue.h"
+#include "exec/hwaddr.h"
+
+#ifndef TARGET_LONG_BITS
+#error TARGET_LONG_BITS must be defined before including this header
+#endif
+
+#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
+
+typedef int16_t target_short __attribute__ ((aligned(TARGET_SHORT_ALIGNMENT)));
+typedef uint16_t target_ushort __attribute__((aligned(TARGET_SHORT_ALIGNMENT)));
+typedef int32_t target_int __attribute__((aligned(TARGET_INT_ALIGNMENT)));
+typedef uint32_t target_uint __attribute__((aligned(TARGET_INT_ALIGNMENT)));
+typedef int64_t target_llong __attribute__((aligned(TARGET_LLONG_ALIGNMENT)));
+typedef uint64_t target_ullong __attribute__((aligned(TARGET_LLONG_ALIGNMENT)));
+/* target_ulong is the type of a virtual address */
+#if TARGET_LONG_SIZE == 4
+typedef int32_t target_long __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+typedef uint32_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+#define TARGET_FMT_lx "%08x"
+#define TARGET_FMT_ld "%d"
+#define TARGET_FMT_lu "%u"
+#elif TARGET_LONG_SIZE == 8
+typedef int64_t target_long __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+typedef uint64_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+#define TARGET_FMT_lx "%016" PRIx64
+#define TARGET_FMT_ld "%" PRId64
+#define TARGET_FMT_lu "%" PRIu64
+#else
+#error TARGET_LONG_SIZE undefined
+#endif
+
+#define EXCP_INTERRUPT 0x10000 /* async interruption */
+#define EXCP_HLT 0x10001 /* hlt instruction reached */
+#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
+#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
+
+#define TB_JMP_CACHE_BITS 12
+#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
+
+/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
+ addresses on the same page. The top bits are the same. This allows
+ TLB invalidation to quickly clear a subset of the hash table. */
+#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
+#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
+#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
+#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
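+/* With TB_JMP_CACHE_BITS == 12 this gives TB_JMP_PAGE_BITS == 6,
+ * TB_JMP_PAGE_SIZE == 64, TB_JMP_ADDR_MASK == 0x3f and
+ * TB_JMP_PAGE_MASK == 0xfc0, i.e. 64 cache slots per page group. */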
+
+#if !defined(CONFIG_USER_ONLY)
+#define CPU_TLB_BITS 8
+#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
+
+#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
+#define CPU_TLB_ENTRY_BITS 4
+#else
+#define CPU_TLB_ENTRY_BITS 5
+#endif
+
+typedef struct CPUTLBEntry {
+ /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
+ bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
+ go directly to ram.
+ bit 3 : indicates that the entry is invalid
+ bit 2..0 : zero
+ */
+ target_ulong addr_read;
+ target_ulong addr_write;
+ target_ulong addr_code;
+ /* Addend to virtual address to get host address. IO accesses
+ use the corresponding iotlb value. */
+ uintptr_t addend;
+ /* padding to get a power of two size */
+ uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
+ (sizeof(target_ulong) * 3 +
+ ((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) +
+ sizeof(uintptr_t))];
+} CPUTLBEntry;
+
+extern int CPUTLBEntry_wrong_size[sizeof(CPUTLBEntry) == (1 << CPU_TLB_ENTRY_BITS) ? 1 : -1];
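+/* Worked example of the padding above: on a 64 bit host running a 32 bit
+ * target, CPU_TLB_ENTRY_BITS is 5, the three target_ulong fields take 12
+ * bytes, 4 bytes of padding realign to the 8 byte addend, and dummy[]
+ * supplies the remaining 8 bytes to reach the required 32 byte entry size. */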
+
+#define CPU_COMMON_TLB \
+ /* The meaning of the MMU modes is defined in the target code. */ \
+ CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ target_ulong tlb_flush_addr; \
+ target_ulong tlb_flush_mask;
+
+#else
+
+#define CPU_COMMON_TLB
+
+#endif
+
+
+#ifdef HOST_WORDS_BIGENDIAN
+typedef struct icount_decr_u16 {
+ uint16_t high;
+ uint16_t low;
+} icount_decr_u16;
+#else
+typedef struct icount_decr_u16 {
+ uint16_t low;
+ uint16_t high;
+} icount_decr_u16;
+#endif
+
+struct kvm_run;
+struct KVMState;
+struct qemu_work_item;
+
+typedef struct CPUBreakpoint {
+ target_ulong pc;
+ int flags; /* BP_* */
+ QTAILQ_ENTRY(CPUBreakpoint) entry;
+} CPUBreakpoint;
+
+typedef struct CPUWatchpoint {
+ target_ulong vaddr;
+ target_ulong len_mask;
+ int flags; /* BP_* */
+ QTAILQ_ENTRY(CPUWatchpoint) entry;
+} CPUWatchpoint;
+
+#define CPU_TEMP_BUF_NLONGS 128
+#define CPU_COMMON \
+ struct TranslationBlock *current_tb; /* currently executing TB */ \
+ /* soft mmu support */ \
+ /* in order to avoid passing too many arguments to the MMIO \
+ helpers, we store some rarely used information in the CPU \
+ context. */ \
+ uintptr_t mem_io_pc; /* host pc at which the memory was \
+ accessed */ \
+ target_ulong mem_io_vaddr; /* target virtual addr at which the \
+ memory was accessed */ \
+ uint32_t halted; /* Nonzero if the CPU is in suspend state */ \
+ uint32_t interrupt_request; \
+ volatile sig_atomic_t exit_request; \
+ CPU_COMMON_TLB \
+ struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
+ /* buffer for temporaries in the code generator */ \
+ long temp_buf[CPU_TEMP_BUF_NLONGS]; \
+ \
+ int64_t icount_extra; /* Instructions until next timer event. */ \
+ /* Number of cycles left, with interrupt flag in high bit. \
+ This allows a single read-compare-cbranch-write sequence to test \
+ for both decrementer underflow and exceptions. */ \
+ union { \
+ uint32_t u32; \
+ icount_decr_u16 u16; \
+ } icount_decr; \
+ uint32_t can_do_io; /* nonzero if memory mapped IO is safe. */ \
+ \
+ /* from this point: preserved by CPU reset */ \
+ /* ice debug support */ \
+ QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
+ int singlestep_enabled; \
+ \
+ QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
+ CPUWatchpoint *watchpoint_hit; \
+ \
+ struct GDBRegisterState *gdb_regs; \
+ \
+ /* Core interrupt code */ \
+ jmp_buf jmp_env; \
+ int exception_index; \
+ \
+ CPUArchState *next_cpu; /* next CPU sharing TB cache */ \
+ int cpu_index; /* CPU index (informative) */ \
+ uint32_t host_tid; /* host thread ID */ \
+ int numa_node; /* NUMA node this CPU belongs to */ \
+ int nr_cores; /* number of cores within this CPU package */ \
+ int nr_threads;/* number of threads within this CPU */ \
+ int running; /* Nonzero if cpu is currently running (usermode). */ \
+ /* user data */ \
+ void *opaque; \
+ \
+ const char *cpu_model_str; \
+ struct KVMState *kvm_state; \
+ struct kvm_run *kvm_run; \
+ int kvm_fd; \
+ int kvm_vcpu_dirty;
+
+#endif
--- /dev/null
+/*
+ * Common CPU TLB handling
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPUTLB_H
+#define CPUTLB_H
+
+#if !defined(CONFIG_USER_ONLY)
+/* cputlb.c */
+void tlb_protect_code(ram_addr_t ram_addr);
+void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
+ target_ulong vaddr);
+void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
+ uintptr_t length);
+MemoryRegionSection *phys_page_find(struct AddressSpaceDispatch *d,
+ hwaddr index);
+void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length);
+void tlb_set_dirty(CPUArchState *env, target_ulong vaddr);
+extern int tlb_flush_count;
+
+/* exec.c */
+void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr);
+hwaddr memory_region_section_get_iotlb(CPUArchState *env,
+ MemoryRegionSection *section,
+ target_ulong vaddr,
+ hwaddr paddr,
+ int prot,
+ target_ulong *address);
+bool memory_region_is_unassigned(MemoryRegion *mr);
+
+#endif
+#endif
--- /dev/null
+/* Helper file for declaring TCG helper functions.
+ Should be included at the start and end of target-foo/helper.h.
+
+ Targets should use DEF_HELPER_N and DEF_HELPER_FLAGS_N to declare helper
+ functions. Names should be specified without the helper_ prefix, and
+ the return and argument types specified. 3 basic types are understood
+ (i32, i64 and ptr). Additional aliases are provided for convenience and
+ to match the types used by the C helper implementation.
+
+ The target helper.h should be included in all files that use/define
+ helper functions. This will ensure that function prototypes are
+ consistent. In addition it should be included an extra two times for
+ helper.c, defining:
+ GEN_HELPER 1 to produce op generation functions (gen_helper_*)
+ GEN_HELPER 2 to do runtime registration of helper functions.
+ */
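+/* For example (hypothetical helper name, not taken from any target):
+ *
+ *   DEF_HELPER_2(add_sat, i32, i32, i32)
+ *
+ * expands to the prototype
+ *   uint32_t helper_add_sat(uint32_t, uint32_t);
+ * when included for prototypes, and to a gen_helper_add_sat(retval, arg1,
+ * arg2) wrapper when GEN_HELPER is defined to 1.
+ */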
+
+#ifndef DEF_HELPER_H
+#define DEF_HELPER_H 1
+
+#define HELPER(name) glue(helper_, name)
+
+#define GET_TCGV_i32 GET_TCGV_I32
+#define GET_TCGV_i64 GET_TCGV_I64
+#define GET_TCGV_ptr GET_TCGV_PTR
+
+/* Some types that make sense in C, but not for TCG. */
+#define dh_alias_i32 i32
+#define dh_alias_s32 i32
+#define dh_alias_int i32
+#define dh_alias_i64 i64
+#define dh_alias_s64 i64
+#define dh_alias_f32 i32
+#define dh_alias_f64 i64
+#if TARGET_LONG_BITS == 32
+#define dh_alias_tl i32
+#else
+#define dh_alias_tl i64
+#endif
+#define dh_alias_ptr ptr
+#define dh_alias_void void
+#define dh_alias_noreturn noreturn
+#define dh_alias_env ptr
+#define dh_alias(t) glue(dh_alias_, t)
+
+#define dh_ctype_i32 uint32_t
+#define dh_ctype_s32 int32_t
+#define dh_ctype_int int
+#define dh_ctype_i64 uint64_t
+#define dh_ctype_s64 int64_t
+#define dh_ctype_f32 float32
+#define dh_ctype_f64 float64
+#define dh_ctype_tl target_ulong
+#define dh_ctype_ptr void *
+#define dh_ctype_void void
+#define dh_ctype_noreturn void QEMU_NORETURN
+#define dh_ctype_env CPUArchState *
+#define dh_ctype(t) dh_ctype_##t
+
+/* We can't use glue() here because it falls foul of C preprocessor
+ recursive expansion rules. */
+#define dh_retvar_decl0_void void
+#define dh_retvar_decl0_noreturn void
+#define dh_retvar_decl0_i32 TCGv_i32 retval
+#define dh_retvar_decl0_i64 TCGv_i64 retval
+#define dh_retvar_decl0_ptr TCGv_ptr retval
+#define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t))
+
+#define dh_retvar_decl_void
+#define dh_retvar_decl_noreturn
+#define dh_retvar_decl_i32 TCGv_i32 retval,
+#define dh_retvar_decl_i64 TCGv_i64 retval,
+#define dh_retvar_decl_ptr TCGv_ptr retval,
+#define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))
+
+#define dh_retvar_void TCG_CALL_DUMMY_ARG
+#define dh_retvar_noreturn TCG_CALL_DUMMY_ARG
+#define dh_retvar_i32 GET_TCGV_i32(retval)
+#define dh_retvar_i64 GET_TCGV_i64(retval)
+#define dh_retvar_ptr GET_TCGV_ptr(retval)
+#define dh_retvar(t) glue(dh_retvar_, dh_alias(t))
+
+#define dh_is_64bit_void 0
+#define dh_is_64bit_noreturn 0
+#define dh_is_64bit_i32 0
+#define dh_is_64bit_i64 1
+#define dh_is_64bit_ptr (TCG_TARGET_REG_BITS == 64)
+#define dh_is_64bit(t) glue(dh_is_64bit_, dh_alias(t))
+
+#define dh_is_signed_void 0
+#define dh_is_signed_noreturn 0
+#define dh_is_signed_i32 0
+#define dh_is_signed_s32 1
+#define dh_is_signed_i64 0
+#define dh_is_signed_s64 1
+#define dh_is_signed_f32 0
+#define dh_is_signed_f64 0
+#define dh_is_signed_tl 0
+#define dh_is_signed_int 1
+/* ??? This is highly specific to the host cpu. There are even special
+ extension instructions that may be required, e.g. ia64's addp4. But
+ for now we don't support any 64-bit targets with 32-bit pointers. */
+#define dh_is_signed_ptr 0
+#define dh_is_signed_env dh_is_signed_ptr
+#define dh_is_signed(t) dh_is_signed_##t
+
+#define dh_sizemask(t, n) \
+ sizemask |= dh_is_64bit(t) << (n*2); \
+ sizemask |= dh_is_signed(t) << (n*2+1)
+
+#define dh_arg(t, n) \
+ args[n - 1] = glue(GET_TCGV_, dh_alias(t))(glue(arg, n)); \
+ dh_sizemask(t, n)
+
+#define dh_arg_decl(t, n) glue(TCGv_, dh_alias(t)) glue(arg, n)
+
+
+#define DEF_HELPER_0(name, ret) \
+ DEF_HELPER_FLAGS_0(name, 0, ret)
+#define DEF_HELPER_1(name, ret, t1) \
+ DEF_HELPER_FLAGS_1(name, 0, ret, t1)
+#define DEF_HELPER_2(name, ret, t1, t2) \
+ DEF_HELPER_FLAGS_2(name, 0, ret, t1, t2)
+#define DEF_HELPER_3(name, ret, t1, t2, t3) \
+ DEF_HELPER_FLAGS_3(name, 0, ret, t1, t2, t3)
+#define DEF_HELPER_4(name, ret, t1, t2, t3, t4) \
+ DEF_HELPER_FLAGS_4(name, 0, ret, t1, t2, t3, t4)
+#define DEF_HELPER_5(name, ret, t1, t2, t3, t4, t5) \
+ DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5)
+
+/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
+
+#endif /* DEF_HELPER_H */
+
+#ifndef GEN_HELPER
+/* Function prototypes. */
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+dh_ctype(ret) HELPER(name) (void);
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1));
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2));
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3));
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4));
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5));
+
+#undef GEN_HELPER
+#define GEN_HELPER -1
+
+#elif GEN_HELPER == 1
+/* Gen functions. */
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
+{ \
+ int sizemask; \
+ sizemask = dh_is_64bit(ret); \
+ tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 0, NULL); \
+}
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1)) \
+{ \
+ TCGArg args[1]; \
+ int sizemask = 0; \
+ dh_sizemask(ret, 0); \
+ dh_arg(t1, 1); \
+ tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 1, args); \
+}
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1), \
+ dh_arg_decl(t2, 2)) \
+{ \
+ TCGArg args[2]; \
+ int sizemask = 0; \
+ dh_sizemask(ret, 0); \
+ dh_arg(t1, 1); \
+ dh_arg(t2, 2); \
+ tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 2, args); \
+}
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1), \
+ dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
+{ \
+ TCGArg args[3]; \
+ int sizemask = 0; \
+ dh_sizemask(ret, 0); \
+ dh_arg(t1, 1); \
+ dh_arg(t2, 2); \
+ dh_arg(t3, 3); \
+ tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 3, args); \
+}
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1), \
+ dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
+{ \
+ TCGArg args[4]; \
+ int sizemask = 0; \
+ dh_sizemask(ret, 0); \
+ dh_arg(t1, 1); \
+ dh_arg(t2, 2); \
+ dh_arg(t3, 3); \
+ dh_arg(t4, 4); \
+ tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 4, args); \
+}
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
+{ \
+ TCGArg args[5]; \
+ int sizemask = 0; \
+ dh_sizemask(ret, 0); \
+ dh_arg(t1, 1); \
+ dh_arg(t2, 2); \
+ dh_arg(t3, 3); \
+ dh_arg(t4, 4); \
+ dh_arg(t5, 5); \
+ tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 5, args); \
+}
+
+#undef GEN_HELPER
+#define GEN_HELPER -1
+
+#elif GEN_HELPER == 2
+/* Register helpers. */
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+tcg_register_helper(HELPER(name), #name);
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#undef GEN_HELPER
+#define GEN_HELPER -1
+
+#elif GEN_HELPER == -1
+/* Undefine macros. */
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef GEN_HELPER
+
+#endif
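+
+/* Usage sketch (illustrative only): a target lists its helpers in its own
+ * helper.h using the macros above, e.g. a hypothetical 32-bit add helper:
+ *
+ *     DEF_HELPER_2(example_add, i32, i32, i32)
+ *
+ * Including that file with GEN_HELPER undefined emits the HELPER()
+ * prototype, conventionally
+ *
+ *     uint32_t helper_example_add(uint32_t, uint32_t);
+ *
+ * which the target implements in its helper code, while re-including it
+ * with GEN_HELPER defined to 1 emits
+ *
+ *     static inline void gen_helper_example_add(TCGv_i32 retval,
+ *                                               TCGv_i32 arg1, TCGv_i32 arg2)
+ *
+ * for the translator to call.  "example_add" is a made-up name used purely
+ * for illustration.
+ */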
--- /dev/null
+/*
+ * internal execution defines for qemu
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _EXEC_ALL_H_
+#define _EXEC_ALL_H_
+
+#include "qemu-common.h"
+
+/* Allow seeing translation results - the slowdown should be negligible, so we leave it enabled. */
+#define DEBUG_DISAS
+
+/* Page tracking code uses ram addresses in system mode, and virtual
+ addresses in userspace mode. Define tb_page_addr_t to be an appropriate
+ type. */
+#if defined(CONFIG_USER_ONLY)
+typedef abi_ulong tb_page_addr_t;
+#else
+typedef ram_addr_t tb_page_addr_t;
+#endif
+
+/* is_jmp field values */
+#define DISAS_NEXT 0 /* next instruction can be analyzed */
+#define DISAS_JUMP 1 /* only pc was modified dynamically */
+#define DISAS_UPDATE 2 /* cpu state was modified dynamically */
+#define DISAS_TB_JUMP 3 /* only pc was modified statically */
+
+struct TranslationBlock;
+typedef struct TranslationBlock TranslationBlock;
+
+/* XXX: make safe guess about sizes */
+#define MAX_OP_PER_INSTR 208
+
+#if HOST_LONG_BITS == 32
+#define MAX_OPC_PARAM_PER_ARG 2
+#else
+#define MAX_OPC_PARAM_PER_ARG 1
+#endif
+#define MAX_OPC_PARAM_IARGS 5
+#define MAX_OPC_PARAM_OARGS 1
+#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
+
+/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
+ * and up to 4 + N parameters on 64-bit archs
+ * (N = number of input arguments + output arguments). */
+#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
+#define OPC_BUF_SIZE 640
+#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
+
+/* Maximum size a TCG op can expand to. This is complicated because a
+ single op may require several host instructions and register reloads.
+ For now take a wild guess at 192 bytes, which should allow at least
+ a couple of fixup instructions per argument. */
+#define TCG_MAX_OP_SIZE 192
+
+#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
+
+#include "qemu-log.h"
+
+void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
+void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
+void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
+ int pc_pos);
+
+void cpu_gen_init(void);
+int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
+ int *gen_code_size_ptr);
+bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);
+
+void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
+void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
+TranslationBlock *tb_gen_code(CPUArchState *env,
+ target_ulong pc, target_ulong cs_base, int flags,
+ int cflags);
+void cpu_exec_init(CPUArchState *env);
+void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
+int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
+ int is_cpu_write_access);
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
+ int is_cpu_write_access);
+#if !defined(CONFIG_USER_ONLY)
+/* cputlb.c */
+void tlb_flush_page(CPUArchState *env, target_ulong addr);
+void tlb_flush(CPUArchState *env, int flush_global);
+void tlb_set_page(CPUArchState *env, target_ulong vaddr,
+ hwaddr paddr, int prot,
+ int mmu_idx, target_ulong size);
+void tb_invalidate_phys_addr(hwaddr addr);
+#else
+static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
+{
+}
+
+static inline void tlb_flush(CPUArchState *env, int flush_global)
+{
+}
+#endif
+
+#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */
+
+#define CODE_GEN_PHYS_HASH_BITS 15
+#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)
+
+/* estimated block size for TB allocation */
+/* XXX: use a per code average code fragment size and modulate it
+ according to the host CPU */
+#if defined(CONFIG_SOFTMMU)
+#define CODE_GEN_AVG_BLOCK_SIZE 128
+#else
+#define CODE_GEN_AVG_BLOCK_SIZE 64
+#endif
+
+#if defined(__arm__) || defined(_ARCH_PPC) \
+ || defined(__x86_64__) || defined(__i386__) \
+ || defined(__sparc__) \
+ || defined(CONFIG_TCG_INTERPRETER)
+#define USE_DIRECT_JUMP
+#endif
+
+struct TranslationBlock {
+ target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
+ target_ulong cs_base; /* CS base for this block */
+ uint64_t flags; /* flags defining in which context the code was generated */
+ uint16_t size; /* size of target code for this block (1 <=
+ size <= TARGET_PAGE_SIZE) */
+ uint16_t cflags; /* compile flags */
+#define CF_COUNT_MASK 0x7fff
+#define CF_LAST_IO 0x8000 /* Last insn may be an IO access. */
+
+ uint8_t *tc_ptr; /* pointer to the translated code */
+ /* next matching tb for physical address. */
+ struct TranslationBlock *phys_hash_next;
+ /* first and second physical page containing code. The lower bit
+ of the pointer tells the index in page_next[] */
+ struct TranslationBlock *page_next[2];
+ tb_page_addr_t page_addr[2];
+
+ /* the following data are used to directly call another TB from
+ the code of this one. */
+ uint16_t tb_next_offset[2]; /* offset of original jump target */
+#ifdef USE_DIRECT_JUMP
+ uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
+#else
+ uintptr_t tb_next[2]; /* address of jump generated code */
+#endif
+ /* list of TBs jumping to this one. This is a circular list using
+ the two least significant bits of the pointers to tell what is
+ the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
+ jmp_first */
+ struct TranslationBlock *jmp_next[2];
+ struct TranslationBlock *jmp_first;
+ uint32_t icount;
+};
+
+static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
+{
+ target_ulong tmp;
+ tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
+ return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
+}
+
+static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+{
+ target_ulong tmp;
+ tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
+ return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
+ | (tmp & TB_JMP_ADDR_MASK));
+}
+
+static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
+{
+ return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
+}
+
+void tb_free(TranslationBlock *tb);
+void tb_flush(CPUArchState *env);
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
+
+extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
+
+#if defined(USE_DIRECT_JUMP)
+
+#if defined(CONFIG_TCG_INTERPRETER)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+ /* patch the branch destination */
+ *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
+ /* no need to flush icache explicitly */
+}
+#elif defined(_ARCH_PPC)
+void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
+#define tb_set_jmp_target1 ppc_tb_set_jmp_target
+#elif defined(__i386__) || defined(__x86_64__)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+ /* patch the branch destination */
+ *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
+ /* no need to flush icache explicitly */
+}
+#elif defined(__arm__)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+#if !QEMU_GNUC_PREREQ(4, 1)
+ register unsigned long _beg __asm ("a1");
+ register unsigned long _end __asm ("a2");
+ register unsigned long _flg __asm ("a3");
+#endif
+
+ /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
+ *(uint32_t *)jmp_addr =
+ (*(uint32_t *)jmp_addr & ~0xffffff)
+ | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);
+
+#if QEMU_GNUC_PREREQ(4, 1)
+ __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
+#else
+ /* flush icache */
+ _beg = jmp_addr;
+ _end = jmp_addr + 4;
+ _flg = 0;
+ __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
+#endif
+}
+#elif defined(__sparc__)
+void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
+#else
+#error tb_set_jmp_target1 is missing
+#endif
+
+static inline void tb_set_jmp_target(TranslationBlock *tb,
+ int n, uintptr_t addr)
+{
+ uint16_t offset = tb->tb_jmp_offset[n];
+ tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
+}
+
+#else
+
+/* set the jump target */
+static inline void tb_set_jmp_target(TranslationBlock *tb,
+ int n, uintptr_t addr)
+{
+ tb->tb_next[n] = addr;
+}
+
+#endif
+
+static inline void tb_add_jump(TranslationBlock *tb, int n,
+ TranslationBlock *tb_next)
+{
+ /* NOTE: this test is only needed for thread safety */
+ if (!tb->jmp_next[n]) {
+ /* patch the native jump address */
+ tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
+
+ /* add in TB jmp circular list */
+ tb->jmp_next[n] = tb_next->jmp_first;
+ tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
+ }
+}
+
+#include "exec/spinlock.h"
+
+extern spinlock_t tb_lock;
+
+extern int tb_invalidated_flag;
+
+/* The return address may point to the start of the next instruction.
+ Subtracting one gets us the call instruction itself. */
+#if defined(CONFIG_TCG_INTERPRETER)
+/* Softmmu, Alpha, MIPS, SH4 and SPARC user mode emulations call GETPC().
+   For all others, GETPC remains undefined (which makes TCI a little faster). */
+# if defined(CONFIG_SOFTMMU) || \
+ defined(TARGET_ALPHA) || defined(TARGET_MIPS) || \
+ defined(TARGET_SH4) || defined(TARGET_SPARC)
+extern uintptr_t tci_tb_ptr;
+# define GETPC() tci_tb_ptr
+# endif
+#elif defined(__s390__) && !defined(__s390x__)
+# define GETPC() \
+ (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
+#elif defined(__arm__)
+/* Thumb return addresses have the low bit set, so we need to subtract two.
+ This is still safe in ARM mode because instructions are 4 bytes. */
+# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
+#else
+# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
+#endif
+
+#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
+/* The qemu_ld/st optimization splits code generation into a fast and a slow
+   path, so an MMU helper called from the slow path needs special handling to
+   recover the fast path's pc without any additional argument.
+   It uses a tricky solution which embeds the fast path pc into the slow path.
+
+ Code flow in slow path:
+ (1) pre-process
+ (2) call MMU helper
+ (3) jump to (5)
+ (4) fast path information (implementation specific)
+ (5) post-process (e.g. stack adjust)
+ (6) jump to corresponding code of the next of fast path
+ */
+# if defined(__i386__) || defined(__x86_64__)
+/* To avoid breaking disassembly, a long jmp is used to embed the fast path pc,
+   so that the jmp's destination is the code following the fast path, even
+   though this jmp is never executed.
+
+ call MMU helper
+ jmp POST_PROC (2byte) <- GETRA()
+ jmp NEXT_CODE (5byte)
+ POST_PROCESS ... <- GETRA() + 7
+ */
+# define GETRA() ((uintptr_t)__builtin_return_address(0))
+# define GETPC_LDST() ((uintptr_t)(GETRA() + 7 + \
+ *(int32_t *)((void *)GETRA() + 3) - 1))
+# elif defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
+# define GETRA() ((uintptr_t)__builtin_return_address(0))
+# define GETPC_LDST() ((uintptr_t) ((*(int32_t *)(GETRA() - 4)) - 1))
+# else
+# error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
+# endif
+bool is_tcg_gen_code(uintptr_t pc_ptr);
+# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
+#else
+# define GETPC_EXT() GETPC()
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+
+struct MemoryRegion *iotlb_to_region(hwaddr index);
+uint64_t io_mem_read(struct MemoryRegion *mr, hwaddr addr,
+ unsigned size);
+void io_mem_write(struct MemoryRegion *mr, hwaddr addr,
+ uint64_t value, unsigned size);
+
+void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
+ uintptr_t retaddr);
+
+#include "exec/softmmu_defs.h"
+
+#define ACCESS_TYPE (NB_MMU_MODES + 1)
+#define MEMSUFFIX _code
+
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
+{
+ return addr;
+}
+#else
+/* cputlb.c */
+tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
+#endif
+
+typedef void (CPUDebugExcpHandler)(CPUArchState *env);
+
+void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
+
+/* vl.c */
+extern int singlestep;
+
+/* cpu-exec.c */
+extern volatile sig_atomic_t exit_request;
+
+/* Deterministic execution requires that IO only be performed on the last
+ instruction of a TB so that interrupts take effect immediately. */
+static inline int can_do_io(CPUArchState *env)
+{
+ if (!use_icount) {
+ return 1;
+ }
+ /* If not executing code then assume we are ok. */
+ if (!env->current_tb) {
+ return 1;
+ }
+ return env->can_do_io != 0;
+}
+
+#endif
--- /dev/null
+#ifndef GDBSTUB_H
+#define GDBSTUB_H
+
+#define DEFAULT_GDBSTUB_PORT "1234"
+
+/* GDB breakpoint/watchpoint types */
+#define GDB_BREAKPOINT_SW 0
+#define GDB_BREAKPOINT_HW 1
+#define GDB_WATCHPOINT_WRITE 2
+#define GDB_WATCHPOINT_READ 3
+#define GDB_WATCHPOINT_ACCESS 4
+
+#ifdef NEED_CPU_H
+typedef void (*gdb_syscall_complete_cb)(CPUArchState *env,
+ target_ulong ret, target_ulong err);
+
+void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
+int use_gdb_syscalls(void);
+void gdb_set_stop_cpu(CPUArchState *env);
+void gdb_exit(CPUArchState *, int);
+#ifdef CONFIG_USER_ONLY
+int gdb_queuesig (void);
+int gdb_handlesig (CPUArchState *, int);
+void gdb_signalled(CPUArchState *, int);
+void gdbserver_fork(CPUArchState *);
+#endif
+/* Get or set a register. Returns the size of the register. */
+typedef int (*gdb_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);
+void gdb_register_coprocessor(CPUArchState *env,
+ gdb_reg_cb get_reg, gdb_reg_cb set_reg,
+ int num_regs, const char *xml, int g_pos);
+
+static inline int cpu_index(CPUArchState *env)
+{
+#if defined(CONFIG_USER_ONLY) && defined(CONFIG_USE_NPTL)
+ return env->host_tid;
+#else
+ return env->cpu_index + 1;
+#endif
+}
+
+#endif
+
+#ifdef CONFIG_USER_ONLY
+int gdbserver_start(int);
+#else
+int gdbserver_start(const char *port);
+#endif
+
+/* in gdbstub-xml.c, generated by scripts/feature_to_c.sh */
+extern const char *const xml_builtin[][2];
+
+#endif
--- /dev/null
+#ifndef GEN_ICOUNT_H
+#define GEN_ICOUNT_H 1
+
+#include "qemu-timer.h"
+
+/* Helpers for instruction counting code generation. */
+
+static TCGArg *icount_arg;
+static int icount_label;
+
+static inline void gen_icount_start(void)
+{
+ TCGv_i32 count;
+
+ if (!use_icount)
+ return;
+
+ icount_label = gen_new_label();
+ count = tcg_temp_local_new_i32();
+ tcg_gen_ld_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u32));
+ /* This is a horrid hack to allow fixing up the value later. */
+ icount_arg = tcg_ctx.gen_opparam_ptr + 1;
+ tcg_gen_subi_i32(count, count, 0xdeadbeef);
+
+ tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
+ tcg_gen_st16_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u16.low));
+ tcg_temp_free_i32(count);
+}
+
+static void gen_icount_end(TranslationBlock *tb, int num_insns)
+{
+ if (use_icount) {
+ *icount_arg = num_insns;
+ gen_set_label(icount_label);
+ tcg_gen_exit_tb((tcg_target_long)tb + 2);
+ }
+}
+
+static inline void gen_io_start(void)
+{
+ TCGv_i32 tmp = tcg_const_i32(1);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io));
+ tcg_temp_free_i32(tmp);
+}
+
+static inline void gen_io_end(void)
+{
+ TCGv_i32 tmp = tcg_const_i32(0);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io));
+ tcg_temp_free_i32(tmp);
+}
+
+#endif
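+
+/* Rough usage sketch (the exact loop structure varies per target): a
+ * target's gen_intermediate_code() typically brackets its translation loop
+ * with these helpers, roughly:
+ *
+ *     int num_insns = 0;
+ *     gen_icount_start();
+ *     do {
+ *         // decode and translate one guest instruction
+ *         num_insns++;
+ *     } while (the TB is not full);
+ *     gen_icount_end(tb, num_insns);
+ *
+ * When icount is in use, instructions that may perform I/O are additionally
+ * wrapped in gen_io_start()/gen_io_end() so that can_do_io is only set for
+ * the last instruction of a TB.  This is only an illustration, not target
+ * code.
+ */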
--- /dev/null
+/* Define hwaddr if it exists. */
+
+#ifndef HWADDR_H
+#define HWADDR_H
+
+#ifndef CONFIG_USER_ONLY
+
+#define HWADDR_BITS 64
+/* hwaddr is the type of a physical address (its size can
+ be different from 'target_ulong'). */
+
+typedef uint64_t hwaddr;
+#define HWADDR_MAX UINT64_MAX
+#define TARGET_FMT_plx "%016" PRIx64
+#define HWADDR_PRId PRId64
+#define HWADDR_PRIi PRIi64
+#define HWADDR_PRIo PRIo64
+#define HWADDR_PRIu PRIu64
+#define HWADDR_PRIx PRIx64
+#define HWADDR_PRIX PRIX64
+
+#endif
+
+#endif
--- /dev/null
+/*
+ * defines ioport related functions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**************************************************************************
+ * IO ports API
+ */
+
+#ifndef IOPORT_H
+#define IOPORT_H
+
+#include "qemu-common.h"
+#include "exec/iorange.h"
+
+typedef uint32_t pio_addr_t;
+#define FMT_pioaddr PRIx32
+
+#define MAX_IOPORTS (64 * 1024)
+#define IOPORTS_MASK (MAX_IOPORTS - 1)
+
+/* These should really be in isa.h, but are here to make pc.h happy. */
+typedef void (IOPortWriteFunc)(void *opaque, uint32_t address, uint32_t data);
+typedef uint32_t (IOPortReadFunc)(void *opaque, uint32_t address);
+typedef void (IOPortDestructor)(void *opaque);
+
+void ioport_register(IORange *iorange);
+int register_ioport_read(pio_addr_t start, int length, int size,
+ IOPortReadFunc *func, void *opaque);
+int register_ioport_write(pio_addr_t start, int length, int size,
+ IOPortWriteFunc *func, void *opaque);
+void isa_unassign_ioport(pio_addr_t start, int length);
+bool isa_is_ioport_assigned(pio_addr_t start);
+
+void cpu_outb(pio_addr_t addr, uint8_t val);
+void cpu_outw(pio_addr_t addr, uint16_t val);
+void cpu_outl(pio_addr_t addr, uint32_t val);
+uint8_t cpu_inb(pio_addr_t addr);
+uint16_t cpu_inw(pio_addr_t addr);
+uint32_t cpu_inl(pio_addr_t addr);
+
+struct MemoryRegion;
+struct MemoryRegionPortio;
+
+typedef struct PortioList {
+ const struct MemoryRegionPortio *ports;
+ struct MemoryRegion *address_space;
+ unsigned nr;
+ struct MemoryRegion **regions;
+ struct MemoryRegion **aliases;
+ void *opaque;
+ const char *name;
+} PortioList;
+
+void portio_list_init(PortioList *piolist,
+ const struct MemoryRegionPortio *callbacks,
+ void *opaque, const char *name);
+void portio_list_destroy(PortioList *piolist);
+void portio_list_add(PortioList *piolist,
+ struct MemoryRegion *address_space,
+ uint32_t addr);
+void portio_list_del(PortioList *piolist);
+
+#endif /* IOPORT_H */
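+
+/* Usage sketch (hypothetical device, illustrative only): registering a
+ * single byte-wide port with the legacy API declared above:
+ *
+ *     static uint32_t example_io_read(void *opaque, uint32_t addr)
+ *     {
+ *         return 0xff;              // device reads as all-ones
+ *     }
+ *
+ *     static void example_io_write(void *opaque, uint32_t addr,
+ *                                  uint32_t data)
+ *     {
+ *         // writes ignored
+ *     }
+ *
+ *     register_ioport_read(0x510, 1, 1, example_io_read, NULL);
+ *     register_ioport_write(0x510, 1, 1, example_io_write, NULL);
+ *
+ * Port 0x510 and the handlers are invented for the example; newer devices
+ * typically use the MemoryRegion API instead.
+ */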
--- /dev/null
+#ifndef IORANGE_H
+#define IORANGE_H
+
+#include <stdint.h>
+
+typedef struct IORange IORange;
+typedef struct IORangeOps IORangeOps;
+
+struct IORangeOps {
+ void (*read)(IORange *iorange, uint64_t offset, unsigned width,
+ uint64_t *data);
+ void (*write)(IORange *iorange, uint64_t offset, unsigned width,
+ uint64_t data);
+ void (*destructor)(IORange *iorange);
+};
+
+struct IORange {
+ const IORangeOps *ops;
+ uint64_t base;
+ uint64_t len;
+};
+
+static inline void iorange_init(IORange *iorange, const IORangeOps *ops,
+ uint64_t base, uint64_t len)
+{
+ iorange->ops = ops;
+ iorange->base = base;
+ iorange->len = len;
+}
+
+#endif
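+
+/* Usage sketch (illustrative only): a user of IORange supplies an
+ * IORangeOps and initializes a range covering its registers:
+ *
+ *     static void example_range_read(IORange *iorange, uint64_t offset,
+ *                                    unsigned width, uint64_t *data)
+ *     {
+ *         *data = 0;                // hypothetical device: reads as zero
+ *     }
+ *
+ *     static void example_range_write(IORange *iorange, uint64_t offset,
+ *                                     unsigned width, uint64_t data)
+ *     {
+ *         // writes ignored
+ *     }
+ *
+ *     static const IORangeOps example_range_ops = {
+ *         .read  = example_range_read,
+ *         .write = example_range_write,
+ *     };
+ *
+ *     IORange range;
+ *     iorange_init(&range, &example_range_ops, 0x1000, 0x100);
+ *
+ * The names, base and length are placeholders for the example.
+ */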
--- /dev/null
+/*
+ * Declarations for obsolete exec.c functions
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * This header is for use by exec.c and memory.c ONLY. Do not include it.
+ * The functions declared here will be removed soon.
+ */
+
+#ifndef MEMORY_INTERNAL_H
+#define MEMORY_INTERNAL_H
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/xen.h"
+
+typedef struct PhysPageEntry PhysPageEntry;
+
+struct PhysPageEntry {
+ uint16_t is_leaf : 1;
+ /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
+ uint16_t ptr : 15;
+};
+
+typedef struct AddressSpaceDispatch AddressSpaceDispatch;
+
+struct AddressSpaceDispatch {
+ /* This is a multi-level map on the physical address space.
+ * The bottom level has pointers to MemoryRegionSections.
+ */
+ PhysPageEntry phys_map;
+ MemoryListener listener;
+};
+
+void address_space_init_dispatch(AddressSpace *as);
+void address_space_destroy_dispatch(AddressSpace *as);
+
+ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+ MemoryRegion *mr);
+ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
+void qemu_ram_free(ram_addr_t addr);
+void qemu_ram_free_from_ptr(ram_addr_t addr);
+
+struct MemoryRegion;
+struct MemoryRegionSection;
+
+void qemu_register_coalesced_mmio(hwaddr addr, ram_addr_t size);
+void qemu_unregister_coalesced_mmio(hwaddr addr, ram_addr_t size);
+
+#define VGA_DIRTY_FLAG 0x01
+#define CODE_DIRTY_FLAG 0x02
+#define MIGRATION_DIRTY_FLAG 0x08
+
+static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
+{
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
+}
+
+/* read dirty bit (return 0 or 1) */
+static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
+{
+ return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
+}
+
+static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
+ ram_addr_t length,
+ int dirty_flags)
+{
+ int ret = 0;
+ ram_addr_t addr, end;
+
+ end = TARGET_PAGE_ALIGN(start + length);
+ start &= TARGET_PAGE_MASK;
+ for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
+ ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
+ }
+ return ret;
+}
+
+static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
+ int dirty_flags)
+{
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
+}
+
+static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
+{
+ cpu_physical_memory_set_dirty_flags(addr, 0xff);
+}
+
+static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
+ int dirty_flags)
+{
+ int mask = ~dirty_flags;
+
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
+}
+
+static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
+ ram_addr_t length,
+ int dirty_flags)
+{
+ ram_addr_t addr, end;
+
+ end = TARGET_PAGE_ALIGN(start + length);
+ start &= TARGET_PAGE_MASK;
+ for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
+ cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
+ }
+ xen_modified_memory(addr, length);
+}
+
+static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
+ ram_addr_t length,
+ int dirty_flags)
+{
+ ram_addr_t addr, end;
+
+ end = TARGET_PAGE_ALIGN(start + length);
+ start &= TARGET_PAGE_MASK;
+ for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
+ cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
+ }
+}
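+
+/* Worked example (illustrative; "addr" stands for some page-aligned
+ * ram_addr_t): mark a page dirty for both VGA and migration, then test and
+ * clear only the migration bit:
+ *
+ *     cpu_physical_memory_set_dirty_range(addr, TARGET_PAGE_SIZE,
+ *                                         VGA_DIRTY_FLAG |
+ *                                         MIGRATION_DIRTY_FLAG);
+ *     if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
+ *                                       MIGRATION_DIRTY_FLAG)) {
+ *         cpu_physical_memory_mask_dirty_range(addr, TARGET_PAGE_SIZE,
+ *                                              MIGRATION_DIRTY_FLAG);
+ *     }
+ *
+ * The VGA bit remains set afterwards, since each client's flag is tracked
+ * independently in ram_list.phys_dirty.
+ */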
+
+void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
+ int dirty_flags);
+
+extern const IORangeOps memory_region_iorange_ops;
+
+#endif
+
+#endif
--- /dev/null
+/*
+ * Physical memory management API
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMORY_H
+#define MEMORY_H
+
+#ifndef CONFIG_USER_ONLY
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "qemu-common.h"
+#include "exec/cpu-common.h"
+#include "exec/hwaddr.h"
+#include "qemu-queue.h"
+#include "exec/iorange.h"
+#include "exec/ioport.h"
+#include "int128.h"
+
+typedef struct MemoryRegionOps MemoryRegionOps;
+typedef struct MemoryRegion MemoryRegion;
+typedef struct MemoryRegionPortio MemoryRegionPortio;
+typedef struct MemoryRegionMmio MemoryRegionMmio;
+
+/* Must match the *_DIRTY_FLAG values in exec/memory-internal.h. To be
+ * replaced with dynamic registration.
+ */
+#define DIRTY_MEMORY_VGA 0
+#define DIRTY_MEMORY_CODE 1
+#define DIRTY_MEMORY_MIGRATION 3
+
+struct MemoryRegionMmio {
+ CPUReadMemoryFunc *read[3];
+ CPUWriteMemoryFunc *write[3];
+};
+
+/* Internal use; thunks between old-style IORange and MemoryRegions. */
+typedef struct MemoryRegionIORange MemoryRegionIORange;
+struct MemoryRegionIORange {
+ IORange iorange;
+ MemoryRegion *mr;
+ hwaddr offset;
+};
+
+/*
+ * Memory region callbacks
+ */
+struct MemoryRegionOps {
+ /* Read from the memory region. @addr is relative to @mr; @size is
+ * in bytes. */
+ uint64_t (*read)(void *opaque,
+ hwaddr addr,
+ unsigned size);
+ /* Write to the memory region. @addr is relative to @mr; @size is
+ * in bytes. */
+ void (*write)(void *opaque,
+ hwaddr addr,
+ uint64_t data,
+ unsigned size);
+
+ enum device_endian endianness;
+ /* Guest-visible constraints: */
+ struct {
+ /* If nonzero, specify bounds on access sizes beyond which a machine
+ * check is thrown.
+ */
+ unsigned min_access_size;
+ unsigned max_access_size;
+ /* If true, unaligned accesses are supported. Otherwise unaligned
+ * accesses throw machine checks.
+ */
+ bool unaligned;
+ /*
+ * If present, and returns #false, the transaction is not accepted
+ * by the device (and results in machine dependent behaviour such
+ * as a machine check exception).
+ */
+ bool (*accepts)(void *opaque, hwaddr addr,
+ unsigned size, bool is_write);
+ } valid;
+ /* Internal implementation constraints: */
+ struct {
+ /* If nonzero, specifies the minimum size implemented. Smaller sizes
+ * will be rounded upwards and a partial result will be returned.
+ */
+ unsigned min_access_size;
+ /* If nonzero, specifies the maximum size implemented. Larger sizes
+ * will be done as a series of accesses with smaller sizes.
+ */
+ unsigned max_access_size;
+ /* If true, unaligned accesses are supported. Otherwise all accesses
+ * are converted to (possibly multiple) naturally aligned accesses.
+ */
+ bool unaligned;
+ } impl;
+
+ /* If .read and .write are not present, old_portio may be used for
+ * backwards compatibility with old portio registration
+ */
+ const MemoryRegionPortio *old_portio;
+ /* If .read and .write are not present, old_mmio may be used for
+ * backwards compatibility with old mmio registration
+ */
+ const MemoryRegionMmio old_mmio;
+};
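+
+/* Minimal sketch of a MemoryRegionOps for a hypothetical MMIO device (names
+ * and behaviour are invented; DEVICE_NATIVE_ENDIAN comes from the
+ * device_endian enum defined elsewhere):
+ *
+ *     static uint64_t example_mmio_read(void *opaque, hwaddr addr,
+ *                                       unsigned size)
+ *     {
+ *         return 0;                 // all registers read as zero
+ *     }
+ *
+ *     static void example_mmio_write(void *opaque, hwaddr addr,
+ *                                    uint64_t data, unsigned size)
+ *     {
+ *         // writes ignored
+ *     }
+ *
+ *     static const MemoryRegionOps example_mmio_ops = {
+ *         .read = example_mmio_read,
+ *         .write = example_mmio_write,
+ *         .endianness = DEVICE_NATIVE_ENDIAN,
+ *         .valid.min_access_size = 1,
+ *         .valid.max_access_size = 4,
+ *     };
+ *
+ * Such an ops structure is then passed to memory_region_init_io() below.
+ */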
+
+typedef struct CoalescedMemoryRange CoalescedMemoryRange;
+typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
+
+struct MemoryRegion {
+ /* All fields are private - violators will be prosecuted */
+ const MemoryRegionOps *ops;
+ void *opaque;
+ MemoryRegion *parent;
+ Int128 size;
+ hwaddr addr;
+ void (*destructor)(MemoryRegion *mr);
+ ram_addr_t ram_addr;
+ bool subpage;
+ bool terminates;
+ bool readable;
+ bool ram;
+ bool readonly; /* For RAM regions */
+ bool enabled;
+ bool rom_device;
+ bool warning_printed; /* For reservations */
+ bool flush_coalesced_mmio;
+ MemoryRegion *alias;
+ hwaddr alias_offset;
+ unsigned priority;
+ bool may_overlap;
+ QTAILQ_HEAD(subregions, MemoryRegion) subregions;
+ QTAILQ_ENTRY(MemoryRegion) subregions_link;
+ QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
+ const char *name;
+ uint8_t dirty_log_mask;
+ unsigned ioeventfd_nb;
+ MemoryRegionIoeventfd *ioeventfds;
+};
+
+struct MemoryRegionPortio {
+ uint32_t offset;
+ uint32_t len;
+ unsigned size;
+ IOPortReadFunc *read;
+ IOPortWriteFunc *write;
+};
+
+#define PORTIO_END_OF_LIST() { }
+
+typedef struct AddressSpace AddressSpace;
+
+/**
+ * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
+ */
+struct AddressSpace {
+ /* All fields are private. */
+ const char *name;
+ MemoryRegion *root;
+ struct FlatView *current_map;
+ int ioeventfd_nb;
+ struct MemoryRegionIoeventfd *ioeventfds;
+ struct AddressSpaceDispatch *dispatch;
+ QTAILQ_ENTRY(AddressSpace) address_spaces_link;
+};
+
+typedef struct MemoryRegionSection MemoryRegionSection;
+
+/**
+ * MemoryRegionSection: describes a fragment of a #MemoryRegion
+ *
+ * @mr: the region, or %NULL if empty
+ * @address_space: the address space the region is mapped in
+ * @offset_within_region: the beginning of the section, relative to @mr's start
+ * @size: the size of the section; will not exceed @mr's boundaries
+ * @offset_within_address_space: the address of the first byte of the section
+ * relative to the region's address space
+ * @readonly: writes to this section are ignored
+ */
+struct MemoryRegionSection {
+ MemoryRegion *mr;
+ AddressSpace *address_space;
+ hwaddr offset_within_region;
+ uint64_t size;
+ hwaddr offset_within_address_space;
+ bool readonly;
+};
+
+typedef struct MemoryListener MemoryListener;
+
+/**
+ * MemoryListener: callbacks structure for updates to the physical memory map
+ *
+ * Allows a component to adjust to changes in the guest-visible memory map.
+ * Use with memory_listener_register() and memory_listener_unregister().
+ */
+struct MemoryListener {
+ void (*begin)(MemoryListener *listener);
+ void (*commit)(MemoryListener *listener);
+ void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_global_start)(MemoryListener *listener);
+ void (*log_global_stop)(MemoryListener *listener);
+ void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e);
+ void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e);
+ void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
+ hwaddr addr, hwaddr len);
+ void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
+ hwaddr addr, hwaddr len);
+ /* Lower = earlier (during add), later (during del) */
+ unsigned priority;
+ AddressSpace *address_space_filter;
+ QTAILQ_ENTRY(MemoryListener) link;
+};
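+
+/* Sketch of a minimal listener (illustrative; the callback body and priority
+ * are arbitrary, and hooks that are not of interest may need explicit no-op
+ * implementations depending on the memory core):
+ *
+ *     static void example_region_add(MemoryListener *listener,
+ *                                    MemoryRegionSection *section)
+ *     {
+ *         // react to a region becoming visible to the guest
+ *     }
+ *
+ *     static MemoryListener example_listener = {
+ *         .region_add = example_region_add,
+ *         .priority = 10,
+ *     };
+ *
+ *     memory_listener_register(&example_listener, NULL);
+ *
+ * Passing a non-NULL AddressSpace as the second argument restricts the
+ * callbacks to that address space, as documented at
+ * memory_listener_register() below.
+ */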
+
+/**
+ * memory_region_init: Initialize a memory region
+ *
+ * The region typically acts as a container for other memory regions. Use
+ * memory_region_add_subregion() to add subregions.
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region; any subregions beyond this size will be clipped
+ */
+void memory_region_init(MemoryRegion *mr,
+ const char *name,
+ uint64_t size);
+/**
+ * memory_region_init_io: Initialize an I/O memory region.
+ *
+ * Accesses into the region will cause the callbacks in @ops to be called.
+ * If @size is nonzero, subregions will be clipped to @size.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @ops: a structure containing read and write callbacks to be used when
+ * I/O is performed on the region.
+ * @opaque: passed to the read and write callbacks of the @ops structure.
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_io(MemoryRegion *mr,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size);
+
+/**
+ * memory_region_init_ram: Initialize RAM memory region. Accesses into the
+ * region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @name: the name of the region.
+ * @size: size of the region.
+ */
+void memory_region_init_ram(MemoryRegion *mr,
+ const char *name,
+ uint64_t size);
+
+/**
+ * memory_region_init_ram_ptr: Initialize RAM memory region from a
+ * user-provided pointer. Accesses into the
+ * region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @ptr: memory to be mapped; must contain at least @size bytes.
+ */
+void memory_region_init_ram_ptr(MemoryRegion *mr,
+ const char *name,
+ uint64_t size,
+ void *ptr);
+
+/**
+ * memory_region_init_alias: Initialize a memory region that aliases all or a
+ * part of another memory region.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @name: used for debugging; not visible to the user or ABI
+ * @orig: the region to be referenced; @mr will be equivalent to
+ * @orig between @offset and @offset + @size - 1.
+ * @offset: start of the section in @orig to be referenced.
+ * @size: size of the region.
+ */
+void memory_region_init_alias(MemoryRegion *mr,
+ const char *name,
+ MemoryRegion *orig,
+ hwaddr offset,
+ uint64_t size);
+
+/**
+ * memory_region_init_rom_device: Initialize a ROM memory region. Writes are
+ * handled via callbacks.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @ops: callbacks for write access handling.
+ * @name: the name of the region.
+ * @size: size of the region.
+ */
+void memory_region_init_rom_device(MemoryRegion *mr,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size);
+
+/**
+ * memory_region_init_reservation: Initialize a memory region that reserves
+ * I/O space.
+ *
+ * A reservation region primarily serves debugging purposes. It claims I/O
+ * space that is not supposed to be handled by QEMU itself. Any access via
+ * the memory API will cause an abort().
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_reservation(MemoryRegion *mr,
+ const char *name,
+ uint64_t size);
+/**
+ * memory_region_destroy: Destroy a memory region and reclaim all resources.
+ *
+ * @mr: the region to be destroyed. May not currently be a subregion
+ * (see memory_region_add_subregion()) or referenced in an alias
+ * (see memory_region_init_alias()).
+ */
+void memory_region_destroy(MemoryRegion *mr);
+
+/**
+ * memory_region_size: get a memory region's size.
+ *
+ * @mr: the memory region being queried.
+ */
+uint64_t memory_region_size(MemoryRegion *mr);
+
+/**
+ * memory_region_is_ram: check whether a memory region is random access
+ *
+ * Returns %true if a memory region is random access.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_ram(MemoryRegion *mr);
+
+/**
+ * memory_region_is_romd: check whether a memory region is ROMD
+ *
+ * Returns %true if a memory region is ROMD and currently set to allow
+ * direct reads.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_romd(MemoryRegion *mr)
+{
+ return mr->rom_device && mr->readable;
+}
+
+/**
+ * memory_region_name: get a memory region's name
+ *
+ * Returns the string that was used to initialize the memory region.
+ *
+ * @mr: the memory region being queried
+ */
+const char *memory_region_name(MemoryRegion *mr);
+
+/**
+ * memory_region_is_logging: return whether a memory region is logging writes
+ *
+ * Returns %true if the memory region is logging writes
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_logging(MemoryRegion *mr);
+
+/**
+ * memory_region_is_rom: check whether a memory region is ROM
+ *
+ * Returns %true if a memory region is read-only memory.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_rom(MemoryRegion *mr);
+
+/**
+ * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
+ *
+ * Returns a host pointer to a RAM memory region (created with
+ * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with
+ * care.
+ *
+ * @mr: the memory region being queried.
+ */
+void *memory_region_get_ram_ptr(MemoryRegion *mr);
+
+/**
+ * memory_region_set_log: Turn dirty logging on or off for a region.
+ *
+ * Turns dirty logging on or off for a specified client (display, migration).
+ * Only meaningful for RAM regions.
+ *
+ * @mr: the memory region being updated.
+ * @log: whether dirty logging is to be enabled or disabled.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ * %DIRTY_MEMORY_VGA.
+ */
+void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
+
+/**
+ * memory_region_get_dirty: Check whether a range of bytes is dirty
+ * for a specified client.
+ *
+ * Checks whether a range of bytes has been written to since the last
+ * call to memory_region_reset_dirty() with the same @client. Dirty logging
+ * must be enabled.
+ *
+ * @mr: the memory region being queried.
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ * %DIRTY_MEMORY_VGA.
+ */
+bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size, unsigned client);
+
+/**
+ * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
+ *
+ * Marks a range of bytes as dirty, after it has been dirtied outside
+ * guest code.
+ *
+ * @mr: the memory region being dirtied.
+ * @addr: the address (relative to the start of the region) being dirtied.
+ * @size: size of the range being dirtied.
+ */
+void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size);
+
+/**
+ * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
+ * any external TLBs (e.g. kvm)
+ *
+ * Flushes dirty information from accelerators such as kvm and vhost-net
+ * and makes it available to users of the memory API.
+ *
+ * @mr: the region being flushed.
+ */
+void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
+
+/**
+ * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
+ * client.
+ *
+ * Marks a range of pages as no longer dirty.
+ *
+ * @mr: the region being updated.
+ * @addr: the start of the subrange being cleaned.
+ * @size: the size of the subrange being cleaned.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ * %DIRTY_MEMORY_VGA.
+ */
+void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size, unsigned client);
+
+/**
+ * memory_region_set_readonly: Turn a memory region read-only (or read-write)
+ *
+ * Allows a memory region to be marked as read-only (turning it into a ROM).
+ * Only useful on RAM regions.
+ *
+ * @mr: the region being updated.
+ * @readonly: whether the region is to be ROM or RAM.
+ */
+void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
+
+/**
+ * memory_region_rom_device_set_readable: enable/disable ROM readability
+ *
+ * Allows a ROM device (initialized with memory_region_init_rom_device())
+ * to be marked as readable (default) or not readable. When it is readable,
+ * the device is mapped to guest memory. When not readable, reads are
+ * forwarded to the #MemoryRegion.read function.
+ *
+ * @mr: the memory region to be updated
+ * @readable: whether reads are satisfied directly (%true) or via callbacks
+ * (%false)
+ */
+void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable);
+
+/**
+ * memory_region_set_coalescing: Enable memory coalescing for the region.
+ *
+ * Enables writes to a region to be queued for later processing. MMIO ->write
+ * callbacks may be delayed until a non-coalesced MMIO is issued.
+ * Only useful for IO regions. Roughly similar to write-combining hardware.
+ *
+ * @mr: the memory region to be write coalesced
+ */
+void memory_region_set_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
+ * a region.
+ *
+ * Like memory_region_set_coalescing(), but works on a sub-range of a region.
+ * Multiple calls can be issued to coalesce disjoint ranges.
+ *
+ * @mr: the memory region to be updated.
+ * @offset: the start of the range within the region to be coalesced.
+ * @size: the size of the subrange to be coalesced.
+ */
+void memory_region_add_coalescing(MemoryRegion *mr,
+ hwaddr offset,
+ uint64_t size);
+
+/**
+ * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
+ *
+ * Disables any coalescing caused by memory_region_set_coalescing() or
+ * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
+ * hardware.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
+ * accesses.
+ *
+ * Ensures that pending coalesced MMIO requests are flushed before the memory
+ * region is accessed. This property is automatically enabled for all regions
+ * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_set_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
+ * accesses.
+ *
+ * Clear the automatic coalesced MMIO flushing enabled via
+ * memory_region_set_flush_coalesced. Note that this service has no effect on
+ * memory regions that have MMIO coalescing enabled for themselves. For them,
+ * automatic flushing will stop once coalescing is disabled.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_add_eventfd: Request an eventfd to be triggered when a word
+ * is written to a location.
+ *
+ * Marks a word in an IO region (initialized with memory_region_init_io())
+ * as a trigger for an eventfd event. The I/O callback will not be called.
+ * The caller must be prepared to handle failure (that is, take the required
+ * action if the callback _is_ called).
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @e: event notifier triggered when @addr, @size, and @data all match.
+ */
+void memory_region_add_eventfd(MemoryRegion *mr,
+ hwaddr addr,
+ unsigned size,
+ bool match_data,
+ uint64_t data,
+ EventNotifier *e);
+
+/**
+ * memory_region_del_eventfd: Cancel an eventfd.
+ *
+ * Cancels an eventfd trigger requested by a previous
+ * memory_region_add_eventfd() call.
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @e: event notifier triggered when @addr, @size, and @data all match.
+ */
+void memory_region_del_eventfd(MemoryRegion *mr,
+ hwaddr addr,
+ unsigned size,
+ bool match_data,
+ uint64_t data,
+ EventNotifier *e);
+
+/**
+ * memory_region_add_subregion: Add a subregion to a container.
+ *
+ * Adds a subregion at @offset. The subregion may not overlap with other
+ * subregions (except for those explicitly marked as overlapping). A region
+ * may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ * initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ */
+void memory_region_add_subregion(MemoryRegion *mr,
+ hwaddr offset,
+ MemoryRegion *subregion);
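+
+/* Typical construction sketch (illustrative name and size;
+ * get_system_memory() is declared in exec/address-spaces.h): create a RAM
+ * region and map it at guest physical address 0 with the functions above:
+ *
+ *     MemoryRegion *sysmem = get_system_memory();
+ *     MemoryRegion *ram = g_new(MemoryRegion, 1);
+ *
+ *     memory_region_init_ram(ram, "example.ram", 128 * 1024 * 1024);
+ *     memory_region_add_subregion(sysmem, 0, ram);
+ *
+ * "example.ram" and the 128 MB size are placeholders; boards choose their
+ * own names and sizes.
+ */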
+/**
+ * memory_region_add_subregion_overlap: Add a subregion to a container
+ * with overlap.
+ *
+ * Adds a subregion at @offset. The subregion may overlap with other
+ * subregions. Conflicts are resolved by having a higher @priority hide a
+ * lower @priority. Subregions without priority are taken as @priority 0.
+ * A region may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ * initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ * @priority: used for resolving overlaps; highest priority wins.
+ */
+void memory_region_add_subregion_overlap(MemoryRegion *mr,
+ hwaddr offset,
+ MemoryRegion *subregion,
+ unsigned priority);
+
+/**
+ * memory_region_get_ram_addr: Get the ram address associated with a memory
+ * region
+ *
+ * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
+ * code is being reworked.
+ */
+ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
+
+/**
+ * memory_region_del_subregion: Remove a subregion.
+ *
+ * Removes a subregion from its container.
+ *
+ * @mr: the container to be updated.
+ * @subregion: the region being removed; must be a current subregion of @mr.
+ */
+void memory_region_del_subregion(MemoryRegion *mr,
+ MemoryRegion *subregion);
+
+/*
+ * memory_region_set_enabled: dynamically enable or disable a region
+ *
+ * Enables or disables a memory region. A disabled memory region
+ * ignores all accesses to itself and its subregions. It does not
+ * obscure sibling subregions with lower priority - it simply behaves as
+ * if it was removed from the hierarchy.
+ *
+ * Regions default to being enabled.
+ *
+ * @mr: the region to be updated
+ * @enabled: whether to enable or disable the region
+ */
+void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
+
+/*
+ * memory_region_set_address: dynamically update the address of a region
+ *
+ * Dynamically updates the address of a region, relative to its parent.
+ * May be used on regions that are currently part of a memory hierarchy.
+ *
+ * @mr: the region to be updated
+ * @addr: new address, relative to parent region
+ */
+void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
+
+/*
+ * memory_region_set_alias_offset: dynamically update a memory alias's offset
+ *
+ * Dynamically updates the offset into the target region that an alias points
+ * to, as if the fourth argument to memory_region_init_alias() has changed.
+ *
+ * @mr: the #MemoryRegion to be updated; should be an alias.
+ * @offset: the new offset into the target memory region
+ */
+void memory_region_set_alias_offset(MemoryRegion *mr,
+ hwaddr offset);
+
+/**
+ * memory_region_find: locate a MemoryRegion in an address space
+ *
+ * Locates the first #MemoryRegion within an address space given by
+ * @address_space that overlaps the range given by @addr and @size.
+ *
+ * Returns a #MemoryRegionSection that describes a contiguous overlap.
+ * It will have the following characteristics:
+ * .@offset_within_address_space >= @addr
+ * .@offset_within_address_space + .@size <= @addr + @size
+ * .@size = 0 iff no overlap was found
+ * .@mr is non-%NULL iff an overlap was found
+ *
+ * @address_space: a top-level (i.e. parentless) region that contains
+ * the region to be found
+ * @addr: start of the area within @address_space to be searched
+ * @size: size of the area to be searched
+ */
+MemoryRegionSection memory_region_find(MemoryRegion *address_space,
+ hwaddr addr, uint64_t size);
+
+/**
+ * memory_region_section_addr: get offset within MemoryRegionSection
+ *
+ * Returns offset within MemoryRegionSection
+ *
+ * @section: the memory region section being queried
+ * @addr: address in address space
+ */
+static inline hwaddr
+memory_region_section_addr(MemoryRegionSection *section,
+ hwaddr addr)
+{
+ addr -= section->offset_within_address_space;
+ addr += section->offset_within_region;
+ return addr;
+}
+
+/**
+ * memory_global_sync_dirty_bitmap: synchronize the dirty log for all memory
+ *
+ * Synchronizes the dirty page log for an entire address space.
+ * @address_space: a top-level (i.e. parentless) region that contains the
+ * memory being synchronized
+ */
+void memory_global_sync_dirty_bitmap(MemoryRegion *address_space);
+
+/**
+ * memory_region_transaction_begin: Start a transaction.
+ *
+ * During a transaction, changes will be accumulated and made visible
+ * only when the transaction ends (is committed).
+ */
+void memory_region_transaction_begin(void);
+
+/**
+ * memory_region_transaction_commit: Commit a transaction and make changes
+ * visible to the guest.
+ */
+void memory_region_transaction_commit(void);
+
+/**
+ * memory_listener_register: register callbacks to be called when memory
+ * sections are mapped or unmapped into an address
+ * space
+ *
+ * @listener: an object containing the callbacks to be called
+ * @filter: if non-%NULL, only regions in this address space will be observed
+ */
+void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
+
+/**
+ * memory_listener_unregister: undo the effect of memory_listener_register()
+ *
+ * @listener: an object containing the callbacks to be removed
+ */
+void memory_listener_unregister(MemoryListener *listener);
+
+/**
+ * memory_global_dirty_log_start: begin dirty logging for all regions
+ */
+void memory_global_dirty_log_start(void);
+
+/**
+ * memory_global_dirty_log_stop: end dirty logging for all regions
+ */
+void memory_global_dirty_log_stop(void);
+
+void mtree_info(fprintf_function mon_printf, void *f);
+
+/**
+ * address_space_init: initializes an address space
+ *
+ * @as: an uninitialized #AddressSpace
+ * @root: a #MemoryRegion that routes addresses for the address space
+ */
+void address_space_init(AddressSpace *as, MemoryRegion *root);
+
+
+/**
+ * address_space_destroy: destroy an address space
+ *
+ * Releases all resources associated with an address space. After an address space
+ * is destroyed, its root memory region (given by address_space_init()) may be destroyed
+ * as well.
+ *
+ * @as: address space to be destroyed
+ */
+void address_space_destroy(AddressSpace *as);
+
+/**
+ * address_space_rw: read from or write to an address space.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @is_write: indicates the transfer direction
+ */
+void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
+ int len, bool is_write);
+
+/**
+ * address_space_write: write to address space.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ */
+void address_space_write(AddressSpace *as, hwaddr addr,
+ const uint8_t *buf, int len);
+
+/**
+ * address_space_read: read from an address space.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ */
+void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
+
+/* address_space_map: map a physical memory region into a host virtual address
+ *
+ * May map a subset of the requested range, given by and returned in @plen.
+ * May return %NULL if resources needed to perform the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ * Use cpu_register_map_client() to know when retrying the map operation is
+ * likely to succeed.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @plen: pointer to length of buffer; updated on return
+ * @is_write: indicates the transfer direction
+ */
+void *address_space_map(AddressSpace *as, hwaddr addr,
+ hwaddr *plen, bool is_write);
+
+/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
+ *
+ * Will also mark the memory as dirty if @is_write == %true. @access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ *
+ * @as: #AddressSpace used
+ * @addr: address within that address space
+ * @len: buffer length as returned by address_space_map()
+ * @access_len: amount of data actually transferred
+ * @is_write: indicates the transfer direction
+ */
+void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
+ int is_write, hwaddr access_len);
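+/*
+ * Typical usage sketch (illustrative; "gpa" and the length are placeholders,
+ * and &address_space_memory is assumed from "exec/address-spaces.h"):
+ *
+ *     hwaddr len = 4096;
+ *     void *p = address_space_map(&address_space_memory, gpa, &len, true);
+ *     if (p) {
+ *         memset(p, 0, len);
+ *         address_space_unmap(&address_space_memory, p, len, true, len);
+ *     }
+ *
+ * On return @len may be smaller than requested; if the mapping fails (%NULL),
+ * fall back to address_space_rw() or retry once cpu_register_map_client()
+ * reports that resources have been freed.
+ */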
+
+
+#endif
+
+#endif
--- /dev/null
+/* Poison identifiers that should not be used when building
+ target independent device code. */
+
+#ifndef HW_POISON_H
+#define HW_POISON_H
+#ifdef __GNUC__
+
+#pragma GCC poison TARGET_I386
+#pragma GCC poison TARGET_X86_64
+#pragma GCC poison TARGET_ALPHA
+#pragma GCC poison TARGET_ARM
+#pragma GCC poison TARGET_CRIS
+#pragma GCC poison TARGET_LM32
+#pragma GCC poison TARGET_M68K
+#pragma GCC poison TARGET_MIPS
+#pragma GCC poison TARGET_MIPS64
+#pragma GCC poison TARGET_OPENRISC
+#pragma GCC poison TARGET_PPC
+#pragma GCC poison TARGET_PPCEMB
+#pragma GCC poison TARGET_PPC64
+#pragma GCC poison TARGET_ABI32
+#pragma GCC poison TARGET_SH4
+#pragma GCC poison TARGET_SPARC
+#pragma GCC poison TARGET_SPARC64
+
+#pragma GCC poison TARGET_WORDS_BIGENDIAN
+#pragma GCC poison BSWAP_NEEDED
+
+#pragma GCC poison TARGET_LONG_BITS
+#pragma GCC poison TARGET_FMT_lx
+#pragma GCC poison TARGET_FMT_ld
+
+#pragma GCC poison TARGET_PAGE_SIZE
+#pragma GCC poison TARGET_PAGE_MASK
+#pragma GCC poison TARGET_PAGE_BITS
+#pragma GCC poison TARGET_PAGE_ALIGN
+
+#pragma GCC poison CPUArchState
+#pragma GCC poison env
+
+#pragma GCC poison lduw_phys
+#pragma GCC poison ldl_phys
+#pragma GCC poison ldq_phys
+#pragma GCC poison stl_phys_notdirty
+#pragma GCC poison stq_phys_notdirty
+#pragma GCC poison stw_phys
+#pragma GCC poison stl_phys
+#pragma GCC poison stq_phys
+
+#pragma GCC poison CPU_INTERRUPT_HARD
+#pragma GCC poison CPU_INTERRUPT_EXITTB
+#pragma GCC poison CPU_INTERRUPT_HALT
+#pragma GCC poison CPU_INTERRUPT_DEBUG
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_0
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_1
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_2
+
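+/*
+ * Example of the effect (illustrative): target-independent device code that
+ * ends up referencing one of the identifiers above, e.g.
+ *
+ *     int sz = TARGET_PAGE_SIZE;
+ *
+ * fails to compile with an "attempt to use poisoned ..." diagnostic,
+ * flagging code that must instead be built per target.
+ */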
+#endif
+#endif
--- /dev/null
+/*
+ * Helper routines to provide target memory access for semihosting
+ * syscalls in system emulation mode.
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ *
+ * This code is licensed under the GPL
+ */
+#ifndef SOFTMMU_SEMI_H
+#define SOFTMMU_SEMI_H 1
+
+static inline uint32_t softmmu_tget32(CPUArchState *env, uint32_t addr)
+{
+ uint32_t val;
+
+ cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 0);
+ return tswap32(val);
+}
+static inline uint32_t softmmu_tget8(CPUArchState *env, uint32_t addr)
+{
+ uint8_t val;
+
+ cpu_memory_rw_debug(env, addr, &val, 1, 0);
+ return val;
+}
+
+#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p); 0; })
+#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p); 0; })
+#define get_user_ual(arg, p) get_user_u32(arg, p)
+
+static inline void softmmu_tput32(CPUArchState *env, uint32_t addr, uint32_t val)
+{
+ val = tswap32(val);
+ cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 1);
+}
+#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg); 0; })
+#define put_user_ual(arg, p) put_user_u32(arg, p)
+
+static void *softmmu_lock_user(CPUArchState *env, uint32_t addr, uint32_t len,
+ int copy)
+{
+ uint8_t *p;
+ /* TODO: Make this something that isn't fixed size. */
+ p = malloc(len);
+ if (p && copy)
+ cpu_memory_rw_debug(env, addr, p, len, 0);
+ return p;
+}
+#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)
+static char *softmmu_lock_user_string(CPUArchState *env, uint32_t addr)
+{
+ char *p;
+ char *s;
+ uint8_t c;
+ /* TODO: Make this something that isn't fixed size. */
+ s = p = malloc(1024);
+ if (!s) {
+ return NULL;
+ }
+ do {
+ cpu_memory_rw_debug(env, addr, &c, 1, 0);
+ addr++;
+ *(p++) = c;
+ } while (c);
+ return s;
+}
+#define lock_user_string(p) softmmu_lock_user_string(env, p)
+static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
+ target_ulong len)
+{
+ if (len)
+ cpu_memory_rw_debug(env, addr, p, len, 1);
+ free(p);
+}
+#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len)
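+/*
+ * Usage sketch (illustrative; the argument names and handler shape are made
+ * up, but the calls match the macros above and assume "env" and "args" are
+ * in scope):
+ *
+ *     target_ulong arg0;
+ *     char *s;
+ *
+ *     get_user_ual(arg0, args);
+ *     s = lock_user_string(arg0);
+ *     if (s) {
+ *         ... consume the NUL-terminated guest string on the host side ...
+ *         unlock_user(s, arg0, 0);
+ *     }
+ *
+ * A final @len of 0 in unlock_user() skips the write-back and just frees
+ * the bounce buffer.
+ */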
+
+#endif
--- /dev/null
+/*
+ * Software MMU support
+ *
+ * Declare helpers used by TCG for qemu_ld/st ops.
+ *
+ * Used by softmmu_exec.h, TCG targets and exec-all.h.
+ *
+ */
+#ifndef SOFTMMU_DEFS_H
+#define SOFTMMU_DEFS_H
+
+uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+ int mmu_idx);
+uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ int mmu_idx);
+uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ int mmu_idx);
+uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ int mmu_idx);
+
+uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stb_cmmu(CPUArchState *env, target_ulong addr, uint8_t val,
+                     int mmu_idx);
+uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stw_cmmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ int mmu_idx);
+uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ int mmu_idx);
+uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ int mmu_idx);
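+/*
+ * Illustrative call from a target op helper (cpu_mmu_index() is the
+ * per-target function returning the current MMU mode):
+ *
+ *     uint32_t v = helper_ldl_mmu(env, addr, cpu_mmu_index(env));
+ *     helper_stl_mmu(env, addr, v, cpu_mmu_index(env));
+ *
+ * The _cmmu variants take the same arguments but go through the code-fetch
+ * TLB entries and are used for instruction loads.
+ */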
+#endif
--- /dev/null
+/*
+ * Software MMU support
+ *
+ * Generate inline load/store functions for all MMU modes (typically
+ * at least _user and _kernel) as well as _data versions, for all data
+ * sizes.
+ *
+ * Used by target op helpers.
+ *
+ * MMU mode suffixes are defined in target cpu.h.
+ */
+
+/* XXX: find something cleaner.
+ * Furthermore, this is false for 64-bit targets.
+ */
+#define ldul_user ldl_user
+#define ldul_kernel ldl_kernel
+#define ldul_hypv ldl_hypv
+#define ldul_executive ldl_executive
+#define ldul_supervisor ldl_supervisor
+
+#include "exec/softmmu_defs.h"
+
+#define ACCESS_TYPE 0
+#define MEMSUFFIX MMU_MODE0_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#define ACCESS_TYPE 1
+#define MEMSUFFIX MMU_MODE1_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#if (NB_MMU_MODES >= 3)
+
+#define ACCESS_TYPE 2
+#define MEMSUFFIX MMU_MODE2_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 3) */
+
+#if (NB_MMU_MODES >= 4)
+
+#define ACCESS_TYPE 3
+#define MEMSUFFIX MMU_MODE3_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 4) */
+
+#if (NB_MMU_MODES >= 5)
+
+#define ACCESS_TYPE 4
+#define MEMSUFFIX MMU_MODE4_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 5) */
+
+#if (NB_MMU_MODES >= 6)
+
+#define ACCESS_TYPE 5
+#define MEMSUFFIX MMU_MODE5_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 6) */
+
+#if (NB_MMU_MODES > 6)
+#error "NB_MMU_MODES > 6 is not supported for now"
+#endif /* (NB_MMU_MODES > 6) */
+
+/* These accesses are slower; they must be as rare as possible. */
+#define ACCESS_TYPE (NB_MMU_MODES)
+#define MEMSUFFIX _data
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#define ldub(p) ldub_data(p)
+#define ldsb(p) ldsb_data(p)
+#define lduw(p) lduw_data(p)
+#define ldsw(p) ldsw_data(p)
+#define ldl(p) ldl_data(p)
+#define ldq(p) ldq_data(p)
+
+#define stb(p, v) stb_data(p, v)
+#define stw(p, v) stw_data(p, v)
+#define stl(p, v) stl_data(p, v)
+#define stq(p, v) stq_data(p, v)
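+/*
+ * For illustration, if a target's cpu.h defines MMU_MODE0_SUFFIX as _kernel
+ * and MMU_MODE1_SUFFIX as _user (an assumption, not a requirement), the
+ * inclusions above generate accessors such as:
+ *
+ *     uint32_t cpu_ldl_kernel(CPUArchState *env, target_ulong ptr);
+ *     int      cpu_ldsw_user(CPUArchState *env, target_ulong ptr);
+ *     void     cpu_stb_user(CPUArchState *env, target_ulong ptr, uint32_t v);
+ *
+ * plus the _data variants wrapped by the ldub()/stb() shorthands defined
+ * directly above.
+ */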
--- /dev/null
+/*
+ * Software MMU support
+ *
+ * Generate inline load/store functions for one MMU mode and data
+ * size.
+ *
+ * Generate a store function as well as signed and unsigned loads.  For
+ * the 32- and 64-bit cases, also generate floating point functions of
+ * the same size.
+ *
+ * Not used directly but included from softmmu_exec.h and exec-all.h.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#if DATA_SIZE == 8
+#define SUFFIX q
+#define USUFFIX q
+#define DATA_TYPE uint64_t
+#elif DATA_SIZE == 4
+#define SUFFIX l
+#define USUFFIX l
+#define DATA_TYPE uint32_t
+#elif DATA_SIZE == 2
+#define SUFFIX w
+#define USUFFIX uw
+#define DATA_TYPE uint16_t
+#define DATA_STYPE int16_t
+#elif DATA_SIZE == 1
+#define SUFFIX b
+#define USUFFIX ub
+#define DATA_TYPE uint8_t
+#define DATA_STYPE int8_t
+#else
+#error unsupported data size
+#endif
+
+#if ACCESS_TYPE < (NB_MMU_MODES)
+
+#define CPU_MMU_INDEX ACCESS_TYPE
+#define MMUSUFFIX _mmu
+
+#elif ACCESS_TYPE == (NB_MMU_MODES)
+
+#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define MMUSUFFIX _mmu
+
+#elif ACCESS_TYPE == (NB_MMU_MODES + 1)
+
+#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define MMUSUFFIX _cmmu
+
+#else
+#error invalid ACCESS_TYPE
+#endif
+
+#if DATA_SIZE == 8
+#define RES_TYPE uint64_t
+#else
+#define RES_TYPE uint32_t
+#endif
+
+#if ACCESS_TYPE == (NB_MMU_MODES + 1)
+#define ADDR_READ addr_code
+#else
+#define ADDR_READ addr_read
+#endif
+
+/* generic load/store macros */
+
+static inline RES_TYPE
+glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+ int page_index;
+ RES_TYPE res;
+ target_ulong addr;
+ int mmu_idx;
+
+ addr = ptr;
+ page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ mmu_idx = CPU_MMU_INDEX;
+ if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
+ (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+ res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
+ } else {
+ uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+ res = glue(glue(ld, USUFFIX), _raw)(hostaddr);
+ }
+ return res;
+}
+
+#if DATA_SIZE <= 2
+static inline int
+glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+ int res, page_index;
+ target_ulong addr;
+ int mmu_idx;
+
+ addr = ptr;
+ page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ mmu_idx = CPU_MMU_INDEX;
+ if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
+ (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+ res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
+ MMUSUFFIX)(env, addr, mmu_idx);
+ } else {
+ uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+ res = glue(glue(lds, SUFFIX), _raw)(hostaddr);
+ }
+ return res;
+}
+#endif
+
+#if ACCESS_TYPE != (NB_MMU_MODES + 1)
+
+/* generic store macro */
+
+static inline void
+glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
+ RES_TYPE v)
+{
+ int page_index;
+ target_ulong addr;
+ int mmu_idx;
+
+ addr = ptr;
+ page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ mmu_idx = CPU_MMU_INDEX;
+ if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
+ (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
+ } else {
+ uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+ glue(glue(st, SUFFIX), _raw)(hostaddr, v);
+ }
+}
+
+#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
+
+#if ACCESS_TYPE != (NB_MMU_MODES + 1)
+
+#if DATA_SIZE == 8
+static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env,
+ target_ulong ptr)
+{
+ union {
+ float64 d;
+ uint64_t i;
+ } u;
+ u.i = glue(cpu_ldq, MEMSUFFIX)(env, ptr);
+ return u.d;
+}
+
+static inline void glue(cpu_stfq, MEMSUFFIX)(CPUArchState *env,
+ target_ulong ptr, float64 v)
+{
+ union {
+ float64 d;
+ uint64_t i;
+ } u;
+ u.d = v;
+ glue(cpu_stq, MEMSUFFIX)(env, ptr, u.i);
+}
+#endif /* DATA_SIZE == 8 */
+
+#if DATA_SIZE == 4
+static inline float32 glue(cpu_ldfl, MEMSUFFIX)(CPUArchState *env,
+ target_ulong ptr)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+ u.i = glue(cpu_ldl, MEMSUFFIX)(env, ptr);
+ return u.f;
+}
+
+static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env,
+ target_ulong ptr, float32 v)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+ u.f = v;
+ glue(cpu_stl, MEMSUFFIX)(env, ptr, u.i);
+}
+#endif /* DATA_SIZE == 4 */
+
+#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
+
+#undef RES_TYPE
+#undef DATA_TYPE
+#undef DATA_STYPE
+#undef SUFFIX
+#undef USUFFIX
+#undef DATA_SIZE
+#undef CPU_MMU_INDEX
+#undef MMUSUFFIX
+#undef ADDR_READ
--- /dev/null
+/*
+ * Software MMU support
+ *
+ * Generate helpers used by TCG for qemu_ld/st ops and code load
+ * functions.
+ *
+ * Included from target op helpers and exec.c.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu-timer.h"
+#include "exec/memory.h"
+
+#define DATA_SIZE (1 << SHIFT)
+
+#if DATA_SIZE == 8
+#define SUFFIX q
+#define USUFFIX q
+#define DATA_TYPE uint64_t
+#elif DATA_SIZE == 4
+#define SUFFIX l
+#define USUFFIX l
+#define DATA_TYPE uint32_t
+#elif DATA_SIZE == 2
+#define SUFFIX w
+#define USUFFIX uw
+#define DATA_TYPE uint16_t
+#elif DATA_SIZE == 1
+#define SUFFIX b
+#define USUFFIX ub
+#define DATA_TYPE uint8_t
+#else
+#error unsupported data size
+#endif
+
+#ifdef SOFTMMU_CODE_ACCESS
+#define READ_ACCESS_TYPE 2
+#define ADDR_READ addr_code
+#else
+#define READ_ACCESS_TYPE 0
+#define ADDR_READ addr_read
+#endif
+
+static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr,
+ int mmu_idx,
+ uintptr_t retaddr);
+static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
+ hwaddr physaddr,
+ target_ulong addr,
+ uintptr_t retaddr)
+{
+ DATA_TYPE res;
+ MemoryRegion *mr = iotlb_to_region(physaddr);
+
+ physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+ env->mem_io_pc = retaddr;
+ if (mr != &io_mem_ram && mr != &io_mem_rom
+ && mr != &io_mem_unassigned
+ && mr != &io_mem_notdirty
+ && !can_do_io(env)) {
+ cpu_io_recompile(env, retaddr);
+ }
+
+ env->mem_io_vaddr = addr;
+#if SHIFT <= 2
+ res = io_mem_read(mr, physaddr, 1 << SHIFT);
+#else
+#ifdef TARGET_WORDS_BIGENDIAN
+ res = io_mem_read(mr, physaddr, 4) << 32;
+ res |= io_mem_read(mr, physaddr + 4, 4);
+#else
+ res = io_mem_read(mr, physaddr, 4);
+ res |= io_mem_read(mr, physaddr + 4, 4) << 32;
+#endif
+#endif /* SHIFT > 2 */
+ return res;
+}
+
+/* handle all cases except unaligned accesses which span two pages */
+DATA_TYPE
+glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
+ int mmu_idx)
+{
+ DATA_TYPE res;
+ int index;
+ target_ulong tlb_addr;
+ hwaddr ioaddr;
+ uintptr_t retaddr;
+
+    /* test if there is a match for unaligned or IO access */
+    /* XXX: more could be done in the memory macros, in a non-portable way */
+ index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+ tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ goto do_unaligned_access;
+ retaddr = GETPC_EXT();
+ ioaddr = env->iotlb[mmu_idx][index];
+ res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+ } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
+ /* slow unaligned access (it spans two pages or IO) */
+ do_unaligned_access:
+ retaddr = GETPC_EXT();
+#ifdef ALIGNED_ONLY
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+#endif
+ res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr,
+ mmu_idx, retaddr);
+ } else {
+ /* unaligned/aligned access in the same page */
+ uintptr_t addend;
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ retaddr = GETPC_EXT();
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ }
+#endif
+ addend = env->tlb_table[mmu_idx][index].addend;
+ res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
+ (addr + addend));
+ }
+ } else {
+ /* the page is not in the TLB : fill it */
+ retaddr = GETPC_EXT();
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+#endif
+ tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ goto redo;
+ }
+ return res;
+}
+
+/* handle all unaligned cases */
+static DATA_TYPE
+glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr,
+ int mmu_idx,
+ uintptr_t retaddr)
+{
+ DATA_TYPE res, res1, res2;
+ int index, shift;
+ hwaddr ioaddr;
+ target_ulong tlb_addr, addr1, addr2;
+
+ index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+ tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ goto do_unaligned_access;
+ ioaddr = env->iotlb[mmu_idx][index];
+ res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+ } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
+ do_unaligned_access:
+ /* slow unaligned access (it spans two pages) */
+ addr1 = addr & ~(DATA_SIZE - 1);
+ addr2 = addr1 + DATA_SIZE;
+ res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr1,
+ mmu_idx, retaddr);
+ res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr2,
+ mmu_idx, retaddr);
+ shift = (addr & (DATA_SIZE - 1)) * 8;
+#ifdef TARGET_WORDS_BIGENDIAN
+ res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
+#else
+ res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
+#endif
+ res = (DATA_TYPE)res;
+ } else {
+ /* unaligned/aligned access in the same page */
+ uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
+ res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
+ (addr + addend));
+ }
+ } else {
+ /* the page is not in the TLB : fill it */
+ tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ goto redo;
+ }
+ return res;
+}
+
+#ifndef SOFTMMU_CODE_ACCESS
+
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr,
+ DATA_TYPE val,
+ int mmu_idx,
+ uintptr_t retaddr);
+
+static inline void glue(io_write, SUFFIX)(CPUArchState *env,
+ hwaddr physaddr,
+ DATA_TYPE val,
+ target_ulong addr,
+ uintptr_t retaddr)
+{
+ MemoryRegion *mr = iotlb_to_region(physaddr);
+
+ physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+ if (mr != &io_mem_ram && mr != &io_mem_rom
+ && mr != &io_mem_unassigned
+ && mr != &io_mem_notdirty
+ && !can_do_io(env)) {
+ cpu_io_recompile(env, retaddr);
+ }
+
+ env->mem_io_vaddr = addr;
+ env->mem_io_pc = retaddr;
+#if SHIFT <= 2
+ io_mem_write(mr, physaddr, val, 1 << SHIFT);
+#else
+#ifdef TARGET_WORDS_BIGENDIAN
+ io_mem_write(mr, physaddr, (val >> 32), 4);
+ io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
+#else
+ io_mem_write(mr, physaddr, (uint32_t)val, 4);
+ io_mem_write(mr, physaddr + 4, val >> 32, 4);
+#endif
+#endif /* SHIFT > 2 */
+}
+
+void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr, DATA_TYPE val,
+ int mmu_idx)
+{
+ hwaddr ioaddr;
+ target_ulong tlb_addr;
+ uintptr_t retaddr;
+ int index;
+
+ index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+ tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ goto do_unaligned_access;
+ retaddr = GETPC_EXT();
+ ioaddr = env->iotlb[mmu_idx][index];
+ glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+ } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
+ do_unaligned_access:
+ retaddr = GETPC_EXT();
+#ifdef ALIGNED_ONLY
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+#endif
+ glue(glue(slow_st, SUFFIX), MMUSUFFIX)(env, addr, val,
+ mmu_idx, retaddr);
+ } else {
+ /* aligned/unaligned access in the same page */
+ uintptr_t addend;
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ retaddr = GETPC_EXT();
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+ }
+#endif
+ addend = env->tlb_table[mmu_idx][index].addend;
+ glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
+ (addr + addend), val);
+ }
+ } else {
+ /* the page is not in the TLB : fill it */
+ retaddr = GETPC_EXT();
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+#endif
+ tlb_fill(env, addr, 1, mmu_idx, retaddr);
+ goto redo;
+ }
+}
+
+/* handles all unaligned cases */
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr,
+ DATA_TYPE val,
+ int mmu_idx,
+ uintptr_t retaddr)
+{
+ hwaddr ioaddr;
+ target_ulong tlb_addr;
+ int index, i;
+
+ index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+ tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ goto do_unaligned_access;
+ ioaddr = env->iotlb[mmu_idx][index];
+ glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+ } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
+ do_unaligned_access:
+ /* XXX: not efficient, but simple */
+ /* Note: relies on the fact that tlb_fill() does not remove the
+ * previous page from the TLB cache. */
+ for(i = DATA_SIZE - 1; i >= 0; i--) {
+#ifdef TARGET_WORDS_BIGENDIAN
+ glue(slow_stb, MMUSUFFIX)(env, addr + i,
+ val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
+ mmu_idx, retaddr);
+#else
+ glue(slow_stb, MMUSUFFIX)(env, addr + i,
+ val >> (i * 8),
+ mmu_idx, retaddr);
+#endif
+ }
+ } else {
+ /* aligned/unaligned access in the same page */
+ uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
+ glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
+ (addr + addend), val);
+ }
+ } else {
+ /* the page is not in the TLB : fill it */
+ tlb_fill(env, addr, 1, mmu_idx, retaddr);
+ goto redo;
+ }
+}
+
+#endif /* !defined(SOFTMMU_CODE_ACCESS) */
+
+#undef READ_ACCESS_TYPE
+#undef SHIFT
+#undef DATA_TYPE
+#undef SUFFIX
+#undef USUFFIX
+#undef DATA_SIZE
+#undef ADDR_READ
--- /dev/null
+/*
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+/* configure guarantees us that we have pthreads on any host except
+ * mingw32, which doesn't support any of the user-only targets.
+ * So we can simply assume we have pthread mutexes here.
+ */
+#if defined(CONFIG_USER_ONLY)
+
+#include <pthread.h>
+#define spin_lock pthread_mutex_lock
+#define spin_unlock pthread_mutex_unlock
+#define spinlock_t pthread_mutex_t
+#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
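+/* Usage sketch (the lock name is made up):
+ *
+ *     static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;
+ *
+ *     spin_lock(&demo_lock);
+ *     ... touch state shared between guest threads ...
+ *     spin_unlock(&demo_lock);
+ *
+ * The system emulation branch below turns these calls into no-ops.
+ */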
+
+#else
+
+/* Empty implementations, on the theory that system mode emulation
+ * is single-threaded. This means that these functions should only
+ * be used from code run in the TCG cpu thread, and cannot protect
+ * data structures which might also be accessed from the IO thread
+ * or from signal handlers.
+ */
+typedef int spinlock_t;
+#define SPIN_LOCK_UNLOCKED 0
+
+static inline void spin_lock(spinlock_t *lock)
+{
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+}
+
+#endif
--- /dev/null
+#ifndef QEMU_TYPES_H
+#define QEMU_TYPES_H
+#include "cpu.h"
+
+#ifdef TARGET_ABI32
+typedef uint32_t abi_ulong;
+typedef int32_t abi_long;
+#define TARGET_ABI_FMT_lx "%08x"
+#define TARGET_ABI_FMT_ld "%d"
+#define TARGET_ABI_FMT_lu "%u"
+#define TARGET_ABI_BITS 32
+
+static inline abi_ulong tswapal(abi_ulong v)
+{
+ return tswap32(v);
+}
+
+#else
+typedef target_ulong abi_ulong;
+typedef target_long abi_long;
+#define TARGET_ABI_FMT_lx TARGET_FMT_lx
+#define TARGET_ABI_FMT_ld TARGET_FMT_ld
+#define TARGET_ABI_FMT_lu TARGET_FMT_lu
+#define TARGET_ABI_BITS TARGET_LONG_BITS
+/* for consistency, define ABI32 too */
+#if TARGET_ABI_BITS == 32
+#define TARGET_ABI32 1
+#endif
+
+static inline abi_ulong tswapal(abi_ulong v)
+{
+ return tswapl(v);
+}
+
+#endif
+#endif
--- /dev/null
+/*
+ * Generic thunking code to convert data between host and target CPU
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef THUNK_H
+#define THUNK_H
+
+#include <inttypes.h>
+#include "cpu.h"
+
+/* types enums definitions */
+
+typedef enum argtype {
+ TYPE_NULL,
+ TYPE_CHAR,
+ TYPE_SHORT,
+ TYPE_INT,
+ TYPE_LONG,
+ TYPE_ULONG,
+    TYPE_PTRVOID, /* pointer to unknown data */
+ TYPE_LONGLONG,
+ TYPE_ULONGLONG,
+ TYPE_PTR,
+ TYPE_ARRAY,
+ TYPE_STRUCT,
+ TYPE_OLDDEVT,
+} argtype;
+
+#define MK_PTR(type) TYPE_PTR, type
+#define MK_ARRAY(type, size) TYPE_ARRAY, size, type
+#define MK_STRUCT(id) TYPE_STRUCT, id
+
+#define THUNK_TARGET 0
+#define THUNK_HOST 1
+
+typedef struct {
+ /* standard struct handling */
+ const argtype *field_types;
+ int nb_fields;
+ int *field_offsets[2];
+ /* special handling */
+ void (*convert[2])(void *dst, const void *src);
+ int size[2];
+ int align[2];
+ const char *name;
+} StructEntry;
+
+/* Translation table for bitmasks... */
+typedef struct bitmask_transtbl {
+ unsigned int x86_mask;
+ unsigned int x86_bits;
+ unsigned int alpha_mask;
+ unsigned int alpha_bits;
+} bitmask_transtbl;
+
+void thunk_register_struct(int id, const char *name, const argtype *types);
+void thunk_register_struct_direct(int id, const char *name,
+ const StructEntry *se1);
+const argtype *thunk_convert(void *dst, const void *src,
+ const argtype *type_ptr, int to_host);
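+/*
+ * Usage sketch (illustrative; the struct id 0, the field layout and the
+ * buffer names are hypothetical):
+ *
+ *     static const argtype demo_field_types[] = {
+ *         TYPE_INT, TYPE_ULONG, TYPE_NULL
+ *     };
+ *     static const argtype demo_struct_type[] = { MK_STRUCT(0) };
+ *
+ *     thunk_register_struct(0, "demo", demo_field_types);
+ *     ...
+ *     thunk_convert(host_buf, guest_buf, demo_struct_type, THUNK_HOST);
+ *
+ * The last call rewrites a guest-layout "demo" structure at guest_buf into
+ * host layout at host_buf, converting endianness, size and padding of each
+ * field as needed.
+ */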
+#ifndef NO_THUNK_TYPE_SIZE
+
+extern StructEntry struct_entries[];
+
+int thunk_type_size_array(const argtype *type_ptr, int is_host);
+int thunk_type_align_array(const argtype *type_ptr, int is_host);
+
+static inline int thunk_type_size(const argtype *type_ptr, int is_host)
+{
+ int type, size;
+ const StructEntry *se;
+
+ type = *type_ptr;
+ switch(type) {
+ case TYPE_CHAR:
+ return 1;
+ case TYPE_SHORT:
+ return 2;
+ case TYPE_INT:
+ return 4;
+ case TYPE_LONGLONG:
+ case TYPE_ULONGLONG:
+ return 8;
+ case TYPE_LONG:
+ case TYPE_ULONG:
+ case TYPE_PTRVOID:
+ case TYPE_PTR:
+ if (is_host) {
+ return sizeof(void *);
+ } else {
+ return TARGET_ABI_BITS / 8;
+ }
+ break;
+ case TYPE_OLDDEVT:
+ if (is_host) {
+#if defined(HOST_X86_64)
+ return 8;
+#elif defined(HOST_ALPHA) || defined(HOST_IA64) || defined(HOST_MIPS) || \
+ defined(HOST_PARISC) || defined(HOST_SPARC64)
+ return 4;
+#elif defined(HOST_PPC)
+ return sizeof(void *);
+#else
+ return 2;
+#endif
+ } else {
+#if defined(TARGET_X86_64)
+ return 8;
+#elif defined(TARGET_ALPHA) || defined(TARGET_IA64) || defined(TARGET_MIPS) || \
+ defined(TARGET_PARISC) || defined(TARGET_SPARC64)
+ return 4;
+#elif defined(TARGET_PPC)
+ return TARGET_ABI_BITS / 8;
+#else
+ return 2;
+#endif
+ }
+ break;
+ case TYPE_ARRAY:
+ size = type_ptr[1];
+ return size * thunk_type_size_array(type_ptr + 2, is_host);
+ case TYPE_STRUCT:
+ se = struct_entries + type_ptr[1];
+ return se->size[is_host];
+ default:
+ return -1;
+ }
+}
+
+static inline int thunk_type_align(const argtype *type_ptr, int is_host)
+{
+ int type;
+ const StructEntry *se;
+
+ type = *type_ptr;
+ switch(type) {
+ case TYPE_CHAR:
+ return 1;
+ case TYPE_SHORT:
+ return 2;
+ case TYPE_INT:
+ return 4;
+ case TYPE_LONGLONG:
+ case TYPE_ULONGLONG:
+ return 8;
+ case TYPE_LONG:
+ case TYPE_ULONG:
+ case TYPE_PTRVOID:
+ case TYPE_PTR:
+ if (is_host) {
+ return sizeof(void *);
+ } else {
+ return TARGET_ABI_BITS / 8;
+ }
+ break;
+ case TYPE_OLDDEVT:
+ return thunk_type_size(type_ptr, is_host);
+ case TYPE_ARRAY:
+ return thunk_type_align_array(type_ptr + 2, is_host);
+ case TYPE_STRUCT:
+ se = struct_entries + type_ptr[1];
+ return se->align[is_host];
+ default:
+ return -1;
+ }
+}
+
+#endif /* NO_THUNK_TYPE_SIZE */
+
+unsigned int target_to_host_bitmask(unsigned int x86_mask,
+ const bitmask_transtbl * trans_tbl);
+unsigned int host_to_target_bitmask(unsigned int alpha_mask,
+ const bitmask_transtbl * trans_tbl);
+
+#endif
* splitted out ioport related stuffs from vl.c.
*/
-#include "ioport.h"
+#include "exec/ioport.h"
#include "trace.h"
-#include "memory.h"
+#include "exec/memory.h"
/***********************************************************/
/* IO Port */
+++ /dev/null
-/*
- * defines ioport related functions
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/**************************************************************************
- * IO ports API
- */
-
-#ifndef IOPORT_H
-#define IOPORT_H
-
-#include "qemu-common.h"
-#include "iorange.h"
-
-typedef uint32_t pio_addr_t;
-#define FMT_pioaddr PRIx32
-
-#define MAX_IOPORTS (64 * 1024)
-#define IOPORTS_MASK (MAX_IOPORTS - 1)
-
-/* These should really be in isa.h, but are here to make pc.h happy. */
-typedef void (IOPortWriteFunc)(void *opaque, uint32_t address, uint32_t data);
-typedef uint32_t (IOPortReadFunc)(void *opaque, uint32_t address);
-typedef void (IOPortDestructor)(void *opaque);
-
-void ioport_register(IORange *iorange);
-int register_ioport_read(pio_addr_t start, int length, int size,
- IOPortReadFunc *func, void *opaque);
-int register_ioport_write(pio_addr_t start, int length, int size,
- IOPortWriteFunc *func, void *opaque);
-void isa_unassign_ioport(pio_addr_t start, int length);
-bool isa_is_ioport_assigned(pio_addr_t start);
-
-void cpu_outb(pio_addr_t addr, uint8_t val);
-void cpu_outw(pio_addr_t addr, uint16_t val);
-void cpu_outl(pio_addr_t addr, uint32_t val);
-uint8_t cpu_inb(pio_addr_t addr);
-uint16_t cpu_inw(pio_addr_t addr);
-uint32_t cpu_inl(pio_addr_t addr);
-
-struct MemoryRegion;
-struct MemoryRegionPortio;
-
-typedef struct PortioList {
- const struct MemoryRegionPortio *ports;
- struct MemoryRegion *address_space;
- unsigned nr;
- struct MemoryRegion **regions;
- struct MemoryRegion **aliases;
- void *opaque;
- const char *name;
-} PortioList;
-
-void portio_list_init(PortioList *piolist,
- const struct MemoryRegionPortio *callbacks,
- void *opaque, const char *name);
-void portio_list_destroy(PortioList *piolist);
-void portio_list_add(PortioList *piolist,
- struct MemoryRegion *address_space,
- uint32_t addr);
-void portio_list_del(PortioList *piolist);
-
-#endif /* IOPORT_H */
+++ /dev/null
-#ifndef IORANGE_H
-#define IORANGE_H
-
-#include <stdint.h>
-
-typedef struct IORange IORange;
-typedef struct IORangeOps IORangeOps;
-
-struct IORangeOps {
- void (*read)(IORange *iorange, uint64_t offset, unsigned width,
- uint64_t *data);
- void (*write)(IORange *iorange, uint64_t offset, unsigned width,
- uint64_t data);
- void (*destructor)(IORange *iorange);
-};
-
-struct IORange {
- const IORangeOps *ops;
- uint64_t base;
- uint64_t len;
-};
-
-static inline void iorange_init(IORange *iorange, const IORangeOps *ops,
- uint64_t base, uint64_t len)
-{
- iorange->ops = ops;
- iorange->base = base;
- iorange->len = len;
-}
-
-#endif
#include "sysemu.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "kvm.h"
#include "bswap.h"
-#include "memory.h"
-#include "exec-memory.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
#include "event_notifier.h"
/* This check must be after config-host.h is included */
#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "cpu.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "kvm.h"
KVMState *kvm_state;
#include <stdlib.h>
#endif /* DEBUG_REMAP */
-#include "qemu-user-types.h"
+#include "exec/user/abitypes.h"
-#include "thunk.h"
+#include "exec/user/thunk.h"
#include "syscall_defs.h"
#include "syscall.h"
#include "target_signal.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "qemu-queue.h"
#if defined(CONFIG_USE_NPTL)
+++ /dev/null
-/*
- * Declarations for obsolete exec.c functions
- *
- * Copyright 2011 Red Hat, Inc. and/or its affiliates
- *
- * Authors:
- * Avi Kivity <avi@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * later. See the COPYING file in the top-level directory.
- *
- */
-
-/*
- * This header is for use by exec.c and memory.c ONLY. Do not include it.
- * The functions declared here will be removed soon.
- */
-
-#ifndef MEMORY_INTERNAL_H
-#define MEMORY_INTERNAL_H
-
-#ifndef CONFIG_USER_ONLY
-#include "hw/xen.h"
-
-typedef struct PhysPageEntry PhysPageEntry;
-
-struct PhysPageEntry {
- uint16_t is_leaf : 1;
- /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
- uint16_t ptr : 15;
-};
-
-typedef struct AddressSpaceDispatch AddressSpaceDispatch;
-
-struct AddressSpaceDispatch {
- /* This is a multi-level map on the physical address space.
- * The bottom level has pointers to MemoryRegionSections.
- */
- PhysPageEntry phys_map;
- MemoryListener listener;
-};
-
-void address_space_init_dispatch(AddressSpace *as);
-void address_space_destroy_dispatch(AddressSpace *as);
-
-ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
- MemoryRegion *mr);
-ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
-void qemu_ram_free(ram_addr_t addr);
-void qemu_ram_free_from_ptr(ram_addr_t addr);
-
-struct MemoryRegion;
-struct MemoryRegionSection;
-
-void qemu_register_coalesced_mmio(hwaddr addr, ram_addr_t size);
-void qemu_unregister_coalesced_mmio(hwaddr addr, ram_addr_t size);
-
-#define VGA_DIRTY_FLAG 0x01
-#define CODE_DIRTY_FLAG 0x02
-#define MIGRATION_DIRTY_FLAG 0x08
-
-static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
-{
- return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
-}
-
-/* read dirty bit (return 0 or 1) */
-static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
-{
- return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
-}
-
-static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
- ram_addr_t length,
- int dirty_flags)
-{
- int ret = 0;
- ram_addr_t addr, end;
-
- end = TARGET_PAGE_ALIGN(start + length);
- start &= TARGET_PAGE_MASK;
- for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
- ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
- }
- return ret;
-}
-
-static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
- int dirty_flags)
-{
- return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
-}
-
-static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
-{
- cpu_physical_memory_set_dirty_flags(addr, 0xff);
-}
-
-static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
- int dirty_flags)
-{
- int mask = ~dirty_flags;
-
- return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
-}
-
-static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
- ram_addr_t length,
- int dirty_flags)
-{
- ram_addr_t addr, end;
-
- end = TARGET_PAGE_ALIGN(start + length);
- start &= TARGET_PAGE_MASK;
- for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
- cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
- }
- xen_modified_memory(addr, length);
-}
-
-static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
- ram_addr_t length,
- int dirty_flags)
-{
- ram_addr_t addr, end;
-
- end = TARGET_PAGE_ALIGN(start + length);
- start &= TARGET_PAGE_MASK;
- for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
- cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
- }
-}
-
-void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
- int dirty_flags);
-
-extern const IORangeOps memory_region_iorange_ops;
-
-#endif
-
-#endif
* GNU GPL, version 2 or (at your option) any later version.
*/
-#include "memory.h"
-#include "exec-memory.h"
-#include "ioport.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
+#include "exec/ioport.h"
#include "bitops.h"
#include "kvm.h"
#include <assert.h>
-#include "memory-internal.h"
+#include "exec/memory-internal.h"
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
+++ /dev/null
-/*
- * Physical memory management API
- *
- * Copyright 2011 Red Hat, Inc. and/or its affiliates
- *
- * Authors:
- * Avi Kivity <avi@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- */
-
-#ifndef MEMORY_H
-#define MEMORY_H
-
-#ifndef CONFIG_USER_ONLY
-
-#include <stdint.h>
-#include <stdbool.h>
-#include "qemu-common.h"
-#include "cpu-common.h"
-#include "hwaddr.h"
-#include "qemu-queue.h"
-#include "iorange.h"
-#include "ioport.h"
-#include "int128.h"
-
-typedef struct MemoryRegionOps MemoryRegionOps;
-typedef struct MemoryRegion MemoryRegion;
-typedef struct MemoryRegionPortio MemoryRegionPortio;
-typedef struct MemoryRegionMmio MemoryRegionMmio;
-
-/* Must match *_DIRTY_FLAGS in cpu-all.h. To be replaced with dynamic
- * registration.
- */
-#define DIRTY_MEMORY_VGA 0
-#define DIRTY_MEMORY_CODE 1
-#define DIRTY_MEMORY_MIGRATION 3
-
-struct MemoryRegionMmio {
- CPUReadMemoryFunc *read[3];
- CPUWriteMemoryFunc *write[3];
-};
-
-/* Internal use; thunks between old-style IORange and MemoryRegions. */
-typedef struct MemoryRegionIORange MemoryRegionIORange;
-struct MemoryRegionIORange {
- IORange iorange;
- MemoryRegion *mr;
- hwaddr offset;
-};
-
-/*
- * Memory region callbacks
- */
-struct MemoryRegionOps {
- /* Read from the memory region. @addr is relative to @mr; @size is
- * in bytes. */
- uint64_t (*read)(void *opaque,
- hwaddr addr,
- unsigned size);
- /* Write to the memory region. @addr is relative to @mr; @size is
- * in bytes. */
- void (*write)(void *opaque,
- hwaddr addr,
- uint64_t data,
- unsigned size);
-
- enum device_endian endianness;
- /* Guest-visible constraints: */
- struct {
- /* If nonzero, specify bounds on access sizes beyond which a machine
- * check is thrown.
- */
- unsigned min_access_size;
- unsigned max_access_size;
- /* If true, unaligned accesses are supported. Otherwise unaligned
- * accesses throw machine checks.
- */
- bool unaligned;
- /*
- * If present, and returns #false, the transaction is not accepted
- * by the device (and results in machine dependent behaviour such
- * as a machine check exception).
- */
- bool (*accepts)(void *opaque, hwaddr addr,
- unsigned size, bool is_write);
- } valid;
- /* Internal implementation constraints: */
- struct {
- /* If nonzero, specifies the minimum size implemented. Smaller sizes
- * will be rounded upwards and a partial result will be returned.
- */
- unsigned min_access_size;
- /* If nonzero, specifies the maximum size implemented. Larger sizes
- * will be done as a series of accesses with smaller sizes.
- */
- unsigned max_access_size;
- /* If true, unaligned accesses are supported. Otherwise all accesses
- * are converted to (possibly multiple) naturally aligned accesses.
- */
- bool unaligned;
- } impl;
-
- /* If .read and .write are not present, old_portio may be used for
- * backwards compatibility with old portio registration
- */
- const MemoryRegionPortio *old_portio;
- /* If .read and .write are not present, old_mmio may be used for
- * backwards compatibility with old mmio registration
- */
- const MemoryRegionMmio old_mmio;
-};
-
-typedef struct CoalescedMemoryRange CoalescedMemoryRange;
-typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
-
-struct MemoryRegion {
- /* All fields are private - violators will be prosecuted */
- const MemoryRegionOps *ops;
- void *opaque;
- MemoryRegion *parent;
- Int128 size;
- hwaddr addr;
- void (*destructor)(MemoryRegion *mr);
- ram_addr_t ram_addr;
- bool subpage;
- bool terminates;
- bool readable;
- bool ram;
- bool readonly; /* For RAM regions */
- bool enabled;
- bool rom_device;
- bool warning_printed; /* For reservations */
- bool flush_coalesced_mmio;
- MemoryRegion *alias;
- hwaddr alias_offset;
- unsigned priority;
- bool may_overlap;
- QTAILQ_HEAD(subregions, MemoryRegion) subregions;
- QTAILQ_ENTRY(MemoryRegion) subregions_link;
- QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
- const char *name;
- uint8_t dirty_log_mask;
- unsigned ioeventfd_nb;
- MemoryRegionIoeventfd *ioeventfds;
-};
-
-struct MemoryRegionPortio {
- uint32_t offset;
- uint32_t len;
- unsigned size;
- IOPortReadFunc *read;
- IOPortWriteFunc *write;
-};
-
-#define PORTIO_END_OF_LIST() { }
-
-typedef struct AddressSpace AddressSpace;
-
-/**
- * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
- */
-struct AddressSpace {
- /* All fields are private. */
- const char *name;
- MemoryRegion *root;
- struct FlatView *current_map;
- int ioeventfd_nb;
- struct MemoryRegionIoeventfd *ioeventfds;
- struct AddressSpaceDispatch *dispatch;
- QTAILQ_ENTRY(AddressSpace) address_spaces_link;
-};
-
-typedef struct MemoryRegionSection MemoryRegionSection;
-
-/**
- * MemoryRegionSection: describes a fragment of a #MemoryRegion
- *
- * @mr: the region, or %NULL if empty
- * @address_space: the address space the region is mapped in
- * @offset_within_region: the beginning of the section, relative to @mr's start
- * @size: the size of the section; will not exceed @mr's boundaries
- * @offset_within_address_space: the address of the first byte of the section
- * relative to the region's address space
- * @readonly: writes to this section are ignored
- */
-struct MemoryRegionSection {
- MemoryRegion *mr;
- AddressSpace *address_space;
- hwaddr offset_within_region;
- uint64_t size;
- hwaddr offset_within_address_space;
- bool readonly;
-};
-
-typedef struct MemoryListener MemoryListener;
-
-/**
- * MemoryListener: callbacks structure for updates to the physical memory map
- *
- * Allows a component to adjust to changes in the guest-visible memory map.
- * Use with memory_listener_register() and memory_listener_unregister().
- */
-struct MemoryListener {
- void (*begin)(MemoryListener *listener);
- void (*commit)(MemoryListener *listener);
- void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
- void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
- void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
- void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
- void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
- void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
- void (*log_global_start)(MemoryListener *listener);
- void (*log_global_stop)(MemoryListener *listener);
- void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
- bool match_data, uint64_t data, EventNotifier *e);
- void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
- bool match_data, uint64_t data, EventNotifier *e);
- void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
- hwaddr addr, hwaddr len);
- void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
- hwaddr addr, hwaddr len);
- /* Lower = earlier (during add), later (during del) */
- unsigned priority;
- AddressSpace *address_space_filter;
- QTAILQ_ENTRY(MemoryListener) link;
-};
-
-/**
- * memory_region_init: Initialize a memory region
- *
- * The region typically acts as a container for other memory regions. Use
- * memory_region_add_subregion() to add subregions.
- *
- * @mr: the #MemoryRegion to be initialized
- * @name: used for debugging; not visible to the user or ABI
- * @size: size of the region; any subregions beyond this size will be clipped
- */
-void memory_region_init(MemoryRegion *mr,
- const char *name,
- uint64_t size);
-/**
- * memory_region_init_io: Initialize an I/O memory region.
- *
- * Accesses into the region will cause the callbacks in @ops to be called.
- * if @size is nonzero, subregions will be clipped to @size.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @ops: a structure containing read and write callbacks to be used when
- * I/O is performed on the region.
- * @opaque: passed to to the read and write callbacks of the @ops structure.
- * @name: used for debugging; not visible to the user or ABI
- * @size: size of the region.
- */
-void memory_region_init_io(MemoryRegion *mr,
- const MemoryRegionOps *ops,
- void *opaque,
- const char *name,
- uint64_t size);
-
-/**
- * memory_region_init_ram: Initialize RAM memory region. Accesses into the
- * region will modify memory directly.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @name: the name of the region.
- * @size: size of the region.
- */
-void memory_region_init_ram(MemoryRegion *mr,
- const char *name,
- uint64_t size);
-
-/**
- * memory_region_init_ram_ptr: Initialize RAM memory region from a
- * user-provided pointer. Accesses into the
- * region will modify memory directly.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @name: the name of the region.
- * @size: size of the region.
- * @ptr: memory to be mapped; must contain at least @size bytes.
- */
-void memory_region_init_ram_ptr(MemoryRegion *mr,
- const char *name,
- uint64_t size,
- void *ptr);
-
-/**
- * memory_region_init_alias: Initialize a memory region that aliases all or a
- * part of another memory region.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @name: used for debugging; not visible to the user or ABI
- * @orig: the region to be referenced; @mr will be equivalent to
- * @orig between @offset and @offset + @size - 1.
- * @offset: start of the section in @orig to be referenced.
- * @size: size of the region.
- */
-void memory_region_init_alias(MemoryRegion *mr,
- const char *name,
- MemoryRegion *orig,
- hwaddr offset,
- uint64_t size);
-
-/**
- * memory_region_init_rom_device: Initialize a ROM memory region. Writes are
- * handled via callbacks.
- *
- * @mr: the #MemoryRegion to be initialized.
- * @ops: callbacks for write access handling.
- * @name: the name of the region.
- * @size: size of the region.
- */
-void memory_region_init_rom_device(MemoryRegion *mr,
- const MemoryRegionOps *ops,
- void *opaque,
- const char *name,
- uint64_t size);
-
-/**
- * memory_region_init_reservation: Initialize a memory region that reserves
- * I/O space.
- *
- * A reservation region primariy serves debugging purposes. It claims I/O
- * space that is not supposed to be handled by QEMU itself. Any access via
- * the memory API will cause an abort().
- *
- * @mr: the #MemoryRegion to be initialized
- * @name: used for debugging; not visible to the user or ABI
- * @size: size of the region.
- */
-void memory_region_init_reservation(MemoryRegion *mr,
- const char *name,
- uint64_t size);
-/**
- * memory_region_destroy: Destroy a memory region and reclaim all resources.
- *
- * @mr: the region to be destroyed. May not currently be a subregion
- * (see memory_region_add_subregion()) or referenced in an alias
- * (see memory_region_init_alias()).
- */
-void memory_region_destroy(MemoryRegion *mr);
-
-/**
- * memory_region_size: get a memory region's size.
- *
- * @mr: the memory region being queried.
- */
-uint64_t memory_region_size(MemoryRegion *mr);
-
-/**
- * memory_region_is_ram: check whether a memory region is random access
- *
- * Returns %true is a memory region is random access.
- *
- * @mr: the memory region being queried
- */
-bool memory_region_is_ram(MemoryRegion *mr);
-
-/**
- * memory_region_is_romd: check whether a memory region is ROMD
- *
- * Returns %true is a memory region is ROMD and currently set to allow
- * direct reads.
- *
- * @mr: the memory region being queried
- */
-static inline bool memory_region_is_romd(MemoryRegion *mr)
-{
- return mr->rom_device && mr->readable;
-}
-
-/**
- * memory_region_name: get a memory region's name
- *
- * Returns the string that was used to initialize the memory region.
- *
- * @mr: the memory region being queried
- */
-const char *memory_region_name(MemoryRegion *mr);
-
-/**
- * memory_region_is_logging: return whether a memory region is logging writes
- *
- * Returns %true if the memory region is logging writes
- *
- * @mr: the memory region being queried
- */
-bool memory_region_is_logging(MemoryRegion *mr);
-
-/**
- * memory_region_is_rom: check whether a memory region is ROM
- *
- * Returns %true is a memory region is read-only memory.
- *
- * @mr: the memory region being queried
- */
-bool memory_region_is_rom(MemoryRegion *mr);
-
-/**
- * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
- *
- * Returns a host pointer to a RAM memory region (created with
- * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with
- * care.
- *
- * @mr: the memory region being queried.
- */
-void *memory_region_get_ram_ptr(MemoryRegion *mr);
-
-/**
- * memory_region_set_log: Turn dirty logging on or off for a region.
- *
- * Turns dirty logging on or off for a specified client (display, migration).
- * Only meaningful for RAM regions.
- *
- * @mr: the memory region being updated.
- * @log: whether dirty logging is to be enabled or disabled.
- * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
- * %DIRTY_MEMORY_VGA.
- */
-void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
-
-/**
- * memory_region_get_dirty: Check whether a range of bytes is dirty
- * for a specified client.
- *
- * Checks whether a range of bytes has been written to since the last
- * call to memory_region_reset_dirty() with the same @client. Dirty logging
- * must be enabled.
- *
- * @mr: the memory region being queried.
- * @addr: the address (relative to the start of the region) being queried.
- * @size: the size of the range being queried.
- * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
- * %DIRTY_MEMORY_VGA.
- */
-bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
- hwaddr size, unsigned client);
-
-/**
- * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
- *
- * Marks a range of bytes as dirty, after it has been dirtied outside
- * guest code.
- *
- * @mr: the memory region being dirtied.
- * @addr: the address (relative to the start of the region) being dirtied.
- * @size: size of the range being dirtied.
- */
-void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
- hwaddr size);
-
-/**
- * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
- * any external TLBs (e.g. kvm)
- *
- * Flushes dirty information from accelerators such as kvm and vhost-net
- * and makes it available to users of the memory API.
- *
- * @mr: the region being flushed.
- */
-void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
-
-/**
- * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
- * client.
- *
- * Marks a range of pages as no longer dirty.
- *
- * @mr: the region being updated.
- * @addr: the start of the subrange being cleaned.
- * @size: the size of the subrange being cleaned.
- * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
- * %DIRTY_MEMORY_VGA.
- */
-void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
- hwaddr size, unsigned client);
-
-/**
- * memory_region_set_readonly: Turn a memory region read-only (or read-write)
- *
- * Allows a memory region to be marked as read-only (turning it into a ROM).
- * only useful on RAM regions.
- *
- * @mr: the region being updated.
- * @readonly: whether rhe region is to be ROM or RAM.
- */
-void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
-
-/**
- * memory_region_rom_device_set_readable: enable/disable ROM readability
- *
- * Allows a ROM device (initialized with memory_region_init_rom_device() to
- * to be marked as readable (default) or not readable. When it is readable,
- * the device is mapped to guest memory. When not readable, reads are
- * forwarded to the #MemoryRegion.read function.
- *
- * @mr: the memory region to be updated
- * @readable: whether reads are satisified directly (%true) or via callbacks
- * (%false)
- */
-void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable);
-
-/**
- * memory_region_set_coalescing: Enable memory coalescing for the region.
- *
- * Enabled writes to a region to be queued for later processing. MMIO ->write
- * callbacks may be delayed until a non-coalesced MMIO is issued.
- * Only useful for IO regions. Roughly similar to write-combining hardware.
- *
- * @mr: the memory region to be write coalesced
- */
-void memory_region_set_coalescing(MemoryRegion *mr);
-
-/**
- * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
- * a region.
- *
- * Like memory_region_set_coalescing(), but works on a sub-range of a region.
- * Multiple calls can be issued to coalesce disjoint ranges.
- *
- * @mr: the memory region to be updated.
- * @offset: the start of the range within the region to be coalesced.
- * @size: the size of the subrange to be coalesced.
- */
-void memory_region_add_coalescing(MemoryRegion *mr,
- hwaddr offset,
- uint64_t size);
-
-/**
- * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
- *
- * Disables any coalescing caused by memory_region_set_coalescing() or
- * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
- * hardware.
- *
- * @mr: the memory region to be updated.
- */
-void memory_region_clear_coalescing(MemoryRegion *mr);
-
-/**
- * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
- * accesses.
- *
- * Ensures that pending coalesced MMIO requests are flushed before the memory
- * region is accessed. This property is automatically enabled for all regions
- * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
- *
- * @mr: the memory region to be updated.
- */
-void memory_region_set_flush_coalesced(MemoryRegion *mr);
-
-/**
- * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
- * accesses.
- *
- * Clears the automatic coalesced MMIO flushing enabled via
- * memory_region_set_flush_coalesced(). Note that this service has no effect on
- * memory regions that have MMIO coalescing enabled for themselves. For them,
- * automatic flushing will stop once coalescing is disabled.
- *
- * @mr: the memory region to be updated.
- */
-void memory_region_clear_flush_coalesced(MemoryRegion *mr);
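A sketch of how a device might opt into coalescing, assuming a hypothetical graphics MMIO BAR; most devices rely on the automatic flush behaviour described above rather than calling the flush controls directly:

static void gfx_init_mmio(MemoryRegion *mmio_bar)
{
    /* Batch writes to the whole BAR ... */
    memory_region_set_coalescing(mmio_bar);
    /* ... or only to a 4KiB command window inside it:
     * memory_region_add_coalescing(mmio_bar, 0x1000, 0x1000); */
}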
-
-/**
- * memory_region_add_eventfd: Request an eventfd to be triggered when a word
- * is written to a location.
- *
- * Marks a word in an IO region (initialized with memory_region_init_io())
- * as a trigger for an eventfd event. The I/O callback will not be called.
- * The caller must be prepared to handle failure (that is, take the required
- * action if the callback _is_ called).
- *
- * @mr: the memory region being updated.
- * @addr: the address within @mr that is to be monitored
- * @size: the size of the access to trigger the eventfd
- * @match_data: whether to match against @data, instead of just @addr
- * @data: the data to match against the guest write
- * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
- */
-void memory_region_add_eventfd(MemoryRegion *mr,
- hwaddr addr,
- unsigned size,
- bool match_data,
- uint64_t data,
- EventNotifier *e);
-
-/**
- * memory_region_del_eventfd: Cancel an eventfd.
- *
- * Cancels an eventfd trigger requested by a previous
- * memory_region_add_eventfd() call.
- *
- * @mr: the memory region being updated.
- * @addr: the address within @mr that is to be monitored
- * @size: the size of the access to trigger the eventfd
- * @match_data: whether to match against @data, instead of just @addr
- * @data: the data to match against the guest write
- * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
- */
-void memory_region_del_eventfd(MemoryRegion *mr,
- hwaddr addr,
- unsigned size,
- bool match_data,
- uint64_t data,
- EventNotifier *e);
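A sketch of an ioeventfd-style doorbell, assuming a hypothetical device with a 2-byte doorbell register at offset 0x10 and QEMU's EventNotifier helper event_notifier_init():

static void doorbell_setup(MemoryRegion *mmio, EventNotifier *notifier)
{
    event_notifier_init(notifier, 0);
    /* Fire @notifier when the guest writes the value 0 (queue index 0) to
     * offset 0x10; the region's ->write callback is bypassed for that word. */
    memory_region_add_eventfd(mmio, 0x10, 2, true, 0, notifier);
}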
-
-/**
- * memory_region_add_subregion: Add a subregion to a container.
- *
- * Adds a subregion at @offset. The subregion may not overlap with other
- * subregions (except for those explicitly marked as overlapping). A region
- * may only be added once as a subregion (unless removed with
- * memory_region_del_subregion()); use memory_region_init_alias() if you
- * want a region to be a subregion in multiple locations.
- *
- * @mr: the region to contain the new subregion; must be a container
- * initialized with memory_region_init().
- * @offset: the offset relative to @mr where @subregion is added.
- * @subregion: the subregion to be added.
- */
-void memory_region_add_subregion(MemoryRegion *mr,
- hwaddr offset,
- MemoryRegion *subregion);
-/**
- * memory_region_add_subregion_overlap: Add a subregion to a container
- * with overlap.
- *
- * Adds a subregion at @offset. The subregion may overlap with other
- * subregions. Conflicts are resolved by having a higher @priority hide a
- * lower @priority. Subregions without priority are taken as @priority 0.
- * A region may only be added once as a subregion (unless removed with
- * memory_region_del_subregion()); use memory_region_init_alias() if you
- * want a region to be a subregion in multiple locations.
- *
- * @mr: the region to contain the new subregion; must be a container
- * initialized with memory_region_init().
- * @offset: the offset relative to @mr where @subregion is added.
- * @subregion: the subregion to be added.
- * @priority: used for resolving overlaps; highest priority wins.
- */
-void memory_region_add_subregion_overlap(MemoryRegion *mr,
- hwaddr offset,
- MemoryRegion *subregion,
- unsigned priority);
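A board-level sketch with hypothetical addresses, showing a flat mapping plus an overlapping window that wins by priority:

static void board_map(MemoryRegion *sysmem, MemoryRegion *ram,
                      MemoryRegion *mmio_window)
{
    memory_region_add_subregion(sysmem, 0x00000000, ram);
    /* The MMIO window overlaps RAM at 3GiB and hides it (priority 1 > 0). */
    memory_region_add_subregion_overlap(sysmem, 0xc0000000, mmio_window, 1);
}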
-
-/**
- * memory_region_get_ram_addr: Get the ram address associated with a memory
- * region
- *
- * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
- * code is being reworked.
- */
-ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
-
-/**
- * memory_region_del_subregion: Remove a subregion.
- *
- * Removes a subregion from its container.
- *
- * @mr: the container to be updated.
- * @subregion: the region being removed; must be a current subregion of @mr.
- */
-void memory_region_del_subregion(MemoryRegion *mr,
- MemoryRegion *subregion);
-
-/*
- * memory_region_set_enabled: dynamically enable or disable a region
- *
- * Enables or disables a memory region. A disabled memory region
- * ignores all accesses to itself and its subregions. It does not
- * obscure sibling subregions with lower priority - it simply behaves as
- * if it was removed from the hierarchy.
- *
- * Regions default to being enabled.
- *
- * @mr: the region to be updated
- * @enabled: whether to enable or disable the region
- */
-void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
-
-/*
- * memory_region_set_address: dynamically update the address of a region
- *
- * Dynamically updates the address of a region, relative to its parent.
- * May be used on regions that are currently part of a memory hierarchy.
- *
- * @mr: the region to be updated
- * @addr: new address, relative to parent region
- */
-void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
-
-/*
- * memory_region_set_alias_offset: dynamically update a memory alias's offset
- *
- * Dynamically updates the offset into the target region that an alias points
- * to, as if the fourth argument to memory_region_init_alias() had changed.
- *
- * @mr: the #MemoryRegion to be updated; should be an alias.
- * @offset: the new offset into the target memory region
- */
-void memory_region_set_alias_offset(MemoryRegion *mr,
- hwaddr offset);
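These setters cover the common bank-switching pattern; a sketch with a hypothetical 64KiB bank size:

static void mc_select_bank(MemoryRegion *bank_alias, unsigned bank)
{
    /* The alias stays mapped at the same guest address; only the offset
     * into the backing region changes. */
    memory_region_set_alias_offset(bank_alias, (hwaddr)bank * 0x10000);
}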
-
-/**
- * memory_region_find: locate a MemoryRegion in an address space
- *
- * Locates the first #MemoryRegion within an address space given by
- * @address_space that overlaps the range given by @addr and @size.
- *
- * Returns a #MemoryRegionSection that describes a contiguous overlap.
- * It will have the following characteristics:
- * .@offset_within_address_space >= @addr
- * .@offset_within_address_space + .@size <= @addr + @size
- * .@size = 0 iff no overlap was found
- * .@mr is non-%NULL iff an overlap was found
- *
- * @address_space: a top-level (i.e. parentless) region that contains
- * the region to be found
- * @addr: start of the area within @address_space to be searched
- * @size: size of the area to be searched
- */
-MemoryRegionSection memory_region_find(MemoryRegion *address_space,
- hwaddr addr, uint64_t size);
-
-/**
- * memory_region_section_addr: get offset within MemoryRegionSection
- *
- * Returns offset within MemoryRegionSection
- *
- * @section: the memory region section being queried
- * @addr: address in address space
- */
-static inline hwaddr
-memory_region_section_addr(MemoryRegionSection *section,
- hwaddr addr)
-{
- addr -= section->offset_within_address_space;
- addr += section->offset_within_region;
- return addr;
-}
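Putting the two lookups together, a sketch (hypothetical helper name) that checks whether anything is mapped at a guest-physical address and reports the offset within the region found:

static bool addr_is_mapped(MemoryRegion *sysmem, hwaddr addr,
                           hwaddr *offset_in_mr)
{
    MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);

    if (!sec.mr) {
        return false;                               /* no overlap found */
    }
    *offset_in_mr = memory_region_section_addr(&sec, addr);
    return true;
}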
-
-/**
- * memory_global_sync_dirty_bitmap: synchronize the dirty log for all memory
- *
- * Synchronizes the dirty page log for an entire address space.
- * @address_space: a top-level (i.e. parentless) region that contains the
- * memory being synchronized
- */
-void memory_global_sync_dirty_bitmap(MemoryRegion *address_space);
-
-/**
- * memory_region_transaction_begin: Start a transaction.
- *
- * During a transaction, changes will be accumulated and made visible
- * only when the transaction ends (is committed).
- */
-void memory_region_transaction_begin(void);
-
-/**
- * memory_region_transaction_commit: Commit a transaction and make changes
- * visible to the guest.
- */
-void memory_region_transaction_commit(void);
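A sketch of batching the dynamic updates declared earlier, so the guest never observes the intermediate state (hypothetical device being re-based):

static void dev_rebase(MemoryRegion *mr, hwaddr new_base)
{
    memory_region_transaction_begin();
    memory_region_set_address(mr, new_base);
    memory_region_set_enabled(mr, true);
    memory_region_transaction_commit();   /* both changes become visible here */
}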
-
-/**
- * memory_listener_register: register callbacks to be called when memory
- * sections are mapped or unmapped into an address
- * space
- *
- * @listener: an object containing the callbacks to be called
- * @filter: if non-%NULL, only regions in this address space will be observed
- */
-void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
-
-/**
- * memory_listener_unregister: undo the effect of memory_listener_register()
- *
- * @listener: an object containing the callbacks to be removed
- */
-void memory_listener_unregister(MemoryListener *listener);
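A minimal listener sketch; it assumes the MemoryListener callback layout of this API (a region_add hook taking the listener and the affected #MemoryRegionSection) and fills in only that callback. A real listener of this vintage typically also provides no-op stubs for the remaining hooks:

static void my_region_add(MemoryListener *listener, MemoryRegionSection *section)
{
    /* React to a section becoming mapped, e.g. set up accelerator state. */
}

static MemoryListener my_listener = {
    .region_add = my_region_add,
};

static void my_listener_start(AddressSpace *as)
{
    memory_listener_register(&my_listener, as);  /* NULL would observe all spaces */
}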
-
-/**
- * memory_global_dirty_log_start: begin dirty logging for all regions
- */
-void memory_global_dirty_log_start(void);
-
-/**
- * memory_global_dirty_log_stop: end dirty logging for all regions
- */
-void memory_global_dirty_log_stop(void);
-
-void mtree_info(fprintf_function mon_printf, void *f);
-
-/**
- * address_space_init: initializes an address space
- *
- * @as: an uninitialized #AddressSpace
- * @root: a #MemoryRegion that routes addresses for the address space
- */
-void address_space_init(AddressSpace *as, MemoryRegion *root);
-
-
-/**
- * address_space_destroy: destroy an address space
- *
- * Releases all resources associated with an address space. After an address space
- * is destroyed, its root memory region (given by address_space_init()) may be destroyed
- * as well.
- *
- * @as: address space to be destroyed
- */
-void address_space_destroy(AddressSpace *as);
-
-/**
- * address_space_rw: read from or write to an address space.
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @buf: buffer with the data transferred
- * @is_write: indicates the transfer direction
- */
-void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
- int len, bool is_write);
-
-/**
- * address_space_write: write to address space.
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @buf: buffer with the data transferred
- */
-void address_space_write(AddressSpace *as, hwaddr addr,
- const uint8_t *buf, int len);
-
-/**
- * address_space_read: read from an address space.
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @buf: buffer with the data transferred
- */
-void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
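A bounce-buffer style sketch using the accessors above (hypothetical address; these variants return void, so no error handling is shown):

static void dma_set_msb(AddressSpace *as, hwaddr addr)
{
    uint8_t byte;

    address_space_read(as, addr, &byte, 1);
    byte |= 0x80;
    address_space_write(as, addr, &byte, 1);
}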
-
-/* address_space_map: map a physical memory region into a host virtual address
- *
- * May map a subset of the requested range, given by and returned in @plen.
- * May return %NULL if resources needed to perform the mapping are exhausted.
- * Use only for reads OR writes - not for read-modify-write operations.
- * Use cpu_register_map_client() to know when retrying the map operation is
- * likely to succeed.
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @plen: pointer to length of buffer; updated on return
- * @is_write: indicates the transfer direction
- */
-void *address_space_map(AddressSpace *as, hwaddr addr,
- hwaddr *plen, bool is_write);
-
-/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
- *
- * Will also mark the memory as dirty if @is_write == %true. @access_len gives
- * the amount of memory that was actually read or written by the caller.
- *
- * @as: #AddressSpace used
- * @buffer: host pointer as returned by address_space_map()
- * @len: buffer length as returned by address_space_map()
- * @access_len: amount of data actually transferred
- * @is_write: indicates the transfer direction
- */
-void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
- int is_write, hwaddr access_len);
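A zero-copy sketch built on the map/unmap pair; it assumes memset() from <string.h> and respects the possibly shortened length returned in @plen:

static void dma_zero(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *host = address_space_map(as, addr, &plen, true);

    if (!host) {
        return;               /* resources exhausted; caller could retry later */
    }
    memset(host, 0, plen);    /* plen may be smaller than len */
    address_space_unmap(as, host, plen, true, plen);
}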
-
-
-#endif
-
-#endif
*/
#include "cpu.h"
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#include "memory_mapping.h"
int qemu_get_guest_memory_mapping(MemoryMappingList *list)
*/
#include "cpu.h"
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#include "memory_mapping.h"
static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
#include "hw/pci/pci.h"
#include "hw/watchdog.h"
#include "hw/loader.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "net/net.h"
#include "net/slirp.h"
#include "qemu-char.h"
#include "trace/simple.h"
#endif
#include "ui/qemu-spice.h"
-#include "memory.h"
+#include "exec/memory.h"
#include "qmp-commands.h"
#include "hmp.h"
#include "qemu-thread.h"
+++ /dev/null
-/* Poison identifiers that should not be used when building
- target independent device code. */
-
-#ifndef HW_POISON_H
-#define HW_POISON_H
-#ifdef __GNUC__
-
-#pragma GCC poison TARGET_I386
-#pragma GCC poison TARGET_X86_64
-#pragma GCC poison TARGET_ALPHA
-#pragma GCC poison TARGET_ARM
-#pragma GCC poison TARGET_CRIS
-#pragma GCC poison TARGET_LM32
-#pragma GCC poison TARGET_M68K
-#pragma GCC poison TARGET_MIPS
-#pragma GCC poison TARGET_MIPS64
-#pragma GCC poison TARGET_OPENRISC
-#pragma GCC poison TARGET_PPC
-#pragma GCC poison TARGET_PPCEMB
-#pragma GCC poison TARGET_PPC64
-#pragma GCC poison TARGET_ABI32
-#pragma GCC poison TARGET_SH4
-#pragma GCC poison TARGET_SPARC
-#pragma GCC poison TARGET_SPARC64
-
-#pragma GCC poison TARGET_WORDS_BIGENDIAN
-#pragma GCC poison BSWAP_NEEDED
-
-#pragma GCC poison TARGET_LONG_BITS
-#pragma GCC poison TARGET_FMT_lx
-#pragma GCC poison TARGET_FMT_ld
-
-#pragma GCC poison TARGET_PAGE_SIZE
-#pragma GCC poison TARGET_PAGE_MASK
-#pragma GCC poison TARGET_PAGE_BITS
-#pragma GCC poison TARGET_PAGE_ALIGN
-
-#pragma GCC poison CPUArchState
-#pragma GCC poison env
-
-#pragma GCC poison lduw_phys
-#pragma GCC poison ldl_phys
-#pragma GCC poison ldq_phys
-#pragma GCC poison stl_phys_notdirty
-#pragma GCC poison stq_phys_notdirty
-#pragma GCC poison stw_phys
-#pragma GCC poison stl_phys
-#pragma GCC poison stq_phys
-
-#pragma GCC poison CPU_INTERRUPT_HARD
-#pragma GCC poison CPU_INTERRUPT_EXITTB
-#pragma GCC poison CPU_INTERRUPT_HALT
-#pragma GCC poison CPU_INTERRUPT_DEBUG
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4
-#pragma GCC poison CPU_INTERRUPT_TGT_INT_0
-#pragma GCC poison CPU_INTERRUPT_TGT_INT_1
-#pragma GCC poison CPU_INTERRUPT_TGT_INT_2
-
-#endif
-#endif
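For illustration only (standalone, not QEMU code), the mechanism behind this header: any later reference to a poisoned name in the same translation unit becomes a hard compile-time error, which is how target-specific macros are kept out of common device code.

#pragma GCC poison legacy_target_macro
int common_code(void);          /* fine: does not mention the poisoned name */
/* int x = legacy_target_macro; would now fail with
 *   "attempt to use poisoned \"legacy_target_macro\"" */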
+++ /dev/null
-/*
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>
- */
-
-/* configure guarantees us that we have pthreads on any host except
- * mingw32, which doesn't support any of the user-only targets.
- * So we can simply assume we have pthread mutexes here.
- */
-#if defined(CONFIG_USER_ONLY)
-
-#include <pthread.h>
-#define spin_lock pthread_mutex_lock
-#define spin_unlock pthread_mutex_unlock
-#define spinlock_t pthread_mutex_t
-#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
-
-#else
-
-/* Empty implementations, on the theory that system mode emulation
- * is single-threaded. This means that these functions should only
- * be used from code run in the TCG cpu thread, and cannot protect
- * data structures which might also be accessed from the IO thread
- * or from signal handlers.
- */
-typedef int spinlock_t;
-#define SPIN_LOCK_UNLOCKED 0
-
-static inline void spin_lock(spinlock_t *lock)
-{
-}
-
-static inline void spin_unlock(spinlock_t *lock)
-{
-}
-
-#endif
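A usage sketch for the compatibility lock above; the same source works in both configurations because the names are identical (the shared list is hypothetical):

static spinlock_t list_lock = SPIN_LOCK_UNLOCKED;

static void list_update(void)
{
    spin_lock(&list_lock);
    /* ... mutate state shared with other user-mode threads ... */
    spin_unlock(&list_lock);
}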
+++ /dev/null
-#ifndef QEMU_TYPES_H
-#define QEMU_TYPES_H
-#include "cpu.h"
-
-#ifdef TARGET_ABI32
-typedef uint32_t abi_ulong;
-typedef int32_t abi_long;
-#define TARGET_ABI_FMT_lx "%08x"
-#define TARGET_ABI_FMT_ld "%d"
-#define TARGET_ABI_FMT_lu "%u"
-#define TARGET_ABI_BITS 32
-
-static inline abi_ulong tswapal(abi_ulong v)
-{
- return tswap32(v);
-}
-
-#else
-typedef target_ulong abi_ulong;
-typedef target_long abi_long;
-#define TARGET_ABI_FMT_lx TARGET_FMT_lx
-#define TARGET_ABI_FMT_ld TARGET_FMT_ld
-#define TARGET_ABI_FMT_lu TARGET_FMT_lu
-#define TARGET_ABI_BITS TARGET_LONG_BITS
-/* for consistency, define ABI32 too */
-#if TARGET_ABI_BITS == 32
-#define TARGET_ABI32 1
-#endif
-
-static inline abi_ulong tswapal(abi_ulong v)
-{
- return tswapl(v);
-}
-
-#endif
-#endif
#include "qtest.h"
#include "hw/qdev.h"
#include "qemu-char.h"
-#include "ioport.h"
-#include "memory.h"
+#include "exec/ioport.h"
+#include "exec/memory.h"
#include "hw/irq.h"
#include "sysemu.h"
#include "cpus.h"
#include "qemu-queue.h"
#include "qemu-timer.h"
#include "cpus.h"
-#include "memory.h"
+#include "exec/memory.h"
#include "qmp-commands.h"
#include "trace.h"
#include "bitops.h"
${AWK:-awk} 'BEGIN { n = 0
printf "#include \"config.h\"\n"
printf "#include \"qemu-common.h\"\n"
- printf "#include \"gdbstub.h\"\n"
+ printf "#include \"exec/gdbstub.h\"\n"
print "static const char '$arrayname'[] = {"
for (i = 0; i < 255; i++)
_ord_[sprintf("%c", i)] = i
+++ /dev/null
-/*
- * Helper routines to provide target memory access for semihosting
- * syscalls in system emulation mode.
- *
- * Copyright (c) 2007 CodeSourcery.
- *
- * This code is licensed under the GPL
- */
-#ifndef SOFTMMU_SEMI_H
-#define SOFTMMU_SEMI_H 1
-
-static inline uint32_t softmmu_tget32(CPUArchState *env, uint32_t addr)
-{
- uint32_t val;
-
- cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 0);
- return tswap32(val);
-}
-static inline uint32_t softmmu_tget8(CPUArchState *env, uint32_t addr)
-{
- uint8_t val;
-
- cpu_memory_rw_debug(env, addr, &val, 1, 0);
- return val;
-}
-
-#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p) ; 0; })
-#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; })
-#define get_user_ual(arg, p) get_user_u32(arg, p)
-
-static inline void softmmu_tput32(CPUArchState *env, uint32_t addr, uint32_t val)
-{
- val = tswap32(val);
- cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 1);
-}
-#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; })
-#define put_user_ual(arg, p) put_user_u32(arg, p)
-
-static void *softmmu_lock_user(CPUArchState *env, uint32_t addr, uint32_t len,
- int copy)
-{
- uint8_t *p;
- /* TODO: Make this something that isn't fixed size. */
- p = malloc(len);
- if (p && copy)
- cpu_memory_rw_debug(env, addr, p, len, 0);
- return p;
-}
-#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)
-static char *softmmu_lock_user_string(CPUArchState *env, uint32_t addr)
-{
- char *p;
- char *s;
- uint8_t c;
- /* TODO: Make this something that isn't fixed size. */
- s = p = malloc(1024);
- if (!s) {
- return NULL;
- }
- do {
- cpu_memory_rw_debug(env, addr, &c, 1, 0);
- addr++;
- *(p++) = c;
- } while (c);
- return s;
-}
-#define lock_user_string(p) softmmu_lock_user_string(env, p)
-static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
- target_ulong len)
-{
- if (len)
- cpu_memory_rw_debug(env, addr, p, len, 1);
- free(p);
-}
-#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len)
-
-#endif
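A sketch of how a target's semihosting code consumes these macros; `env` must be in scope because the macros expand to softmmu_tget/tput calls on it. The handler below is a hypothetical SYS_WRITE0-style example, with the actual string consumption elided:

static void semihosting_write0(CPUArchState *env, uint32_t args)
{
    uint32_t straddr;
    char *s;

    get_user_u32(straddr, args);      /* read the guest pointer argument */
    s = lock_user_string(straddr);    /* copy the NUL-terminated guest string */
    if (s) {
        /* ... hand the string to the host (console, chardev, ...) ... */
        unlock_user(s, straddr, 0);   /* len == 0: nothing written back */
    }
}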
+++ /dev/null
-/*
- * Software MMU support
- *
- * Declare helpers used by TCG for qemu_ld/st ops.
- *
- * Used by softmmu_exec.h, TCG targets and exec-all.h.
- *
- */
-#ifndef SOFTMMU_DEFS_H
-#define SOFTMMU_DEFS_H
-
-uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
- int mmu_idx);
-uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- int mmu_idx);
-uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- int mmu_idx);
-uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- int mmu_idx);
-
-uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stb_cmmu(CPUArchState *env, target_ulong addr, uint8_t val,
-int mmu_idx);
-uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stw_cmmu(CPUArchState *env, target_ulong addr, uint16_t val,
- int mmu_idx);
-uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val,
- int mmu_idx);
-uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
- int mmu_idx);
-#endif
+++ /dev/null
-/*
- * Software MMU support
- *
- * Generate inline load/store functions for all MMU modes (typically
- * at least _user and _kernel) as well as _data versions, for all data
- * sizes.
- *
- * Used by target op helpers.
- *
- * MMU mode suffixes are defined in target cpu.h.
- */
-
-/* XXX: find something cleaner.
- * Furthermore, this is false for 64-bit targets
- */
-#define ldul_user ldl_user
-#define ldul_kernel ldl_kernel
-#define ldul_hypv ldl_hypv
-#define ldul_executive ldl_executive
-#define ldul_supervisor ldl_supervisor
-
-#include "softmmu_defs.h"
-
-#define ACCESS_TYPE 0
-#define MEMSUFFIX MMU_MODE0_SUFFIX
-#define DATA_SIZE 1
-#include "softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-
-#define ACCESS_TYPE 1
-#define MEMSUFFIX MMU_MODE1_SUFFIX
-#define DATA_SIZE 1
-#include "softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-
-#if (NB_MMU_MODES >= 3)
-
-#define ACCESS_TYPE 2
-#define MEMSUFFIX MMU_MODE2_SUFFIX
-#define DATA_SIZE 1
-#include "softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 3) */
-
-#if (NB_MMU_MODES >= 4)
-
-#define ACCESS_TYPE 3
-#define MEMSUFFIX MMU_MODE3_SUFFIX
-#define DATA_SIZE 1
-#include "softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 4) */
-
-#if (NB_MMU_MODES >= 5)
-
-#define ACCESS_TYPE 4
-#define MEMSUFFIX MMU_MODE4_SUFFIX
-#define DATA_SIZE 1
-#include "softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 5) */
-
-#if (NB_MMU_MODES >= 6)
-
-#define ACCESS_TYPE 5
-#define MEMSUFFIX MMU_MODE5_SUFFIX
-#define DATA_SIZE 1
-#include "softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 6) */
-
-#if (NB_MMU_MODES > 6)
-#error "NB_MMU_MODES > 6 is not supported for now"
-#endif /* (NB_MMU_MODES > 6) */
-
-/* these accesses are slower; they must be as rare as possible */
-#define ACCESS_TYPE (NB_MMU_MODES)
-#define MEMSUFFIX _data
-#define DATA_SIZE 1
-#include "softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-
-#define ldub(p) ldub_data(p)
-#define ldsb(p) ldsb_data(p)
-#define lduw(p) lduw_data(p)
-#define ldsw(p) ldsw_data(p)
-#define ldl(p) ldl_data(p)
-#define ldq(p) ldq_data(p)
-
-#define stb(p, v) stb_data(p, v)
-#define stw(p, v) stw_data(p, v)
-#define stl(p, v) stl_data(p, v)
-#define stq(p, v) stq_data(p, v)
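The expansion above generates cpu_ld/st accessors named after each target's MMU mode suffixes. As an illustration, for a target that defines MMU_MODE0_SUFFIX as _kernel, ACCESS_TYPE 0 with DATA_SIZE 4 yields cpu_ldl_kernel()/cpu_stl_kernel(), used from an op helper roughly as in this hypothetical sketch:

static uint32_t helper_swap_word(CPUArchState *env, target_ulong vaddr)
{
    uint32_t old = cpu_ldl_kernel(env, vaddr);   /* load via kernel-mode TLB */

    cpu_stl_kernel(env, vaddr, bswap32(old));    /* store the byte-swapped value */
    return old;
}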
+++ /dev/null
-/*
- * Software MMU support
- *
- * Generate inline load/store functions for one MMU mode and data
- * size.
- *
- * Generate a store function as well as signed and unsigned loads. For
- * 32 and 64 bit cases, also generate floating point functions with
- * the same size.
- *
- * Not used directly but included from softmmu_exec.h and exec-all.h.
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#if DATA_SIZE == 8
-#define SUFFIX q
-#define USUFFIX q
-#define DATA_TYPE uint64_t
-#elif DATA_SIZE == 4
-#define SUFFIX l
-#define USUFFIX l
-#define DATA_TYPE uint32_t
-#elif DATA_SIZE == 2
-#define SUFFIX w
-#define USUFFIX uw
-#define DATA_TYPE uint16_t
-#define DATA_STYPE int16_t
-#elif DATA_SIZE == 1
-#define SUFFIX b
-#define USUFFIX ub
-#define DATA_TYPE uint8_t
-#define DATA_STYPE int8_t
-#else
-#error unsupported data size
-#endif
-
-#if ACCESS_TYPE < (NB_MMU_MODES)
-
-#define CPU_MMU_INDEX ACCESS_TYPE
-#define MMUSUFFIX _mmu
-
-#elif ACCESS_TYPE == (NB_MMU_MODES)
-
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
-#define MMUSUFFIX _mmu
-
-#elif ACCESS_TYPE == (NB_MMU_MODES + 1)
-
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
-#define MMUSUFFIX _cmmu
-
-#else
-#error invalid ACCESS_TYPE
-#endif
-
-#if DATA_SIZE == 8
-#define RES_TYPE uint64_t
-#else
-#define RES_TYPE uint32_t
-#endif
-
-#if ACCESS_TYPE == (NB_MMU_MODES + 1)
-#define ADDR_READ addr_code
-#else
-#define ADDR_READ addr_read
-#endif
-
-/* generic load/store macros */
-
-static inline RES_TYPE
-glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
-{
- int page_index;
- RES_TYPE res;
- target_ulong addr;
- int mmu_idx;
-
- addr = ptr;
- page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- mmu_idx = CPU_MMU_INDEX;
- if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
- (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
- } else {
- uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
- res = glue(glue(ld, USUFFIX), _raw)(hostaddr);
- }
- return res;
-}
-
-#if DATA_SIZE <= 2
-static inline int
-glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
-{
- int res, page_index;
- target_ulong addr;
- int mmu_idx;
-
- addr = ptr;
- page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- mmu_idx = CPU_MMU_INDEX;
- if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
- (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
- MMUSUFFIX)(env, addr, mmu_idx);
- } else {
- uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
- res = glue(glue(lds, SUFFIX), _raw)(hostaddr);
- }
- return res;
-}
-#endif
-
-#if ACCESS_TYPE != (NB_MMU_MODES + 1)
-
-/* generic store macro */
-
-static inline void
-glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
- RES_TYPE v)
-{
- int page_index;
- target_ulong addr;
- int mmu_idx;
-
- addr = ptr;
- page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- mmu_idx = CPU_MMU_INDEX;
- if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
- (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
- } else {
- uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
- glue(glue(st, SUFFIX), _raw)(hostaddr, v);
- }
-}
-
-#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
-
-#if ACCESS_TYPE != (NB_MMU_MODES + 1)
-
-#if DATA_SIZE == 8
-static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env,
- target_ulong ptr)
-{
- union {
- float64 d;
- uint64_t i;
- } u;
- u.i = glue(cpu_ldq, MEMSUFFIX)(env, ptr);
- return u.d;
-}
-
-static inline void glue(cpu_stfq, MEMSUFFIX)(CPUArchState *env,
- target_ulong ptr, float64 v)
-{
- union {
- float64 d;
- uint64_t i;
- } u;
- u.d = v;
- glue(cpu_stq, MEMSUFFIX)(env, ptr, u.i);
-}
-#endif /* DATA_SIZE == 8 */
-
-#if DATA_SIZE == 4
-static inline float32 glue(cpu_ldfl, MEMSUFFIX)(CPUArchState *env,
- target_ulong ptr)
-{
- union {
- float32 f;
- uint32_t i;
- } u;
- u.i = glue(cpu_ldl, MEMSUFFIX)(env, ptr);
- return u.f;
-}
-
-static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env,
- target_ulong ptr, float32 v)
-{
- union {
- float32 f;
- uint32_t i;
- } u;
- u.f = v;
- glue(cpu_stl, MEMSUFFIX)(env, ptr, u.i);
-}
-#endif /* DATA_SIZE == 4 */
-
-#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
-
-#undef RES_TYPE
-#undef DATA_TYPE
-#undef DATA_STYPE
-#undef SUFFIX
-#undef USUFFIX
-#undef DATA_SIZE
-#undef CPU_MMU_INDEX
-#undef MMUSUFFIX
-#undef ADDR_READ
+++ /dev/null
-/*
- * Software MMU support
- *
- * Generate helpers used by TCG for qemu_ld/st ops and code load
- * functions.
- *
- * Included from target op helpers and exec.c.
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "qemu-timer.h"
-#include "memory.h"
-
-#define DATA_SIZE (1 << SHIFT)
-
-#if DATA_SIZE == 8
-#define SUFFIX q
-#define USUFFIX q
-#define DATA_TYPE uint64_t
-#elif DATA_SIZE == 4
-#define SUFFIX l
-#define USUFFIX l
-#define DATA_TYPE uint32_t
-#elif DATA_SIZE == 2
-#define SUFFIX w
-#define USUFFIX uw
-#define DATA_TYPE uint16_t
-#elif DATA_SIZE == 1
-#define SUFFIX b
-#define USUFFIX ub
-#define DATA_TYPE uint8_t
-#else
-#error unsupported data size
-#endif
-
-#ifdef SOFTMMU_CODE_ACCESS
-#define READ_ACCESS_TYPE 2
-#define ADDR_READ addr_code
-#else
-#define READ_ACCESS_TYPE 0
-#define ADDR_READ addr_read
-#endif
-
-static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr,
- int mmu_idx,
- uintptr_t retaddr);
-static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
- hwaddr physaddr,
- target_ulong addr,
- uintptr_t retaddr)
-{
- DATA_TYPE res;
- MemoryRegion *mr = iotlb_to_region(physaddr);
-
- physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
- env->mem_io_pc = retaddr;
- if (mr != &io_mem_ram && mr != &io_mem_rom
- && mr != &io_mem_unassigned
- && mr != &io_mem_notdirty
- && !can_do_io(env)) {
- cpu_io_recompile(env, retaddr);
- }
-
- env->mem_io_vaddr = addr;
-#if SHIFT <= 2
- res = io_mem_read(mr, physaddr, 1 << SHIFT);
-#else
-#ifdef TARGET_WORDS_BIGENDIAN
- res = io_mem_read(mr, physaddr, 4) << 32;
- res |= io_mem_read(mr, physaddr + 4, 4);
-#else
- res = io_mem_read(mr, physaddr, 4);
- res |= io_mem_read(mr, physaddr + 4, 4) << 32;
-#endif
-#endif /* SHIFT > 2 */
- return res;
-}
-
-/* handle all cases except unaligned accesses which span two pages */
-DATA_TYPE
-glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
- int mmu_idx)
-{
- DATA_TYPE res;
- int index;
- target_ulong tlb_addr;
- hwaddr ioaddr;
- uintptr_t retaddr;
-
- /* test if there is match for unaligned or IO access */
- /* XXX: could be done more in memory macro in a non portable way */
- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- redo:
- tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
- if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- if ((addr & (DATA_SIZE - 1)) != 0)
- goto do_unaligned_access;
- retaddr = GETPC_EXT();
- ioaddr = env->iotlb[mmu_idx][index];
- res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
- } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
- /* slow unaligned access (it spans two pages or IO) */
- do_unaligned_access:
- retaddr = GETPC_EXT();
-#ifdef ALIGNED_ONLY
- do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
-#endif
- res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr,
- mmu_idx, retaddr);
- } else {
- /* unaligned/aligned access in the same page */
- uintptr_t addend;
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- retaddr = GETPC_EXT();
- do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
- }
-#endif
- addend = env->tlb_table[mmu_idx][index].addend;
- res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
- (addr + addend));
- }
- } else {
- /* the page is not in the TLB : fill it */
- retaddr = GETPC_EXT();
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0)
- do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
-#endif
- tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
- goto redo;
- }
- return res;
-}
-
-/* handle all unaligned cases */
-static DATA_TYPE
-glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr,
- int mmu_idx,
- uintptr_t retaddr)
-{
- DATA_TYPE res, res1, res2;
- int index, shift;
- hwaddr ioaddr;
- target_ulong tlb_addr, addr1, addr2;
-
- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- redo:
- tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
- if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- if ((addr & (DATA_SIZE - 1)) != 0)
- goto do_unaligned_access;
- ioaddr = env->iotlb[mmu_idx][index];
- res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
- } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
- do_unaligned_access:
- /* slow unaligned access (it spans two pages) */
- addr1 = addr & ~(DATA_SIZE - 1);
- addr2 = addr1 + DATA_SIZE;
- res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr1,
- mmu_idx, retaddr);
- res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr2,
- mmu_idx, retaddr);
- shift = (addr & (DATA_SIZE - 1)) * 8;
-#ifdef TARGET_WORDS_BIGENDIAN
- res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
-#else
- res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
-#endif
- res = (DATA_TYPE)res;
- } else {
- /* unaligned/aligned access in the same page */
- uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
- res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
- (addr + addend));
- }
- } else {
- /* the page is not in the TLB : fill it */
- tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
- goto redo;
- }
- return res;
-}
-
-#ifndef SOFTMMU_CODE_ACCESS
-
-static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr,
- DATA_TYPE val,
- int mmu_idx,
- uintptr_t retaddr);
-
-static inline void glue(io_write, SUFFIX)(CPUArchState *env,
- hwaddr physaddr,
- DATA_TYPE val,
- target_ulong addr,
- uintptr_t retaddr)
-{
- MemoryRegion *mr = iotlb_to_region(physaddr);
-
- physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
- if (mr != &io_mem_ram && mr != &io_mem_rom
- && mr != &io_mem_unassigned
- && mr != &io_mem_notdirty
- && !can_do_io(env)) {
- cpu_io_recompile(env, retaddr);
- }
-
- env->mem_io_vaddr = addr;
- env->mem_io_pc = retaddr;
-#if SHIFT <= 2
- io_mem_write(mr, physaddr, val, 1 << SHIFT);
-#else
-#ifdef TARGET_WORDS_BIGENDIAN
- io_mem_write(mr, physaddr, (val >> 32), 4);
- io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
-#else
- io_mem_write(mr, physaddr, (uint32_t)val, 4);
- io_mem_write(mr, physaddr + 4, val >> 32, 4);
-#endif
-#endif /* SHIFT > 2 */
-}
-
-void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr, DATA_TYPE val,
- int mmu_idx)
-{
- hwaddr ioaddr;
- target_ulong tlb_addr;
- uintptr_t retaddr;
- int index;
-
- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- redo:
- tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- if ((addr & (DATA_SIZE - 1)) != 0)
- goto do_unaligned_access;
- retaddr = GETPC_EXT();
- ioaddr = env->iotlb[mmu_idx][index];
- glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
- } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
- do_unaligned_access:
- retaddr = GETPC_EXT();
-#ifdef ALIGNED_ONLY
- do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
-#endif
- glue(glue(slow_st, SUFFIX), MMUSUFFIX)(env, addr, val,
- mmu_idx, retaddr);
- } else {
- /* aligned/unaligned access in the same page */
- uintptr_t addend;
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- retaddr = GETPC_EXT();
- do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
- }
-#endif
- addend = env->tlb_table[mmu_idx][index].addend;
- glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
- (addr + addend), val);
- }
- } else {
- /* the page is not in the TLB : fill it */
- retaddr = GETPC_EXT();
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0)
- do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
-#endif
- tlb_fill(env, addr, 1, mmu_idx, retaddr);
- goto redo;
- }
-}
-
-/* handles all unaligned cases */
-static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr,
- DATA_TYPE val,
- int mmu_idx,
- uintptr_t retaddr)
-{
- hwaddr ioaddr;
- target_ulong tlb_addr;
- int index, i;
-
- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- redo:
- tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- if ((addr & (DATA_SIZE - 1)) != 0)
- goto do_unaligned_access;
- ioaddr = env->iotlb[mmu_idx][index];
- glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
- } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
- do_unaligned_access:
- /* XXX: not efficient, but simple */
- /* Note: relies on the fact that tlb_fill() does not remove the
- * previous page from the TLB cache. */
- for(i = DATA_SIZE - 1; i >= 0; i--) {
-#ifdef TARGET_WORDS_BIGENDIAN
- glue(slow_stb, MMUSUFFIX)(env, addr + i,
- val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
- mmu_idx, retaddr);
-#else
- glue(slow_stb, MMUSUFFIX)(env, addr + i,
- val >> (i * 8),
- mmu_idx, retaddr);
-#endif
- }
- } else {
- /* aligned/unaligned access in the same page */
- uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
- glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
- (addr + addend), val);
- }
- } else {
- /* the page is not in the TLB : fill it */
- tlb_fill(env, addr, 1, mmu_idx, retaddr);
- goto redo;
- }
-}
-
-#endif /* !defined(SOFTMMU_CODE_ACCESS) */
-
-#undef READ_ACCESS_TYPE
-#undef SHIFT
-#undef DATA_TYPE
-#undef SUFFIX
-#undef USUFFIX
-#undef DATA_SIZE
-#undef ADDR_READ
#define CPUArchState struct CPUAlphaState
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#include "softfloat.h"
#define cpu_gen_code cpu_alpha_gen_code
#define cpu_signal_handler cpu_alpha_signal_handler
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#include "cpu-qom.h"
enum {
| CPU_INTERRUPT_MCHK);
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUAlphaState *env, TranslationBlock *tb)
{
-#include "def-helper.h"
+#include "exec/def-helper.h"
DEF_HELPER_3(excp, noreturn, env, int, int)
DEF_HELPER_FLAGS_1(load_pcc, TCG_CALL_NO_RWG_SE, i64, env)
DEF_HELPER_FLAGS_2(set_alarm, TCG_CALL_NO_RWG, void, env, i64)
#endif
-#include "def-helper.h"
+#include "exec/def-helper.h"
dynamic_excp(env, 0, EXCP_MCHK, 0);
}
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define ALIGNED_ONLY
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
static void alpha_translate_init(void)
{
#define ARM_ANGEL_HEAP_SIZE (128 * 1024 * 1024)
#else
#include "qemu-common.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "hw/arm-misc.h"
#endif
return code;
}
-#include "softmmu-semi.h"
+#include "exec/softmmu-semi.h"
#endif
static target_ulong arm_semi_syscall_len;
#include "config.h"
#include "qemu-common.h"
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#include "softfloat.h"
}
#endif
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
/* Bit usage in the TB flags field: */
#define ARM_TBFLAG_THUMB_SHIFT 0
(CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb)
{
#include "cpu.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "helper.h"
#include "host-utils.h"
#include "sysemu.h"
-#include "def-helper.h"
+#include "exec/def-helper.h"
DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_3(neon_qzip16, void, env, i32, i32)
DEF_HELPER_3(neon_qzip32, void, env, i32, i32)
-#include "def-helper.h"
+#include "exec/def-helper.h"
#include <stdio.h>
#include "cpu.h"
-#include "exec-all.h"
+#include "exec/exec-all.h"
#include "helper.h"
/* iwMMXt macros extracted from GNU gdb. */
#include <stdio.h>
#include "cpu.h"
-#include "exec-all.h"
+#include "exec/exec-all.h"
#include "helper.h"
#define SIGNBIT (uint32_t)0x80000000
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
static const char *regnames[] =
{ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
#define CPUArchState struct CPUCRISState
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#define TARGET_HAS_ICE 1
#define SFR_RW_MM_TLB_LO env->pregs[PR_SRS]][5
#define SFR_RW_MM_TLB_HI env->pregs[PR_SRS]][6
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPUCRISState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
return env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUCRISState *env, TranslationBlock *tb)
{
-#include "def-helper.h"
+#include "exec/def-helper.h"
DEF_HELPER_2(raise_exception, void, env, i32)
DEF_HELPER_2(tlb_flush_pid, void, env, i32)
DEF_HELPER_1(evaluate_flags, void, env)
DEF_HELPER_1(top_evaluate_flags, void, env)
-#include "def-helper.h"
+#include "exec/def-helper.h"
#endif
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
static TCGv env_btarget;
static TCGv env_pc;
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
/* This is the state at translation time. */
typedef struct DisasContext {
*/
#include "cpu.h"
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#include "dump.h"
#include "elf.h"
*/
#include "cpu.h"
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#include "memory_mapping.h"
/* PAE Paging or IA-32e Paging */
#define CPUArchState struct CPUX86State
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#include "softfloat.h"
}
#endif
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#include "svm.h"
#if !defined(CONFIG_USER_ONLY)
CPU_INTERRUPT_MCE));
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUX86State *env, TranslationBlock *tb)
{
#include "helper.h"
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#define FPU_RC_MASK 0xc00
-#include "def-helper.h"
+#include "exec/def-helper.h"
DEF_HELPER_FLAGS_2(cc_compute_all, TCG_CALL_NO_SE, i32, env, int)
DEF_HELPER_FLAGS_2(cc_compute_c, TCG_CALL_NO_SE, i32, env, int)
DEF_HELPER_3(rcrq, tl, env, tl, tl)
#endif
-#include "def-helper.h"
+#include "exec/def-helper.h"
#include "qemu.h"
#include "qemu-common.h"
-#include "ioport.h"
+#include "exec/ioport.h"
void cpu_outb(pio_addr_t addr, uint8_t val)
{
#include "kvm.h"
#include "kvm_i386.h"
#include "cpu.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "host-utils.h"
#include "qemu-config.h"
#include "hw/pc.h"
#include "hw/apic.h"
-#include "ioport.h"
+#include "exec/ioport.h"
#include "hyperv.h"
#include "hw/pci/pci.h"
#include "helper.h"
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
/* broken thread support */
#define MMUSUFFIX _mmu
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#endif
*/
#include "cpu.h"
-#include "ioport.h"
+#include "exec/ioport.h"
#include "helper.h"
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
/* check if Port I/O is allowed in TSS */
//#define DEBUG_PCALL
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#ifdef DEBUG_PCALL
*/
#include "cpu.h"
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#include "helper.h"
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
/* Secure Virtual Machine helpers */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
#ifdef TARGET_X86_64
static int x86_64_hregs;
#include "config.h"
#include "qemu-common.h"
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
struct CPULM32State;
typedef struct CPULM32State CPULM32State;
return env->ie & IE_IE;
}
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
static inline target_ulong cpu_get_pc(CPULM32State *env)
{
return env->interrupt_request & CPU_INTERRUPT_HARD;
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPULM32State *env, TranslationBlock *tb)
{
-#include "def-helper.h"
+#include "exec/def-helper.h"
DEF_HELPER_2(raise_exception, void, env, i32)
DEF_HELPER_1(hlt, void, env)
DEF_HELPER_1(rcsr_jtx, i32, env)
DEF_HELPER_1(rcsr_jrx, i32, env)
-#include "def-helper.h"
+#include "exec/def-helper.h"
#if !defined(CONFIG_USER_ONLY)
#define MMUSUFFIX _mmu
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
void helper_raise_exception(CPULM32State *env, uint32_t index)
{
static TCGv cpu_bp[4];
static TCGv cpu_wp[4];
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
enum {
OP_FMT_RI,
#include "config.h"
#include "qemu-common.h"
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#include "softfloat.h"
}
#endif
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
return env->interrupt_request & CPU_INTERRUPT_HARD;
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUM68KState *env, TranslationBlock *tb)
{
*/
#include "cpu.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "helpers.h"
-#include "def-helper.h"
+#include "exec/def-helper.h"
DEF_HELPER_1(bitrev, i32, i32)
DEF_HELPER_1(ff1, i32, i32)
DEF_HELPER_2(flush_flags, void, env, i32)
DEF_HELPER_2(raise_exception, void, env, i32)
-#include "def-helper.h"
+#include "exec/def-helper.h"
#define SEMIHOSTING_HEAP_SIZE (128 * 1024 * 1024)
#else
#include "qemu-common.h"
-#include "gdbstub.h"
-#include "softmmu-semi.h"
+#include "exec/gdbstub.h"
+#include "exec/softmmu-semi.h"
#endif
#include "sysemu.h"
extern int semihosting_enabled;
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
/* Used to distinguish stores from bad addressing modes. */
static TCGv store_dummy;
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
void m68k_tcg_init(void)
{
#define CPUArchState struct CPUMBState
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#include "softfloat.h"
struct CPUMBState;
typedef struct CPUMBState CPUMBState;
return env->sregs[SR_MSR] & MSR_IE;
}
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
static inline target_ulong cpu_get_pc(CPUMBState *env)
{
return env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUMBState *env, TranslationBlock *tb)
{
-#include "def-helper.h"
+#include "exec/def-helper.h"
DEF_HELPER_2(raise_exception, void, env, i32)
DEF_HELPER_1(debug, void, env)
DEF_HELPER_2(get, i32, i32, i32)
DEF_HELPER_3(put, void, i32, i32, i32)
-#include "def-helper.h"
+#include "exec/def-helper.h"
#define D(x)
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
static TCGv env_btarget;
static TCGv env_iflags;
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
/* This is the state at translation time. */
typedef struct DisasContext {
#include "config.h"
#include "qemu-common.h"
#include "mips-defs.h"
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#include "softfloat.h"
struct CPUMIPSState;
return r;
}
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
/* Memory access type :
* may be needed for precise access rights control and precise exceptions.
return has_work;
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUMIPSState *env, TranslationBlock *tb)
{
-#include "def-helper.h"
+#include "exec/def-helper.h"
DEF_HELPER_3(raise_exception_err, noreturn, env, i32, int)
DEF_HELPER_2(raise_exception, noreturn, env, i32)
-#include "def-helper.h"
+#include "exec/def-helper.h"
#include "helper.h"
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#ifndef CONFIG_USER_ONLY
#define ALIGNED_ONLY
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
static void do_unaligned_access(CPUMIPSState *env, target_ulong addr,
int is_write, int is_user, uintptr_t retaddr)
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
static target_ulong gen_opc_btarget[OPC_BUF_SIZE];
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
#define gen_helper_0e0i(name, arg) do { \
TCGv_i32 helper_tmp = tcg_const_i32(arg); \
#include "config.h"
#include "qemu-common.h"
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#include "softfloat.h"
#include "qemu/cpu.h"
#include "qapi/error.h"
}
#endif
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
target_ulong *pc,
CPU_INTERRUPT_TIMER);
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline target_ulong cpu_get_pc(CPUOpenRISCState *env)
{
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "def-helper.h"
+#include "exec/def-helper.h"
/* exception */
DEF_HELPER_FLAGS_2(exception, 0, void, env, i32)
DEF_HELPER_FLAGS_4(mtspr, 0, void, env, tl, tl, tl)
DEF_HELPER_FLAGS_4(mfspr, 0, tl, env, tl, tl, tl)
-#include "def-helper.h"
+#include "exec/def-helper.h"
#include "cpu.h"
#include "qemu-common.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "host-utils.h"
#ifndef CONFIG_USER_ONLY
#include "hw/loader.h"
#include "cpu.h"
#include "qemu-common.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "host-utils.h"
#ifndef CONFIG_USER_ONLY
#include "hw/loader.h"
#include "cpu.h"
#ifndef CONFIG_USER_ONLY
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
void tlb_fill(CPUOpenRISCState *env, target_ulong addr, int is_write,
int mmu_idx, uintptr_t retaddr)
*/
#include "cpu.h"
-#include "exec-all.h"
+#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu-common.h"
static TCGv machi, maclo;
static TCGv fpmaddhi, fpmaddlo;
static TCGv_i32 env_flags;
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
void openrisc_translate_init(void)
{
#define CPUArchState struct CPUPPCState
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#include "softfloat.h"
}
#endif
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
/*****************************************************************************/
/* CRF definitions */
return msr_ee && (env->interrupt_request & CPU_INTERRUPT_HARD);
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUPPCState *env, TranslationBlock *tb)
{
-#include "def-helper.h"
+#include "exec/def-helper.h"
DEF_HELPER_3(raise_exception_err, void, env, i32, i32)
DEF_HELPER_2(raise_exception, void, env, i32)
DEF_HELPER_3(store_601_batu, void, env, i32, tl)
#endif
-#include "def-helper.h"
+#include "exec/def-helper.h"
#ifndef __KVM_PPC_H__
#define __KVM_PPC_H__
-#include "memory.h"
+#include "exec/memory.h"
void kvmppc_init(void);
#include "helper_regs.h"
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
//#define DEBUG_OP
#define MMUSUFFIX _mmu
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
void ppc_translate_init(void)
{
*/
#include "disas/bfd.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include <kvm.h>
#include "kvm_ppc.h"
#include "arch_init.h"
#define CPUArchState struct CPUS390XState
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#define TARGET_PAGE_BITS 12
#define TARGET_PHYS_ADDR_SPACE_BITS 64
#define TARGET_VIRT_ADDR_SPACE_BITS 64
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#include "softfloat.h"
#define cpu_gen_code cpu_s390x_gen_code
#define cpu_signal_handler cpu_s390x_signal_handler
-#include "exec-all.h"
+#include "exec/exec-all.h"
#ifdef CONFIG_USER_ONLY
#include "helper.h"
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#endif
/* #define DEBUG_HELPER */
*/
#include "cpu.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "qemu-timer.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu.h"
-#include "def-helper.h"
+#include "exec/def-helper.h"
DEF_HELPER_2(exception, void, env, i32)
DEF_HELPER_4(nc, i32, env, i32, i64, i64)
DEF_HELPER_FLAGS_5(calc_cc, TCG_CALL_NO_RWG_SE,
i32, env, i32, i64, i64, i64)
-#include "def-helper.h"
+#include "exec/def-helper.h"
/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
*/
#include "cpu.h"
-#include "memory.h"
+#include "exec/memory.h"
#include "host-utils.h"
#include "helper.h"
#include <string.h>
#endif
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#include "sysemu.h"
#endif
/* global register indexes */
static TCGv_ptr cpu_env;
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"
#define CPUArchState struct CPUSH4State
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#include "softfloat.h"
}
#endif
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
/* Memory access type */
enum {
return env->interrupt_request & CPU_INTERRUPT_HARD;
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUSH4State *env, TranslationBlock *tb)
{
-#include "def-helper.h"
+#include "exec/def-helper.h"
DEF_HELPER_1(ldtlb, void, env)
DEF_HELPER_1(raise_illegal_instruction, noreturn, env)
DEF_HELPER_3(fipr, void, env, i32, i32)
DEF_HELPER_2(ftrv, void, env, i32)
-#include "def-helper.h"
+#include "exec/def-helper.h"
#include "helper.h"
#ifndef CONFIG_USER_ONLY
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
void tlb_fill(CPUSH4State *env, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
static void sh4_translate_init(void)
{
#define CPUArchState struct CPUSPARCState
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#include "softfloat.h"
}
#endif
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#ifdef TARGET_SPARC64
/* sun4u.c */
cpu_interrupts_enabled(env1);
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUSPARCState *env, TranslationBlock *tb)
{
-#include "def-helper.h"
+#include "exec/def-helper.h"
#ifndef TARGET_SPARC64
DEF_HELPER_1(rett, void, env)
DEF_HELPER_1(compute_psr, void, env);
DEF_HELPER_1(compute_C_icc, i32, env);
-#include "def-helper.h"
+#include "exec/def-helper.h"
static void QEMU_NORETURN do_unaligned_access(CPUSPARCState *env,
target_ulong addr, int is_write,
int is_user, uintptr_t retaddr);
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#define MMUSUFFIX _mmu
#define ALIGNED_ONLY
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#endif
#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
#include "cpu.h"
#include "trace.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
/* Sparc MMU emulation */
static target_ulong gen_opc_npc[OPC_BUF_SIZE];
static target_ulong gen_opc_jump_pc[2];
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
typedef struct DisasContext {
target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
#include "config.h"
#include "qemu-common.h"
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#include "softfloat.h"
#define NB_MMU_MODES 2
env->regs[16] = newtls;
}
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#include "cpu-qom.h"
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUUniCore32State *env, TranslationBlock *tb)
{
*/
#include "cpu.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "helper.h"
#include "host-utils.h"
#include "ui/console.h"
* published by the Free Software Foundation, or (at your option) any
* later version. See the COPYING file in the top-level directory.
*/
-#include "def-helper.h"
+#include "exec/def-helper.h"
#ifndef CONFIG_USER_ONLY
DEF_HELPER_4(cp0_set, void, env, i32, i32, i32)
DEF_HELPER_2(ucf64_sf2si, f32, f32, env)
DEF_HELPER_2(ucf64_df2si, f32, f64, env)
-#include "def-helper.h"
+#include "exec/def-helper.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
void tlb_fill(CPUUniCore32State *env, target_ulong addr, int is_write,
int mmu_idx, uintptr_t retaddr)
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
static const char *regnames[] = {
"r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
*/
#include "cpu.h"
-#include "exec-all.h"
-#include "gdbstub.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
#include "host-utils.h"
#include "core-dc232b/core-isa.h"
*/
#include "cpu.h"
-#include "exec-all.h"
-#include "gdbstub.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
#include "qemu-common.h"
#include "host-utils.h"
*/
#include "cpu.h"
-#include "exec-all.h"
-#include "gdbstub.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
#include "host-utils.h"
#include "core-fsf/core-isa.h"
#include "config.h"
#include "qemu-common.h"
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
#include "fpu/softfloat.h"
#define TARGET_HAS_ICE 1
}
}
-#include "cpu-all.h"
-#include "exec-all.h"
+#include "exec/cpu-all.h"
+#include "exec/exec-all.h"
static inline int cpu_has_work(CPUState *cpu)
{
*/
#include "cpu.h"
-#include "exec-all.h"
-#include "gdbstub.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
#include "host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
-#include "def-helper.h"
+#include "exec/def-helper.h"
DEF_HELPER_2(exception, noreturn, env, i32)
DEF_HELPER_3(exception_cause, noreturn, env, i32, i32)
DEF_HELPER_4(ole_s, void, env, i32, f32, f32)
DEF_HELPER_4(ule_s, void, env, i32, f32, f32)
-#include "def-helper.h"
+#include "exec/def-helper.h"
#define MMUSUFFIX _mmu
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
static void do_unaligned_access(CPUXtensaState *env,
target_ulong addr, int is_write, int is_user, uintptr_t retaddr)
#include <stdio.h>
#include "cpu.h"
-#include "exec-all.h"
+#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu-log.h"
static TCGv_i32 cpu_SR[256];
static TCGv_i32 cpu_UR[256];
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
typedef struct XtensaReg {
const char *name;
#ifdef CONFIG_SOFTMMU
-#include "../../softmmu_defs.h"
+#include "exec/softmmu_defs.h"
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
}
#if defined(CONFIG_SOFTMMU)
-#include "../../softmmu_defs.h"
+#include "exec/softmmu_defs.h"
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
#if defined(CONFIG_SOFTMMU)
-#include "../../softmmu_defs.h"
+#include "exec/softmmu_defs.h"
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
#if defined(CONFIG_SOFTMMU)
-#include "../../softmmu_defs.h"
+#include "exec/softmmu_defs.h"
/* Load and compare a TLB entry, and return the result in (p6, p7).
R2 is loaded with the address of the addend TLB entry.
#if defined(CONFIG_SOFTMMU)
-#include "../../softmmu_defs.h"
+#include "exec/softmmu_defs.h"
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
label->label_ptr[0] = label_ptr;
}
-#include "../../softmmu_defs.h"
+#include "exec/softmmu_defs.h"
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
#if defined (CONFIG_SOFTMMU)
-#include "../../softmmu_defs.h"
+#include "exec/softmmu_defs.h"
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
#ifdef CONFIG_SOFTMMU
-#include "../../softmmu_defs.h"
+#include "exec/softmmu_defs.h"
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
#if defined(CONFIG_SOFTMMU)
-#include "../../softmmu_defs.h"
+#include "exec/softmmu_defs.h"
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
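
The TCG backend hunks above (the ../../softmmu_defs.h includes) all need the same thing from this header: prototypes for the slow-path load/store helpers that generated code branches to on a TLB miss, with the shape quoted in the recurring comment. A hedged illustration of what such declarations look like, following that comment; the real exec/softmmu_defs.h is the authority and may differ in detail:

/* Slow-path MMU accessors called from TCG-generated code; argument order
 * follows the "helper_ld_mmu(CPUState *env, target_ulong addr, int mmu_idx)"
 * comment above.  Assumes cpu.h has been included for CPUArchState and
 * target_ulong. */
uint8_t  helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);

void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                    int mmu_idx);
void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    int mmu_idx);
void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    int mmu_idx);
void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    int mmu_idx);
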
#endif
#include "qemu-common.h"
-#include "exec-all.h" /* MAX_OPC_PARAM_IARGS */
+#include "exec/exec-all.h" /* MAX_OPC_PARAM_IARGS */
#include "tcg-op.h"
/* Marker for missing code. */
#include <stdarg.h>
#include "qemu.h"
-#include "thunk.h"
+#include "exec/user/thunk.h"
//#define DEBUG
+++ /dev/null
-/*
- * Generic thunking code to convert data between host and target CPU
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef THUNK_H
-#define THUNK_H
-
-#include <inttypes.h>
-#include "cpu.h"
-
-/* types enums definitions */
-
-typedef enum argtype {
- TYPE_NULL,
- TYPE_CHAR,
- TYPE_SHORT,
- TYPE_INT,
- TYPE_LONG,
- TYPE_ULONG,
- TYPE_PTRVOID, /* pointer on unknown data */
- TYPE_LONGLONG,
- TYPE_ULONGLONG,
- TYPE_PTR,
- TYPE_ARRAY,
- TYPE_STRUCT,
- TYPE_OLDDEVT,
-} argtype;
-
-#define MK_PTR(type) TYPE_PTR, type
-#define MK_ARRAY(type, size) TYPE_ARRAY, size, type
-#define MK_STRUCT(id) TYPE_STRUCT, id
-
-#define THUNK_TARGET 0
-#define THUNK_HOST 1
-
-typedef struct {
- /* standard struct handling */
- const argtype *field_types;
- int nb_fields;
- int *field_offsets[2];
- /* special handling */
- void (*convert[2])(void *dst, const void *src);
- int size[2];
- int align[2];
- const char *name;
-} StructEntry;
-
-/* Translation table for bitmasks... */
-typedef struct bitmask_transtbl {
- unsigned int x86_mask;
- unsigned int x86_bits;
- unsigned int alpha_mask;
- unsigned int alpha_bits;
-} bitmask_transtbl;
-
-void thunk_register_struct(int id, const char *name, const argtype *types);
-void thunk_register_struct_direct(int id, const char *name,
- const StructEntry *se1);
-const argtype *thunk_convert(void *dst, const void *src,
- const argtype *type_ptr, int to_host);
-#ifndef NO_THUNK_TYPE_SIZE
-
-extern StructEntry struct_entries[];
-
-int thunk_type_size_array(const argtype *type_ptr, int is_host);
-int thunk_type_align_array(const argtype *type_ptr, int is_host);
-
-static inline int thunk_type_size(const argtype *type_ptr, int is_host)
-{
- int type, size;
- const StructEntry *se;
-
- type = *type_ptr;
- switch(type) {
- case TYPE_CHAR:
- return 1;
- case TYPE_SHORT:
- return 2;
- case TYPE_INT:
- return 4;
- case TYPE_LONGLONG:
- case TYPE_ULONGLONG:
- return 8;
- case TYPE_LONG:
- case TYPE_ULONG:
- case TYPE_PTRVOID:
- case TYPE_PTR:
- if (is_host) {
- return sizeof(void *);
- } else {
- return TARGET_ABI_BITS / 8;
- }
- break;
- case TYPE_OLDDEVT:
- if (is_host) {
-#if defined(HOST_X86_64)
- return 8;
-#elif defined(HOST_ALPHA) || defined(HOST_IA64) || defined(HOST_MIPS) || \
- defined(HOST_PARISC) || defined(HOST_SPARC64)
- return 4;
-#elif defined(HOST_PPC)
- return sizeof(void *);
-#else
- return 2;
-#endif
- } else {
-#if defined(TARGET_X86_64)
- return 8;
-#elif defined(TARGET_ALPHA) || defined(TARGET_IA64) || defined(TARGET_MIPS) || \
- defined(TARGET_PARISC) || defined(TARGET_SPARC64)
- return 4;
-#elif defined(TARGET_PPC)
- return TARGET_ABI_BITS / 8;
-#else
- return 2;
-#endif
- }
- break;
- case TYPE_ARRAY:
- size = type_ptr[1];
- return size * thunk_type_size_array(type_ptr + 2, is_host);
- case TYPE_STRUCT:
- se = struct_entries + type_ptr[1];
- return se->size[is_host];
- default:
- return -1;
- }
-}
-
-static inline int thunk_type_align(const argtype *type_ptr, int is_host)
-{
- int type;
- const StructEntry *se;
-
- type = *type_ptr;
- switch(type) {
- case TYPE_CHAR:
- return 1;
- case TYPE_SHORT:
- return 2;
- case TYPE_INT:
- return 4;
- case TYPE_LONGLONG:
- case TYPE_ULONGLONG:
- return 8;
- case TYPE_LONG:
- case TYPE_ULONG:
- case TYPE_PTRVOID:
- case TYPE_PTR:
- if (is_host) {
- return sizeof(void *);
- } else {
- return TARGET_ABI_BITS / 8;
- }
- break;
- case TYPE_OLDDEVT:
- return thunk_type_size(type_ptr, is_host);
- case TYPE_ARRAY:
- return thunk_type_align_array(type_ptr + 2, is_host);
- case TYPE_STRUCT:
- se = struct_entries + type_ptr[1];
- return se->align[is_host];
- default:
- return -1;
- }
-}
-
-#endif /* NO_THUNK_TYPE_SIZE */
-
-unsigned int target_to_host_bitmask(unsigned int x86_mask,
- const bitmask_transtbl * trans_tbl);
-unsigned int host_to_target_bitmask(unsigned int alpha_mask,
- const bitmask_transtbl * trans_tbl);
-
-#endif
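
The header deleted above is the usermode thunk layer's type-description API (the earlier hunk shows its new home, exec/user/thunk.h): a syscall argument layout is written once as an argtype sequence, and thunk_type_size()/thunk_type_align() answer for either ABI depending on the is_host flag. A small illustration using only what the header itself declares; demo_long and demo_sizes are made-up names, and a user-mode target build providing cpu.h is assumed:

#include "exec/user/thunk.h"    /* new location per this patch */

int demo_sizes(void)
{
    /* A bare "long": per thunk_type_size() above this is sizeof(void *)
     * on the host side but TARGET_ABI_BITS / 8 on the target side --
     * exactly the mismatch the conversion thunks exist to bridge. */
    static const argtype demo_long[] = { TYPE_LONG, TYPE_NULL };

    int host_size   = thunk_type_size(demo_long, THUNK_HOST);
    int target_size = thunk_type_size(demo_long, THUNK_TARGET);

    return host_size - target_size;  /* nonzero for, e.g., a 32-bit guest on a 64-bit host */
}
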
#include "disas/disas.h"
#include "tcg.h"
#include "qemu-timer.h"
-#include "memory.h"
-#include "exec-memory.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#endif
#endif
-#include "cputlb.h"
+#include "exec/cputlb.h"
#include "translate-all.h"
//#define DEBUG_TB_INVALIDATE
#include "monitor.h"
#include "ui/console.h"
#include "sysemu.h"
-#include "gdbstub.h"
+#include "exec/gdbstub.h"
#include "qemu-timer.h"
#include "qemu-char.h"
#include "cache-utils.h"
#include "range.h"
#include "xen-mapcache.h"
#include "trace.h"
-#include "exec-memory.h"
+#include "exec/address-spaces.h"
#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include "qemu-common.h"
#include "hw/xen.h"
-#include "memory.h"
+#include "exec/memory.h"
#include "qmp-commands.h"
void xenstore_store_pv_console_info(int i, CharDriverState *chr)