* user : user mode access using soft MMU
* kernel : kernel mode access using soft MMU
*/
/* 8 bit host-memory accessors.  Endianness and alignment are irrelevant
   for single bytes, so these are shared by all the paths below.
   NOTE(review): the '-'/'+' lines are the ldub_raw -> ldub_p renaming;
   the *_raw names are re-introduced as macros further down. */
-static inline int ldub_raw(void *ptr)
+static inline int ldub_p(void *ptr)
{
/* zero-extended into the returned int */
return *(uint8_t *)ptr;
}
-static inline int ldsb_raw(void *ptr)
+static inline int ldsb_p(void *ptr)
{
/* sign-extended into the returned int */
return *(int8_t *)ptr;
}
-static inline void stb_raw(void *ptr, int v)
+static inline void stb_p(void *ptr, int v)
{
/* stores the low 8 bits of 'v' */
*(uint8_t *)ptr = v;
}
#if !defined(TARGET_WORDS_BIGENDIAN) && (defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED))
/* conservative code for little endian unaligned accesses */
-static inline int lduw_raw(void *ptr)
+static inline int lduw_p(void *ptr)
{
#ifdef __powerpc__
int val;
#endif
}
-static inline int ldsw_raw(void *ptr)
+static inline int ldsw_p(void *ptr)
{
#ifdef __powerpc__
int val;
#endif
}
-static inline int ldl_raw(void *ptr)
+static inline int ldl_p(void *ptr)
{
#ifdef __powerpc__
int val;
#endif
}
/* 64 bit little-endian load for hosts needing byte swapping or
   unaligned handling: composed from two 32 bit loads, with the low
   word at the lower address. */
-static inline uint64_t ldq_raw(void *ptr)
+static inline uint64_t ldq_p(void *ptr)
{
uint8_t *p = ptr;
uint32_t v1, v2;
- v1 = ldl_raw(p);
- v2 = ldl_raw(p + 4);
+ v1 = ldl_p(p);
+ v2 = ldl_p(p + 4);
return v1 | ((uint64_t)v2 << 32);
}
-static inline void stw_raw(void *ptr, int v)
+static inline void stw_p(void *ptr, int v)
{
#ifdef __powerpc__
__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#endif
}
-static inline void stl_raw(void *ptr, int v)
+static inline void stl_p(void *ptr, int v)
{
#ifdef __powerpc__
__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#endif
}
/* 64 bit little-endian store: two 32 bit stores, low word first. */
-static inline void stq_raw(void *ptr, uint64_t v)
+static inline void stq_p(void *ptr, uint64_t v)
{
uint8_t *p = ptr;
- stl_raw(p, (uint32_t)v);
- stl_raw(p + 4, v >> 32);
+ stl_p(p, (uint32_t)v);
+ stl_p(p + 4, v >> 32);
}
/* float access */
/* Type-pun through a union rather than a pointer cast, so the
   byte-swapping integer load/store helpers can be reused without
   strict-aliasing violations. */
-static inline float ldfl_raw(void *ptr)
+static inline float ldfl_p(void *ptr)
{
union {
float f;
uint32_t i;
} u;
- u.i = ldl_raw(ptr);
+ u.i = ldl_p(ptr);
return u.f;
}
-static inline void stfl_raw(void *ptr, float v)
+static inline void stfl_p(void *ptr, float v)
{
union {
float f;
uint32_t i;
} u;
u.f = v;
- stl_raw(ptr, u.i);
+ stl_p(ptr, u.i);
}
/* 64 bit doubles, little-endian target layout: the lower 32 bits live
   at the lower address.  CPU_DoubleU is declared elsewhere. */
-static inline double ldfq_raw(void *ptr)
+static inline double ldfq_p(void *ptr)
{
CPU_DoubleU u;
- u.l.lower = ldl_raw(ptr);
- u.l.upper = ldl_raw(ptr + 4);
+ u.l.lower = ldl_p(ptr);
+ u.l.upper = ldl_p(ptr + 4);
return u.d;
}
-static inline void stfq_raw(void *ptr, double v)
+static inline void stfq_p(void *ptr, double v)
{
CPU_DoubleU u;
u.d = v;
- stl_raw(ptr, u.l.lower);
- stl_raw(ptr + 4, u.l.upper);
+ stl_p(ptr, u.l.lower);
+ stl_p(ptr + 4, u.l.upper);
}
#elif defined(TARGET_WORDS_BIGENDIAN) && (!defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED))
-static inline int lduw_raw(void *ptr)
+static inline int lduw_p(void *ptr)
{
#if defined(__i386__)
int val;
#endif
}
-static inline int ldsw_raw(void *ptr)
+static inline int ldsw_p(void *ptr)
{
#if defined(__i386__)
int val;
#endif
}
-static inline int ldl_raw(void *ptr)
+static inline int ldl_p(void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
int val;
#endif
}
/* 64 bit big-endian load: the high word is at the lower address. */
-static inline uint64_t ldq_raw(void *ptr)
+static inline uint64_t ldq_p(void *ptr)
{
uint32_t a,b;
- a = ldl_raw(ptr);
- b = ldl_raw(ptr+4);
+ a = ldl_p(ptr);
+ b = ldl_p(ptr+4);
return (((uint64_t)a<<32)|b);
}
-static inline void stw_raw(void *ptr, int v)
+static inline void stw_p(void *ptr, int v)
{
#if defined(__i386__)
asm volatile ("xchgb %b0, %h0\n"
#endif
}
-static inline void stl_raw(void *ptr, int v)
+static inline void stl_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
asm volatile ("bswap %0\n"
#endif
}
/* 64 bit big-endian store: high word first. */
-static inline void stq_raw(void *ptr, uint64_t v)
+static inline void stq_p(void *ptr, uint64_t v)
{
- stl_raw(ptr, v >> 32);
- stl_raw(ptr + 4, v);
+ stl_p(ptr, v >> 32);
+ stl_p(ptr + 4, v);
}
/* float access */
/* Big-endian target variants: same union punning as the little-endian
   path, but the 'upper' 32 bits of a double are at the lower address. */
-static inline float ldfl_raw(void *ptr)
+static inline float ldfl_p(void *ptr)
{
union {
float f;
uint32_t i;
} u;
- u.i = ldl_raw(ptr);
+ u.i = ldl_p(ptr);
return u.f;
}
-static inline void stfl_raw(void *ptr, float v)
+static inline void stfl_p(void *ptr, float v)
{
union {
float f;
uint32_t i;
} u;
u.f = v;
- stl_raw(ptr, u.i);
+ stl_p(ptr, u.i);
}
-static inline double ldfq_raw(void *ptr)
+static inline double ldfq_p(void *ptr)
{
CPU_DoubleU u;
- u.l.upper = ldl_raw(ptr);
- u.l.lower = ldl_raw(ptr + 4);
+ u.l.upper = ldl_p(ptr);
+ u.l.lower = ldl_p(ptr + 4);
return u.d;
}
-static inline void stfq_raw(void *ptr, double v)
+static inline void stfq_p(void *ptr, double v)
{
CPU_DoubleU u;
u.d = v;
- stl_raw(ptr, u.l.upper);
- stl_raw(ptr + 4, u.l.lower);
+ stl_p(ptr, u.l.upper);
+ stl_p(ptr + 4, u.l.lower);
}
#else
/* Host and target endianness/alignment agree: plain pointer
   dereferences are sufficient at every width. */
-static inline int lduw_raw(void *ptr)
+static inline int lduw_p(void *ptr)
{
return *(uint16_t *)ptr;
}
-static inline int ldsw_raw(void *ptr)
+static inline int ldsw_p(void *ptr)
{
return *(int16_t *)ptr;
}
-static inline int ldl_raw(void *ptr)
+static inline int ldl_p(void *ptr)
{
return *(uint32_t *)ptr;
}
-static inline uint64_t ldq_raw(void *ptr)
+static inline uint64_t ldq_p(void *ptr)
{
return *(uint64_t *)ptr;
}
-static inline void stw_raw(void *ptr, int v)
+static inline void stw_p(void *ptr, int v)
{
*(uint16_t *)ptr = v;
}
-static inline void stl_raw(void *ptr, int v)
+static inline void stl_p(void *ptr, int v)
{
*(uint32_t *)ptr = v;
}
-static inline void stq_raw(void *ptr, uint64_t v)
+static inline void stq_p(void *ptr, uint64_t v)
{
*(uint64_t *)ptr = v;
}
/* float access */
/* direct dereference is safe here: no byte swapping is required */
-static inline float ldfl_raw(void *ptr)
+static inline float ldfl_p(void *ptr)
{
return *(float *)ptr;
}
-static inline double ldfq_raw(void *ptr)
+static inline double ldfq_p(void *ptr)
{
return *(double *)ptr;
}
-static inline void stfl_raw(void *ptr, float v)
+static inline void stfl_p(void *ptr, float v)
{
*(float *)ptr = v;
}
-static inline void stfq_raw(void *ptr, double v)
+static inline void stfq_p(void *ptr, double v)
{
*(double *)ptr = v;
}
/* MMU memory access macros */
+/* NOTE: we use double casts if pointers and target_ulong have
+ different sizes */
/* NOTE(review): the intermediate (long) cast assumes
   sizeof(long) == sizeof(void *) on the host (true for ILP32/LP64,
   not for LLP64 hosts such as Win64) -- confirm the supported host
   list before relying on this. */
+#define ldub_raw(p) ldub_p((uint8_t *)(long)(p))
+#define ldsb_raw(p) ldsb_p((uint8_t *)(long)(p))
+#define lduw_raw(p) lduw_p((uint8_t *)(long)(p))
+#define ldsw_raw(p) ldsw_p((uint8_t *)(long)(p))
+#define ldl_raw(p) ldl_p((uint8_t *)(long)(p))
+#define ldq_raw(p) ldq_p((uint8_t *)(long)(p))
+#define ldfl_raw(p) ldfl_p((uint8_t *)(long)(p))
+#define ldfq_raw(p) ldfq_p((uint8_t *)(long)(p))
+#define stb_raw(p, v) stb_p((uint8_t *)(long)(p), v)
+#define stw_raw(p, v) stw_p((uint8_t *)(long)(p), v)
+#define stl_raw(p, v) stl_p((uint8_t *)(long)(p), v)
+#define stq_raw(p, v) stq_p((uint8_t *)(long)(p), v)
+#define stfl_raw(p, v) stfl_p((uint8_t *)(long)(p), v)
+#define stfq_raw(p, v) stfq_p((uint8_t *)(long)(p), v)
+
+
#if defined(CONFIG_USER_ONLY)
/* if user mode, no other memory access functions */
#if TARGET_LONG_SIZE == 4
typedef int32_t target_long;
typedef uint32_t target_ulong;
+#define TARGET_FMT_lx "%08x"
#elif TARGET_LONG_SIZE == 8
typedef int64_t target_long;
typedef uint64_t target_ulong;
+#define TARGET_FMT_lx "%016llx"
#else
#error TARGET_LONG_SIZE undefined
#endif
int code_gen_size, ret, interrupt_request;
void (*gen_func)(void);
TranslationBlock *tb, **ptb;
- uint8_t *tc_ptr, *cs_base, *pc;
+ target_ulong cs_base, pc;
+ uint8_t *tc_ptr;
unsigned int flags;
/* first we save global registers */
+ saved_env = env;
+ env = env1;
saved_T0 = T0;
saved_T1 = T1;
saved_T2 = T2;
- saved_env = env;
- env = env1;
#ifdef __sparc__
/* we also save i7 because longjmp may not restore it */
asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
}
}
#ifdef DEBUG_EXEC
- if (loglevel & CPU_LOG_EXEC) {
+ if ((loglevel & CPU_LOG_EXEC)) {
#if defined(TARGET_I386)
/* restore flags in standard format */
env->regs[R_EAX] = EAX;
#elif defined(TARGET_ARM)
flags = 0;
cs_base = 0;
- pc = (uint8_t *)env->regs[15];
+ pc = env->regs[15];
#elif defined(TARGET_SPARC)
flags = 0;
- cs_base = (uint8_t *)env->npc;
- pc = (uint8_t *) env->pc;
+ cs_base = env->npc;
+ pc = env->pc;
#elif defined(TARGET_PPC)
flags = 0;
cs_base = 0;
- pc = (uint8_t *)env->nip;
+ pc = env->nip;
#else
#error unsupported CPU
#endif
- tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
+ tb = tb_find(&ptb, pc, cs_base,
flags);
if (!tb) {
TranslationBlock **ptb1;
regs_to_env(); /* XXX: do it just before cpu_gen_code() */
/* find translated block using physical mappings */
- phys_pc = get_phys_addr_code(env, (unsigned long)pc);
+ phys_pc = get_phys_addr_code(env, pc);
phys_page1 = phys_pc & TARGET_PAGE_MASK;
phys_page2 = -1;
h = tb_phys_hash_func(phys_pc);
tb = *ptb1;
if (!tb)
goto not_found;
- if (tb->pc == (unsigned long)pc &&
+ if (tb->pc == pc &&
tb->page_addr[0] == phys_page1 &&
- tb->cs_base == (unsigned long)cs_base &&
+ tb->cs_base == cs_base &&
tb->flags == flags) {
/* check next page if needed */
if (tb->page_addr[1] != -1) {
- virt_page2 = ((unsigned long)pc & TARGET_PAGE_MASK) +
+ virt_page2 = (pc & TARGET_PAGE_MASK) +
TARGET_PAGE_SIZE;
phys_page2 = get_phys_addr_code(env, virt_page2);
if (tb->page_addr[1] == phys_page2)
}
not_found:
/* if no translated code available, then translate it now */
- tb = tb_alloc((unsigned long)pc);
+ tb = tb_alloc(pc);
if (!tb) {
/* flush must be done */
tb_flush(env);
/* cannot fail at this point */
- tb = tb_alloc((unsigned long)pc);
+ tb = tb_alloc(pc);
/* don't forget to invalidate previous TB info */
- ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
+ ptb = &tb_hash[tb_hash_func(pc)];
T0 = 0;
}
tc_ptr = code_gen_ptr;
tb->tc_ptr = tc_ptr;
- tb->cs_base = (unsigned long)cs_base;
+ tb->cs_base = cs_base;
tb->flags = flags;
cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
/* check next page if needed */
- virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
+ virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
phys_page2 = -1;
- if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
+ if ((pc & TARGET_PAGE_MASK) != virt_page2) {
phys_page2 = get_phys_addr_code(env, virt_page2);
}
tb_link_phys(tb, phys_pc, phys_page2);
/* as some TB could have been invalidated because
of memory exceptions while generating the code, we
must recompute the hash index here */
- ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
+ ptb = &tb_hash[tb_hash_func(pc)];
while (*ptb != NULL)
ptb = &(*ptb)->hash_next;
T0 = 0;
spin_unlock(&tb_lock);
}
#ifdef DEBUG_EXEC
- if (loglevel & CPU_LOG_EXEC) {
- fprintf(logfile, "Trace 0x%08lx [0x%08lx] %s\n",
- (long)tb->tc_ptr, (long)tb->pc,
- lookup_symbol((void *)tb->pc));
+ if ((loglevel & CPU_LOG_EXEC) && (env->hflags & HF_LMA_MASK)) {
+ fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
+ (long)tb->tc_ptr, tb->pc,
+ lookup_symbol(tb->pc));
}
#endif
#ifdef __sparc__
T0 = tmp_T0;
#endif
/* see if we can patch the calling TB. */
- if (T0 != 0
+ {
+ if (T0 != 0
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
&& (tb->cflags & CF_CODE_COPY) ==
(((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
) {
spin_lock(&tb_lock);
- tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
+ tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
/* propagates the FP use info */
((TranslationBlock *)(T0 & ~3))->cflags |=
#endif
spin_unlock(&tb_lock);
}
+ }
tc_ptr = tb->tc_ptr;
env->current_tb = tb;
/* execute the generated code */
if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
selector &= 0xffff;
cpu_x86_load_seg_cache(env, seg_reg, selector,
- (uint8_t *)(selector << 4), 0xffff, 0);
+ (selector << 4), 0xffff, 0);
} else {
load_seg(seg_reg, selector);
}
saved_env = env;
env = s;
- helper_fsave(ptr, data32);
+ helper_fsave((target_ulong)ptr, data32);
env = saved_env;
}
saved_env = env;
env = s;
- helper_frstor(ptr, data32);
+ helper_frstor((target_ulong)ptr, data32);
env = saved_env;
}
#ifndef DIS_ASM_H
#define DIS_ASM_H
+#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
typedef uint8_t bfd_byte;
#define sprintf_vma(s,x) sprintf (s, "%0" PRIx64, x)
+#define BFD64
+
enum bfd_flavour {
bfd_target_unknown_flavour,
bfd_target_aout_flavour,
return 0;
}
-#if !defined(CONFIG_USER_ONLY)
/* Get LENGTH bytes from info's buffer, at target address memaddr.
Transfer them to myaddr. */
/* NOTE(review): converted from K&R to prototype style; now reads
   through ldub_code with a plain target address, so the
   CONFIG_USER_ONLY guard is dropped and the function is available
   unconditionally. */
static int
-target_read_memory (memaddr, myaddr, length, info)
- bfd_vma memaddr;
- bfd_byte *myaddr;
- int length;
- struct disassemble_info *info;
+target_read_memory (bfd_vma memaddr,
+ bfd_byte *myaddr,
+ int length,
+ struct disassemble_info *info)
{
int i;
/* byte-by-byte copy through the code TLB; always reports success */
for(i = 0; i < length; i++) {
- myaddr[i] = ldub_code((void *)((long)memaddr + i));
+ myaddr[i] = ldub_code(memaddr + i);
}
return 0;
}
-#endif
/* Print an error message. We can assume that this is in response to
an error return from buffer_read_memory. */
/* Disassemble this for me please... (debugging). 'flags' is only used
for i386: non zero means 16 bit code */
/* NOTE(review): the comment above is stale for this hunk -- on i386,
   flags == 1 selects 16 bit (i8086) mode and flags == 2 selects
   x86_64; anything else means 32 bit. */
-void disas(FILE *out, void *code, unsigned long size, int is_host, int flags)
+void target_disas(FILE *out, target_ulong code, unsigned long size, int flags)
{
- uint8_t *pc;
+ target_ulong pc;
int count;
struct disassemble_info disasm_info;
int (*print_insn)(bfd_vma pc, disassemble_info *info);
INIT_DISASSEMBLE_INFO(disasm_info, out, fprintf);
/* target addresses are always fetched through target_read_memory,
   never from a host buffer */
-#if !defined(CONFIG_USER_ONLY)
- if (!is_host) {
- disasm_info.read_memory_func = target_read_memory;
- }
+ disasm_info.read_memory_func = target_read_memory;
+ disasm_info.buffer_vma = code;
+ disasm_info.buffer_length = size;
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ disasm_info.endian = BFD_ENDIAN_BIG;
+#else
+ disasm_info.endian = BFD_ENDIAN_LITTLE;
+#endif
+#if defined(TARGET_I386)
+ if (flags == 2)
+ disasm_info.mach = bfd_mach_x86_64;
+ else if (flags == 1)
+ disasm_info.mach = bfd_mach_i386_i8086;
+ else
+ disasm_info.mach = bfd_mach_i386_i386;
+ print_insn = print_insn_i386;
+#elif defined(TARGET_ARM)
+ print_insn = print_insn_arm;
+#elif defined(TARGET_SPARC)
+ print_insn = print_insn_sparc;
+#elif defined(TARGET_PPC)
+ print_insn = print_insn_ppc;
+#else
+ fprintf(out, "Asm output not supported on this arch\n");
+ return;
#endif
/* NOTE(review): "%08x" is used for a target_ulong below in the
   32 bit case -- verify target_ulong is exactly uint32_t there, or
   a format warning/mismatch is possible */
+ for (pc = code; pc < code + size; pc += count) {
+#if TARGET_LONG_BITS == 64
+ fprintf(out, "0x%016llx: ", pc);
+#else
+ fprintf(out, "0x%08x: ", pc);
+#endif
+ count = print_insn(pc, &disasm_info);
+#if 0
+ {
+ int i;
+ uint8_t b;
+ fprintf(out, " {");
+ for(i = 0; i < count; i++) {
+ target_read_memory(pc + i, &b, 1, &disasm_info);
+ fprintf(out, " %02x", b);
+ }
+ fprintf(out, " }");
+ }
+#endif
+ fprintf(out, "\n");
+ if (count < 0)
+ break;
+ }
+}
+
+/* Disassemble this for me please... (debugging). */
+void disas(FILE *out, void *code, unsigned long size)
+{
+ unsigned long pc;
+ int count;
+ struct disassemble_info disasm_info;
+ int (*print_insn)(bfd_vma pc, disassemble_info *info);
+
+ INIT_DISASSEMBLE_INFO(disasm_info, out, fprintf);
+
disasm_info.buffer = code;
disasm_info.buffer_vma = (unsigned long)code;
disasm_info.buffer_length = size;
- if (is_host) {
#ifdef WORDS_BIGENDIAN
- disasm_info.endian = BFD_ENDIAN_BIG;
+ disasm_info.endian = BFD_ENDIAN_BIG;
#else
- disasm_info.endian = BFD_ENDIAN_LITTLE;
+ disasm_info.endian = BFD_ENDIAN_LITTLE;
#endif
#if defined(__i386__)
- disasm_info.mach = bfd_mach_i386_i386;
- print_insn = print_insn_i386;
+ disasm_info.mach = bfd_mach_i386_i386;
+ print_insn = print_insn_i386;
#elif defined(__x86_64__)
- disasm_info.mach = bfd_mach_x86_64;
- print_insn = print_insn_i386;
+ disasm_info.mach = bfd_mach_x86_64;
+ print_insn = print_insn_i386;
#elif defined(__powerpc__)
- print_insn = print_insn_ppc;
+ print_insn = print_insn_ppc;
#elif defined(__alpha__)
- print_insn = print_insn_alpha;
+ print_insn = print_insn_alpha;
#elif defined(__sparc__)
- print_insn = print_insn_sparc;
+ print_insn = print_insn_sparc;
#elif defined(__arm__)
- print_insn = print_insn_arm;
-#else
- fprintf(out, "Asm output not supported on this arch\n");
- return;
-#endif
- } else {
-#ifdef TARGET_WORDS_BIGENDIAN
- disasm_info.endian = BFD_ENDIAN_BIG;
-#else
- disasm_info.endian = BFD_ENDIAN_LITTLE;
-#endif
-#if defined(TARGET_I386)
- if (!flags)
- disasm_info.mach = bfd_mach_i386_i386;
- else
- disasm_info.mach = bfd_mach_i386_i8086;
- print_insn = print_insn_i386;
-#elif defined(TARGET_ARM)
- print_insn = print_insn_arm;
-#elif defined(TARGET_SPARC)
- print_insn = print_insn_sparc;
-#elif defined(TARGET_PPC)
- print_insn = print_insn_ppc;
+ print_insn = print_insn_arm;
#else
- fprintf(out, "Asm output not supported on this arch\n");
- return;
+ fprintf(out, "Asm output not supported on this arch\n");
+ return;
#endif
- }
-
- for (pc = code; pc < (uint8_t *)code + size; pc += count) {
- fprintf(out, "0x%08lx: ", (long)pc);
+ for (pc = (unsigned long)code; pc < (unsigned long)code + size; pc += count) {
+ fprintf(out, "0x%08lx: ", pc);
#ifdef __arm__
/* since data are included in the code, it is better to
display code data too */
fprintf(out, "%08x ", (int)bfd_getl32((const bfd_byte *)pc));
}
#endif
- count = print_insn((unsigned long)pc, &disasm_info);
+ count = print_insn(pc, &disasm_info);
fprintf(out, "\n");
if (count < 0)
break;
}
/* Look up symbol for debugging purpose. Returns "" if unknown. */
-const char *lookup_symbol(void *orig_addr)
+const char *lookup_symbol(target_ulong orig_addr)
{
unsigned int i;
/* Hack, because we know this is x86. */
if (ELF_ST_TYPE(sym[i].st_info) != STT_FUNC)
continue;
- if ((long)orig_addr >= sym[i].st_value
- && (long)orig_addr < sym[i].st_value + sym[i].st_size)
+ if (orig_addr >= sym[i].st_value
+ && orig_addr < sym[i].st_value + sym[i].st_size)
return s->disas_strtab + sym[i].st_name;
}
}
#define _QEMU_DISAS_H
/* Disassemble this for me please... (debugging). */
-void disas(FILE *out, void *code, unsigned long size, int is_host, int flags);
+void disas(FILE *out, void *code, unsigned long size);
+void target_disas(FILE *out, target_ulong code, unsigned long size, int flags);
void monitor_disas(target_ulong pc, int nb_insn, int is_physical, int flags);
/* Look up symbol for debugging purpose. Returns "" if unknown. */
-const char *lookup_symbol(void *orig_addr);
+const char *lookup_symbol(target_ulong orig_addr);
/* Filled in by elfload.c. Simplistic, but will do for now. */
extern struct syminfo {
extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
-extern uint32_t gen_opc_pc[OPC_BUF_SIZE];
-extern uint32_t gen_opc_npc[OPC_BUF_SIZE];
+extern long gen_labels[OPC_BUF_SIZE];
+extern int nb_gen_labels;
+extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
+extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
struct TranslationBlock *jmp_first;
} TranslationBlock;
/* Direct-mapped TB hash on the low PC bits; CODE_GEN_HASH_SIZE must
   be a power of two for this mask to work. */
-static inline unsigned int tb_hash_func(unsigned long pc)
+static inline unsigned int tb_hash_func(target_ulong pc)
{
return pc & (CODE_GEN_HASH_SIZE - 1);
}
return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
-TranslationBlock *tb_alloc(unsigned long pc);
+TranslationBlock *tb_alloc(target_ulong pc);
void tb_flush(CPUState *env);
void tb_link(TranslationBlock *tb);
void tb_link_phys(TranslationBlock *tb,
"b " ASM_NAME(__op_jmp) #n "\n"\
"1:\n");\
T0 = (long)(tbparam) + (n);\
- EIP = eip;\
+ EIP = (int32_t)eip;\
EXIT_TB();\
} while (0)
#elif defined(__i386__) && defined(USE_DIRECT_JUMP)
/* we patch the jump instruction directly */
+#define GOTO_TB(opname, n)\
+do {\
+ asm volatile (".section .data\n"\
+ ASM_NAME(__op_label) #n "." ASM_NAME(opname) ":\n"\
+ ".long 1f\n"\
+ ASM_PREVIOUS_SECTION \
+ "jmp " ASM_NAME(__op_jmp) #n "\n"\
+ "1:\n");\
+} while (0)
+
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
asm volatile (".section .data\n"\
"jmp " ASM_NAME(__op_jmp) #n "\n"\
"1:\n");\
T0 = (long)(tbparam) + (n);\
- EIP = eip;\
+ EIP = (int32_t)eip;\
EXIT_TB();\
} while (0)
goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n:\
T0 = (long)(tbparam) + (n);\
- EIP = eip;\
+ EIP = (int32_t)eip;\
dummy_label ## n:\
EXIT_TB();\
} while (0)
#if !defined(CONFIG_USER_ONLY)
-void tlb_fill(unsigned long addr, int is_write, int is_user,
+void tlb_fill(target_ulong addr, int is_write, int is_user,
void *retaddr);
#define ACCESS_TYPE 3
#define DATA_SIZE 4
#include "softmmu_header.h"
+#define DATA_SIZE 8
+#include "softmmu_header.h"
+
#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env
/* XXX: i386 target specific */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
- int is_user, index;
+ int is_user, index, pd;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
#endif
if (__builtin_expect(env->tlb_read[is_user][index].address !=
(addr & TARGET_PAGE_MASK), 0)) {
- ldub_code((void *)addr);
+ ldub_code(addr);
+ }
+ pd = env->tlb_read[is_user][index].address & ~TARGET_PAGE_MASK;
+ if (pd > IO_MEM_ROM) {
+ cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr);
}
return addr + env->tlb_read[is_user][index].addend - (unsigned long)phys_ram_base;
}
{
VirtPageDesc **lp, *p;
+ /* XXX: should not truncate for 64 bit addresses */
+#if TARGET_LONG_BITS > 32
+ index &= (L1_SIZE - 1);
+#endif
lp = &l1_virt_map[index >> L2_BITS];
p = *lp;
if (!p) {
target_ulong phys_pc, phys_page2, virt_page2;
int code_gen_size;
- phys_pc = get_phys_addr_code(env, (unsigned long)pc);
- tb = tb_alloc((unsigned long)pc);
+ phys_pc = get_phys_addr_code(env, pc);
+ tb = tb_alloc(pc);
if (!tb) {
/* flush must be done */
tb_flush(env);
/* cannot fail at this point */
- tb = tb_alloc((unsigned long)pc);
+ tb = tb_alloc(pc);
}
tc_ptr = code_gen_ptr;
tb->tc_ptr = tc_ptr;
code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
/* check next page if needed */
- virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
+ virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
phys_page2 = -1;
- if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
+ if ((pc & TARGET_PAGE_MASK) != virt_page2) {
phys_page2 = get_phys_addr_code(env, virt_page2);
}
tb_link_phys(tb, phys_pc, phys_page2);
/* Allocate a new translation block. Flush the translation buffer if
too many translation blocks or too much generated code. */
-TranslationBlock *tb_alloc(unsigned long pc)
+TranslationBlock *tb_alloc(target_ulong pc)
{
TranslationBlock *tb;
tb_reset_jump_recursive2(tb, 1);
}
+#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
target_ulong phys_addr;
phys_addr = cpu_get_phys_page_debug(env, pc);
tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
+#endif
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
breakpoint is reached */
#if !defined(CONFIG_USER_ONLY)
tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
- stb_raw((uint8_t *)addr, val);
+ stb_p((uint8_t *)(long)addr, val);
phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
#if !defined(CONFIG_USER_ONLY)
tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
- stw_raw((uint8_t *)addr, val);
+ stw_p((uint8_t *)(long)addr, val);
phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
#if !defined(CONFIG_USER_ONLY)
tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
- stl_raw((uint8_t *)addr, val);
+ stl_p((uint8_t *)(long)addr, val);
phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
/* Write handlers for not-dirty RAM pages: perform the host store,
   then mark the page dirty so the write path reverts to fast RAM
   access.  'addr' here is a host-representable address, hence the
   (long) cast before forming the host pointer. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
- stb_raw((uint8_t *)addr, val);
+ stb_p((uint8_t *)(long)addr, val);
tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
- stw_raw((uint8_t *)addr, val);
+ stw_p((uint8_t *)(long)addr, val);
tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
- stl_raw((uint8_t *)addr, val);
+ stl_p((uint8_t *)(long)addr, val);
tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
if (l >= 4 && ((addr & 3) == 0)) {
/* 32 bit read access */
- val = ldl_raw(buf);
+ val = ldl_p(buf);
io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
l = 4;
} else if (l >= 2 && ((addr & 1) == 0)) {
/* 16 bit read access */
- val = lduw_raw(buf);
+ val = lduw_p(buf);
io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
l = 2;
} else {
/* 8 bit access */
- val = ldub_raw(buf);
+ val = ldub_p(buf);
io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
l = 1;
}
if (l >= 4 && ((addr & 3) == 0)) {
/* 32 bit read access */
val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
- stl_raw(buf, val);
+ stl_p(buf, val);
l = 4;
} else if (l >= 2 && ((addr & 1) == 0)) {
/* 16 bit read access */
val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
- stw_raw(buf, val);
+ stw_p(buf, val);
l = 2;
} else {
/* 8 bit access */
val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
- stb_raw(buf, val);
+ stb_p(buf, val);
l = 1;
}
} else {
}
while (len > 0) {
- term_printf("0x%08x:", addr);
+ term_printf(TARGET_FMT_lx ":", addr);
l = len;
if (l > line_size)
l = line_size;
#endif
-DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
+DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
int is_user);
-void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr, DATA_TYPE v, int is_user);
+void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE v, int is_user);
-#if (DATA_SIZE <= 4) && defined(__i386__) && (ACCESS_TYPE <= 1) && defined(ASM_SOFTMMU)
+#if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \
+ (ACCESS_TYPE <= 1) && defined(ASM_SOFTMMU)
-static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(void *ptr)
+static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
{
int res;
}
#if DATA_SIZE <= 2
-static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(void *ptr)
+static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
{
int res;
}
#endif
-static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(void *ptr, RES_TYPE v)
+static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
{
asm volatile ("movl %0, %%edx\n"
"movl %0, %%eax\n"
/* generic load/store macros */
-static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(void *ptr)
+static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
{
int index;
RES_TYPE res;
- unsigned long addr, physaddr;
+ target_ulong addr;
+ unsigned long physaddr;
int is_user;
- addr = (unsigned long)ptr;
+ addr = ptr;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
is_user = CPU_MEM_INDEX;
if (__builtin_expect(env->tlb_read[is_user][index].address !=
}
#if DATA_SIZE <= 2
-static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(void *ptr)
+static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
{
int res, index;
- unsigned long addr, physaddr;
+ target_ulong addr;
+ unsigned long physaddr;
int is_user;
- addr = (unsigned long)ptr;
+ addr = ptr;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
is_user = CPU_MEM_INDEX;
if (__builtin_expect(env->tlb_read[is_user][index].address !=
/* generic store macro */
-static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(void *ptr, RES_TYPE v)
+static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
{
int index;
- unsigned long addr, physaddr;
+ target_ulong addr;
+ unsigned long physaddr;
int is_user;
- addr = (unsigned long)ptr;
+ addr = ptr;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
is_user = CPU_MEM_INDEX;
if (__builtin_expect(env->tlb_write[is_user][index].address !=
#endif
#if DATA_SIZE == 8
-static inline double glue(ldfq, MEMSUFFIX)(void *ptr)
+static inline double glue(ldfq, MEMSUFFIX)(target_ulong ptr)
{
union {
double d;
return u.d;
}
-static inline void glue(stfq, MEMSUFFIX)(void *ptr, double v)
+static inline void glue(stfq, MEMSUFFIX)(target_ulong ptr, double v)
{
union {
double d;
#endif /* DATA_SIZE == 8 */
#if DATA_SIZE == 4
-static inline float glue(ldfl, MEMSUFFIX)(void *ptr)
+static inline float glue(ldfl, MEMSUFFIX)(target_ulong ptr)
{
union {
float f;
return u.f;
}
-static inline void glue(stfl, MEMSUFFIX)(void *ptr, float v)
+static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float v)
{
union {
float f;
#define READ_ACCESS_TYPE 0
#endif
-static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
+static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
int is_user,
void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr,
- unsigned long tlb_addr)
+ target_ulong tlb_addr)
{
DATA_TYPE res;
int index;
}
/* handle all cases except unaligned access which span two pages */
-DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
+DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
int is_user)
{
DATA_TYPE res;
int index;
- unsigned long physaddr, tlb_addr;
+ target_ulong tlb_addr;
+ unsigned long physaddr;
void *retaddr;
/* test if there is match for unaligned or IO access */
}
/* handle all unaligned cases */
-static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
+static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
int is_user,
void *retaddr)
{
DATA_TYPE res, res1, res2;
int index, shift;
- unsigned long physaddr, tlb_addr, addr1, addr2;
+ unsigned long physaddr;
+ target_ulong tlb_addr, addr1, addr2;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
#ifndef SOFTMMU_CODE_ACCESS
-static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr,
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
DATA_TYPE val,
int is_user,
void *retaddr);
static inline void glue(io_write, SUFFIX)(unsigned long physaddr,
DATA_TYPE val,
- unsigned long tlb_addr,
+ target_ulong tlb_addr,
void *retaddr)
{
int index;
#endif /* SHIFT > 2 */
}
-void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr,
+void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
DATA_TYPE val,
int is_user)
{
- unsigned long physaddr, tlb_addr;
+ unsigned long physaddr;
+ target_ulong tlb_addr;
void *retaddr;
int index;
}
/* handles all unaligned cases */
-static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr,
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
DATA_TYPE val,
int is_user,
void *retaddr)
{
- unsigned long physaddr, tlb_addr;
+ unsigned long physaddr;
+ target_ulong tlb_addr;
int index, i;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
*pv = qemu_get_byte(f);
}
+#if TARGET_LONG_BITS == 64
+#define qemu_put_betl qemu_put_be64
+#define qemu_get_betl qemu_get_be64
+#define qemu_put_betls qemu_put_be64s
+#define qemu_get_betls qemu_get_be64s
+#else
+#define qemu_put_betl qemu_put_be32
+#define qemu_get_betl qemu_get_be32
+#define qemu_put_betls qemu_put_be32s
+#define qemu_get_betls qemu_get_be32s
+#endif
+
int64_t qemu_ftell(QEMUFile *f);
int64_t qemu_fseek(QEMUFile *f, int64_t pos, int whence);
void pic_info(void);
void irq_info(void);
+/* APIC */
+int apic_init(CPUState *env);
+int apic_get_interrupt(CPUState *env);
+
/* i8254.c */
#define PIT_FREQ 1193182