cpu: Move singlestep_enabled field from CPU_COMMON to CPUState

Prepares for changing cpu_single_step() argument to CPUState.
Acked-by: Michael Walle <michael@walle.cc> (for lm32)
Signed-off-by: Andreas Färber <afaerber@suse.de>
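
Note on the object layout this change relies on: every target's CPU
structure embeds the common CPUState, so code holding either the
container (e.g. X86CPU) or the embedded per-target CPUArchState can
reach the shared field, via CPU() and ENV_GET_CPU() respectively.
Below is a minimal standalone sketch of that pattern; FooCPU,
CPUFooState and env_get_cpu() are illustrative stand-ins, and QEMU's
real CPU() is a QOM dynamic cast rather than a plain macro:

/*
 * Standalone sketch (not QEMU code) of the container/embedded-state
 * layout. Compiles and runs on its own.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct CPUState {
    int singlestep_enabled;   /* the field this patch moves here */
} CPUState;

typedef struct CPUFooState {
    unsigned long pc;         /* stand-in for per-target register state */
} CPUFooState;

typedef struct FooCPU {
    CPUState parent_obj;      /* common state embedded first */
    CPUFooState env;          /* per-target state */
} FooCPU;

/* Simplified analogue of CPU(): upcast to the embedded CPUState. */
#define CPU(obj) (&(obj)->parent_obj)

/* Simplified analogue of ENV_GET_CPU(): recover the container from env. */
static CPUState *env_get_cpu(CPUFooState *env)
{
    FooCPU *cpu = (FooCPU *)((char *)env - offsetof(FooCPU, env));
    return CPU(cpu);
}

int main(void)
{
    FooCPU foo = { .parent_obj.singlestep_enabled = 1 };

    /* Both access paths used throughout the patch see the same field. */
    printf("%d %d\n",
           CPU(&foo)->singlestep_enabled,
           env_get_cpu(&foo.env)->singlestep_enabled);
    return 0;
}

With the field in CPUState, the per-target translators in the hunks
below only need the CPU(cpu) upcast instead of reaching through env.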
for(;;) {
interrupt_request = cpu->interrupt_request;
if (unlikely(interrupt_request)) {
- if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
+ if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */
interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
}
CPUArchState *env = cpu->env_ptr;
qemu_clock_enable(vm_clock,
- (env->singlestep_enabled & SSTEP_NOTIMER) == 0);
+ (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
if (cpu_can_run(cpu)) {
r = tcg_cpu_exec(env);
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
- if (env->singlestep_enabled != enabled) {
- env->singlestep_enabled = enabled;
- if (kvm_enabled())
+ CPUState *cpu = ENV_GET_CPU(env);
+
+ if (cpu->singlestep_enabled != enabled) {
+ cpu->singlestep_enabled = enabled;
+ if (kvm_enabled()) {
kvm_update_guest_debug(env, 0);
- else {
+ } else {
/* must flush all the translated code to avoid inconsistencies */
/* XXX: only flush what is necessary */
tb_flush(env);
/* from this point: preserved by CPU reset */ \
/* ice debug support */ \
QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
- int singlestep_enabled; \
\
QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
CPUWatchpoint *watchpoint_hit; \
* @stopped: Indicates the CPU has been artificially stopped.
* @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
* CPU and return to its top level loop.
+ * @singlestep_enabled: Flags for single-stepping.
* @env_ptr: Pointer to subclass-specific CPUArchState field.
* @current_tb: Currently executing TB.
* @next_cpu: Next CPU sharing TB cache.
volatile sig_atomic_t exit_request;
volatile sig_atomic_t tcg_exit_req;
uint32_t interrupt_request;
+ int singlestep_enabled;
void *env_ptr; /* CPUArchState */
struct TranslationBlock *current_tb;
data.dbg.control = reinject_trap;
- if (env->singlestep_enabled) {
+ if (cpu->singlestep_enabled) {
data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
}
kvm_arch_update_guest_debug(cpu, &data.dbg);
TranslationBlock *tb,
bool search_pc)
{
+ CPUState *cs = CPU(cpu);
CPUAlphaState *env = &cpu->env;
DisasContext ctx, *ctxp = &ctx;
target_ulong pc_start;
ctx.pc = pc_start;
ctx.mem_idx = cpu_mmu_index(env);
ctx.implver = env->implver;
- ctx.singlestep_enabled = env->singlestep_enabled;
+ ctx.singlestep_enabled = cs->singlestep_enabled;
/* ??? Every TB begins with unset rounding mode, to be initialized on
the first fp insn of the TB. Alternately we could define a proper
TranslationBlock *tb,
bool search_pc)
{
+ CPUState *cs = CPU(cpu);
CPUARMState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
- dc->singlestep_enabled = env->singlestep_enabled;
+ dc->singlestep_enabled = cs->singlestep_enabled;
dc->condjmp = 0;
dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
* ensures prefetch aborts occur at the right place. */
num_insns ++;
} while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
- !env->singlestep_enabled &&
+ !cs->singlestep_enabled &&
!singlestep &&
dc->pc < next_page_start &&
num_insns < max_insns);
/* At this stage dc->condjmp will only be set when the skipped
instruction was a conditional branch or trap, and the PC has
already been written. */
- if (unlikely(env->singlestep_enabled)) {
+ if (unlikely(cs->singlestep_enabled)) {
/* Make sure the pc is updated, and raise a debug exception. */
if (dc->condjmp) {
gen_set_condexec(dc);
gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
bool search_pc)
{
+ CPUState *cs = CPU(cpu);
CPUCRISState *env = &cpu->env;
uint16_t *gen_opc_end;
uint32_t pc_start;
dc->is_jmp = DISAS_NEXT;
dc->ppc = pc_start;
dc->pc = pc_start;
- dc->singlestep_enabled = env->singlestep_enabled;
+ dc->singlestep_enabled = cs->singlestep_enabled;
dc->flags_uptodate = 1;
dc->flagx_known = 1;
dc->flags_x = tb->flags & X_FLAG;
/* If we are re-executing a branch due to exceptions on
delay slots, don't break. */
- if (!(tb->pc & 1) && env->singlestep_enabled) {
+ if (!(tb->pc & 1) && cs->singlestep_enabled) {
break;
}
} while (!dc->is_jmp && !dc->cpustate_changed
cris_evaluate_flags(dc);
- if (unlikely(env->singlestep_enabled)) {
+ if (unlikely(cs->singlestep_enabled)) {
if (dc->is_jmp == DISAS_NEXT) {
tcg_gen_movi_tl(env_pc, npc);
}
static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
+ CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
int ret = 0;
unsigned long reinject_trap = 0;
* reinject them via SET_GUEST_DEBUG.
*/
if (reinject_trap ||
- (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
+ (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
ret = kvm_update_guest_debug(env, reinject_trap);
}
return ret;
static int kvm_handle_debug(X86CPU *cpu,
struct kvm_debug_exit_arch *arch_info)
{
+ CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
int ret = 0;
int n;
if (arch_info->exception == 1) {
if (arch_info->dr6 & (1 << 14)) {
- if (env->singlestep_enabled) {
+ if (cs->singlestep_enabled) {
ret = EXCP_DEBUG;
}
} else {
TranslationBlock *tb,
bool search_pc)
{
+ CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
target_ulong pc_ptr;
dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
dc->iopl = (flags >> IOPL_SHIFT) & 3;
dc->tf = (flags >> TF_SHIFT) & 1;
- dc->singlestep_enabled = env->singlestep_enabled;
+ dc->singlestep_enabled = cs->singlestep_enabled;
dc->cc_op = CC_OP_DYNAMIC;
dc->cc_op_dirty = false;
dc->cs_base = cs_base;
dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
dc->flags = flags;
- dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
+ dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
(flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
|| (flags & HF_SOFTMMU_MASK)
void gen_intermediate_code_internal(LM32CPU *cpu,
TranslationBlock *tb, bool search_pc)
{
+ CPUState *cs = CPU(cpu);
CPULM32State *env = &cpu->env;
struct DisasContext ctx, *dc = &ctx;
uint16_t *gen_opc_end;
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
- dc->singlestep_enabled = env->singlestep_enabled;
+ dc->singlestep_enabled = cs->singlestep_enabled;
dc->nr_nops = 0;
if (pc_start & 3) {
} while (!dc->is_jmp
&& tcg_ctx.gen_opc_ptr < gen_opc_end
- && !env->singlestep_enabled
+ && !cs->singlestep_enabled
&& !singlestep
&& (dc->pc < next_page_start)
&& num_insns < max_insns);
gen_io_end();
}
- if (unlikely(env->singlestep_enabled)) {
+ if (unlikely(cs->singlestep_enabled)) {
if (dc->is_jmp == DISAS_NEXT) {
tcg_gen_movi_tl(cpu_pc, dc->pc);
}
gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
bool search_pc)
{
+ CPUState *cs = CPU(cpu);
CPUM68KState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
uint16_t *gen_opc_end;
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
dc->cc_op = CC_OP_DYNAMIC;
- dc->singlestep_enabled = env->singlestep_enabled;
+ dc->singlestep_enabled = cs->singlestep_enabled;
dc->fpcr = env->fpcr;
dc->user = (env->sr & SR_S) == 0;
dc->is_mem = 0;
disas_m68k_insn(env, dc);
num_insns++;
} while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
- !env->singlestep_enabled &&
+ !cs->singlestep_enabled &&
!singlestep &&
(pc_offset) < (TARGET_PAGE_SIZE - 32) &&
num_insns < max_insns);
if (tb->cflags & CF_LAST_IO)
gen_io_end();
- if (unlikely(env->singlestep_enabled)) {
+ if (unlikely(cs->singlestep_enabled)) {
/* Make sure the pc is updated, and raise a debug exception. */
if (!dc->is_jmp) {
gen_flush_cc_op(dc);
gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
bool search_pc)
{
+ CPUState *cs = CPU(cpu);
CPUMBState *env = &cpu->env;
uint16_t *gen_opc_end;
uint32_t pc_start;
dc->jmp = JMP_INDIRECT;
}
dc->pc = pc_start;
- dc->singlestep_enabled = env->singlestep_enabled;
+ dc->singlestep_enabled = cs->singlestep_enabled;
dc->cpustate_changed = 0;
dc->abort_at_next_insn = 0;
dc->nr_nops = 0;
break;
}
}
- if (env->singlestep_enabled)
+ if (cs->singlestep_enabled) {
break;
+ }
} while (!dc->is_jmp && !dc->cpustate_changed
&& tcg_ctx.gen_opc_ptr < gen_opc_end
&& !singlestep
}
t_sync_flags(dc);
- if (unlikely(env->singlestep_enabled)) {
+ if (unlikely(cs->singlestep_enabled)) {
TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
if (dc->is_jmp != DISAS_JUMP) {
gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb,
bool search_pc)
{
+ CPUState *cs = CPU(cpu);
CPUMIPSState *env = &cpu->env;
DisasContext ctx;
target_ulong pc_start;
gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
ctx.pc = pc_start;
ctx.saved_pc = -1;
- ctx.singlestep_enabled = env->singlestep_enabled;
+ ctx.singlestep_enabled = cs->singlestep_enabled;
ctx.insn_flags = env->insn_flags;
ctx.tb = tb;
ctx.bstate = BS_NONE;
This is what GDB expects and is consistent with what the
hardware does (e.g. if a delay slot instruction faults, the
reported PC is the PC of the branch). */
- if (env->singlestep_enabled && (ctx.hflags & MIPS_HFLAG_BMASK) == 0)
+ if (cs->singlestep_enabled && (ctx.hflags & MIPS_HFLAG_BMASK) == 0) {
break;
+ }
if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
break;
if (singlestep)
break;
}
- if (tb->cflags & CF_LAST_IO)
+ if (tb->cflags & CF_LAST_IO) {
gen_io_end();
- if (env->singlestep_enabled && ctx.bstate != BS_BRANCH) {
+ }
+ if (cs->singlestep_enabled && ctx.bstate != BS_BRANCH) {
save_cpu_state(&ctx, ctx.bstate == BS_NONE);
gen_helper_0e0i(raise_exception, EXCP_DEBUG);
} else {
gen_intermediate_code_internal(MoxieCPU *cpu, TranslationBlock *tb,
bool search_pc)
{
+ CPUState *cs = CPU(cpu);
DisasContext ctx;
target_ulong pc_start;
uint16_t *gen_opc_end;
ctx.pc += decode_opc(cpu, &ctx);
num_insns++;
- if (env->singlestep_enabled) {
+ if (cs->singlestep_enabled) {
break;
}
}
} while (ctx.bstate == BS_NONE && tcg_ctx.gen_opc_ptr < gen_opc_end);
- if (env->singlestep_enabled) {
+ if (cs->singlestep_enabled) {
tcg_gen_movi_tl(cpu_pc, ctx.pc);
gen_helper_debug(cpu_env);
} else {
TranslationBlock *tb,
int search_pc)
{
+ CPUState *cs = CPU(cpu);
struct DisasContext ctx, *dc = &ctx;
uint16_t *gen_opc_end;
uint32_t pc_start;
dc->mem_idx = cpu_mmu_index(&cpu->env);
dc->synced_flags = dc->tb_flags = tb->flags;
dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
- dc->singlestep_enabled = cpu->env.singlestep_enabled;
+ dc->singlestep_enabled = cs->singlestep_enabled;
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log("-----------------------------------------\n");
log_cpu_state(CPU(cpu), 0);
}
} while (!dc->is_jmp
&& tcg_ctx.gen_opc_ptr < gen_opc_end
- && !cpu->env.singlestep_enabled
+ && !cs->singlestep_enabled
&& !singlestep
&& (dc->pc < next_page_start)
&& num_insns < max_insns);
dc->is_jmp = DISAS_UPDATE;
tcg_gen_movi_tl(cpu_pc, dc->pc);
}
- if (unlikely(cpu->env.singlestep_enabled)) {
+ if (unlikely(cs->singlestep_enabled)) {
if (dc->is_jmp == DISAS_NEXT) {
tcg_gen_movi_tl(cpu_pc, dc->pc);
}
TranslationBlock *tb,
bool search_pc)
{
+ CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
DisasContext ctx, *ctxp = &ctx;
opc_handler_t **table, *handler;
ctx.singlestep_enabled = 0;
if ((env->flags & POWERPC_FLAG_BE) && msr_be)
ctx.singlestep_enabled |= CPU_BRANCH_STEP;
- if (unlikely(env->singlestep_enabled))
+ if (unlikely(cs->singlestep_enabled)) {
ctx.singlestep_enabled |= GDBSTUB_SINGLE_STEP;
+ }
#if defined (DO_SINGLE_STEP) && 0
/* Single step trace mode */
msr_se = 1;
ctx.exception != POWERPC_EXCP_BRANCH)) {
gen_exception(ctxp, POWERPC_EXCP_TRACE);
} else if (unlikely(((ctx.nip & (TARGET_PAGE_SIZE - 1)) == 0) ||
- (env->singlestep_enabled) ||
+ (cs->singlestep_enabled) ||
singlestep ||
num_insns >= max_insns)) {
/* if we reach a page boundary or are single stepping, stop
if (ctx.exception == POWERPC_EXCP_NONE) {
gen_goto_tb(&ctx, 0, ctx.nip);
} else if (ctx.exception != POWERPC_EXCP_BRANCH) {
- if (unlikely(env->singlestep_enabled)) {
+ if (unlikely(cs->singlestep_enabled)) {
gen_debug_exception(ctxp);
}
/* Generate the return instruction */
TranslationBlock *tb,
bool search_pc)
{
+ CPUState *cs = CPU(cpu);
CPUS390XState *env = &cpu->env;
DisasContext dc;
target_ulong pc_start;
dc.tb = tb;
dc.pc = pc_start;
dc.cc_op = CC_OP_DYNAMIC;
- do_debug = dc.singlestep_enabled = env->singlestep_enabled;
+ do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
|| tcg_ctx.gen_opc_ptr >= gen_opc_end
|| num_insns >= max_insns
|| singlestep
- || env->singlestep_enabled)) {
+ || cs->singlestep_enabled)) {
status = EXIT_PC_STALE;
}
} while (status == NO_EXIT);
gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
bool search_pc)
{
+ CPUState *cs = CPU(cpu);
CPUSH4State *env = &cpu->env;
DisasContext ctx;
target_ulong pc_start;
so assume it is a dynamic branch. */
ctx.delayed_pc = -1; /* use delayed pc from env pointer */
ctx.tb = tb;
- ctx.singlestep_enabled = env->singlestep_enabled;
+ ctx.singlestep_enabled = cs->singlestep_enabled;
ctx.features = env->features;
ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);
ctx.pc += 2;
if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
break;
- if (env->singlestep_enabled)
+ if (cs->singlestep_enabled) {
break;
+ }
if (num_insns >= max_insns)
break;
if (singlestep)
}
if (tb->cflags & CF_LAST_IO)
gen_io_end();
- if (env->singlestep_enabled) {
+ if (cs->singlestep_enabled) {
tcg_gen_movi_i32(cpu_pc, ctx.pc);
gen_helper_debug(cpu_env);
} else {
TranslationBlock *tb,
bool spc)
{
+ CPUState *cs = CPU(cpu);
CPUSPARCState *env = &cpu->env;
target_ulong pc_start, last_pc;
uint16_t *gen_opc_end;
dc->def = env->def;
dc->fpu_enabled = tb_fpu_enabled(tb->flags);
dc->address_mask_32bit = tb_am_enabled(tb->flags);
- dc->singlestep = (env->singlestep_enabled || singlestep);
+ dc->singlestep = (cs->singlestep_enabled || singlestep);
gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
num_insns = 0;
static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
TranslationBlock *tb, bool search_pc)
{
+ CPUState *cs = CPU(cpu);
CPUUniCore32State *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
- dc->singlestep_enabled = env->singlestep_enabled;
+ dc->singlestep_enabled = cs->singlestep_enabled;
dc->condjmp = 0;
cpu_F0s = tcg_temp_new_i32();
cpu_F1s = tcg_temp_new_i32();
* ensures prefetch aborts occur at the right place. */
num_insns++;
} while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
- !env->singlestep_enabled &&
+ !cs->singlestep_enabled &&
!singlestep &&
dc->pc < next_page_start &&
num_insns < max_insns);
/* At this stage dc->condjmp will only be set when the skipped
instruction was a conditional branch or trap, and the PC has
already been written. */
- if (unlikely(env->singlestep_enabled)) {
+ if (unlikely(cs->singlestep_enabled)) {
/* Make sure the pc is updated, and raise a debug exception. */
if (dc->condjmp) {
if (dc->is_jmp == DISAS_SYSCALL) {
void gen_intermediate_code_internal(XtensaCPU *cpu,
TranslationBlock *tb, bool search_pc)
{
+ CPUState *cs = CPU(cpu);
CPUXtensaState *env = &cpu->env;
DisasContext dc;
int insn_count = 0;
}
dc.config = env->config;
- dc.singlestep_enabled = env->singlestep_enabled;
+ dc.singlestep_enabled = cs->singlestep_enabled;
dc.tb = tb;
dc.pc = pc_start;
dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK;
gen_tb_start();
- if (env->singlestep_enabled && env->exception_taken) {
+ if (cs->singlestep_enabled && env->exception_taken) {
env->exception_taken = 0;
tcg_gen_movi_i32(cpu_pc, dc.pc);
gen_exception(&dc, EXCP_DEBUG);
if (dc.icount) {
tcg_gen_mov_i32(cpu_SR[ICOUNT], dc.next_icount);
}
- if (env->singlestep_enabled) {
+ if (cs->singlestep_enabled) {
tcg_gen_movi_i32(cpu_pc, dc.pc);
gen_exception(&dc, EXCP_DEBUG);
break;