S: Supported
F: qom/cpu.c
F: include/qemu/cpu.h
+F: target-i386/cpu.c
Device Tree
M: Peter Crosthwaite <peter.crosthwaite@petalogix.com>
common-obj-y += bt-hci-csr.o
common-obj-y += msmouse.o ps2.o
common-obj-y += qdev.o qdev-properties.o qdev-monitor.o
+common-obj-y += qdev-properties-system.o
common-obj-$(CONFIG_BRLAPI) += baum.o
# xen backend driver support
const char *kernel_filename = args->kernel_filename;
const char *kernel_cmdline = args->kernel_cmdline;
const char *initrd_filename = args->initrd_filename;
- CPUAlphaState *cpus[4];
+ AlphaCPU *cpus[4];
PCIBus *pci_bus;
ISABus *isa_bus;
qemu_irq rtc_irq;
/* Create up to 4 cpus. */
memset(cpus, 0, sizeof(cpus));
for (i = 0; i < smp_cpus; ++i) {
- cpus[i] = cpu_init(cpu_model ? cpu_model : "ev67");
+ cpus[i] = cpu_alpha_init(cpu_model ? cpu_model : "ev67");
}
- cpus[0]->trap_arg0 = ram_size;
- cpus[0]->trap_arg1 = 0;
- cpus[0]->trap_arg2 = smp_cpus;
+ cpus[0]->env.trap_arg0 = ram_size;
+ cpus[0]->env.trap_arg1 = 0;
+ cpus[0]->env.trap_arg2 = smp_cpus;
/* Init the chipset. */
pci_bus = typhoon_init(ram_size, &isa_bus, &rtc_irq, cpus,
/* Start all cpus at the PALcode RESET entry point. */
for (i = 0; i < smp_cpus; ++i) {
- cpus[i]->pal_mode = 1;
- cpus[i]->pc = palcode_entry;
- cpus[i]->palbr = palcode_entry;
+ cpus[i]->env.pal_mode = 1;
+ cpus[i]->env.pc = palcode_entry;
+ cpus[i]->env.palbr = palcode_entry;
}
/* Load a kernel. */
exit(1);
}
- cpus[0]->trap_arg1 = kernel_entry;
+ cpus[0]->env.trap_arg1 = kernel_entry;
param_offset = kernel_low - 0x6000;
#include "irq.h"
-PCIBus *typhoon_init(ram_addr_t, ISABus **, qemu_irq *, CPUAlphaState *[4],
+PCIBus *typhoon_init(ram_addr_t, ISABus **, qemu_irq *, AlphaCPU *[4],
pci_map_irq_fn);
/* alpha_pci.c. */
uint64_t drir;
uint64_t dim[4];
uint32_t iic[4];
- CPUAlphaState *cpu[4];
+ AlphaCPU *cpu[4];
} TyphoonCchip;
typedef struct TyphoonWindow {
} TyphoonState;
/* Called when one of DRIR or DIM changes. */
-static void cpu_irq_change(CPUAlphaState *env, uint64_t req)
+static void cpu_irq_change(AlphaCPU *cpu, uint64_t req)
{
/* If there are any non-masked interrupts, tell the cpu. */
- if (env) {
+ if (cpu != NULL) {
+ CPUAlphaState *env = &cpu->env;
if (req) {
cpu_interrupt(env, CPU_INTERRUPT_HARD);
} else {
if ((newval ^ oldval) & 0xff0) {
int i;
for (i = 0; i < 4; ++i) {
- CPUAlphaState *env = s->cchip.cpu[i];
- if (env) {
+ AlphaCPU *cpu = s->cchip.cpu[i];
+ if (cpu != NULL) {
+ CPUAlphaState *env = &cpu->env;
/* IPI can be either cleared or set by the write. */
if (newval & (1 << (i + 8))) {
cpu_interrupt(env, CPU_INTERRUPT_SMP);
/* Deliver the interrupt to each CPU, considering each CPU's IIC. */
for (i = 0; i < 4; ++i) {
- CPUAlphaState *env = s->cchip.cpu[i];
- if (env) {
+ AlphaCPU *cpu = s->cchip.cpu[i];
+ if (cpu != NULL) {
uint32_t iic = s->cchip.iic[i];
/* ??? The verbiage in Section 10.2.2.10 isn't 100% clear.
/* Set the ITI bit for this cpu. */
s->cchip.misc |= 1 << (i + 4);
/* And signal the interrupt. */
- cpu_interrupt(env, CPU_INTERRUPT_TIMER);
+ cpu_interrupt(&cpu->env, CPU_INTERRUPT_TIMER);
}
}
}
/* Set the ITI bit for this cpu. */
s->cchip.misc |= 1 << (cpu + 4);
- cpu_interrupt(s->cchip.cpu[cpu], CPU_INTERRUPT_TIMER);
+ cpu_interrupt(&s->cchip.cpu[cpu]->env, CPU_INTERRUPT_TIMER);
}
PCIBus *typhoon_init(ram_addr_t ram_size, ISABus **isa_bus,
qemu_irq *p_rtc_irq,
- CPUAlphaState *cpus[4], pci_map_irq_fn sys_map_irq)
+ AlphaCPU *cpus[4], pci_map_irq_fn sys_map_irq)
{
const uint64_t MB = 1024 * 1024;
const uint64_t GB = 1024 * MB;
/* Remember the CPUs so that we can deliver interrupts to them. */
for (i = 0; i < 4; i++) {
- CPUAlphaState *env = cpus[i];
- s->cchip.cpu[i] = env;
- if (env) {
- env->alarm_timer = qemu_new_timer_ns(rtc_clock,
+ AlphaCPU *cpu = cpus[i];
+ s->cchip.cpu[i] = cpu;
+ if (cpu != NULL) {
+ cpu->alarm_timer = qemu_new_timer_ns(rtc_clock,
typhoon_alarm_timer,
(void *)((uintptr_t)s + i));
}
.enabled = enable
};
- kvm_vcpu_ioctl(&s->cpu->env, KVM_TPR_ACCESS_REPORTING, &ctl);
+ kvm_vcpu_ioctl(CPU(s->cpu), KVM_TPR_ACCESS_REPORTING, &ctl);
}
static void kvm_apic_vapic_base_update(APICCommonState *s)
};
int ret;
- ret = kvm_vcpu_ioctl(&s->cpu->env, KVM_SET_VAPIC_ADDR, &vapid_addr);
+ ret = kvm_vcpu_ioctl(CPU(s->cpu), KVM_SET_VAPIC_ADDR, &vapid_addr);
if (ret < 0) {
fprintf(stderr, "KVM: setting VAPIC address failed (%s)\n",
strerror(-ret));
static void do_inject_external_nmi(void *data)
{
APICCommonState *s = data;
- CPUX86State *env = &s->cpu->env;
+ CPUState *cpu = CPU(s->cpu);
uint32_t lvt;
int ret;
- cpu_synchronize_state(env);
+ cpu_synchronize_state(&s->cpu->env);
lvt = s->lvt[APIC_LVT_LINT1];
if (!(lvt & APIC_LVT_MASKED) && ((lvt >> 8) & 7) == APIC_DM_NMI) {
- ret = kvm_vcpu_ioctl(env, KVM_NMI);
+ ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
if (ret < 0) {
fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
strerror(-ret));
return;
}
for (penv = first_cpu; penv != NULL; penv = penv->next_cpu) {
- ret = kvm_vcpu_ioctl(penv, KVM_KVMCLOCK_CTRL, 0);
+ ret = kvm_vcpu_ioctl(ENV_GET_CPU(penv), KVM_KVMCLOCK_CTRL, 0);
if (ret) {
if (ret != -EINVAL) {
fprintf(stderr, "%s: %s\n", __func__, strerror(-ret));
static void cpu_ppc_tb_stop (CPUPPCState *env);
static void cpu_ppc_tb_start (CPUPPCState *env);
-void ppc_set_irq(CPUPPCState *env, int n_IRQ, int level)
+void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
{
+ CPUPPCState *env = &cpu->env;
unsigned int old_pending = env->pending_interrupts;
if (level) {
if (old_pending != env->pending_interrupts) {
#ifdef CONFIG_KVM
- kvmppc_set_interrupt(env, n_IRQ, level);
+ kvmppc_set_interrupt(cpu, n_IRQ, level);
#endif
}
/* Level sensitive - active high */
LOG_IRQ("%s: set the external IRQ state to %d\n",
__func__, level);
- ppc_set_irq(env, PPC_INTERRUPT_EXT, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
break;
case PPC6xx_INPUT_SMI:
/* Level sensitive - active high */
LOG_IRQ("%s: set the SMI IRQ state to %d\n",
__func__, level);
- ppc_set_irq(env, PPC_INTERRUPT_SMI, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
break;
case PPC6xx_INPUT_MCP:
/* Negative edge sensitive */
if (cur_level == 1 && level == 0) {
LOG_IRQ("%s: raise machine check state\n",
__func__);
- ppc_set_irq(env, PPC_INTERRUPT_MCK, 1);
+ ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
}
break;
case PPC6xx_INPUT_CKSTP_IN:
case PPC6xx_INPUT_SRESET:
LOG_IRQ("%s: set the RESET IRQ state to %d\n",
__func__, level);
- ppc_set_irq(env, PPC_INTERRUPT_RESET, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
break;
default:
/* Unknown pin - do nothing */
/* Level sensitive - active high */
LOG_IRQ("%s: set the external IRQ state to %d\n",
__func__, level);
- ppc_set_irq(env, PPC_INTERRUPT_EXT, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
break;
case PPC970_INPUT_THINT:
/* Level sensitive - active high */
LOG_IRQ("%s: set the SMI IRQ state to %d\n", __func__,
level);
- ppc_set_irq(env, PPC_INTERRUPT_THERM, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
break;
case PPC970_INPUT_MCP:
/* Negative edge sensitive */
if (cur_level == 1 && level == 0) {
LOG_IRQ("%s: raise machine check state\n",
__func__);
- ppc_set_irq(env, PPC_INTERRUPT_MCK, 1);
+ ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
}
break;
case PPC970_INPUT_CKSTP:
case PPC970_INPUT_SRESET:
LOG_IRQ("%s: set the RESET IRQ state to %d\n",
__func__, level);
- ppc_set_irq(env, PPC_INTERRUPT_RESET, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
break;
case PPC970_INPUT_TBEN:
LOG_IRQ("%s: set the TBEN state to %d\n", __func__,
/* Level sensitive - active high */
LOG_IRQ("%s: set the external IRQ state to %d\n",
__func__, level);
- ppc_set_irq(env, PPC_INTERRUPT_EXT, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
break;
default:
/* Unknown pin - do nothing */
/* Level sensitive - active high */
LOG_IRQ("%s: set the critical IRQ state to %d\n",
__func__, level);
- ppc_set_irq(env, PPC_INTERRUPT_CEXT, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
break;
case PPC40x_INPUT_INT:
/* Level sensitive - active high */
LOG_IRQ("%s: set the external IRQ state to %d\n",
__func__, level);
- ppc_set_irq(env, PPC_INTERRUPT_EXT, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
break;
case PPC40x_INPUT_HALT:
/* Level sensitive - active low */
/* Level sensitive - active high */
LOG_IRQ("%s: set the debug pin state to %d\n",
__func__, level);
- ppc_set_irq(env, PPC_INTERRUPT_DEBUG, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
break;
default:
/* Unknown pin - do nothing */
case PPCE500_INPUT_RESET_CORE:
if (level) {
LOG_IRQ("%s: reset the PowerPC core\n", __func__);
- ppc_set_irq(env, PPC_INTERRUPT_MCK, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
}
break;
case PPCE500_INPUT_CINT:
/* Level sensitive - active high */
LOG_IRQ("%s: set the critical IRQ state to %d\n",
__func__, level);
- ppc_set_irq(env, PPC_INTERRUPT_CEXT, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
break;
case PPCE500_INPUT_INT:
/* Level sensitive - active high */
LOG_IRQ("%s: set the core IRQ state to %d\n",
__func__, level);
- ppc_set_irq(env, PPC_INTERRUPT_EXT, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
break;
case PPCE500_INPUT_DEBUG:
/* Level sensitive - active high */
LOG_IRQ("%s: set the debug pin state to %d\n",
__func__, level);
- ppc_set_irq(env, PPC_INTERRUPT_DEBUG, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
break;
default:
/* Unknown pin - do nothing */
/* When decrementer expires,
* all we need to do is generate or queue a CPU exception
*/
-static inline void cpu_ppc_decr_excp(CPUPPCState *env)
+static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
{
/* Raise it */
LOG_TB("raise decrementer exception\n");
- ppc_set_irq(env, PPC_INTERRUPT_DECR, 1);
+ ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
}
-static inline void cpu_ppc_hdecr_excp(CPUPPCState *env)
+static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
{
/* Raise it */
LOG_TB("raise decrementer exception\n");
- ppc_set_irq(env, PPC_INTERRUPT_HDECR, 1);
+ ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
}
-static void __cpu_ppc_store_decr (CPUPPCState *env, uint64_t *nextp,
- struct QEMUTimer *timer,
- void (*raise_excp)(CPUPPCState *),
- uint32_t decr, uint32_t value,
- int is_excp)
+static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
+ struct QEMUTimer *timer,
+ void (*raise_excp)(PowerPCCPU *),
+ uint32_t decr, uint32_t value,
+ int is_excp)
{
+ CPUPPCState *env = &cpu->env;
ppc_tb_t *tb_env = env->tb_env;
uint64_t now, next;
if ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED)
&& (value & 0x80000000)
&& !(decr & 0x80000000)) {
- (*raise_excp)(env);
+ (*raise_excp)(cpu);
}
}
-static inline void _cpu_ppc_store_decr(CPUPPCState *env, uint32_t decr,
+static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, uint32_t decr,
uint32_t value, int is_excp)
{
- ppc_tb_t *tb_env = env->tb_env;
+ ppc_tb_t *tb_env = cpu->env.tb_env;
- __cpu_ppc_store_decr(env, &tb_env->decr_next, tb_env->decr_timer,
+ __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
&cpu_ppc_decr_excp, decr, value, is_excp);
}
void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value)
{
- _cpu_ppc_store_decr(env, cpu_ppc_load_decr(env), value, 0);
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, 0);
}
-static void cpu_ppc_decr_cb (void *opaque)
+static void cpu_ppc_decr_cb(void *opaque)
{
- _cpu_ppc_store_decr(opaque, 0x00000000, 0xFFFFFFFF, 1);
+ PowerPCCPU *cpu = opaque;
+
+ _cpu_ppc_store_decr(cpu, 0x00000000, 0xFFFFFFFF, 1);
}
-static inline void _cpu_ppc_store_hdecr(CPUPPCState *env, uint32_t hdecr,
+static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, uint32_t hdecr,
uint32_t value, int is_excp)
{
- ppc_tb_t *tb_env = env->tb_env;
+ ppc_tb_t *tb_env = cpu->env.tb_env;
if (tb_env->hdecr_timer != NULL) {
- __cpu_ppc_store_decr(env, &tb_env->hdecr_next, tb_env->hdecr_timer,
+ __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
&cpu_ppc_hdecr_excp, hdecr, value, is_excp);
}
}
void cpu_ppc_store_hdecr (CPUPPCState *env, uint32_t value)
{
- _cpu_ppc_store_hdecr(env, cpu_ppc_load_hdecr(env), value, 0);
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value, 0);
}
-static void cpu_ppc_hdecr_cb (void *opaque)
+static void cpu_ppc_hdecr_cb(void *opaque)
{
- _cpu_ppc_store_hdecr(opaque, 0x00000000, 0xFFFFFFFF, 1);
+ PowerPCCPU *cpu = opaque;
+
+ _cpu_ppc_store_hdecr(cpu, 0x00000000, 0xFFFFFFFF, 1);
}
-static void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value)
+static void cpu_ppc_store_purr(PowerPCCPU *cpu, uint64_t value)
{
- ppc_tb_t *tb_env = env->tb_env;
+ ppc_tb_t *tb_env = cpu->env.tb_env;
tb_env->purr_load = value;
tb_env->purr_start = qemu_get_clock_ns(vm_clock);
static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
{
CPUPPCState *env = opaque;
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppc_tb_t *tb_env = env->tb_env;
tb_env->tb_freq = freq;
* if a decrementer exception is pending when it enables msr_ee at startup,
* it's not ready to handle it...
*/
- _cpu_ppc_store_decr(env, 0xFFFFFFFF, 0xFFFFFFFF, 0);
- _cpu_ppc_store_hdecr(env, 0xFFFFFFFF, 0xFFFFFFFF, 0);
- cpu_ppc_store_purr(env, 0x0000000000000000ULL);
+ _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 0);
+ _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 0);
+ cpu_ppc_store_purr(cpu, 0x0000000000000000ULL);
}
/* Set up (once) timebase frequency (in Hz) */
clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppc_tb_t *tb_env;
tb_env = g_malloc0(sizeof(ppc_tb_t));
env->tb_env = tb_env;
tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
/* Create new timer */
- tb_env->decr_timer = qemu_new_timer_ns(vm_clock, &cpu_ppc_decr_cb, env);
+ tb_env->decr_timer = qemu_new_timer_ns(vm_clock, &cpu_ppc_decr_cb, cpu);
if (0) {
/* XXX: find a suitable condition to enable the hypervisor decrementer
*/
- tb_env->hdecr_timer = qemu_new_timer_ns(vm_clock, &cpu_ppc_hdecr_cb, env);
+ tb_env->hdecr_timer = qemu_new_timer_ns(vm_clock, &cpu_ppc_hdecr_cb,
+ cpu);
} else {
tb_env->hdecr_timer = NULL;
}
/* Fixed interval timer */
static void cpu_4xx_fit_cb (void *opaque)
{
+ PowerPCCPU *cpu;
CPUPPCState *env;
ppc_tb_t *tb_env;
ppc40x_timer_t *ppc40x_timer;
uint64_t now, next;
env = opaque;
+ cpu = ppc_env_get_cpu(env);
tb_env = env->tb_env;
ppc40x_timer = tb_env->opaque;
now = qemu_get_clock_ns(vm_clock);
next++;
qemu_mod_timer(ppc40x_timer->fit_timer, next);
env->spr[SPR_40x_TSR] |= 1 << 26;
- if ((env->spr[SPR_40x_TCR] >> 23) & 0x1)
- ppc_set_irq(env, PPC_INTERRUPT_FIT, 1);
+ if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
+ ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
+ }
LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
(int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
static void cpu_4xx_pit_cb (void *opaque)
{
+ PowerPCCPU *cpu;
CPUPPCState *env;
ppc_tb_t *tb_env;
ppc40x_timer_t *ppc40x_timer;
env = opaque;
+ cpu = ppc_env_get_cpu(env);
tb_env = env->tb_env;
ppc40x_timer = tb_env->opaque;
env->spr[SPR_40x_TSR] |= 1 << 27;
- if ((env->spr[SPR_40x_TCR] >> 26) & 0x1)
- ppc_set_irq(env, ppc40x_timer->decr_excp, 1);
+ if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
+ ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
+ }
start_stop_pit(env, tb_env, 1);
LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " "
"%016" PRIx64 "\n", __func__,
/* Watchdog timer */
static void cpu_4xx_wdt_cb (void *opaque)
{
+ PowerPCCPU *cpu;
CPUPPCState *env;
ppc_tb_t *tb_env;
ppc40x_timer_t *ppc40x_timer;
uint64_t now, next;
env = opaque;
+ cpu = ppc_env_get_cpu(env);
tb_env = env->tb_env;
ppc40x_timer = tb_env->opaque;
now = qemu_get_clock_ns(vm_clock);
qemu_mod_timer(ppc40x_timer->wdt_timer, next);
ppc40x_timer->wdt_next = next;
env->spr[SPR_40x_TSR] |= 1 << 30;
- if ((env->spr[SPR_40x_TCR] >> 27) & 0x1)
- ppc_set_irq(env, PPC_INTERRUPT_WDT, 1);
+ if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
+ ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
+ }
break;
case 0x3:
env->spr[SPR_40x_TSR] &= ~0x30000000;
#ifndef HW_PPC_H
#define HW_PPC_H 1
-void ppc_set_irq (CPUPPCState *env, int n_IRQ, int level);
+void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level);
/* PowerPC hardware exceptions management helpers */
typedef void (*clk_setup_cb)(void *opaque, uint32_t freq);
#define PPC_SERIAL_MM_BAUDBASE 399193
/* ppc_booke.c */
-void ppc_booke_timers_init(CPUPPCState *env, uint32_t freq, uint32_t flags);
+void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags);
#endif
env->mpic_cpu_base = MPC8544_CCSRBAR_BASE +
MPC8544_MPIC_REGS_OFFSET + 0x20000;
- ppc_booke_timers_init(env, 400000000, PPC_TIMER_E500);
+ ppc_booke_timers_init(cpu, 400000000, PPC_TIMER_E500);
/* Register reset handler */
if (!i) {
{
clk_setup_t clk_setup[PPC405CR_CLK_NB];
qemu_irq dma_irqs[4];
+ PowerPCCPU *cpu;
CPUPPCState *env;
qemu_irq *pic, *irqs;
memset(clk_setup, 0, sizeof(clk_setup));
- env = ppc4xx_init("405cr", &clk_setup[PPC405CR_CPU_CLK],
+ cpu = ppc4xx_init("405cr", &clk_setup[PPC405CR_CPU_CLK],
&clk_setup[PPC405CR_TMR_CLK], sysclk);
+ env = &cpu->env;
/* Memory mapped devices registers */
/* PLB arbiter */
ppc4xx_plb_init(env);
{
clk_setup_t clk_setup[PPC405EP_CLK_NB], tlb_clk_setup;
qemu_irq dma_irqs[4], gpt_irqs[5], mal_irqs[4];
+ PowerPCCPU *cpu;
CPUPPCState *env;
qemu_irq *pic, *irqs;
memset(clk_setup, 0, sizeof(clk_setup));
/* init CPUs */
- env = ppc4xx_init("405ep", &clk_setup[PPC405EP_CPU_CLK],
+ cpu = ppc4xx_init("405ep", &clk_setup[PPC405EP_CPU_CLK],
&tlb_clk_setup, sysclk);
+ env = &cpu->env;
clk_setup[PPC405EP_CPU_CLK].cb = tlb_clk_setup.cb;
clk_setup[PPC405EP_CPU_CLK].opaque = tlb_clk_setup.opaque;
/* Internal devices init */
/* OBP arbiter */
ppc4xx_opba_init(0xef600600);
/* Initialize timers */
- ppc_booke_timers_init(env, sysclk, 0);
+ ppc_booke_timers_init(cpu, sysclk, 0);
/* Universal interrupt controller */
irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB);
irqs[PPCUIC_OUTPUT_INT] =
env = &cpu->env;
qemu_register_reset(main_cpu_reset, cpu);
- ppc_booke_timers_init(env, 400000000, 0);
+ ppc_booke_timers_init(cpu, 400000000, 0);
ppc_dcr_init(env, NULL, NULL);
/* interrupt controller */
#include "pci/pci.h"
/* PowerPC 4xx core initialization */
-CPUPPCState *ppc4xx_init (const char *cpu_model,
- clk_setup_t *cpu_clk, clk_setup_t *tb_clk,
- uint32_t sysclk);
+PowerPCCPU *ppc4xx_init(const char *cpu_model,
+ clk_setup_t *cpu_clk, clk_setup_t *tb_clk,
+ uint32_t sysclk);
/* PowerPC 4xx universal interrupt controller */
enum {
/*****************************************************************************/
/* Generic PowerPC 4xx processor instantiation */
-CPUPPCState *ppc4xx_init (const char *cpu_model,
- clk_setup_t *cpu_clk, clk_setup_t *tb_clk,
- uint32_t sysclk)
+PowerPCCPU *ppc4xx_init(const char *cpu_model,
+ clk_setup_t *cpu_clk, clk_setup_t *tb_clk,
+ uint32_t sysclk)
{
PowerPCCPU *cpu;
CPUPPCState *env;
/* Register qemu callbacks */
qemu_register_reset(ppc4xx_reset, cpu);
- return env;
+ return cpu;
}
/*****************************************************************************/
uint32_t flags;
};
-static void booke_update_irq(CPUPPCState *env)
+static void booke_update_irq(PowerPCCPU *cpu)
{
- ppc_set_irq(env, PPC_INTERRUPT_DECR,
+ CPUPPCState *env = &cpu->env;
+
+ ppc_set_irq(cpu, PPC_INTERRUPT_DECR,
(env->spr[SPR_BOOKE_TSR] & TSR_DIS
&& env->spr[SPR_BOOKE_TCR] & TCR_DIE));
- ppc_set_irq(env, PPC_INTERRUPT_WDT,
+ ppc_set_irq(cpu, PPC_INTERRUPT_WDT,
(env->spr[SPR_BOOKE_TSR] & TSR_WIS
&& env->spr[SPR_BOOKE_TCR] & TCR_WIE));
- ppc_set_irq(env, PPC_INTERRUPT_FIT,
+ ppc_set_irq(cpu, PPC_INTERRUPT_FIT,
(env->spr[SPR_BOOKE_TSR] & TSR_FIS
&& env->spr[SPR_BOOKE_TCR] & TCR_FIE));
}
static void booke_decr_cb(void *opaque)
{
- CPUPPCState *env = opaque;
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
env->spr[SPR_BOOKE_TSR] |= TSR_DIS;
- booke_update_irq(env);
+ booke_update_irq(cpu);
if (env->spr[SPR_BOOKE_TCR] & TCR_ARE) {
/* Auto Reload */
static void booke_fit_cb(void *opaque)
{
- CPUPPCState *env;
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
ppc_tb_t *tb_env;
booke_timer_t *booke_timer;
- env = opaque;
tb_env = env->tb_env;
booke_timer = tb_env->opaque;
env->spr[SPR_BOOKE_TSR] |= TSR_FIS;
- booke_update_irq(env);
+ booke_update_irq(cpu);
booke_update_fixed_timer(env,
booke_get_fit_target(env, tb_env),
static void booke_wdt_cb(void *opaque)
{
- CPUPPCState *env;
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
ppc_tb_t *tb_env;
booke_timer_t *booke_timer;
- env = opaque;
tb_env = env->tb_env;
booke_timer = tb_env->opaque;
/* TODO: There's lots of complicated stuff to do here */
- booke_update_irq(env);
+ booke_update_irq(cpu);
booke_update_fixed_timer(env,
booke_get_wdt_target(env, tb_env),
void store_booke_tsr(CPUPPCState *env, target_ulong val)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
env->spr[SPR_BOOKE_TSR] &= ~val;
- booke_update_irq(env);
+ booke_update_irq(cpu);
}
void store_booke_tcr(CPUPPCState *env, target_ulong val)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppc_tb_t *tb_env = env->tb_env;
booke_timer_t *booke_timer = tb_env->opaque;
tb_env = env->tb_env;
env->spr[SPR_BOOKE_TCR] = val;
- booke_update_irq(env);
+ booke_update_irq(cpu);
booke_update_fixed_timer(env,
booke_get_fit_target(env, tb_env),
}
-void ppc_booke_timers_init(CPUPPCState *env, uint32_t freq, uint32_t flags)
+void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags)
{
ppc_tb_t *tb_env;
booke_timer_t *booke_timer;
tb_env = g_malloc0(sizeof(ppc_tb_t));
booke_timer = g_malloc0(sizeof(booke_timer_t));
- env->tb_env = tb_env;
+ cpu->env.tb_env = tb_env;
tb_env->flags = flags | PPC_TIMER_BOOKE | PPC_DECR_ZERO_TRIGGERED;
tb_env->tb_freq = freq;
tb_env->decr_freq = freq;
tb_env->opaque = booke_timer;
- tb_env->decr_timer = qemu_new_timer_ns(vm_clock, &booke_decr_cb, env);
+ tb_env->decr_timer = qemu_new_timer_ns(vm_clock, &booke_decr_cb, cpu);
booke_timer->fit_timer =
- qemu_new_timer_ns(vm_clock, &booke_fit_cb, env);
+ qemu_new_timer_ns(vm_clock, &booke_fit_cb, cpu);
booke_timer->wdt_timer =
- qemu_new_timer_ns(vm_clock, &booke_wdt_cb, env);
+ qemu_new_timer_ns(vm_clock, &booke_wdt_cb, cpu);
}
--- /dev/null
+/*
+ * qdev property parsing and global properties
+ * (parts specific for qemu-system-*)
+ *
+ * This file is based on code from hw/qdev-properties.c from
+ * commit 074a86fccd185616469dfcdc0e157f438aebba18,
+ * Copyright (c) Gerd Hoffmann <kraxel@redhat.com> and other contributors.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "net/net.h"
+#include "qdev.h"
+#include "qapi/qmp/qerror.h"
+#include "sysemu/blockdev.h"
+#include "hw/block-common.h"
+#include "net/hub.h"
+#include "qapi/visitor.h"
+#include "char/char.h"
+
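+/* Generic getter for pointer properties: report the backend's name as a string. */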
+static void get_pointer(Object *obj, Visitor *v, Property *prop,
+ const char *(*print)(void *ptr),
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ void **ptr = qdev_get_prop_ptr(dev, prop);
+ char *p;
+
+ p = (char *) (*ptr ? print(*ptr) : "");
+ visit_type_str(v, &p, name, errp);
+}
+
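+/* Generic setter for pointer properties: only allowed before the device is
+ * realized; an empty string clears the pointer, otherwise parse() resolves the
+ * named backend. */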
+static void set_pointer(Object *obj, Visitor *v, Property *prop,
+ int (*parse)(DeviceState *dev, const char *str,
+ void **ptr),
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ Error *local_err = NULL;
+ void **ptr = qdev_get_prop_ptr(dev, prop);
+ char *str;
+ int ret;
+
+ if (dev->state != DEV_STATE_CREATED) {
+ error_set(errp, QERR_PERMISSION_DENIED);
+ return;
+ }
+
+ visit_type_str(v, &str, name, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ if (!*str) {
+ g_free(str);
+ *ptr = NULL;
+ return;
+ }
+ ret = parse(dev, str, ptr);
+ error_set_from_qdev_prop_error(errp, ret, dev, prop, str);
+ g_free(str);
+}
+
+/* --- drive --- */
+
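+/* Look up a block backend by id and attach it to this device. */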
+static int parse_drive(DeviceState *dev, const char *str, void **ptr)
+{
+ BlockDriverState *bs;
+
+ bs = bdrv_find(str);
+ if (bs == NULL) {
+ return -ENOENT;
+ }
+ if (bdrv_attach_dev(bs, dev) < 0) {
+ return -EEXIST;
+ }
+ *ptr = bs;
+ return 0;
+}
+
+static void release_drive(Object *obj, const char *name, void *opaque)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ BlockDriverState **ptr = qdev_get_prop_ptr(dev, prop);
+
+ if (*ptr) {
+ bdrv_detach_dev(*ptr, dev);
+ blockdev_auto_del(*ptr);
+ }
+}
+
+static const char *print_drive(void *ptr)
+{
+ return bdrv_get_device_name(ptr);
+}
+
+static void get_drive(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ get_pointer(obj, v, opaque, print_drive, name, errp);
+}
+
+static void set_drive(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ set_pointer(obj, v, opaque, parse_drive, name, errp);
+}
+
+PropertyInfo qdev_prop_drive = {
+ .name = "drive",
+ .get = get_drive,
+ .set = set_drive,
+ .release = release_drive,
+};
+
+/* --- character device --- */
+
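+/* Look up a character backend by label and claim one of its connections. */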
+static int parse_chr(DeviceState *dev, const char *str, void **ptr)
+{
+ CharDriverState *chr = qemu_chr_find(str);
+ if (chr == NULL) {
+ return -ENOENT;
+ }
+ if (chr->avail_connections < 1) {
+ return -EEXIST;
+ }
+ *ptr = chr;
+ --chr->avail_connections;
+ return 0;
+}
+
+static void release_chr(Object *obj, const char *name, void *opaque)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ CharDriverState **ptr = qdev_get_prop_ptr(dev, prop);
+
+ if (*ptr) {
+ qemu_chr_add_handlers(*ptr, NULL, NULL, NULL, NULL);
+ }
+}
+
+
+static const char *print_chr(void *ptr)
+{
+ CharDriverState *chr = ptr;
+
+ return chr->label ? chr->label : "";
+}
+
+static void get_chr(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ get_pointer(obj, v, opaque, print_chr, name, errp);
+}
+
+static void set_chr(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ set_pointer(obj, v, opaque, parse_chr, name, errp);
+}
+
+PropertyInfo qdev_prop_chr = {
+ .name = "chr",
+ .get = get_chr,
+ .set = set_chr,
+ .release = release_chr,
+};
+
+/* --- netdev device --- */
+
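+/* Look up a -netdev backend by id; fail if it already has a peer. */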
+static int parse_netdev(DeviceState *dev, const char *str, void **ptr)
+{
+ NetClientState *netdev = qemu_find_netdev(str);
+
+ if (netdev == NULL) {
+ return -ENOENT;
+ }
+ if (netdev->peer) {
+ return -EEXIST;
+ }
+ *ptr = netdev;
+ return 0;
+}
+
+static const char *print_netdev(void *ptr)
+{
+ NetClientState *netdev = ptr;
+
+ return netdev->name ? netdev->name : "";
+}
+
+static void get_netdev(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ get_pointer(obj, v, opaque, print_netdev, name, errp);
+}
+
+static void set_netdev(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ set_pointer(obj, v, opaque, parse_netdev, name, errp);
+}
+
+PropertyInfo qdev_prop_netdev = {
+ .name = "netdev",
+ .get = get_netdev,
+ .set = set_netdev,
+};
+
+/* --- vlan --- */
+
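+/* The legacy "vlan" property stores a hub port; its externally visible value
+ * is the hub id, or -1 when no port is attached. */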
+static int print_vlan(DeviceState *dev, Property *prop, char *dest, size_t len)
+{
+ NetClientState **ptr = qdev_get_prop_ptr(dev, prop);
+
+ if (*ptr) {
+ int id;
+ if (!net_hub_id_for_client(*ptr, &id)) {
+ return snprintf(dest, len, "%d", id);
+ }
+ }
+
+ return snprintf(dest, len, "<null>");
+}
+
+static void get_vlan(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ NetClientState **ptr = qdev_get_prop_ptr(dev, prop);
+ int32_t id = -1;
+
+ if (*ptr) {
+ int hub_id;
+ if (!net_hub_id_for_client(*ptr, &hub_id)) {
+ id = hub_id;
+ }
+ }
+
+ visit_type_int32(v, &id, name, errp);
+}
+
+static void set_vlan(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ NetClientState **ptr = qdev_get_prop_ptr(dev, prop);
+ Error *local_err = NULL;
+ int32_t id;
+ NetClientState *hubport;
+
+ if (dev->state != DEV_STATE_CREATED) {
+ error_set(errp, QERR_PERMISSION_DENIED);
+ return;
+ }
+
+ visit_type_int32(v, &id, name, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ if (id == -1) {
+ *ptr = NULL;
+ return;
+ }
+
+ hubport = net_hub_port_find(id);
+ if (!hubport) {
+ error_set(errp, QERR_INVALID_PARAMETER_VALUE,
+ name, prop->info->name);
+ return;
+ }
+ *ptr = hubport;
+}
+
+PropertyInfo qdev_prop_vlan = {
+ .name = "vlan",
+ .print = print_vlan,
+ .get = get_vlan,
+ .set = set_vlan,
+};
+
+int qdev_prop_set_drive(DeviceState *dev, const char *name,
+ BlockDriverState *value)
+{
+ Error *errp = NULL;
+ const char *bdrv_name = value ? bdrv_get_device_name(value) : "";
+ object_property_set_str(OBJECT(dev), bdrv_name,
+ name, &errp);
+ if (errp) {
+ qerror_report_err(errp);
+ error_free(errp);
+ return -1;
+ }
+ return 0;
+}
+
+void qdev_prop_set_drive_nofail(DeviceState *dev, const char *name,
+ BlockDriverState *value)
+{
+ if (qdev_prop_set_drive(dev, name, value) < 0) {
+ exit(1);
+ }
+}
+void qdev_prop_set_chr(DeviceState *dev, const char *name,
+ CharDriverState *value)
+{
+ Error *errp = NULL;
+ assert(!value || value->label);
+ object_property_set_str(OBJECT(dev),
+ value ? value->label : "", name, &errp);
+ assert_no_error(errp);
+}
+
+void qdev_prop_set_netdev(DeviceState *dev, const char *name,
+ NetClientState *value)
+{
+ Error *errp = NULL;
+ assert(!value || value->name);
+ object_property_set_str(OBJECT(dev),
+ value ? value->name : "", name, &errp);
+ assert_no_error(errp);
+}
+
+void qdev_set_nic_properties(DeviceState *dev, NICInfo *nd)
+{
+ qdev_prop_set_macaddr(dev, "mac", nd->macaddr.a);
+ if (nd->netdev) {
+ qdev_prop_set_netdev(dev, "netdev", nd->netdev);
+ }
+ if (nd->nvectors != DEV_NVECTORS_UNSPECIFIED &&
+ object_property_find(OBJECT(dev), "vectors", NULL)) {
+ qdev_prop_set_uint32(dev, "vectors", nd->nvectors);
+ }
+ nd->instantiated = 1;
+}
+
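+/* Register each -global command line option as a GlobalProperty default. */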
+static int qdev_add_one_global(QemuOpts *opts, void *opaque)
+{
+ GlobalProperty *g;
+
+ g = g_malloc0(sizeof(*g));
+ g->driver = qemu_opt_get(opts, "driver");
+ g->property = qemu_opt_get(opts, "property");
+ g->value = qemu_opt_get(opts, "value");
+ qdev_prop_register_global(g);
+ return 0;
+}
+
+void qemu_add_globals(void)
+{
+ qemu_opts_foreach(qemu_find_opts("global"), qdev_add_one_global, NULL, 0);
+}
return ptr;
}
-static void get_pointer(Object *obj, Visitor *v, Property *prop,
- const char *(*print)(void *ptr),
- const char *name, Error **errp)
-{
- DeviceState *dev = DEVICE(obj);
- void **ptr = qdev_get_prop_ptr(dev, prop);
- char *p;
-
- p = (char *) (*ptr ? print(*ptr) : "");
- visit_type_str(v, &p, name, errp);
-}
-
-static void set_pointer(Object *obj, Visitor *v, Property *prop,
- int (*parse)(DeviceState *dev, const char *str,
- void **ptr),
- const char *name, Error **errp)
-{
- DeviceState *dev = DEVICE(obj);
- Error *local_err = NULL;
- void **ptr = qdev_get_prop_ptr(dev, prop);
- char *str;
- int ret;
-
- if (dev->state != DEV_STATE_CREATED) {
- error_set(errp, QERR_PERMISSION_DENIED);
- return;
- }
-
- visit_type_str(v, &str, name, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
- if (!*str) {
- g_free(str);
- *ptr = NULL;
- return;
- }
- ret = parse(dev, str, ptr);
- error_set_from_qdev_prop_error(errp, ret, dev, prop, str);
- g_free(str);
-}
-
static void get_enum(Object *obj, Visitor *v, void *opaque,
const char *name, Error **errp)
{
{
uint32_t *p = qdev_get_prop_ptr(dev, props);
uint32_t mask = qdev_get_prop_mask(props);
- if (val)
+ if (val) {
*p |= mask;
- else
+ } else {
*p &= ~mask;
+ }
}
static int print_bit(DeviceState *dev, Property *prop, char *dest, size_t len)
g_free(*(char **)qdev_get_prop_ptr(DEVICE(obj), prop));
}
-static int print_string(DeviceState *dev, Property *prop, char *dest, size_t len)
+static int print_string(DeviceState *dev, Property *prop, char *dest,
+ size_t len)
{
char **ptr = qdev_get_prop_ptr(dev, prop);
- if (!*ptr)
+ if (!*ptr) {
return snprintf(dest, len, "<null>");
+ }
return snprintf(dest, len, "\"%s\"", *ptr);
}
.set = set_string,
};
-/* --- drive --- */
-
-static int parse_drive(DeviceState *dev, const char *str, void **ptr)
-{
- BlockDriverState *bs;
-
- bs = bdrv_find(str);
- if (bs == NULL)
- return -ENOENT;
- if (bdrv_attach_dev(bs, dev) < 0)
- return -EEXIST;
- *ptr = bs;
- return 0;
-}
-
-static void release_drive(Object *obj, const char *name, void *opaque)
-{
- DeviceState *dev = DEVICE(obj);
- Property *prop = opaque;
- BlockDriverState **ptr = qdev_get_prop_ptr(dev, prop);
-
- if (*ptr) {
- bdrv_detach_dev(*ptr, dev);
- blockdev_auto_del(*ptr);
- }
-}
-
-static const char *print_drive(void *ptr)
-{
- return bdrv_get_device_name(ptr);
-}
-
-static void get_drive(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
-{
- get_pointer(obj, v, opaque, print_drive, name, errp);
-}
-
-static void set_drive(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
-{
- set_pointer(obj, v, opaque, parse_drive, name, errp);
-}
-
-PropertyInfo qdev_prop_drive = {
- .name = "drive",
- .get = get_drive,
- .set = set_drive,
- .release = release_drive,
-};
-
-/* --- character device --- */
-
-static int parse_chr(DeviceState *dev, const char *str, void **ptr)
-{
- CharDriverState *chr = qemu_chr_find(str);
- if (chr == NULL) {
- return -ENOENT;
- }
- if (chr->avail_connections < 1) {
- return -EEXIST;
- }
- *ptr = chr;
- --chr->avail_connections;
- return 0;
-}
-
-static void release_chr(Object *obj, const char *name, void *opaque)
-{
- DeviceState *dev = DEVICE(obj);
- Property *prop = opaque;
- CharDriverState **ptr = qdev_get_prop_ptr(dev, prop);
-
- if (*ptr) {
- qemu_chr_add_handlers(*ptr, NULL, NULL, NULL, NULL);
- }
-}
-
-
-static const char *print_chr(void *ptr)
-{
- CharDriverState *chr = ptr;
-
- return chr->label ? chr->label : "";
-}
-
-static void get_chr(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
-{
- get_pointer(obj, v, opaque, print_chr, name, errp);
-}
-
-static void set_chr(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
-{
- set_pointer(obj, v, opaque, parse_chr, name, errp);
-}
-
-PropertyInfo qdev_prop_chr = {
- .name = "chr",
- .get = get_chr,
- .set = set_chr,
- .release = release_chr,
-};
-
-/* --- netdev device --- */
-
-static int parse_netdev(DeviceState *dev, const char *str, void **ptr)
-{
- NetClientState *netdev = qemu_find_netdev(str);
-
- if (netdev == NULL) {
- return -ENOENT;
- }
- if (netdev->peer) {
- return -EEXIST;
- }
- *ptr = netdev;
- return 0;
-}
-
-static const char *print_netdev(void *ptr)
-{
- NetClientState *netdev = ptr;
-
- return netdev->name ? netdev->name : "";
-}
-
-static void get_netdev(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
-{
- get_pointer(obj, v, opaque, print_netdev, name, errp);
-}
-
-static void set_netdev(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
-{
- set_pointer(obj, v, opaque, parse_netdev, name, errp);
-}
-
-PropertyInfo qdev_prop_netdev = {
- .name = "netdev",
- .get = get_netdev,
- .set = set_netdev,
-};
-
-/* --- vlan --- */
-
-static int print_vlan(DeviceState *dev, Property *prop, char *dest, size_t len)
-{
- NetClientState **ptr = qdev_get_prop_ptr(dev, prop);
-
- if (*ptr) {
- int id;
- if (!net_hub_id_for_client(*ptr, &id)) {
- return snprintf(dest, len, "%d", id);
- }
- }
-
- return snprintf(dest, len, "<null>");
-}
-
-static void get_vlan(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
-{
- DeviceState *dev = DEVICE(obj);
- Property *prop = opaque;
- NetClientState **ptr = qdev_get_prop_ptr(dev, prop);
- int32_t id = -1;
-
- if (*ptr) {
- int hub_id;
- if (!net_hub_id_for_client(*ptr, &hub_id)) {
- id = hub_id;
- }
- }
-
- visit_type_int32(v, &id, name, errp);
-}
-
-static void set_vlan(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
-{
- DeviceState *dev = DEVICE(obj);
- Property *prop = opaque;
- NetClientState **ptr = qdev_get_prop_ptr(dev, prop);
- Error *local_err = NULL;
- int32_t id;
- NetClientState *hubport;
-
- if (dev->state != DEV_STATE_CREATED) {
- error_set(errp, QERR_PERMISSION_DENIED);
- return;
- }
-
- visit_type_int32(v, &id, name, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
- if (id == -1) {
- *ptr = NULL;
- return;
- }
-
- hubport = net_hub_port_find(id);
- if (!hubport) {
- error_set(errp, QERR_INVALID_PARAMETER_VALUE,
- name, prop->info->name);
- return;
- }
- *ptr = hubport;
-}
-
-PropertyInfo qdev_prop_vlan = {
- .name = "vlan",
- .print = print_vlan,
- .get = get_vlan,
- .set = set_vlan,
-};
-
/* --- pointer --- */
/* Not a proper property, just for dirty hacks. TODO Remove it! */
}
for (i = 0, pos = 0; i < 6; i++, pos += 3) {
- if (!qemu_isxdigit(str[pos]))
+ if (!qemu_isxdigit(str[pos])) {
goto inval;
- if (!qemu_isxdigit(str[pos+1]))
+ }
+ if (!qemu_isxdigit(str[pos+1])) {
goto inval;
+ }
if (i == 5) {
- if (str[pos+2] != '\0')
+ if (str[pos+2] != '\0') {
goto inval;
+ }
} else {
- if (str[pos+2] != ':' && str[pos+2] != '-')
+ if (str[pos+2] != ':' && str[pos+2] != '-') {
goto inval;
+ }
}
mac->a[i] = strtol(str+pos, &p, 16);
}
g_free(str);
}
-static int print_pci_devfn(DeviceState *dev, Property *prop, char *dest, size_t len)
+static int print_pci_devfn(DeviceState *dev, Property *prop, char *dest,
+ size_t len)
{
int32_t *ptr = qdev_get_prop_ptr(dev, prop);
static Property *qdev_prop_walk(Property *props, const char *name)
{
- if (!props)
+ if (!props) {
return NULL;
+ }
while (props->name) {
- if (strcmp(props->name, name) == 0)
+ if (strcmp(props->name, name) == 0) {
return props;
+ }
props++;
}
return NULL;
assert_no_error(errp);
}
-int qdev_prop_set_drive(DeviceState *dev, const char *name, BlockDriverState *value)
-{
- Error *errp = NULL;
- const char *bdrv_name = value ? bdrv_get_device_name(value) : "";
- object_property_set_str(OBJECT(dev), bdrv_name,
- name, &errp);
- if (errp) {
- qerror_report_err(errp);
- error_free(errp);
- return -1;
- }
- return 0;
-}
-
-void qdev_prop_set_drive_nofail(DeviceState *dev, const char *name, BlockDriverState *value)
-{
- if (qdev_prop_set_drive(dev, name, value) < 0) {
- exit(1);
- }
-}
-void qdev_prop_set_chr(DeviceState *dev, const char *name, CharDriverState *value)
-{
- Error *errp = NULL;
- assert(!value || value->label);
- object_property_set_str(OBJECT(dev),
- value ? value->label : "", name, &errp);
- assert_no_error(errp);
-}
-
-void qdev_prop_set_netdev(DeviceState *dev, const char *name, NetClientState *value)
-{
- Error *errp = NULL;
- assert(!value || value->name);
- object_property_set_str(OBJECT(dev),
- value ? value->name : "", name, &errp);
- assert_no_error(errp);
-}
-
void qdev_prop_set_macaddr(DeviceState *dev, const char *name, uint8_t *value)
{
Error *errp = NULL;
*ptr = value;
}
-static QTAILQ_HEAD(, GlobalProperty) global_props = QTAILQ_HEAD_INITIALIZER(global_props);
+static QTAILQ_HEAD(, GlobalProperty) global_props =
+ QTAILQ_HEAD_INITIALIZER(global_props);
-static void qdev_prop_register_global(GlobalProperty *prop)
+void qdev_prop_register_global(GlobalProperty *prop)
{
QTAILQ_INSERT_TAIL(&global_props, prop, next);
}
class = object_class_get_parent(class);
} while (class);
}
-
-static int qdev_add_one_global(QemuOpts *opts, void *opaque)
-{
- GlobalProperty *g;
-
- g = g_malloc0(sizeof(*g));
- g->driver = qemu_opt_get(opts, "driver");
- g->property = qemu_opt_get(opts, "property");
- g->value = qemu_opt_get(opts, "value");
- qdev_prop_register_global(g);
- return 0;
-}
-
-void qemu_add_globals(void)
-{
- qemu_opts_foreach(qemu_find_opts("global"), qdev_add_one_global, NULL, 0);
-}
/* FIXME: Remove opaque pointer properties. */
void qdev_prop_set_ptr(DeviceState *dev, const char *name, void *value);
+void qdev_prop_register_global(GlobalProperty *prop);
void qdev_prop_register_global_list(GlobalProperty *props);
void qdev_prop_set_globals(DeviceState *dev);
void error_set_from_qdev_prop_error(Error **errp, int ret, DeviceState *dev,
inherit from a particular bus (e.g. PCI or I2C) rather than
this API directly. */
-#include "net/net.h"
#include "qdev.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
dev->gpio_out[n] = pin;
}
-void qdev_set_nic_properties(DeviceState *dev, NICInfo *nd)
-{
- qdev_prop_set_macaddr(dev, "mac", nd->macaddr.a);
- if (nd->netdev)
- qdev_prop_set_netdev(dev, "netdev", nd->netdev);
- if (nd->nvectors != DEV_NVECTORS_UNSPECIFIED &&
- object_property_find(OBJECT(dev), "vectors", NULL)) {
- qdev_prop_set_uint32(dev, "vectors", nd->nvectors);
- }
- nd->instantiated = 1;
-}
-
BusState *qdev_get_child_bus(DeviceState *dev, const char *name)
{
BusState *bus;
return bus;
}
-static void s390_virtio_irq(CPUS390XState *env, int config_change, uint64_t token)
+static void s390_virtio_irq(S390CPU *cpu, int config_change, uint64_t token)
{
+ CPUS390XState *env = &cpu->env;
+
if (kvm_enabled()) {
- kvm_s390_virtio_irq(env, config_change, token);
+ kvm_s390_virtio_irq(cpu, config_change, token);
} else {
cpu_inject_ext(env, VIRTIO_EXT_CODE, config_change, token);
}
s390_virtio_reset_idx(dev);
if (dev->qdev.hotplugged) {
S390CPU *cpu = s390_cpu_addr2state(0);
- CPUS390XState *env = &cpu->env;
- s390_virtio_irq(env, VIRTIO_PARAM_DEV_ADD, dev->dev_offs);
+ s390_virtio_irq(cpu, VIRTIO_PARAM_DEV_ADD, dev->dev_offs);
}
return 0;
VirtIOS390Device *dev = (VirtIOS390Device*)opaque;
uint64_t token = s390_virtio_device_vq_token(dev, vector);
S390CPU *cpu = s390_cpu_addr2state(0);
- CPUS390XState *env = &cpu->env;
- s390_virtio_irq(env, 0, token);
+ s390_virtio_irq(cpu, 0, token);
}
static unsigned virtio_s390_get_features(void *opaque)
/* Tell KVM that we're in PAPR mode */
if (kvm_enabled()) {
- kvmppc_set_papr(env);
+ kvmppc_set_papr(cpu);
}
qemu_register_reset(spapr_cpu_reset, cpu);
}
env = &cpu->env;
- ppc_booke_timers_init(env, sysclk, 0/* no flags */);
+ ppc_booke_timers_init(cpu, sysclk, 0/* no flags */);
ppc_dcr_init(env, NULL, NULL);
#include "qemu/bswap.h"
#include "qemu/queue.h"
+/**
+ * CPUListState:
+ * @cpu_fprintf: Print function.
+ * @file: File to print to using @cpu_fprintf.
+ *
+ * State commonly used for iterating over CPU models.
+ */
+typedef struct CPUListState {
+ fprintf_function cpu_fprintf;
+ FILE *file;
+} CPUListState;
+
#if !defined(CONFIG_USER_ONLY)
enum device_endian {
} icount_decr_u16;
#endif
-struct kvm_run;
-struct KVMState;
struct qemu_work_item;
typedef struct CPUBreakpoint {
/* user data */ \
void *opaque; \
\
- const char *cpu_model_str; \
- struct KVMState *kvm_state; \
- struct kvm_run *kvm_run; \
- int kvm_fd; \
- int kvm_vcpu_dirty;
+ const char *cpu_model_str;
#endif
void (*reset)(CPUState *cpu);
} CPUClass;
+struct KVMState;
+struct kvm_run;
+
/**
* CPUState:
* @created: Indicates whether the CPU thread has been successfully created.
* @stop: Indicates a pending stop request.
* @stopped: Indicates the CPU has been artificially stopped.
+ * @kvm_fd: vCPU file descriptor for KVM.
*
* State of one CPU core or thread.
*/
bool stop;
bool stopped;
+#if !defined(CONFIG_USER_ONLY)
+ int kvm_fd;
+ bool kvm_vcpu_dirty;
+#endif
+ struct KVMState *kvm_state;
+ struct kvm_run *kvm_run;
+
/* TODO Move common fields from CPUArchState here. */
};
int kvm_vm_ioctl(KVMState *s, int type, ...);
-int kvm_vcpu_ioctl(CPUArchState *env, int type, ...);
+int kvm_vcpu_ioctl(CPUState *cpu, int type, ...);
/* Arch specific hooks */
extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
-void kvm_arch_pre_run(CPUArchState *env, struct kvm_run *run);
-void kvm_arch_post_run(CPUArchState *env, struct kvm_run *run);
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
+void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
-int kvm_arch_handle_exit(CPUArchState *env, struct kvm_run *run);
+int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);
-int kvm_arch_process_async_events(CPUArchState *env);
+int kvm_arch_process_async_events(CPUState *cpu);
-int kvm_arch_get_registers(CPUArchState *env);
+int kvm_arch_get_registers(CPUState *cpu);
/* state subset only touched by the VCPU itself during runtime */
#define KVM_PUT_RUNTIME_STATE 1
/* full state set, modified during initialization or on vmload */
#define KVM_PUT_FULL_STATE 3
-int kvm_arch_put_registers(CPUArchState *env, int level);
+int kvm_arch_put_registers(CPUState *cpu, int level);
int kvm_arch_init(KVMState *s);
-int kvm_arch_init_vcpu(CPUArchState *env);
+int kvm_arch_init_vcpu(CPUState *cpu);
-void kvm_arch_reset_vcpu(CPUArchState *env);
+void kvm_arch_reset_vcpu(CPUState *cpu);
-int kvm_arch_on_sigbus_vcpu(CPUArchState *env, int code, void *addr);
+int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
int kvm_arch_on_sigbus(int code, void *addr);
void kvm_arch_init_irq_routing(KVMState *s);
QTAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint);
-struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env,
+struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
target_ulong pc);
-int kvm_sw_breakpoints_active(CPUArchState *env);
+int kvm_sw_breakpoints_active(CPUState *cpu);
-int kvm_arch_insert_sw_breakpoint(CPUArchState *current_env,
+int kvm_arch_insert_sw_breakpoint(CPUState *current_cpu,
struct kvm_sw_breakpoint *bp);
-int kvm_arch_remove_sw_breakpoint(CPUArchState *current_env,
+int kvm_arch_remove_sw_breakpoint(CPUState *current_cpu,
struct kvm_sw_breakpoint *bp);
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
target_ulong len, int type);
target_ulong len, int type);
void kvm_arch_remove_all_hw_breakpoints(void);
-void kvm_arch_update_guest_debug(CPUArchState *env, struct kvm_guest_debug *dbg);
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);
-bool kvm_arch_stop_on_emulation_error(CPUArchState *env);
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu);
int kvm_check_extension(KVMState *s, unsigned int extension);
static void kvm_reset_vcpu(void *opaque)
{
- CPUArchState *env = opaque;
+ CPUState *cpu = opaque;
- kvm_arch_reset_vcpu(env);
+ kvm_arch_reset_vcpu(cpu);
}
int kvm_init_vcpu(CPUArchState *env)
{
+ CPUState *cpu = ENV_GET_CPU(env);
KVMState *s = kvm_state;
long mmap_size;
int ret;
goto err;
}
- env->kvm_fd = ret;
- env->kvm_state = s;
- env->kvm_vcpu_dirty = 1;
+ cpu->kvm_fd = ret;
+ cpu->kvm_state = s;
+ cpu->kvm_vcpu_dirty = true;
mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0) {
goto err;
}
- env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
- env->kvm_fd, 0);
- if (env->kvm_run == MAP_FAILED) {
+ cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ cpu->kvm_fd, 0);
+ if (cpu->kvm_run == MAP_FAILED) {
ret = -errno;
DPRINTF("mmap'ing vcpu state failed\n");
goto err;
if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
s->coalesced_mmio_ring =
- (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
+ (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
}
- ret = kvm_arch_init_vcpu(env);
+ ret = kvm_arch_init_vcpu(cpu);
if (ret == 0) {
- qemu_register_reset(kvm_reset_vcpu, env);
- kvm_arch_reset_vcpu(env);
+ qemu_register_reset(kvm_reset_vcpu, cpu);
+ kvm_arch_reset_vcpu(cpu);
}
err:
return ret;
static int kvm_handle_internal_error(CPUArchState *env, struct kvm_run *run)
{
+ CPUState *cpu = ENV_GET_CPU(env);
+
fprintf(stderr, "KVM internal error.");
if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
int i;
}
if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
fprintf(stderr, "emulation failure\n");
- if (!kvm_arch_stop_on_emulation_error(env)) {
+ if (!kvm_arch_stop_on_emulation_error(cpu)) {
cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
return EXCP_INTERRUPT;
}
s->coalesced_flush_in_progress = false;
}
-static void do_kvm_cpu_synchronize_state(void *_env)
+static void do_kvm_cpu_synchronize_state(void *arg)
{
- CPUArchState *env = _env;
+ CPUState *cpu = arg;
- if (!env->kvm_vcpu_dirty) {
- kvm_arch_get_registers(env);
- env->kvm_vcpu_dirty = 1;
+ if (!cpu->kvm_vcpu_dirty) {
+ kvm_arch_get_registers(cpu);
+ cpu->kvm_vcpu_dirty = true;
}
}
{
CPUState *cpu = ENV_GET_CPU(env);
- if (!env->kvm_vcpu_dirty) {
- run_on_cpu(cpu, do_kvm_cpu_synchronize_state, env);
+ if (!cpu->kvm_vcpu_dirty) {
+ run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu);
}
}
void kvm_cpu_synchronize_post_reset(CPUArchState *env)
{
- kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
- env->kvm_vcpu_dirty = 0;
+ CPUState *cpu = ENV_GET_CPU(env);
+
+ kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
+ cpu->kvm_vcpu_dirty = false;
}
void kvm_cpu_synchronize_post_init(CPUArchState *env)
{
- kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
- env->kvm_vcpu_dirty = 0;
+ CPUState *cpu = ENV_GET_CPU(env);
+
+ kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
+ cpu->kvm_vcpu_dirty = false;
}
int kvm_cpu_exec(CPUArchState *env)
{
- struct kvm_run *run = env->kvm_run;
+ CPUState *cpu = ENV_GET_CPU(env);
+ struct kvm_run *run = cpu->kvm_run;
int ret, run_ret;
DPRINTF("kvm_cpu_exec()\n");
- if (kvm_arch_process_async_events(env)) {
+ if (kvm_arch_process_async_events(cpu)) {
env->exit_request = 0;
return EXCP_HLT;
}
do {
- if (env->kvm_vcpu_dirty) {
- kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
- env->kvm_vcpu_dirty = 0;
+ if (cpu->kvm_vcpu_dirty) {
+ kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
+ cpu->kvm_vcpu_dirty = false;
}
- kvm_arch_pre_run(env, run);
+ kvm_arch_pre_run(cpu, run);
if (env->exit_request) {
DPRINTF("interrupt exit requested\n");
/*
}
qemu_mutex_unlock_iothread();
- run_ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
+ run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
qemu_mutex_lock_iothread();
- kvm_arch_post_run(env, run);
+ kvm_arch_post_run(cpu, run);
if (run_ret < 0) {
if (run_ret == -EINTR || run_ret == -EAGAIN) {
break;
default:
DPRINTF("kvm_arch_handle_exit\n");
- ret = kvm_arch_handle_exit(env, run);
+ ret = kvm_arch_handle_exit(cpu, run);
break;
}
} while (ret == 0);
return ret;
}
-int kvm_vcpu_ioctl(CPUArchState *env, int type, ...)
+int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
int ret;
void *arg;
arg = va_arg(ap, void *);
va_end(ap);
- ret = ioctl(env->kvm_fd, type, arg);
+ ret = ioctl(cpu->kvm_fd, type, arg);
if (ret == -1) {
ret = -errno;
}
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
-struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env,
+struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
target_ulong pc)
{
struct kvm_sw_breakpoint *bp;
- QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
+ QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
if (bp->pc == pc) {
return bp;
}
return NULL;
}
-int kvm_sw_breakpoints_active(CPUArchState *env)
+int kvm_sw_breakpoints_active(CPUState *cpu)
{
- return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
+ return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}
struct kvm_set_guest_debug_data {
struct kvm_guest_debug dbg;
- CPUArchState *env;
+ CPUState *cpu;
int err;
};
static void kvm_invoke_set_guest_debug(void *data)
{
struct kvm_set_guest_debug_data *dbg_data = data;
- CPUArchState *env = dbg_data->env;
- dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
+ dbg_data->err = kvm_vcpu_ioctl(dbg_data->cpu, KVM_SET_GUEST_DEBUG,
+ &dbg_data->dbg);
}
int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
if (env->singlestep_enabled) {
data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
}
- kvm_arch_update_guest_debug(env, &data.dbg);
- data.env = env;
+ kvm_arch_update_guest_debug(cpu, &data.dbg);
+ data.cpu = cpu;
run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
return data.err;
int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
target_ulong len, int type)
{
+ CPUState *current_cpu = ENV_GET_CPU(current_env);
struct kvm_sw_breakpoint *bp;
CPUArchState *env;
int err;
if (type == GDB_BREAKPOINT_SW) {
- bp = kvm_find_sw_breakpoint(current_env, addr);
+ bp = kvm_find_sw_breakpoint(current_cpu, addr);
if (bp) {
bp->use_count++;
return 0;
bp->pc = addr;
bp->use_count = 1;
- err = kvm_arch_insert_sw_breakpoint(current_env, bp);
+ err = kvm_arch_insert_sw_breakpoint(current_cpu, bp);
if (err) {
g_free(bp);
return err;
}
- QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
+ QTAILQ_INSERT_HEAD(&current_cpu->kvm_state->kvm_sw_breakpoints,
bp, entry);
} else {
err = kvm_arch_insert_hw_breakpoint(addr, len, type);
int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
target_ulong len, int type)
{
+ CPUState *current_cpu = ENV_GET_CPU(current_env);
struct kvm_sw_breakpoint *bp;
CPUArchState *env;
int err;
if (type == GDB_BREAKPOINT_SW) {
- bp = kvm_find_sw_breakpoint(current_env, addr);
+ bp = kvm_find_sw_breakpoint(current_cpu, addr);
if (!bp) {
return -ENOENT;
}
return 0;
}
- err = kvm_arch_remove_sw_breakpoint(current_env, bp);
+ err = kvm_arch_remove_sw_breakpoint(current_cpu, bp);
if (err) {
return err;
}
- QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
+ QTAILQ_REMOVE(&current_cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
g_free(bp);
} else {
err = kvm_arch_remove_hw_breakpoint(addr, len, type);
void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
+ CPUState *current_cpu = ENV_GET_CPU(current_env);
struct kvm_sw_breakpoint *bp, *next;
- KVMState *s = current_env->kvm_state;
+ KVMState *s = current_cpu->kvm_state;
CPUArchState *env;
+ CPUState *cpu;
QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
- if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
+ if (kvm_arch_remove_sw_breakpoint(current_cpu, bp) != 0) {
/* Try harder to find a CPU that currently sees the breakpoint. */
for (env = first_cpu; env != NULL; env = env->next_cpu) {
- if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
+ cpu = ENV_GET_CPU(env);
+ if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) {
break;
}
}
int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset)
{
+ CPUState *cpu = ENV_GET_CPU(env);
struct kvm_signal_mask *sigmask;
int r;
if (!sigset) {
- return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
+ return kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, NULL);
}
sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
sigmask->len = 8;
memcpy(sigmask->sigset, sigset, sizeof(*sigset));
- r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
+ r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
g_free(sigmask);
return r;
int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr)
{
- return kvm_arch_on_sigbus_vcpu(env, code, addr);
+ CPUState *cpu = ENV_GET_CPU(env);
+ return kvm_arch_on_sigbus_vcpu(cpu, code, addr);
}
int kvm_on_sigbus(int code, void *addr)
/*< public >*/
CPUAlphaState env;
+
+ /* This alarm doesn't exist in real hardware; we wish it did. */
+ struct QEMUTimer *alarm_timer;
} AlphaCPU;
static inline AlphaCPU *alpha_env_get_cpu(CPUAlphaState *env)
#include "cpu.h"
#include "qemu-common.h"
+#include "qapi/error.h"
+static void alpha_cpu_realize(Object *obj, Error **errp)
+{
+#ifndef CONFIG_USER_ONLY
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+
+ qemu_init_vcpu(&cpu->env);
+#endif
+}
+
+/* Sort alphabetically by type name. */
+static gint alpha_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ return strcmp(name_a, name_b);
+}
+
+static void alpha_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CPUListState *s = user_data;
+
+ (*s->cpu_fprintf)(s->file, " %s\n",
+ object_class_get_name(oc));
+}
+
+void alpha_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ CPUListState s = {
+ .file = f,
+ .cpu_fprintf = cpu_fprintf,
+ };
+ GSList *list;
+
+ list = object_class_get_list(TYPE_ALPHA_CPU, false);
+ list = g_slist_sort(list, alpha_cpu_list_compare);
+ (*cpu_fprintf)(f, "Available CPUs:\n");
+ g_slist_foreach(list, alpha_cpu_list_entry, &s);
+ g_slist_free(list);
+}
+
+/* Models */
+
+#define TYPE(model) model "-" TYPE_ALPHA_CPU
+
+typedef struct AlphaCPUAlias {
+ const char *alias;
+ const char *typename;
+} AlphaCPUAlias;
+
+static const AlphaCPUAlias alpha_cpu_aliases[] = {
+ { "21064", TYPE("ev4") },
+ { "21164", TYPE("ev5") },
+ { "21164a", TYPE("ev56") },
+ { "21164pc", TYPE("pca56") },
+ { "21264", TYPE("ev6") },
+ { "21264a", TYPE("ev67") },
+};
+
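+/* Map a -cpu model string to a CPU class, accepting full QOM type names, the
+ * 21x64 aliases above, and bare core names such as "ev67". */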
+static ObjectClass *alpha_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc = NULL;
+ char *typename;
+ int i;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+
+ oc = object_class_by_name(cpu_model);
+ if (oc != NULL) {
+ return oc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(alpha_cpu_aliases); i++) {
+ if (strcmp(cpu_model, alpha_cpu_aliases[i].alias) == 0) {
+ oc = object_class_by_name(alpha_cpu_aliases[i].typename);
+ assert(oc != NULL);
+ return oc;
+ }
+ }
+
+ typename = g_strdup_printf("%s-" TYPE_ALPHA_CPU, cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ return oc;
+}
+
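+/* Aside (not part of the patch): an illustrative sketch of the lookup paths,
+   assuming TYPE_ALPHA_CPU expands to "alpha-cpu" and the model types below are
+   already registered; the function name is hypothetical and <assert.h> is
+   needed.  A marketing alias, a short model name and a full QOM type name all
+   resolve to the same class:
+
+   static void check_ev67_lookups(void)
+   {
+       ObjectClass *by_alias = alpha_cpu_class_by_name("21264a");
+       ObjectClass *by_model = alpha_cpu_class_by_name("ev67");
+       ObjectClass *by_type  = alpha_cpu_class_by_name("ev67-alpha-cpu");
+
+       assert(by_alias == by_model && by_model == by_type);
+   }
+*/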
+AlphaCPU *cpu_alpha_init(const char *cpu_model)
+{
+ AlphaCPU *cpu;
+ CPUAlphaState *env;
+ ObjectClass *cpu_class;
+
+ cpu_class = alpha_cpu_class_by_name(cpu_model);
+ if (cpu_class == NULL) {
+ /* Default to ev67; no reason not to emulate insns by default. */
+ cpu_class = object_class_by_name(TYPE("ev67"));
+ }
+ cpu = ALPHA_CPU(object_new(object_class_get_name(cpu_class)));
+ env = &cpu->env;
+
+ env->cpu_model_str = cpu_model;
+
+ alpha_cpu_realize(OBJECT(cpu), NULL);
+ return cpu;
+}
+
+static void ev4_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->implver = IMPLVER_2106x;
+}
+
+static const TypeInfo ev4_cpu_type_info = {
+ .name = TYPE("ev4"),
+ .parent = TYPE_ALPHA_CPU,
+ .instance_init = ev4_cpu_initfn,
+};
+
+static void ev5_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->implver = IMPLVER_21164;
+}
+
+static const TypeInfo ev5_cpu_type_info = {
+ .name = TYPE("ev5"),
+ .parent = TYPE_ALPHA_CPU,
+ .instance_init = ev5_cpu_initfn,
+};
+
+static void ev56_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->amask |= AMASK_BWX;
+}
+
+static const TypeInfo ev56_cpu_type_info = {
+ .name = TYPE("ev56"),
+ .parent = TYPE("ev5"),
+ .instance_init = ev56_cpu_initfn,
+};
+
+static void pca56_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->amask |= AMASK_MVI;
+}
+
+static const TypeInfo pca56_cpu_type_info = {
+ .name = TYPE("pca56"),
+ .parent = TYPE("ev56"),
+ .instance_init = pca56_cpu_initfn,
+};
+
+static void ev6_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->implver = IMPLVER_21264;
+ env->amask = AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP;
+}
+
+static const TypeInfo ev6_cpu_type_info = {
+ .name = TYPE("ev6"),
+ .parent = TYPE_ALPHA_CPU,
+ .instance_init = ev6_cpu_initfn,
+};
+
+static void ev67_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->amask |= AMASK_CIX | AMASK_PREFETCH;
+}
+
+static const TypeInfo ev67_cpu_type_info = {
+ .name = TYPE("ev67"),
+ .parent = TYPE("ev6"),
+ .instance_init = ev67_cpu_initfn,
+};
+
+static const TypeInfo ev68_cpu_type_info = {
+ .name = TYPE("ev68"),
+ .parent = TYPE("ev67"),
+};
+
static void alpha_cpu_initfn(Object *obj)
{
AlphaCPU *cpu = ALPHA_CPU(obj);
cpu_exec_init(env);
tlb_flush(env, 1);
+ alpha_translate_init();
+
#if defined(CONFIG_USER_ONLY)
env->ps = PS_USER_MODE;
cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
.parent = TYPE_CPU,
.instance_size = sizeof(AlphaCPU),
.instance_init = alpha_cpu_initfn,
- .abstract = false,
+ .abstract = true,
.class_size = sizeof(AlphaCPUClass),
};
static void alpha_cpu_register_types(void)
{
type_register_static(&alpha_cpu_type_info);
+ type_register_static(&ev4_cpu_type_info);
+ type_register_static(&ev5_cpu_type_info);
+ type_register_static(&ev56_cpu_type_info);
+ type_register_static(&pca56_cpu_type_info);
+ type_register_static(&ev6_cpu_type_info);
+ type_register_static(&ev67_cpu_type_info);
+ type_register_static(&ev68_cpu_type_info);
}
type_init(alpha_cpu_register_types)
#endif
/* This alarm doesn't exist in real hardware; we wish it did. */
- struct QEMUTimer *alarm_timer;
uint64_t alarm_expire;
/* Those resources are used only in QEMU core */
int implver;
};
-#define cpu_init cpu_alpha_init
+#define cpu_list alpha_cpu_list
#define cpu_exec cpu_alpha_exec
#define cpu_gen_code cpu_alpha_gen_code
#define cpu_signal_handler cpu_alpha_signal_handler
IR_ZERO = 31,
};
-CPUAlphaState * cpu_alpha_init (const char *cpu_model);
+void alpha_translate_init(void);
+
+AlphaCPU *cpu_alpha_init(const char *cpu_model);
+
+static inline CPUAlphaState *cpu_init(const char *cpu_model)
+{
+ AlphaCPU *cpu = cpu_alpha_init(cpu_model);
+ if (cpu == NULL) {
+ return NULL;
+ }
+ return &cpu->env;
+}
+
+void alpha_cpu_list(FILE *f, fprintf_function cpu_fprintf);
int cpu_alpha_exec(CPUAlphaState *s);
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
void helper_set_alarm(CPUAlphaState *env, uint64_t expire)
{
+ AlphaCPU *cpu = alpha_env_get_cpu(env);
+
if (expire) {
env->alarm_expire = expire;
- qemu_mod_timer(env->alarm_timer, expire);
+ qemu_mod_timer(cpu->alarm_timer, expire);
} else {
- qemu_del_timer(env->alarm_timer);
+ qemu_del_timer(cpu->alarm_timer);
}
}
#endif /* CONFIG_USER_ONLY */
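/* Aside (not part of this hunk): with alarm_timer now hanging off AlphaCPU
   rather than CPUAlphaState, the code that allocates the timer has to pass the
   CPU object as the opaque.  A minimal sketch, assuming the pre-1.7 timer API
   (qemu_new_timer_ns, vm_clock) and hypothetical function names:

   static void alpha_alarm_cb(void *opaque)
   {
       AlphaCPU *cpu = opaque;            // owning CPU, passed as opaque

       // Deliver the virtual alarm as a timer interrupt to this CPU.
       cpu_interrupt(&cpu->env, CPU_INTERRUPT_TIMER);
   }

   static void alpha_alarm_init(AlphaCPU *cpu)
   {
       cpu->alarm_timer = qemu_new_timer_ns(vm_clock, alpha_alarm_cb, cpu);
   }
*/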
#include "exec/gen-icount.h"
-static void alpha_translate_init(void)
+void alpha_translate_init(void)
{
int i;
char *p;
gen_intermediate_code_internal(env, tb, 1);
}
-struct cpu_def_t {
- const char *name;
- int implver, amask;
-};
-
-static const struct cpu_def_t cpu_defs[] = {
- { "ev4", IMPLVER_2106x, 0 },
- { "ev5", IMPLVER_21164, 0 },
- { "ev56", IMPLVER_21164, AMASK_BWX },
- { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
- { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
- { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
- | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
- { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
- | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
- { "21064", IMPLVER_2106x, 0 },
- { "21164", IMPLVER_21164, 0 },
- { "21164a", IMPLVER_21164, AMASK_BWX },
- { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
- { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
- { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
- | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
-};
-
-CPUAlphaState * cpu_alpha_init (const char *cpu_model)
-{
- AlphaCPU *cpu;
- CPUAlphaState *env;
- int implver, amask, i, max;
-
- cpu = ALPHA_CPU(object_new(TYPE_ALPHA_CPU));
- env = &cpu->env;
-
- alpha_translate_init();
-
- /* Default to ev67; no reason not to emulate insns by default. */
- implver = IMPLVER_21264;
- amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
- | AMASK_TRAP | AMASK_PREFETCH);
-
- max = ARRAY_SIZE(cpu_defs);
- for (i = 0; i < max; i++) {
- if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
- implver = cpu_defs[i].implver;
- amask = cpu_defs[i].amask;
- break;
- }
- }
- env->implver = implver;
- env->amask = amask;
- env->cpu_model_str = cpu_model;
-
- qemu_init_vcpu(env);
- return env;
-}
-
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
env->pc = tcg_ctx.gen_opc_pc[pc_pos];
return cpu;
}
-typedef struct ARMCPUListState {
- fprintf_function cpu_fprintf;
- FILE *file;
-} ARMCPUListState;
-
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
ObjectClass *oc = data;
- ARMCPUListState *s = user_data;
+ CPUListState *s = user_data;
(*s->cpu_fprintf)(s->file, " %s\n",
object_class_get_name(oc));
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
- ARMCPUListState s = {
+ CPUListState s = {
.file = f,
.cpu_fprintf = cpu_fprintf,
};
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
/* test if maximum index reached */
if (index & 0x80000000) {
if (index > env->cpuid_xlevel) {
case 0xA:
/* Architectural Performance Monitoring Leaf */
if (kvm_enabled()) {
- KVMState *s = env->kvm_state;
+ KVMState *s = cs->kvm_state;
*eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
*ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
break;
}
if (kvm_enabled()) {
- KVMState *s = env->kvm_state;
+ KVMState *s = cs->kvm_state;
*eax = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EAX);
*ebx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EBX);
exit(1);
}
-int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
+int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
- X86CPU *cpu = x86_env_get_cpu(env);
+ X86CPU *cpu = X86_CPU(c);
+ CPUX86State *env = &cpu->env;
ram_addr_t ram_addr;
hwaddr paddr;
if ((env->mcg_cap & MCG_SER_P) && addr
&& (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
if (qemu_ram_addr_from_host(addr, &ram_addr) ||
- !kvm_physical_memory_addr_from_host(env->kvm_state, addr, &paddr)) {
+ !kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!\n");
/* Hope we are lucky for AO MCE */
/* Hope we are lucky for AO MCE */
if (qemu_ram_addr_from_host(addr, &ram_addr) ||
- !kvm_physical_memory_addr_from_host(first_cpu->kvm_state, addr,
- &paddr)) {
+ !kvm_physical_memory_addr_from_host(CPU(first_cpu)->kvm_state,
+ addr, &paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!: %p\n", addr);
return 0;
return 0;
}
-static int kvm_inject_mce_oldstyle(CPUX86State *env)
+static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
+
if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
unsigned int bank, bank_num = env->mcg_cap & 0xff;
struct kvm_x86_mce mce;
mce.addr = env->mce_banks[bank * 4 + 2];
mce.misc = env->mce_banks[bank * 4 + 3];
- return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, &mce);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
}
return 0;
}
}
}
-int kvm_arch_init_vcpu(CPUX86State *env)
+int kvm_arch_init_vcpu(CPUState *cs)
{
struct {
struct kvm_cpuid2 cpuid;
struct kvm_cpuid_entry2 entries[100];
} QEMU_PACKED cpuid_data;
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
uint32_t limit, i, j, cpuid_i;
uint32_t unused;
struct kvm_cpuid_entry2 *c;
if (((env->cpuid_version >> 8)&0xF) >= 6
&& (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
- && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
+ && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
uint64_t mcg_cap;
int banks;
int ret;
- ret = kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks);
+ ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
if (ret < 0) {
fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
return ret;
}
mcg_cap &= MCE_CAP_DEF;
mcg_cap |= banks;
- ret = kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, &mcg_cap);
+ ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &mcg_cap);
if (ret < 0) {
fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
return ret;
qemu_add_vm_change_state_handler(cpu_update_state, env);
cpuid_data.cpuid.padding = 0;
- r = kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
+ r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
if (r) {
return r;
}
- r = kvm_check_extension(env->kvm_state, KVM_CAP_TSC_CONTROL);
+ r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL);
if (r && env->tsc_khz) {
- r = kvm_vcpu_ioctl(env, KVM_SET_TSC_KHZ, env->tsc_khz);
+ r = kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz);
if (r < 0) {
fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
return r;
return 0;
}
-void kvm_arch_reset_vcpu(CPUX86State *env)
+void kvm_arch_reset_vcpu(CPUState *cs)
{
- X86CPU *cpu = x86_env_get_cpu(env);
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
env->exception_injected = -1;
env->interrupt_injected = -1;
}
}
-static int kvm_getput_regs(CPUX86State *env, int set)
+static int kvm_getput_regs(X86CPU *cpu, int set)
{
+ CPUX86State *env = &cpu->env;
struct kvm_regs regs;
int ret = 0;
if (!set) {
- ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
if (ret < 0) {
return ret;
}
kvm_getput_reg(&regs.rip, &env->eip, set);
if (set) {
- ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
}
return ret;
}
-static int kvm_put_fpu(CPUX86State *env)
+static int kvm_put_fpu(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_fpu fpu;
int i;
memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
fpu.mxcsr = env->mxcsr;
- return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}
#define XSAVE_FCW_FSW 0
#define XSAVE_XSTATE_BV 128
#define XSAVE_YMMH_SPACE 144
-static int kvm_put_xsave(CPUX86State *env)
+static int kvm_put_xsave(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_xsave* xsave = env->kvm_xsave_buf;
uint16_t cwd, swd, twd;
int i, r;
if (!kvm_has_xsave()) {
- return kvm_put_fpu(env);
+ return kvm_put_fpu(cpu);
}
memset(xsave, 0, sizeof(struct kvm_xsave));
*(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
sizeof env->ymmh_regs);
- r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
+ r = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
return r;
}
-static int kvm_put_xcrs(CPUX86State *env)
+static int kvm_put_xcrs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_xcrs xcrs;
if (!kvm_has_xcrs()) {
xcrs.flags = 0;
xcrs.xcrs[0].xcr = 0;
xcrs.xcrs[0].value = env->xcr0;
- return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
}
-static int kvm_put_sregs(CPUX86State *env)
+static int kvm_put_sregs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_sregs sregs;
memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
sregs.efer = env->efer;
- return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
entry->data = value;
}
-static int kvm_put_msrs(CPUX86State *env, int level)
+static int kvm_put_msrs(X86CPU *cpu, int level)
{
+ CPUX86State *env = &cpu->env;
struct {
struct kvm_msrs info;
struct kvm_msr_entry entries[100];
msr_data.info.nmsrs = n;
- return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}
-static int kvm_get_fpu(CPUX86State *env)
+static int kvm_get_fpu(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_fpu fpu;
int i, ret;
- ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
if (ret < 0) {
return ret;
}
return 0;
}
-static int kvm_get_xsave(CPUX86State *env)
+static int kvm_get_xsave(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_xsave* xsave = env->kvm_xsave_buf;
int ret, i;
uint16_t cwd, swd, twd;
if (!kvm_has_xsave()) {
- return kvm_get_fpu(env);
+ return kvm_get_fpu(cpu);
}
- ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
if (ret < 0) {
return ret;
}
return 0;
}
-static int kvm_get_xcrs(CPUX86State *env)
+static int kvm_get_xcrs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
int i, ret;
struct kvm_xcrs xcrs;
return 0;
}
- ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
if (ret < 0) {
return ret;
}
return 0;
}
-static int kvm_get_sregs(CPUX86State *env)
+static int kvm_get_sregs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_sregs sregs;
uint32_t hflags;
int bit, i, ret;
- ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
if (ret < 0) {
return ret;
}
return 0;
}
-static int kvm_get_msrs(CPUX86State *env)
+static int kvm_get_msrs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct {
struct kvm_msrs info;
struct kvm_msr_entry entries[100];
}
msr_data.info.nmsrs = n;
- ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
if (ret < 0) {
return ret;
}
return 0;
}
-static int kvm_put_mp_state(CPUX86State *env)
+static int kvm_put_mp_state(X86CPU *cpu)
{
- struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
+ struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
- return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}
static int kvm_get_mp_state(X86CPU *cpu)
struct kvm_mp_state mp_state;
int ret;
- ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
if (ret < 0) {
return ret;
}
return 0;
}
-static int kvm_get_apic(CPUX86State *env)
+static int kvm_get_apic(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
DeviceState *apic = env->apic_state;
struct kvm_lapic_state kapic;
int ret;
if (apic && kvm_irqchip_in_kernel()) {
- ret = kvm_vcpu_ioctl(env, KVM_GET_LAPIC, &kapic);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
if (ret < 0) {
return ret;
}
return 0;
}
-static int kvm_put_apic(CPUX86State *env)
+static int kvm_put_apic(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
DeviceState *apic = env->apic_state;
struct kvm_lapic_state kapic;
if (apic && kvm_irqchip_in_kernel()) {
kvm_put_apic_state(apic, &kapic);
- return kvm_vcpu_ioctl(env, KVM_SET_LAPIC, &kapic);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_LAPIC, &kapic);
}
return 0;
}
-static int kvm_put_vcpu_events(CPUX86State *env, int level)
+static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
+ CPUX86State *env = &cpu->env;
struct kvm_vcpu_events events;
if (!kvm_has_vcpu_events()) {
KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
}
- return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}
-static int kvm_get_vcpu_events(CPUX86State *env)
+static int kvm_get_vcpu_events(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_vcpu_events events;
int ret;
return 0;
}
- ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
if (ret < 0) {
return ret;
}
return 0;
}
-static int kvm_guest_debug_workarounds(CPUX86State *env)
+static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
int ret = 0;
unsigned long reinject_trap = 0;
return ret;
}
-static int kvm_put_debugregs(CPUX86State *env)
+static int kvm_put_debugregs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_debugregs dbgregs;
int i;
dbgregs.dr7 = env->dr[7];
dbgregs.flags = 0;
- return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}
-static int kvm_get_debugregs(CPUX86State *env)
+static int kvm_get_debugregs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_debugregs dbgregs;
int i, ret;
return 0;
}
- ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
if (ret < 0) {
return ret;
}
return 0;
}
-int kvm_arch_put_registers(CPUX86State *env, int level)
+int kvm_arch_put_registers(CPUState *cpu, int level)
{
- CPUState *cpu = ENV_GET_CPU(env);
+ X86CPU *x86_cpu = X86_CPU(cpu);
int ret;
assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
- ret = kvm_getput_regs(env, 1);
+ ret = kvm_getput_regs(x86_cpu, 1);
if (ret < 0) {
return ret;
}
- ret = kvm_put_xsave(env);
+ ret = kvm_put_xsave(x86_cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_put_xcrs(env);
+ ret = kvm_put_xcrs(x86_cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_put_sregs(env);
+ ret = kvm_put_sregs(x86_cpu);
if (ret < 0) {
return ret;
}
/* must be before kvm_put_msrs */
- ret = kvm_inject_mce_oldstyle(env);
+ ret = kvm_inject_mce_oldstyle(x86_cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_put_msrs(env, level);
+ ret = kvm_put_msrs(x86_cpu, level);
if (ret < 0) {
return ret;
}
if (level >= KVM_PUT_RESET_STATE) {
- ret = kvm_put_mp_state(env);
+ ret = kvm_put_mp_state(x86_cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_put_apic(env);
+ ret = kvm_put_apic(x86_cpu);
if (ret < 0) {
return ret;
}
}
- ret = kvm_put_vcpu_events(env, level);
+ ret = kvm_put_vcpu_events(x86_cpu, level);
if (ret < 0) {
return ret;
}
- ret = kvm_put_debugregs(env);
+ ret = kvm_put_debugregs(x86_cpu);
if (ret < 0) {
return ret;
}
/* must be last */
- ret = kvm_guest_debug_workarounds(env);
+ ret = kvm_guest_debug_workarounds(x86_cpu);
if (ret < 0) {
return ret;
}
return 0;
}
-int kvm_arch_get_registers(CPUX86State *env)
+int kvm_arch_get_registers(CPUState *cs)
{
- X86CPU *cpu = x86_env_get_cpu(env);
+ X86CPU *cpu = X86_CPU(cs);
int ret;
- assert(cpu_is_stopped(CPU(cpu)) || qemu_cpu_is_self(CPU(cpu)));
+ assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
- ret = kvm_getput_regs(env, 0);
+ ret = kvm_getput_regs(cpu, 0);
if (ret < 0) {
return ret;
}
- ret = kvm_get_xsave(env);
+ ret = kvm_get_xsave(cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_get_xcrs(env);
+ ret = kvm_get_xcrs(cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_get_sregs(env);
+ ret = kvm_get_sregs(cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_get_msrs(env);
+ ret = kvm_get_msrs(cpu);
if (ret < 0) {
return ret;
}
if (ret < 0) {
return ret;
}
- ret = kvm_get_apic(env);
+ ret = kvm_get_apic(cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_get_vcpu_events(env);
+ ret = kvm_get_vcpu_events(cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_get_debugregs(env);
+ ret = kvm_get_debugregs(cpu);
if (ret < 0) {
return ret;
}
return 0;
}
-void kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run)
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
int ret;
/* Inject NMI */
if (env->interrupt_request & CPU_INTERRUPT_NMI) {
env->interrupt_request &= ~CPU_INTERRUPT_NMI;
DPRINTF("injected NMI\n");
- ret = kvm_vcpu_ioctl(env, KVM_NMI);
+ ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
if (ret < 0) {
fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
strerror(-ret));
intr.irq = irq;
DPRINTF("injected interrupt %d\n", irq);
- ret = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
+ ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
if (ret < 0) {
fprintf(stderr,
"KVM: injection failed, interrupt lost (%s)\n",
}
}
-void kvm_arch_post_run(CPUX86State *env, struct kvm_run *run)
+void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
if (run->if_flag) {
env->eflags |= IF_MASK;
} else {
cpu_set_apic_base(env->apic_state, run->apic_base);
}
-int kvm_arch_process_async_events(CPUX86State *env)
+int kvm_arch_process_async_events(CPUState *cs)
{
- X86CPU *cpu = x86_env_get_cpu(env);
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
if (env->interrupt_request & CPU_INTERRUPT_MCE) {
/* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
return 0;
}
-static int kvm_handle_tpr_access(CPUX86State *env)
+static int kvm_handle_tpr_access(X86CPU *cpu)
{
- struct kvm_run *run = env->kvm_run;
+ CPUX86State *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
apic_handle_tpr_access_report(env->apic_state, run->tpr_access.rip,
run->tpr_access.is_write ? TPR_ACCESS_WRITE
return 1;
}
-int kvm_arch_insert_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_insert_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
{
+ CPUX86State *env = &X86_CPU(cpu)->env;
static const uint8_t int3 = 0xcc;
if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
return 0;
}
-int kvm_arch_remove_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_remove_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
{
+ CPUX86State *env = &X86_CPU(cpu)->env;
uint8_t int3;
if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
static CPUWatchpoint hw_watchpoint;
-static int kvm_handle_debug(CPUX86State *env,
+static int kvm_handle_debug(X86CPU *cpu,
struct kvm_debug_exit_arch *arch_info)
{
+ CPUX86State *env = &cpu->env;
int ret = 0;
int n;
}
}
}
- } else if (kvm_find_sw_breakpoint(env, arch_info->pc)) {
+ } else if (kvm_find_sw_breakpoint(CPU(cpu), arch_info->pc)) {
ret = EXCP_DEBUG;
}
if (ret == 0) {
return ret;
}
-void kvm_arch_update_guest_debug(CPUX86State *env, struct kvm_guest_debug *dbg)
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
const uint8_t type_code[] = {
[GDB_BREAKPOINT_HW] = 0x0,
};
int n;
- if (kvm_sw_breakpoints_active(env)) {
+ if (kvm_sw_breakpoints_active(cpu)) {
dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
}
if (nb_hw_breakpoint > 0) {
#define VMX_INVALID_GUEST_STATE 0x80000021
-int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
- X86CPU *cpu = x86_env_get_cpu(env);
+ X86CPU *cpu = X86_CPU(cs);
uint64_t code;
int ret;
ret = 0;
break;
case KVM_EXIT_TPR_ACCESS:
- ret = kvm_handle_tpr_access(env);
+ ret = kvm_handle_tpr_access(cpu);
break;
case KVM_EXIT_FAIL_ENTRY:
code = run->fail_entry.hardware_entry_failure_reason;
break;
case KVM_EXIT_DEBUG:
DPRINTF("kvm_exit_debug\n");
- ret = kvm_handle_debug(env, &run->debug.arch);
+ ret = kvm_handle_debug(cpu, &run->debug.arch);
break;
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
return ret;
}
-bool kvm_arch_stop_on_emulation_error(CPUX86State *env)
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
kvm_cpu_synchronize_state(env);
return !(env->cr[0] & CR0_PE_MASK) ||
((env->segs[R_CS].selector & 3) != 3);
#define SIGNBIT (1u << 31)
-typedef struct M68kCPUListState {
- fprintf_function cpu_fprintf;
- FILE *file;
-} M68kCPUListState;
-
/* Sort alphabetically, except for "any". */
static gint m68k_cpu_list_compare(gconstpointer a, gconstpointer b)
{
static void m68k_cpu_list_entry(gpointer data, gpointer user_data)
{
ObjectClass *c = data;
- M68kCPUListState *s = user_data;
+ CPUListState *s = user_data;
(*s->cpu_fprintf)(s->file, "%s\n",
object_class_get_name(c));
void m68k_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
- M68kCPUListState s = {
+ CPUListState s = {
.file = f,
.cpu_fprintf = cpu_fprintf,
};
return 0;
}
-static int kvm_arch_sync_sregs(CPUPPCState *cenv)
+static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
{
+ CPUPPCState *cenv = &cpu->env;
+ CPUState *cs = CPU(cpu);
struct kvm_sregs sregs;
int ret;
}
}
- ret = kvm_vcpu_ioctl(cenv, KVM_GET_SREGS, &sregs);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
if (ret) {
return ret;
}
sregs.pvr = cenv->spr[SPR_PVR];
- return kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs);
+ return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
}
/* Set up a shared TLB array with KVM */
-static int kvm_booke206_tlb_init(CPUPPCState *env)
+static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
struct kvm_book3e_206_tlb_params params = {};
struct kvm_config_tlb cfg = {};
struct kvm_enable_cap encap = {};
int ret, i;
if (!kvm_enabled() ||
- !kvm_check_extension(env->kvm_state, KVM_CAP_SW_TLB)) {
+ !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
return 0;
}
encap.cap = KVM_CAP_SW_TLB;
encap.args[0] = (uintptr_t)&cfg;
- ret = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &encap);
+ ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &encap);
if (ret < 0) {
fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
__func__, strerror(-ret));
#if defined(TARGET_PPC64)
-static void kvm_get_fallback_smmu_info(CPUPPCState *env,
+static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
struct kvm_ppc_smmu_info *info)
{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+
memset(info, 0, sizeof(*info));
/* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
* implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
* this fallback.
*/
- if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
/* No flags */
info->flags = 0;
info->slb_size = 64;
}
}
-static void kvm_get_smmu_info(CPUPPCState *env, struct kvm_ppc_smmu_info *info)
+static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
{
+ CPUState *cs = CPU(cpu);
int ret;
- if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
- ret = kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
+ ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
if (ret == 0) {
return;
}
}
- kvm_get_fallback_smmu_info(env, info);
+ kvm_get_fallback_smmu_info(cpu, info);
}
static long getrampagesize(void)
return (1ul << shift) <= rampgsize;
}
-static void kvm_fixup_page_sizes(CPUPPCState *env)
+static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
static struct kvm_ppc_smmu_info smmu_info;
static bool has_smmu_info;
+ CPUPPCState *env = &cpu->env;
long rampagesize;
int iq, ik, jq, jk;
/* Collect MMU info from kernel if not already */
if (!has_smmu_info) {
- kvm_get_smmu_info(env, &smmu_info);
+ kvm_get_smmu_info(cpu, &smmu_info);
has_smmu_info = true;
}
}
#else /* defined (TARGET_PPC64) */
-static inline void kvm_fixup_page_sizes(CPUPPCState *env)
+static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
}
#endif /* !defined (TARGET_PPC64) */
-int kvm_arch_init_vcpu(CPUPPCState *cenv)
+int kvm_arch_init_vcpu(CPUState *cs)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(cenv);
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *cenv = &cpu->env;
int ret;
/* Gather server mmu info from KVM and update the CPU state */
- kvm_fixup_page_sizes(cenv);
+ kvm_fixup_page_sizes(cpu);
/* Synchronize sregs with kvm */
- ret = kvm_arch_sync_sregs(cenv);
+ ret = kvm_arch_sync_sregs(cpu);
if (ret) {
return ret;
}
/* Some targets support access to KVM's guest TLB. */
switch (cenv->mmu_model) {
case POWERPC_MMU_BOOKE206:
- ret = kvm_booke206_tlb_init(cenv);
+ ret = kvm_booke206_tlb_init(cpu);
break;
default:
break;
return ret;
}
-void kvm_arch_reset_vcpu(CPUPPCState *env)
+void kvm_arch_reset_vcpu(CPUState *cpu)
{
}
-static void kvm_sw_tlb_put(CPUPPCState *env)
+static void kvm_sw_tlb_put(PowerPCCPU *cpu)
{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
struct kvm_dirty_tlb dirty_tlb;
unsigned char *bitmap;
int ret;
dirty_tlb.bitmap = (uintptr_t)bitmap;
dirty_tlb.num_dirty = env->nb_tlb;
- ret = kvm_vcpu_ioctl(env, KVM_DIRTY_TLB, &dirty_tlb);
+ ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
if (ret) {
fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
__func__, strerror(-ret));
g_free(bitmap);
}
-int kvm_arch_put_registers(CPUPPCState *env, int level)
+int kvm_arch_put_registers(CPUState *cs, int level)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
struct kvm_regs regs;
int ret;
int i;
- ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
- if (ret < 0)
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+ if (ret < 0) {
return ret;
+ }
regs.ctr = env->ctr;
regs.lr = env->lr;
for (i = 0;i < 32; i++)
regs.gpr[i] = env->gpr[i];
- ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
if (ret < 0)
return ret;
if (env->tlb_dirty) {
- kvm_sw_tlb_put(env);
+ kvm_sw_tlb_put(cpu);
env->tlb_dirty = false;
}
| env->IBAT[1][i];
}
- ret = kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
if (ret) {
return ret;
}
.addr = (uintptr_t) &hior,
};
- ret = kvm_vcpu_ioctl(env, KVM_SET_ONE_REG, &reg);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret) {
return ret;
}
return ret;
}
-int kvm_arch_get_registers(CPUPPCState *env)
+int kvm_arch_get_registers(CPUState *cs)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
struct kvm_regs regs;
struct kvm_sregs sregs;
uint32_t cr;
int i, ret;
- ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
if (ret < 0)
return ret;
env->gpr[i] = regs.gpr[i];
if (cap_booke_sregs) {
- ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
if (ret < 0) {
return ret;
}
}
if (cap_segstate) {
- ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
if (ret < 0) {
return ret;
}
return 0;
}
-int kvmppc_set_interrupt(CPUPPCState *env, int irq, int level)
+int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
return 0;
}
- kvm_vcpu_ioctl(env, KVM_INTERRUPT, &virq);
+ kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
return 0;
}
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif
-void kvm_arch_pre_run(CPUPPCState *env, struct kvm_run *run)
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
int r;
unsigned irq;
irq = KVM_INTERRUPT_SET;
dprintf("injected interrupt %d\n", irq);
- r = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &irq);
+ r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
if (r < 0)
printf("cpu %d fail inject %x\n", env->cpu_index, irq);
* anyways, so we will get a chance to deliver the rest. */
}
-void kvm_arch_post_run(CPUPPCState *env, struct kvm_run *run)
+void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}
-int kvm_arch_process_async_events(CPUPPCState *env)
+int kvm_arch_process_async_events(CPUState *cs)
{
- return env->halted;
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ return cpu->env.halted;
}
static int kvmppc_handle_halt(CPUPPCState *env)
return 0;
}
-int kvm_arch_handle_exit(CPUPPCState *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
int ret;
switch (run->exit_reason) {
#ifdef CONFIG_PSERIES
case KVM_EXIT_PAPR_HCALL:
dprintf("handle PAPR hypercall\n");
- run->papr_hcall.ret = spapr_hypercall(ppc_env_get_cpu(env),
+ run->papr_hcall.ret = spapr_hypercall(cpu,
run->papr_hcall.nr,
run->papr_hcall.args);
ret = 0;
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
uint32_t *hc = (uint32_t*)buf;
struct kvm_ppc_pvinfo pvinfo;
- if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
- !kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) {
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
+ !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) {
memcpy(buf, pvinfo.hcall, buf_len);
return 0;
return 0;
}
-void kvmppc_set_papr(CPUPPCState *env)
+void kvmppc_set_papr(PowerPCCPU *cpu)
{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
struct kvm_enable_cap cap = {};
int ret;
cap.cap = KVM_CAP_PPC_PAPR;
- ret = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &cap);
+ ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &cap);
if (ret) {
cpu_abort(env, "This KVM version does not support PAPR\n");
}
-bool kvm_arch_stop_on_emulation_error(CPUPPCState *env)
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
return true;
}
-int kvm_arch_on_sigbus_vcpu(CPUPPCState *env, int code, void *addr)
+int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
return 1;
}
uint32_t kvmppc_get_vmx(void);
uint32_t kvmppc_get_dfp(void);
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
-int kvmppc_set_interrupt(CPUPPCState *env, int irq, int level);
-void kvmppc_set_papr(CPUPPCState *env);
+int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
+void kvmppc_set_papr(PowerPCCPU *cpu);
int kvmppc_smt_threads(void);
#ifndef CONFIG_USER_ONLY
off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem);
return -1;
}
-static inline int kvmppc_set_interrupt(CPUPPCState *env, int irq, int level)
+static inline int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
return -1;
}
-static inline void kvmppc_set_papr(CPUPPCState *env)
+static inline void kvmppc_set_papr(PowerPCCPU *cpu)
{
}
int s390_virtio_hypercall(CPUS390XState *env, uint64_t mem, uint64_t hypercall);
#ifdef CONFIG_KVM
-void kvm_s390_interrupt(CPUS390XState *env, int type, uint32_t code);
-void kvm_s390_virtio_irq(CPUS390XState *env, int config_change, uint64_t token);
-void kvm_s390_interrupt_internal(CPUS390XState *env, int type, uint32_t parm,
+void kvm_s390_interrupt(S390CPU *cpu, int type, uint32_t code);
+void kvm_s390_virtio_irq(S390CPU *cpu, int config_change, uint64_t token);
+void kvm_s390_interrupt_internal(S390CPU *cpu, int type, uint32_t parm,
uint64_t parm64, int vm);
#else
-static inline void kvm_s390_interrupt(CPUS390XState *env, int type, uint32_t code)
+static inline void kvm_s390_interrupt(S390CPU *cpu, int type, uint32_t code)
{
}
-static inline void kvm_s390_virtio_irq(CPUS390XState *env, int config_change,
+static inline void kvm_s390_virtio_irq(S390CPU *cpu, int config_change,
uint64_t token)
{
}
-static inline void kvm_s390_interrupt_internal(CPUS390XState *env, int type,
+static inline void kvm_s390_interrupt_internal(S390CPU *cpu, int type,
uint32_t parm, uint64_t parm64,
int vm)
{
if (kvm_enabled()) {
#ifdef CONFIG_KVM
- kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE, parm, 0, 1);
+ kvm_s390_interrupt_internal(dummy_cpu, KVM_S390_INT_SERVICE, parm,
+ 0, 1);
#endif
} else {
env->psw.addr += 4;
return 0;
}
-int kvm_arch_init_vcpu(CPUS390XState *env)
+int kvm_arch_init_vcpu(CPUState *cpu)
{
int ret = 0;
- if (kvm_vcpu_ioctl(env, KVM_S390_INITIAL_RESET, NULL) < 0) {
+ if (kvm_vcpu_ioctl(cpu, KVM_S390_INITIAL_RESET, NULL) < 0) {
perror("cannot init reset vcpu");
}
return ret;
}
-void kvm_arch_reset_vcpu(CPUS390XState *env)
+void kvm_arch_reset_vcpu(CPUState *cpu)
{
/* FIXME: add code to reset vcpu. */
}
-int kvm_arch_put_registers(CPUS390XState *env, int level)
+int kvm_arch_put_registers(CPUState *cs, int level)
{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
struct kvm_sregs sregs;
struct kvm_regs regs;
int ret;
int i;
/* always save the PSW and the GPRS*/
- env->kvm_run->psw_addr = env->psw.addr;
- env->kvm_run->psw_mask = env->psw.mask;
+ cs->kvm_run->psw_addr = env->psw.addr;
+ cs->kvm_run->psw_mask = env->psw.mask;
- if (cap_sync_regs && env->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
+ if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
for (i = 0; i < 16; i++) {
- env->kvm_run->s.regs.gprs[i] = env->regs[i];
- env->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
+ cs->kvm_run->s.regs.gprs[i] = env->regs[i];
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
}
} else {
for (i = 0; i < 16; i++) {
regs.gprs[i] = env->regs[i];
}
- ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
if (ret < 0) {
return ret;
}
}
if (cap_sync_regs &&
- env->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
- env->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
+ cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
+ cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
for (i = 0; i < 16; i++) {
- env->kvm_run->s.regs.acrs[i] = env->aregs[i];
- env->kvm_run->s.regs.crs[i] = env->cregs[i];
+ cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
+ cs->kvm_run->s.regs.crs[i] = env->cregs[i];
}
- env->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
- env->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
} else {
for (i = 0; i < 16; i++) {
sregs.acrs[i] = env->aregs[i];
sregs.crs[i] = env->cregs[i];
}
- ret = kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
if (ret < 0) {
return ret;
}
}
/* Finally the prefix */
- if (cap_sync_regs && env->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
- env->kvm_run->s.regs.prefix = env->psa;
- env->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
+ if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
+ cs->kvm_run->s.regs.prefix = env->psa;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
} else {
/* prefix is only supported via sync regs */
}
return 0;
}
-int kvm_arch_get_registers(CPUS390XState *env)
+int kvm_arch_get_registers(CPUState *cs)
{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
struct kvm_sregs sregs;
struct kvm_regs regs;
int ret;
int i;
/* get the PSW */
- env->psw.addr = env->kvm_run->psw_addr;
- env->psw.mask = env->kvm_run->psw_mask;
+ env->psw.addr = cs->kvm_run->psw_addr;
+ env->psw.mask = cs->kvm_run->psw_mask;
/* the GPRS */
- if (cap_sync_regs && env->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
+ if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
for (i = 0; i < 16; i++) {
- env->regs[i] = env->kvm_run->s.regs.gprs[i];
+ env->regs[i] = cs->kvm_run->s.regs.gprs[i];
}
} else {
- ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
if (ret < 0) {
return ret;
}
/* The ACRS and CRS */
if (cap_sync_regs &&
- env->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
- env->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
+ cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
+ cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
for (i = 0; i < 16; i++) {
- env->aregs[i] = env->kvm_run->s.regs.acrs[i];
- env->cregs[i] = env->kvm_run->s.regs.crs[i];
+ env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
+ env->cregs[i] = cs->kvm_run->s.regs.crs[i];
}
} else {
- ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
if (ret < 0) {
return ret;
}
}
/* Finally the prefix */
- if (cap_sync_regs && env->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
- env->psa = env->kvm_run->s.regs.prefix;
+ if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
+ env->psa = cs->kvm_run->s.regs.prefix;
} else {
/* no prefix without sync regs */
}
}
}
-int kvm_arch_insert_sw_breakpoint(CPUS390XState *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
return 0;
}
-int kvm_arch_remove_sw_breakpoint(CPUS390XState *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
uint8_t t[4];
static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
return 0;
}
-void kvm_arch_pre_run(CPUS390XState *env, struct kvm_run *run)
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}
-void kvm_arch_post_run(CPUS390XState *env, struct kvm_run *run)
+void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}
-int kvm_arch_process_async_events(CPUS390XState *env)
+int kvm_arch_process_async_events(CPUState *cs)
{
- return env->halted;
+ S390CPU *cpu = S390_CPU(cs);
+ return cpu->env.halted;
}
-void kvm_s390_interrupt_internal(CPUS390XState *env, int type, uint32_t parm,
+void kvm_s390_interrupt_internal(S390CPU *cpu, int type, uint32_t parm,
uint64_t parm64, int vm)
{
+ CPUState *cs = CPU(cpu);
struct kvm_s390_interrupt kvmint;
int r;
- if (!env->kvm_state) {
+ if (!cs->kvm_state) {
return;
}
kvmint.parm64 = parm64;
if (vm) {
- r = kvm_vm_ioctl(env->kvm_state, KVM_S390_INTERRUPT, &kvmint);
+ r = kvm_vm_ioctl(cs->kvm_state, KVM_S390_INTERRUPT, &kvmint);
} else {
- r = kvm_vcpu_ioctl(env, KVM_S390_INTERRUPT, &kvmint);
+ r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
}
if (r < 0) {
}
}
-void kvm_s390_virtio_irq(CPUS390XState *env, int config_change, uint64_t token)
+void kvm_s390_virtio_irq(S390CPU *cpu, int config_change, uint64_t token)
{
- kvm_s390_interrupt_internal(env, KVM_S390_INT_VIRTIO, config_change,
+ kvm_s390_interrupt_internal(cpu, KVM_S390_INT_VIRTIO, config_change,
token, 1);
}
-void kvm_s390_interrupt(CPUS390XState *env, int type, uint32_t code)
+void kvm_s390_interrupt(S390CPU *cpu, int type, uint32_t code)
{
- kvm_s390_interrupt_internal(env, type, code, 0, 0);
+ kvm_s390_interrupt_internal(cpu, type, code, 0, 0);
}
-static void enter_pgmcheck(CPUS390XState *env, uint16_t code)
+static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
{
- kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
+ kvm_s390_interrupt(cpu, KVM_S390_PROGRAM_INT, code);
}
-static inline void setcc(CPUS390XState *env, uint64_t cc)
+static inline void setcc(S390CPU *cpu, uint64_t cc)
{
- env->kvm_run->psw_mask &= ~(3ull << 44);
- env->kvm_run->psw_mask |= (cc & 3) << 44;
+ CPUS390XState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+
+ cs->kvm_run->psw_mask &= ~(3ull << 44);
+ cs->kvm_run->psw_mask |= (cc & 3) << 44;
env->psw.mask &= ~(3ul << 44);
env->psw.mask |= (cc & 3) << 44;
}
-static int kvm_sclp_service_call(CPUS390XState *env, struct kvm_run *run,
+static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
uint16_t ipbh0)
{
+ CPUS390XState *env = &cpu->env;
uint32_t sccb;
uint64_t code;
int r = 0;
r = sclp_service_call(sccb, code);
if (r < 0) {
- enter_pgmcheck(env, -r);
+ enter_pgmcheck(cpu, -r);
}
- setcc(env, r);
+ setcc(cpu, r);
return 0;
}
-static int handle_priv(CPUS390XState *env, struct kvm_run *run, uint8_t ipa1)
+static int handle_priv(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
int r = 0;
uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;
dprintf("KVM: PRIV: %d\n", ipa1);
switch (ipa1) {
case PRIV_SCLP_CALL:
- r = kvm_sclp_service_call(env, run, ipbh0);
+ r = kvm_sclp_service_call(cpu, run, ipbh0);
break;
default:
dprintf("KVM: unknown PRIV: 0x%x\n", ipa1);
{
CPUS390XState *env = &cpu->env;
- kvm_s390_interrupt(env, KVM_S390_RESTART, 0);
+ kvm_s390_interrupt(cpu, KVM_S390_RESTART, 0);
s390_add_running_cpu(env);
qemu_cpu_kick(CPU(cpu));
dprintf("DONE: SIGP cpu restart: %p\n", env);
return -1;
}
-static int s390_cpu_initial_reset(CPUS390XState *env)
+static int s390_cpu_initial_reset(S390CPU *cpu)
{
+ CPUS390XState *env = &cpu->env;
int i;
s390_del_running_cpu(env);
- if (kvm_vcpu_ioctl(env, KVM_S390_INITIAL_RESET, NULL) < 0) {
+ if (kvm_vcpu_ioctl(CPU(cpu), KVM_S390_INITIAL_RESET, NULL) < 0) {
perror("cannot init reset vcpu");
}
return 0;
}
-static int handle_sigp(CPUS390XState *env, struct kvm_run *run, uint8_t ipa1)
+static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
+ CPUS390XState *env = &cpu->env;
uint8_t order_code;
uint32_t parameter;
uint16_t cpu_addr;
/* make the caller panic */
return -1;
case SIGP_INITIAL_CPU_RESET:
- r = s390_cpu_initial_reset(target_env);
+ r = s390_cpu_initial_reset(target_cpu);
break;
default:
fprintf(stderr, "KVM: unknown SIGP: 0x%x\n", order_code);
}
out:
- setcc(env, r ? 3 : 0);
+ setcc(cpu, r ? 3 : 0);
return 0;
}
-static int handle_instruction(CPUS390XState *env, struct kvm_run *run)
+static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
+ CPUS390XState *env = &cpu->env;
unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
int ipb_code = (run->s390_sieic.ipb & 0x0fff0000) >> 16;
dprintf("handle_instruction 0x%x 0x%x\n", run->s390_sieic.ipa, run->s390_sieic.ipb);
switch (ipa0) {
case IPA0_PRIV:
- r = handle_priv(env, run, ipa1);
+ r = handle_priv(cpu, run, ipa1);
break;
case IPA0_DIAG:
r = handle_diag(env, run, ipb_code);
break;
case IPA0_SIGP:
- r = handle_sigp(env, run, ipa1);
+ r = handle_sigp(cpu, run, ipa1);
break;
}
if (r < 0) {
- enter_pgmcheck(env, 0x0001);
+ enter_pgmcheck(cpu, 0x0001);
}
return 0;
}
-static bool is_special_wait_psw(CPUS390XState *env)
+static bool is_special_wait_psw(CPUState *cs)
{
/* signal quiesce */
- return env->kvm_run->psw_addr == 0xfffUL;
+ return cs->kvm_run->psw_addr == 0xfffUL;
}
-static int handle_intercept(CPUS390XState *env)
+static int handle_intercept(S390CPU *cpu)
{
- struct kvm_run *run = env->kvm_run;
+ CPUS390XState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
int icpt_code = run->s390_sieic.icptcode;
int r = 0;
dprintf("intercept: 0x%x (at 0x%lx)\n", icpt_code,
- (long)env->kvm_run->psw_addr);
+ (long)cs->kvm_run->psw_addr);
switch (icpt_code) {
case ICPT_INSTRUCTION:
- r = handle_instruction(env, run);
+ r = handle_instruction(cpu, run);
break;
case ICPT_WAITPSW:
if (s390_del_running_cpu(env) == 0 &&
- is_special_wait_psw(env)) {
+ is_special_wait_psw(cs)) {
qemu_system_shutdown_request();
}
r = EXCP_HALTED;
return r;
}
-int kvm_arch_handle_exit(CPUS390XState *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
+ S390CPU *cpu = S390_CPU(cs);
int ret = 0;
switch (run->exit_reason) {
case KVM_EXIT_S390_SIEIC:
- ret = handle_intercept(env);
+ ret = handle_intercept(cpu);
break;
case KVM_EXIT_S390_RESET:
qemu_system_reset_request();
return ret;
}
-bool kvm_arch_stop_on_emulation_error(CPUS390XState *env)
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
return true;
}
-int kvm_arch_on_sigbus_vcpu(CPUS390XState *env, int code, void *addr)
+int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
return 1;
}
if (kvm_enabled()) {
#ifdef CONFIG_KVM
- kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
+ kvm_s390_interrupt(s390_env_get_cpu(env), KVM_S390_PROGRAM_INT, code);
#endif
} else {
env->int_pgm_code = code;