struct qemu_work_item {
struct qemu_work_item *next;
run_on_cpu_func func;
- void *data;
+ run_on_cpu_data data;
bool free, exclusive, done;
};
qemu_cpu_kick(cpu);
}
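(The body of queue_work_on_cpu is elided from this hunk; only its final qemu_cpu_kick(cpu) survives as context. A rough sketch of the surrounding logic, assuming the per-CPU singly linked work list guarded by cpu->work_mutex that the struct above implies:)

static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    /* Append to the per-CPU work list; the vCPU thread drains it. */
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);    /* the context line shown above */
}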
-void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
+void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
QemuMutex *mutex)
{
struct qemu_work_item wi;
}
}
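(Only the local declaration from do_run_on_cpu's body appears here. Sketched from context rather than quoted from the patch: the work item can live on the caller's stack because the caller blocks until the vCPU thread marks it done, dropping @mutex while it waits.)

    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);           /* already on the target vCPU thread */
        return;
    }

    wi.func = func;
    wi.data = data;                /* the union is copied by value */
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!atomic_mb_read(&wi.done)) {
        /* drop @mutex while sleeping so the vCPU thread can run the item */
        qemu_cond_wait(&qemu_work_cond, mutex);
    }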
-void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
struct qemu_work_item *wi;
}
}
-void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
+ run_on_cpu_data data)
{
struct qemu_work_item *wi;
}
};
-static void cpu_throttle_thread(CPUState *cpu, void *opaque)
+static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
double pct;
double throttle_ratio;
}
CPU_FOREACH(cpu) {
if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
- async_run_on_cpu(cpu, cpu_throttle_thread, NULL);
+ async_run_on_cpu(cpu, cpu_throttle_thread,
+ RUN_ON_CPU_NULL);
}
}
qemu_thread_get_self(&io_thread);
}
-void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}
}
}
-static void kvm_apic_put(CPUState *cs, void *data)
+static void kvm_apic_put(CPUState *cs, run_on_cpu_data data)
{
- APICCommonState *s = data;
+ APICCommonState *s = data.host_ptr;
struct kvm_lapic_state kapic;
int ret;
static void kvm_apic_post_load(APICCommonState *s)
{
- run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
+ run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s));
}
-static void do_inject_external_nmi(CPUState *cpu, void *data)
+static void do_inject_external_nmi(CPUState *cpu, run_on_cpu_data data)
{
- APICCommonState *s = data;
+ APICCommonState *s = data.host_ptr;
uint32_t lvt;
int ret;
static void kvm_apic_external_nmi(APICCommonState *s)
{
- run_on_cpu(CPU(s->cpu), do_inject_external_nmi, s);
+ run_on_cpu(CPU(s->cpu), do_inject_external_nmi, RUN_ON_CPU_HOST_PTR(s));
}
static void kvm_send_msi(MSIMessage *msg)
/* Not used by KVM, which uses the CPU mp_state instead. */
s->wait_for_sipi = 0;
- run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
+ run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s));
}
static void kvm_apic_realize(DeviceState *dev, Error **errp)
bool enable;
} VAPICEnableTPRReporting;
-static void vapic_do_enable_tpr_reporting(CPUState *cpu, void *data)
+static void vapic_do_enable_tpr_reporting(CPUState *cpu, run_on_cpu_data data)
{
- VAPICEnableTPRReporting *info = data;
-
+ VAPICEnableTPRReporting *info = data.host_ptr;
apic_enable_tpr_access_reporting(info->apic, info->enable);
}
CPU_FOREACH(cs) {
cpu = X86_CPU(cs);
info.apic = cpu->apic_state;
- run_on_cpu(cs, vapic_do_enable_tpr_reporting, &info);
+ run_on_cpu(cs, vapic_do_enable_tpr_reporting, RUN_ON_CPU_HOST_PTR(&info));
}
}
nb_option_roms++;
}
-static void do_vapic_enable(CPUState *cs, void *data)
+static void do_vapic_enable(CPUState *cs, run_on_cpu_data data)
{
- VAPICROMState *s = data;
+ VAPICROMState *s = data.host_ptr;
X86CPU *cpu = X86_CPU(cs);
static const uint8_t enabled = 1;
if (s->state == VAPIC_ACTIVE) {
if (smp_cpus == 1) {
- run_on_cpu(first_cpu, do_vapic_enable, s);
+ run_on_cpu(first_cpu, do_vapic_enable, RUN_ON_CPU_HOST_PTR(s));
} else {
zero = g_malloc0(s->rom_state.vapic_size);
cpu_physical_memory_write(s->vapic_paddr, zero,
env->tlb_dirty = true;
}
-static void spin_kick(CPUState *cs, void *data)
+static void spin_kick(CPUState *cs, run_on_cpu_data data)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
- SpinInfo *curspin = data;
+ SpinInfo *curspin = data.host_ptr;
hwaddr map_size = 64 * 1024 * 1024;
hwaddr map_start;
if (!(ldq_p(&curspin->addr) & 1)) {
/* run CPU */
- run_on_cpu(cpu, spin_kick, curspin);
+ run_on_cpu(cpu, spin_kick, RUN_ON_CPU_HOST_PTR(curspin));
}
}
g_free(spapr->kvm_type);
}
-static void ppc_cpu_do_nmi_on_cpu(CPUState *cs, void *arg)
+static void ppc_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg)
{
cpu_synchronize_state(cs);
ppc_cpu_do_system_reset(cs);
CPUState *cs;
CPU_FOREACH(cs) {
- async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, NULL);
+ async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, RUN_ON_CPU_NULL);
}
}
target_ulong mask;
};
-static void do_spr_sync(CPUState *cs, void *arg)
+static void do_spr_sync(CPUState *cs, run_on_cpu_data arg)
{
- struct SPRSyncState *s = arg;
+ struct SPRSyncState *s = arg.host_ptr;
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
.value = value,
.mask = mask
};
- run_on_cpu(cs, do_spr_sync, &s);
+ run_on_cpu(cs, do_spr_sync, RUN_ON_CPU_HOST_PTR(&s));
}
static bool has_spr(PowerPCCPU *cpu, int spr)
Error *err;
} SetCompatState;
-static void do_set_compat(CPUState *cs, void *arg)
+static void do_set_compat(CPUState *cs, run_on_cpu_data arg)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
- SetCompatState *s = arg;
+ SetCompatState *s = arg.host_ptr;
cpu_synchronize_state(cs);
ppc_set_compat(cpu, s->cpu_version, &s->err);
.err = NULL,
};
- run_on_cpu(cs, do_set_compat, &s);
+ run_on_cpu(cs, do_set_compat, RUN_ON_CPU_HOST_PTR(&s));
if (s.err) {
error_report_err(s.err);
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
/* work queue */
-typedef void (*run_on_cpu_func)(CPUState *cpu, void *data);
+
+/* The union type allows passing 64-bit target pointers on 32-bit
+ * hosts in a single parameter.
+ */
+typedef union {
+ int host_int;
+ unsigned long host_ulong;
+ void *host_ptr;
+ vaddr target_ptr;
+} run_on_cpu_data;
+
+#define RUN_ON_CPU_HOST_PTR(p) ((run_on_cpu_data){.host_ptr = (p)})
+#define RUN_ON_CPU_HOST_INT(i) ((run_on_cpu_data){.host_int = (i)})
+#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
+#define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)})
+#define RUN_ON_CPU_NULL RUN_ON_CPU_HOST_PTR(NULL)
+
+typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
+
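(A self-contained illustration of why the union matters, compilable outside QEMU; the vaddr stand-in below is an assumption for the sketch, matching QEMU's 64-bit guest-address type:)

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vaddr;                 /* stand-in for QEMU's vaddr */

typedef union {
    int host_int;
    unsigned long host_ulong;
    void *host_ptr;
    vaddr target_ptr;
} run_on_cpu_data;

#define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)})

static void worker(run_on_cpu_data data)
{
    /* The callee reads the same member the caller stored. */
    printf("guest address 0x%" PRIx64 "\n", (uint64_t)data.target_ptr);
}

int main(void)
{
    /* On a 32-bit host a void * argument would truncate this guest
     * address; the target_ptr member keeps all 64 bits. */
    worker(RUN_ON_CPU_TARGET_PTR(0x123456789abULL));
    return 0;
}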
struct qemu_work_item;
/**
*
* Used internally in the implementation of run_on_cpu.
*/
-void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
+void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
QemuMutex *mutex);
/**
*
* Schedules the function @func for execution on the vCPU @cpu.
*/
-void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
/**
* async_run_on_cpu:
*
* Schedules the function @func for execution on the vCPU @cpu asynchronously.
*/
-void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
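(A lifetime caveat worth a sketch: async_run_on_cpu returns before @func runs, so unlike run_on_cpu a stack-allocated payload is unsafe. Hypothetical names below, with a heap payload freed by the callback:)

static void apply_feature(CPUState *cpu, run_on_cpu_data data)
{
    uint32_t *feature = data.host_ptr;  /* hypothetical payload */
    /* ... apply *feature to @cpu ... */
    g_free(feature);                    /* the callback owns the memory */
}

void schedule_feature(CPUState *cpu, uint32_t feature)
{
    uint32_t *p = g_new(uint32_t, 1);
    *p = feature;
    async_run_on_cpu(cpu, apply_feature, RUN_ON_CPU_HOST_PTR(p));
}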
/**
* async_safe_run_on_cpu:
* Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
* BQL.
*/
-void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
+void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
/**
* qemu_get_cpu:
s->coalesced_flush_in_progress = false;
}
-static void do_kvm_cpu_synchronize_state(CPUState *cpu, void *arg)
+static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
if (!cpu->kvm_vcpu_dirty) {
kvm_arch_get_registers(cpu);
void kvm_cpu_synchronize_state(CPUState *cpu)
{
if (!cpu->kvm_vcpu_dirty) {
- run_on_cpu(cpu, do_kvm_cpu_synchronize_state, NULL);
+ run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
}
}
-static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, void *arg)
+static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
cpu->kvm_vcpu_dirty = false;
void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
- run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, NULL);
+ run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}
-static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, void *arg)
+static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
cpu->kvm_vcpu_dirty = false;
void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
- run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, NULL);
+ run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}
int kvm_cpu_exec(CPUState *cpu)
int err;
};
-static void kvm_invoke_set_guest_debug(CPUState *cpu, void *data)
+static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
{
- struct kvm_set_guest_debug_data *dbg_data = data;
+ struct kvm_set_guest_debug_data *dbg_data =
+ (struct kvm_set_guest_debug_data *) data.host_ptr;
dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
&dbg_data->dbg);
}
kvm_arch_update_guest_debug(cpu, &data.dbg);
- run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
+ run_on_cpu(cpu, kvm_invoke_set_guest_debug,
+ RUN_ON_CPU_HOST_PTR(&data));
return data.err;
}
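(kvm_set_guest_debug_data doubles as an out-parameter: run_on_cpu blocks until the callback completes, so the caller may pass a stack struct and read the result afterwards. The same shape in miniature, with hypothetical names:)

struct query_state {
    int reg;        /* in: register number */
    uint64_t val;   /* out: value read on the vCPU thread */
};

static void do_query(CPUState *cpu, run_on_cpu_data data)
{
    struct query_state *q = data.host_ptr;
    q->val = 0;     /* ... read register q->reg on @cpu ... */
}

static uint64_t query_reg(CPUState *cpu, int reg)
{
    struct query_state q = { .reg = reg };
    run_on_cpu(cpu, do_query, RUN_ON_CPU_HOST_PTR(&q));
    return q.val;   /* valid: the callback has already run */
}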
int flags;
} MCEInjectionParams;
-static void do_inject_x86_mce(CPUState *cs, void *data)
+static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
- MCEInjectionParams *params = data;
+ MCEInjectionParams *params = data.host_ptr;
X86CPU *cpu = X86_CPU(cs);
CPUX86State *cenv = &cpu->env;
uint64_t *banks = cenv->mce_banks + 4 * params->bank;
return;
}
- run_on_cpu(cs, do_inject_x86_mce, &params);
+ run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
if (flags & MCE_INJECT_BROADCAST) {
CPUState *other_cs;
if (other_cs == cs) {
continue;
}
- run_on_cpu(other_cs, do_inject_x86_mce, &params);
+ run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
}
}
}
return 0;
}
-static inline void do_kvm_synchronize_tsc(CPUState *cpu, void *arg)
+static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
kvm_get_tsc(cpu);
}
if (kvm_enabled()) {
CPU_FOREACH(cpu) {
- run_on_cpu(cpu, do_kvm_synchronize_tsc, NULL);
+ run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
}
}
}
{
S390CPU *cpu = opaque;
- run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, NULL);
+ run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
}
#endif
s390_cpu_gdb_init(cs);
qemu_init_vcpu(cs);
#if !defined(CONFIG_USER_ONLY)
- run_on_cpu(cs, s390_do_cpu_full_reset, NULL);
+ run_on_cpu(cs, s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
#else
cpu_reset(cs);
#endif
#define decode_basedisp_rs decode_basedisp_s
/* helper functions for run_on_cpu() */
-static inline void s390_do_cpu_reset(CPUState *cs, void *arg)
+static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
scc->cpu_reset(cs);
}
-static inline void s390_do_cpu_full_reset(CPUState *cs, void *arg)
+static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg)
{
cpu_reset(cs);
}
{
SigpInfo si = {};
- run_on_cpu(CPU(cpu), sigp_restart, &si);
+ run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
return 0;
}
switch (order) {
case SIGP_START:
- run_on_cpu(CPU(dst_cpu), sigp_start, &si);
+ run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si));
break;
case SIGP_STOP:
- run_on_cpu(CPU(dst_cpu), sigp_stop, &si);
+ run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si));
break;
case SIGP_RESTART:
- run_on_cpu(CPU(dst_cpu), sigp_restart, &si);
+ run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
break;
case SIGP_STOP_STORE_STATUS:
- run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, &si);
+ run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, RUN_ON_CPU_HOST_PTR(&si));
break;
case SIGP_STORE_STATUS_ADDR:
- run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, &si);
+ run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, RUN_ON_CPU_HOST_PTR(&si));
break;
case SIGP_STORE_ADTL_STATUS:
- run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, &si);
+ run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, RUN_ON_CPU_HOST_PTR(&si));
break;
case SIGP_SET_PREFIX:
- run_on_cpu(CPU(dst_cpu), sigp_set_prefix, &si);
+ run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si));
break;
case SIGP_INITIAL_CPU_RESET:
- run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, &si);
+ run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
break;
case SIGP_CPU_RESET:
- run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, &si);
+ run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
break;
default:
DPRINTF("KVM: unknown SIGP: 0x%x\n", order);
pause_all_vcpus();
cpu_synchronize_all_states();
CPU_FOREACH(t) {
- run_on_cpu(t, s390_do_cpu_full_reset, NULL);
+ run_on_cpu(t, s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
}
s390_cmma_reset();
subsystem_reset();
pause_all_vcpus();
cpu_synchronize_all_states();
CPU_FOREACH(t) {
- run_on_cpu(t, s390_do_cpu_reset, NULL);
+ run_on_cpu(t, s390_do_cpu_reset, RUN_ON_CPU_NULL);
}
s390_cmma_reset();
subsystem_reset();
}
/* flush all the translation blocks */
-static void do_tb_flush(CPUState *cpu, void *data)
+static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
- unsigned tb_flush_req = (unsigned) (uintptr_t) data;
-
tb_lock();
- /* If it's already been done on request of another CPU,
+ /* If it has already been done on request of another CPU,
* just retry.
*/
- if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_req) {
+ if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
goto done;
}
void tb_flush(CPUState *cpu)
{
if (tcg_enabled()) {
- uintptr_t tb_flush_req = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
- async_safe_run_on_cpu(cpu, do_tb_flush, (void *) tb_flush_req);
+ unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
+ async_safe_run_on_cpu(cpu, do_tb_flush,
+ RUN_ON_CPU_HOST_INT(tb_flush_count));
}
}
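(The host_int payload acts as a generation number: the requester samples tb_flush_count, and do_tb_flush only proceeds if no other CPU has flushed in the meantime, so concurrent requests coalesce into one flush. The idiom in isolation, as a standalone sketch:)

static unsigned flush_count;            /* bumped after each real flush */

static void do_flush(int requested)
{
    if (flush_count != (unsigned)requested) {
        return;                         /* another request already flushed */
    }
    /* ... perform the expensive flush ... */
    flush_count++;
}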