uint16_t type;
} MchkQueue;
-/* Defined values for CPUS390XState.runtime_reg_dirty_mask */
-#define KVM_S390_RUNTIME_DIRTY_NONE 0
-#define KVM_S390_RUNTIME_DIRTY_PARTIAL 1
-#define KVM_S390_RUNTIME_DIRTY_FULL 2
-
typedef struct CPUS390XState {
uint64_t regs[16]; /* GP registers */
CPU_DoubleU fregs[16]; /* FP registers */
uint64_t cputm;
uint32_t todpr;
- /* on S390 the runtime register set has two dirty states:
- * a partial dirty state in which only the registers that
- * are needed all the time are fetched. And a fully dirty
- * state in which all runtime registers are fetched.
- */
- uint32_t runtime_reg_dirty_mask;
-
CPU_COMMON
/* reset does memset(0) up to here */
uint32_t io_int_word);
void kvm_s390_crw_mchk(S390CPU *cpu);
void kvm_s390_enable_css_support(S390CPU *cpu);
-int kvm_s390_get_registers_partial(CPUState *cpu);
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
int vq, bool assign);
int kvm_s390_cpu_restart(S390CPU *cpu);
static inline void kvm_s390_enable_css_support(S390CPU *cpu)
{
}
-static inline int kvm_s390_get_registers_partial(CPUState *cpu)
-{
- return -ENOSYS;
-}
static inline int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier,
uint32_t sch, int vq,
bool assign)
}
}
- if (env->runtime_reg_dirty_mask == KVM_S390_RUNTIME_DIRTY_FULL) {
- reg.id = KVM_REG_S390_CPU_TIMER;
- reg.addr = (__u64)&(env->cputm);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
- if (ret < 0) {
- return ret;
- }
+ /* Do we need to save more than that? */
+ if (level == KVM_PUT_RUNTIME_STATE) {
+ return 0;
+ }
- reg.id = KVM_REG_S390_CLOCK_COMP;
- reg.addr = (__u64)&(env->ckc);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
- if (ret < 0) {
- return ret;
- }
+ reg.id = KVM_REG_S390_CPU_TIMER;
+ reg.addr = (__u64)&(env->cputm);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ return ret;
+ }
- reg.id = KVM_REG_S390_TODPR;
- reg.addr = (__u64)&(env->todpr);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
- if (ret < 0) {
- return ret;
- }
+ reg.id = KVM_REG_S390_CLOCK_COMP;
+ reg.addr = (__u64)&(env->ckc);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ return ret;
}
- env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_NONE;
- /* Do we need to save more than that? */
- if (level == KVM_PUT_RUNTIME_STATE) {
- return 0;
+ reg.id = KVM_REG_S390_TODPR;
+ reg.addr = (__u64)&(env->todpr);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ return ret;
}
if (cap_sync_regs &&
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
struct kvm_one_reg reg;
- int r;
-
- r = kvm_s390_get_registers_partial(cs);
- if (r < 0) {
- return r;
- }
-
- reg.id = KVM_REG_S390_CPU_TIMER;
- reg.addr = (__u64)&(env->cputm);
- r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
- if (r < 0) {
- return r;
- }
-
- reg.id = KVM_REG_S390_CLOCK_COMP;
- reg.addr = (__u64)&(env->ckc);
- r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
- if (r < 0) {
- return r;
- }
-
- reg.id = KVM_REG_S390_TODPR;
- reg.addr = (__u64)&(env->todpr);
- r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
- if (r < 0) {
- return r;
- }
-
- env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_FULL;
- return 0;
-}
-
-int kvm_s390_get_registers_partial(CPUState *cs)
-{
- S390CPU *cpu = S390_CPU(cs);
- CPUS390XState *env = &cpu->env;
struct kvm_sregs sregs;
struct kvm_regs regs;
- int ret;
- int i;
-
- if (env->runtime_reg_dirty_mask) {
- return 0;
- }
+ int i, r;
/* get the PSW */
env->psw.addr = cs->kvm_run->psw_addr;
env->regs[i] = cs->kvm_run->s.regs.gprs[i];
}
} else {
- ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
- if (ret < 0) {
- return ret;
+ r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+ if (r < 0) {
+ return r;
}
for (i = 0; i < 16; i++) {
env->regs[i] = regs.gprs[i];
env->cregs[i] = cs->kvm_run->s.regs.crs[i];
}
} else {
- ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
- if (ret < 0) {
- return ret;
+ r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
+ if (r < 0) {
+ return r;
}
for (i = 0; i < 16; i++) {
env->aregs[i] = sregs.acrs[i];
}
}
- /* Finally the prefix */
+ /* The prefix */
if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
env->psa = cs->kvm_run->s.regs.prefix;
- } else {
- /* no prefix without sync regs */
}
- env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_PARTIAL;
+ /* One Regs */
+ reg.id = KVM_REG_S390_CPU_TIMER;
+ reg.addr = (__u64)&(env->cputm);
+ r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (r < 0) {
+ return r;
+ }
+
+ reg.id = KVM_REG_S390_CLOCK_COMP;
+ reg.addr = (__u64)&(env->ckc);
+ r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (r < 0) {
+ return r;
+ }
+
+ reg.id = KVM_REG_S390_TODPR;
+ reg.addr = (__u64)&(env->todpr);
+ r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (r < 0) {
+ return r;
+ }
+
return 0;
}
uint8_t ipa0, uint8_t ipa1, uint8_t ipb)
{
CPUS390XState *env = &cpu->env;
- CPUState *cs = CPU(cpu);
if (ipa0 != 0xb2) {
/* Not handled for now. */
return -1;
}
- kvm_s390_get_registers_partial(cs);
- cs->kvm_vcpu_dirty = true;
+ cpu_synchronize_state(CPU(cpu));
switch (ipa1) {
case PRIV_XSCH:
static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
- CPUState *cs = CPU(cpu);
CPUS390XState *env = &cpu->env;
- kvm_s390_get_registers_partial(cs);
- cs->kvm_vcpu_dirty = true;
+ cpu_synchronize_state(CPU(cpu));
env->regs[2] = s390_virtio_hypercall(env);
return 0;
struct kvm_run *run = cs->kvm_run;
int ret;
- kvm_s390_get_registers_partial(cs);
- cs->kvm_vcpu_dirty = true;
+ cpu_synchronize_state(cs);
ret = ioinst_handle_tsch(env, env->regs[1], run->s390_tsch.ipb);
if (ret >= 0) {