#define GICD_BASE_GPA 0x08000000ULL
#define GICR_BASE_GPA 0x080A0000ULL
-#define VCPU_ID 0
/*
* Stores the user specified args; it's passed to the guest and to every test
}
static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
- uint32_t vcpu, bool expect_failure)
+ struct kvm_vcpu *vcpu,
+ bool expect_failure)
{
/*
* Ignore this when expecting failure as invalid intids will lead to
(tmp) < (uint64_t)(first) + (uint64_t)(num); \
(tmp)++, (i)++)
-static void run_guest_cmd(struct kvm_vm *vm, int gic_fd,
- struct kvm_inject_args *inject_args,
- struct test_args *test_args)
+static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd,
+ struct kvm_inject_args *inject_args,
+ struct test_args *test_args)
{
kvm_inject_cmd cmd = inject_args->cmd;
uint32_t intid = inject_args->first_intid;
uint32_t num = inject_args->num;
int level = inject_args->level;
bool expect_failure = inject_args->expect_failure;
+ struct kvm_vm *vm = vcpu->vm;
uint64_t tmp;
uint32_t i;
break;
case KVM_WRITE_ISPENDR:
for (i = intid; i < intid + num; i++)
- kvm_irq_write_ispendr_check(gic_fd, i,
- VCPU_ID, expect_failure);
+ kvm_irq_write_ispendr_check(gic_fd, i, vcpu,
+ expect_failure);
break;
case KVM_WRITE_ISACTIVER:
for (i = intid; i < intid + num; i++)
- kvm_irq_write_isactiver(gic_fd, i, VCPU_ID);
+ kvm_irq_write_isactiver(gic_fd, i, vcpu);
break;
default:
break;
{
struct ucall uc;
int gic_fd;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_inject_args inject_args;
vm_vaddr_t args_gva;
print_args(&args);
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
ucall_init(vm, NULL);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vcpu_init_descriptor_tables(vm, vcpu->id);
/* Setup the guest args page (so it gets the args). */
args_gva = vm_vaddr_alloc_page(vm);
memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
- vcpu_args_set(vm, 0, 1, args_gva);
+ vcpu_args_set(vm, vcpu->id, 1, args_gva);
gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
GICD_BASE_GPA, GICR_BASE_GPA);
guest_irq_handlers[args.eoi_split][args.level_sensitive]);
while (1) {
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vm, vcpu->id);
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vm, vcpu->id, &uc)) {
case UCALL_SYNC:
kvm_inject_get_call(vm, &uc, &inject_args);
- run_guest_cmd(vm, gic_fd, &inject_args, &args);
+ run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
break;
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld\n\tvalues: %#lx, %#lx",
#include <linux/kvm.h>
+#include "kvm_util.h"
+
#define REDIST_REGION_ATTR_ADDR(count, base, flags, index) \
(((uint64_t)(count) << 52) | \
((uint64_t)((base) >> 16) << 16) | \
int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
/* The vcpu arg only applies to private interrupts. */
-void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu);
-void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu);
+void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
+void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
#define KVM_IRQCHIP_NUM_PINS (1020 - 32)
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
}
-static void vgic_poke_irq(int gic_fd, uint32_t intid,
- uint32_t vcpu, uint64_t reg_off)
+static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu,
+ uint64_t reg_off)
{
uint64_t reg = intid / 32;
uint64_t index = intid % 32;
if (intid_is_private) {
/* TODO: only vcpu 0 implemented for now. */
- assert(vcpu == 0);
+ assert(vcpu->id == 0);
attr += SZ_64K;
}
kvm_device_attr_set(gic_fd, group, attr, &val);
}
-void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu)
+void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR);
}
-void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu)
+void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER);
}
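
Taken together, the hunks above thread a struct kvm_vcpu * through the vGIC helpers instead of a raw vCPU index, letting them reach the owning VM via vcpu->vm and assert on vcpu->id. A minimal caller under the new signatures could look like the sketch below; it is illustrative only, and guest_code, nr_irqs and the chosen intid (64, an SPI, since the vcpu argument only matters for private interrupts) are assumptions rather than part of the patch:

	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	int gic_fd;

	/* Create a VM with a single vCPU running guest_code (assumed). */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	/* Instantiate the vGICv3 at the same GPAs the test uses. */
	gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
			       GICD_BASE_GPA, GICR_BASE_GPA);

	/* Poke an SPI: mark it pending, then active, via the vcpu-based helpers. */
	kvm_irq_write_ispendr(gic_fd, 64, vcpu);
	kvm_irq_write_isactiver(gic_fd, 64, vcpu);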