int slots;
uint32_t write_percent;
uint32_t random_seed;
+ bool random_access;
};
static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
* would pollute the performance results.
*/
perf_test_set_write_percent(vm, 100);
+ perf_test_set_random_access(vm, false);
perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
/* Allow the vCPUs to populate memory */
ts_diff.tv_sec, ts_diff.tv_nsec);
perf_test_set_write_percent(vm, p->write_percent);
+ perf_test_set_random_access(vm, p->random_access);
while (iteration < p->iterations) {
/*
static void help(char *name)
{
puts("");
- printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
+ printf("usage: %s [-h] [-a] [-i iterations] [-p offset] [-g] "
"[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-r random seed ] [-s mem type]"
"[-x memslots] [-w percentage] [-c physical cpus to run test on]\n", name);
puts("");
+ printf(" -a: access memory randomly rather than in order.\n");
printf(" -i: specify iteration counts (default: %"PRIu64")\n",
TEST_HOST_LOOP_N);
printf(" -g: Do not enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. This\n"
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "b:c:eghi:m:nop:r:s:v:x:w:")) != -1) {
+ while ((opt = getopt(argc, argv, "ab:c:eghi:m:nop:r:s:v:x:w:")) != -1) {
switch (opt) {
+ case 'a':
+ p.random_access = true;
+ break;
case 'b':
guest_percpu_mem_size = parse_size(optarg);
break;
/* Run vCPUs in L2 instead of L1, if the architecture supports it. */
bool nested;
+ /* Randomize which pages are accessed by the guest. */
+ bool random_access;
/* True if all vCPUs are pinned to pCPUs */
bool pin_vcpus;
/* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */
void perf_test_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed);
+void perf_test_set_random_access(struct kvm_vm *vm, bool random_access);
void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
void perf_test_join_vcpu_threads(int vcpus);
struct guest_random_state rand_state;
uint64_t gva;
uint64_t pages;
+ uint64_t addr;
+ uint64_t page;
int i;
rand_state = new_guest_random_state(pta->random_seed + vcpu_idx);
while (true) {
for (i = 0; i < pages; i++) {
- uint64_t addr = gva + (i * pta->guest_page_size);
+ if (pta->random_access)
+ page = guest_random_u32(&rand_state) % pages;
+ else
+ page = i;
+
+ addr = gva + (page * pta->guest_page_size);
if (guest_random_u32(&rand_state) % 100 < pta->write_percent)
*(uint64_t *)addr = 0x0123456789ABCDEF;
sync_global_to_guest(vm, perf_test_args.random_seed);
}
+/*
+ * Select whether guest vCPUs touch their per-vCPU memory region in
+ * sequential page order or at random (pages chosen via the guest's
+ * per-vCPU PRNG stream in guest_code).
+ *
+ * The flag is written into the host-side perf_test_args and then
+ * propagated into guest memory with sync_global_to_guest(), since the
+ * guest reads perf_test_args directly.  Call this before (re)starting
+ * vCPU threads so the guest observes the intended access pattern.
+ */
+void perf_test_set_random_access(struct kvm_vm *vm, bool random_access)
+{
+ perf_test_args.random_access = random_access;
+ sync_global_to_guest(vm, perf_test_args.random_access);
+}
+
uint64_t __weak perf_test_nested_pages(int nr_vcpus)
{
return 0;