1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 | // SPDX-License-Identifier: GPL-2.0 /* * KVM memslot modification stress test * Adapted from demand_paging_test.c * * Copyright (C) 2018, Red Hat, Inc. * Copyright (C) 2020, Google, Inc. */ #define _GNU_SOURCE /* for program_invocation_name */ #include <stdio.h> #include <stdlib.h> #include <sys/syscall.h> #include <unistd.h> #include <asm/unistd.h> #include <time.h> #include <poll.h> #include <pthread.h> #include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/userfaultfd.h> #include "perf_test_util.h" #include "processor.h" #include "test_util.h" #include "guest_modes.h" #define DUMMY_MEMSLOT_INDEX 7 #define DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS 10 static int nr_vcpus = 1; static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; static bool run_vcpus = true; static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args) { struct kvm_vcpu *vcpu = vcpu_args->vcpu; struct kvm_run *run; int ret; run = vcpu->run; /* Let the guest access its memory until a stop signal is received */ while (READ_ONCE(run_vcpus)) { ret = _vcpu_run(vcpu); TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret); if (get_ucall(vcpu, NULL) == UCALL_SYNC) continue; TEST_ASSERT(false, "Invalid guest sync status: exit_reason=%s\n", exit_reason_str(run->exit_reason)); } } struct memslot_antagonist_args { struct kvm_vm *vm; useconds_t 
delay; uint64_t nr_modifications; }; static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, uint64_t nr_modifications) { uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size; uint64_t gpa; int i; /* * Add the dummy memslot just below the perf_test_util memslot, which is * at the top of the guest physical address space. */ gpa = perf_test_args.gpa - pages * vm->page_size; for (i = 0; i < nr_modifications; i++) { usleep(delay); vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, DUMMY_MEMSLOT_INDEX, pages, 0); vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX); } } struct test_params { useconds_t memslot_modification_delay; uint64_t nr_memslot_modifications; bool partition_vcpu_memory_access; }; static void run_test(enum vm_guest_mode mode, void *arg) { struct test_params *p = arg; struct kvm_vm *vm; vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1, VM_MEM_SRC_ANONYMOUS, p->partition_vcpu_memory_access); pr_info("Finished creating vCPUs\n"); perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker); pr_info("Started all vCPUs\n"); add_remove_memslot(vm, p->memslot_modification_delay, p->nr_memslot_modifications); run_vcpus = false; perf_test_join_vcpu_threads(nr_vcpus); pr_info("All vCPU threads joined\n"); perf_test_destroy_vm(vm); } static void help(char *name) { puts(""); printf("usage: %s [-h] [-m mode] [-d delay_usec]\n" " [-b memory] [-v vcpus] [-o] [-i iterations]\n", name); guest_modes_help(); printf(" -d: add a delay between each iteration of adding and\n" " deleting a memslot in usec.\n"); printf(" -b: specify the size of the memory region which should be\n" " accessed by each vCPU. e.g. 
10M or 3G.\n" " Default: 1G\n"); printf(" -v: specify the number of vCPUs to run.\n"); printf(" -o: Overlap guest memory accesses instead of partitioning\n" " them into a separate region of memory for each vCPU.\n"); printf(" -i: specify the number of iterations of adding and removing\n" " a memslot.\n" " Default: %d\n", DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS); puts(""); exit(0); } int main(int argc, char *argv[]) { int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS); int opt; struct test_params p = { .memslot_modification_delay = 0, .nr_memslot_modifications = DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS, .partition_vcpu_memory_access = true }; guest_modes_append_default(); while ((opt = getopt(argc, argv, "hm:d:b:v:oi:")) != -1) { switch (opt) { case 'm': guest_modes_cmdline(optarg); break; case 'd': p.memslot_modification_delay = strtoul(optarg, NULL, 0); TEST_ASSERT(p.memslot_modification_delay >= 0, "A negative delay is not supported."); break; case 'b': guest_percpu_mem_size = parse_size(optarg); break; case 'v': nr_vcpus = atoi(optarg); TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus, "Invalid number of vcpus, must be between 1 and %d", max_vcpus); break; case 'o': p.partition_vcpu_memory_access = false; break; case 'i': p.nr_memslot_modifications = atoi(optarg); break; case 'h': default: help(argv[0]); break; } } for_each_guest_mode(run_test, &p); return 0; } |