author | Sean Christopherson <sean.j.christopherson@intel.com> | 2020-04-10 16:17:06 -0700
---|---|---
committer | Paolo Bonzini <pbonzini@redhat.com> | 2020-04-15 12:08:47 -0400
commit | 5b4f758f454b6e3d9c7182031631c745105ec24b (patch) |
tree | 6d58f63fdcba9e4dff20f8bb739b9430a211cc35 /tools/testing/selftests/kvm/set_memory_region_test.c |
parent | 8cc2dd637b890d75613387daf57af7e8f3a32e33 (diff) |
KVM: selftests: Make set_memory_region_test common to all architectures
Make set_memory_region_test available on all architectures by wrapping
the x86-specific bits in ifdefs. A future testcase that creates the
maximum number of memslots will be architecture agnostic.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200410231707.7128-10-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
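
The guard pattern the patch applies is sketched below, as a minimal, hypothetical distillation (the function name is illustrative, not from the patch): x86-only definitions are fenced with `#ifdef __x86_64__`, so `main()` compiles to a no-op on other architectures until common testcases are added.

```c
#include <stdio.h>

#ifdef __x86_64__
/* x86-specific testcases (e.g. move/delete of in-use memslots) live here. */
static void test_x86_only_case(void)
{
	printf("running an x86-only testcase\n");
}
#endif /* __x86_64__ */

int main(void)
{
#ifdef __x86_64__
	test_x86_only_case();
#endif
	/* Architecture-agnostic testcases can be added here later. */
	return 0;
}
```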
Diffstat (limited to 'tools/testing/selftests/kvm/set_memory_region_test.c')
-rw-r--r-- | tools/testing/selftests/kvm/set_memory_region_test.c | 346 |
1 file changed, 346 insertions(+), 0 deletions(-)
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
new file mode 100644
index 000000000000..ac4945fa1c89
--- /dev/null
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include <linux/compiler.h>
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <processor.h>
+
+#define VCPU_ID 0
+
+#ifdef __x86_64__
+/*
+ * Somewhat arbitrary location and slot, intended to not overlap anything. The
+ * location and size are specifically 2mb sized/aligned so that the initial
+ * region corresponds to exactly one large page.
+ */
+#define MEM_REGION_GPA		0xc0000000
+#define MEM_REGION_SIZE		0x200000
+#define MEM_REGION_SLOT		10
+
+static const uint64_t MMIO_VAL = 0xbeefull;
+
+extern const uint64_t final_rip_start;
+extern const uint64_t final_rip_end;
+
+static sem_t vcpu_ready;
+
+static inline uint64_t guest_spin_on_val(uint64_t spin_val)
+{
+	uint64_t val;
+
+	do {
+		val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
+	} while (val == spin_val);
+
+	GUEST_SYNC(0);
+	return val;
+}
+
+static void *vcpu_worker(void *data)
+{
+	struct kvm_vm *vm = data;
+	struct kvm_run *run;
+	struct ucall uc;
+	uint64_t cmd;
+
+	/*
+	 * Loop until the guest is done. Re-enter the guest on all MMIO exits,
+	 * which will occur if the guest attempts to access a memslot after it
+	 * has been deleted or while it is being moved.
+	 */
+	run = vcpu_state(vm, VCPU_ID);
+
+	while (1) {
+		vcpu_run(vm, VCPU_ID);
+
+		if (run->exit_reason == KVM_EXIT_IO) {
+			cmd = get_ucall(vm, VCPU_ID, &uc);
+			if (cmd != UCALL_SYNC)
+				break;
+
+			sem_post(&vcpu_ready);
+			continue;
+		}
+
+		if (run->exit_reason != KVM_EXIT_MMIO)
+			break;
+
+		TEST_ASSERT(!run->mmio.is_write, "Unexpected exit mmio write");
+		TEST_ASSERT(run->mmio.len == 8,
+			    "Unexpected exit mmio size = %u", run->mmio.len);
+
+		TEST_ASSERT(run->mmio.phys_addr == MEM_REGION_GPA,
+			    "Unexpected exit mmio address = 0x%llx",
+			    run->mmio.phys_addr);
+		memcpy(run->mmio.data, &MMIO_VAL, 8);
+	}
+
+	if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
+		TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
+			  __FILE__, uc.args[1], uc.args[2]);
+
+	return NULL;
+}
+
+static void wait_for_vcpu(void)
+{
+	struct timespec ts;
+
+	TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
+		    "clock_gettime() failed: %d\n", errno);
+
+	ts.tv_sec += 2;
+	TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
+		    "sem_timedwait() failed: %d\n", errno);
+
+	/* Wait for the vCPU thread to reenter the guest. */
+	usleep(100000);
+}
+
+static struct kvm_vm *spawn_vm(pthread_t *vcpu_thread, void *guest_code)
+{
+	struct kvm_vm *vm;
+	uint64_t *hva;
+	uint64_t gpa;
+
+	vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
+				    MEM_REGION_GPA, MEM_REGION_SLOT,
+				    MEM_REGION_SIZE / getpagesize(), 0);
+
+	/*
+	 * Allocate and map two pages so that the GPA accessed by guest_code()
+	 * stays valid across the memslot move.
+	 */
+	gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
+	TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
+
+	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2, 0);
+
+	/* Ditto for the host mapping so that both pages can be zeroed. */
+	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
+	memset(hva, 0, 2 * 4096);
+
+	pthread_create(vcpu_thread, NULL, vcpu_worker, vm);
+
+	/* Ensure the guest thread is spun up. */
+	wait_for_vcpu();
+
+	return vm;
+}
+
+
+static void guest_code_move_memory_region(void)
+{
+	uint64_t val;
+
+	GUEST_SYNC(0);
+
+	/*
+	 * Spin until the memory region is moved to a misaligned address. This
+	 * may or may not trigger MMIO, as the window where the memslot is
+	 * invalid is quite small.
+	 */
+	val = guest_spin_on_val(0);
+	GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val);
+
+	/* Spin until the memory region is realigned. */
+	val = guest_spin_on_val(MMIO_VAL);
+	GUEST_ASSERT_1(val == 1, val);
+
+	GUEST_DONE();
+}
+
+static void test_move_memory_region(void)
+{
+	pthread_t vcpu_thread;
+	struct kvm_vm *vm;
+	uint64_t *hva;
+
+	vm = spawn_vm(&vcpu_thread, guest_code_move_memory_region);
+
+	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
+
+	/*
+	 * Shift the region's base GPA. The guest should not see "2" as the
+	 * hva->gpa translation is misaligned, i.e. the guest is accessing a
+	 * different host pfn.
+	 */
+	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);
+	WRITE_ONCE(*hva, 2);
+
+	/*
+	 * The guest _might_ see an invalid memslot and trigger MMIO, but it's
+	 * a tiny window. Spin and defer the sync until the memslot is
+	 * restored and guest behavior is once again deterministic.
+	 */
+	usleep(100000);
+
+	/*
+	 * Note, value in memory needs to be changed *before* restoring the
+	 * memslot, else the guest could race the update and see "2".
+	 */
+	WRITE_ONCE(*hva, 1);
+
+	/* Restore the original base, the guest should see "1". */
+	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);
+	wait_for_vcpu();
+	/* Deferred sync from when the memslot was misaligned (above). */
+	wait_for_vcpu();
+
+	pthread_join(vcpu_thread, NULL);
+
+	kvm_vm_free(vm);
+}
+
+static void guest_code_delete_memory_region(void)
+{
+	uint64_t val;
+
+	GUEST_SYNC(0);
+
+	/* Spin until the memory region is deleted. */
+	val = guest_spin_on_val(0);
+	GUEST_ASSERT_1(val == MMIO_VAL, val);
+
+	/* Spin until the memory region is recreated. */
+	val = guest_spin_on_val(MMIO_VAL);
+	GUEST_ASSERT_1(val == 0, val);
+
+	/* Spin until the memory region is deleted. */
+	val = guest_spin_on_val(0);
+	GUEST_ASSERT_1(val == MMIO_VAL, val);
+
+	asm("1:\n\t"
+	    ".pushsection .rodata\n\t"
+	    ".global final_rip_start\n\t"
+	    "final_rip_start: .quad 1b\n\t"
+	    ".popsection");
+
+	/* Spin indefinitely (until the code memslot is deleted). */
+	guest_spin_on_val(MMIO_VAL);
+
+	asm("1:\n\t"
+	    ".pushsection .rodata\n\t"
+	    ".global final_rip_end\n\t"
+	    "final_rip_end: .quad 1b\n\t"
+	    ".popsection");
+
+	GUEST_ASSERT_1(0, 0);
+}
+
+static void test_delete_memory_region(void)
+{
+	pthread_t vcpu_thread;
+	struct kvm_regs regs;
+	struct kvm_run *run;
+	struct kvm_vm *vm;
+
+	vm = spawn_vm(&vcpu_thread, guest_code_delete_memory_region);
+
+	/* Delete the memory region, the guest should not die. */
+	vm_mem_region_delete(vm, MEM_REGION_SLOT);
+	wait_for_vcpu();
+
+	/* Recreate the memory region. The guest should see "0". */
+	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
+				    MEM_REGION_GPA, MEM_REGION_SLOT,
+				    MEM_REGION_SIZE / getpagesize(), 0);
+	wait_for_vcpu();
+
+	/* Delete the region again so that there's only one memslot left. */
+	vm_mem_region_delete(vm, MEM_REGION_SLOT);
+	wait_for_vcpu();
+
+	/*
+	 * Delete the primary memslot. This should cause an emulation error or
+	 * shutdown due to the page tables getting nuked.
+	 */
+	vm_mem_region_delete(vm, 0);
+
+	pthread_join(vcpu_thread, NULL);
+
+	run = vcpu_state(vm, VCPU_ID);
+
+	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN ||
+		    run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
+		    "Unexpected exit reason = %d", run->exit_reason);
+
+	vcpu_regs_get(vm, VCPU_ID, &regs);
+
+	TEST_ASSERT(regs.rip >= final_rip_start &&
+		    regs.rip < final_rip_end,
+		    "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx\n",
+		    final_rip_start, final_rip_end, regs.rip);
+
+	kvm_vm_free(vm);
+}
+
+static void test_zero_memory_regions(void)
+{
+	struct kvm_run *run;
+	struct kvm_vm *vm;
+
+	pr_info("Testing KVM_RUN with zero added memory regions\n");
+
+	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+	vm_vcpu_add(vm, VCPU_ID);
+
+	TEST_ASSERT(!ioctl(vm_get_fd(vm), KVM_SET_NR_MMU_PAGES, 64),
+		    "KVM_SET_NR_MMU_PAGES failed, errno = %d\n", errno);
+	vcpu_run(vm, VCPU_ID);
+
+	run = vcpu_state(vm, VCPU_ID);
+	TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
+		    "Unexpected exit_reason = %u\n", run->exit_reason);
+
+	kvm_vm_free(vm);
+}
+#endif /* __x86_64__ */
+
+int main(int argc, char *argv[])
+{
+#ifdef __x86_64__
+	int i, loops;
+#endif
+
+	/* Tell stdout not to buffer its content */
+	setbuf(stdout, NULL);
+
+#ifdef __x86_64__
+	/*
+	 * FIXME: the zero-memslot test fails on aarch64 and s390x because
+	 * KVM_RUN fails with ENOEXEC or EFAULT.
+	 */
+	test_zero_memory_regions();
+
+	if (argc > 1)
+		loops = atoi(argv[1]);
+	else
+		loops = 10;
+
+	pr_info("Testing MOVE of in-use region, %d loops\n", loops);
+	for (i = 0; i < loops; i++)
+		test_move_memory_region();
+
+	pr_info("Testing DELETE of in-use region, %d loops\n", loops);
+	for (i = 0; i < loops; i++)
+		test_delete_memory_region();
+#endif
+
+	return 0;
+}
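
A note on the guest's polling loop: guest and host communicate only through the page at MEM_REGION_GPA, so every iteration of guest_spin_on_val() must perform a real load, which is what READ_ONCE() guarantees. Below is a minimal standalone sketch of the idea with a simplified stand-in for the kernel's READ_ONCE() (the test itself picks it up from <linux/compiler.h>); without the volatile access, the compiler could hoist the load out of the loop and spin forever on a stale value.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's READ_ONCE(): the volatile cast
 * forces a fresh load from memory on every use. */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

static uint64_t spin_on_val(const uint64_t *mem, uint64_t spin_val)
{
	uint64_t val;

	do {
		val = READ_ONCE(*mem);	/* re-read shared memory each pass */
	} while (val == spin_val);

	return val;
}

int main(void)
{
	uint64_t shared = 1;	/* stands in for a write from another thread */

	printf("saw %" PRIu64 "\n", spin_on_val(&shared, 0));
	return 0;
}
```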
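
To try the test, the usual kselftest workflow should apply (the commands below are the standard invocations, not taken from this page): build with `make -C tools/testing/selftests/kvm`, then run `./set_memory_region_test`, optionally passing a loop count such as `./set_memory_region_test 5`; per main(), argv[1] overrides the default of 10 iterations for the MOVE and DELETE loops.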