Diffstat (limited to 'tools/testing/selftests/kvm/lib')
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/processor.c            21
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/ucall.c                 3
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c                    219
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util_internal.h             6
-rw-r--r--  tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c   82
-rw-r--r--  tools/testing/selftests/kvm/lib/s390x/processor.c              26
-rw-r--r--  tools/testing/selftests/kvm/lib/s390x/ucall.c                   3
-rw-r--r--  tools/testing/selftests/kvm/lib/test_util.c                    22
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/handlers.S              81
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/processor.c            178
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/ucall.c                  3
11 files changed, 562 insertions(+), 82 deletions(-)
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index 2afa6618b396..cee92d477dc0 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -5,8 +5,6 @@
* Copyright (C) 2018, Red Hat, Inc.
*/
-#define _GNU_SOURCE /* for program_invocation_name */
-
#include <linux/compiler.h>
#include "kvm_util.h"
@@ -219,21 +217,6 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
}
}
-struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
- void *guest_code)
-{
- uint64_t ptrs_per_4k_pte = 512;
- uint64_t extra_pg_pages = (extra_mem_pages / ptrs_per_4k_pte) * 2;
- struct kvm_vm *vm;
-
- vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
-
- kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
- vm_vcpu_add_default(vm, vcpuid, guest_code);
-
- return vm;
-}
-
void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *init)
{
struct kvm_vcpu_init default_init = { .target = -1, };
@@ -350,3 +333,7 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
va_end(ap);
}
+
+void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+{
+}
diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
index c8e0ec20d3bf..2f37b90ee1a9 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -94,6 +94,9 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
struct kvm_run *run = vcpu_state(vm, vcpu_id);
struct ucall ucall = {};
+ if (uc)
+ memset(uc, 0, sizeof(*uc));
+
if (run->exit_reason == KVM_EXIT_MMIO &&
run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) {
vm_vaddr_t gva;
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 3327cebc1095..88ef7067f1e6 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -5,6 +5,7 @@
* Copyright (C) 2018, Google LLC.
*/
+#define _GNU_SOURCE /* for program_invocation_name */
#include "test_util.h"
#include "kvm_util.h"
#include "kvm_util_internal.h"
@@ -86,6 +87,44 @@ int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
return ret;
}
+/* VCPU Enable Capability
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpu_id - VCPU
+ * cap - Capability
+ *
+ * Output Args: None
+ *
+ * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
+ *
+ * Enables a capability (KVM_CAP_*) on the VCPU.
+ */
+int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
+ struct kvm_enable_cap *cap)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpu_id);
+ int r;
+
+ TEST_ASSERT(vcpu, "cannot find vcpu %d", vcpu_id);
+
+ r = ioctl(vcpu->fd, KVM_ENABLE_CAP, cap);
+ TEST_ASSERT(!r, "KVM_ENABLE_CAP vCPU ioctl failed,\n"
+ " rc: %i, errno: %i", r, errno);
+
+ return r;
+}
+
+void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
+{
+ struct kvm_enable_cap cap = { 0 };
+
+ cap.cap = KVM_CAP_DIRTY_LOG_RING;
+ cap.args[0] = ring_size;
+ vm_enable_cap(vm, &cap);
+ vm->dirty_ring_size = ring_size;
+}
+
static void vm_open(struct kvm_vm *vm, int perm)
{
vm->kvm_fd = open(KVM_DEV_PATH, perm);
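As a rough usage sketch (not part of this diff; VCPU_ID and guest_code are placeholders), a test that wants dirty-ring tracking would enable it on the freshly created VM, before adding any vCPU, since KVM_CAP_DIRTY_LOG_RING is a VM-wide capability whose size is given in bytes:

	#define TEST_DIRTY_RING_COUNT	65536	/* entries, must be a power of two */

	struct kvm_vm *vm;

	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
	vm_enable_dirty_ring(vm, TEST_DIRTY_RING_COUNT * sizeof(struct kvm_dirty_gfn));
	vm_vcpu_add_default(vm, VCPU_ID, guest_code);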
@@ -152,7 +191,7 @@ _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params)
* descriptor to control the created VM is created with the permissions
* given by perm (e.g. O_RDWR).
*/
-struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
+struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
{
struct kvm_vm *vm;
@@ -243,9 +282,61 @@ struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
return vm;
}
-struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
+struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+ uint64_t extra_mem_pages, uint32_t num_percpu_pages,
+ void *guest_code, uint32_t vcpuids[])
+{
+ /* The maximum page table size for a memory region will be when the
+ * smallest pages are used. Considering each page contains x page
+ * table descriptors, the total extra size for page tables (for extra
+ * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
+ * than N/x*2.
+ */
+ uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
+ uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
+ uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
+ struct kvm_vm *vm;
+ int i;
+
+ TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
+ "nr_vcpus = %d too large for host, max-vcpus = %d",
+ nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
+
+ pages = vm_adjust_num_guest_pages(mode, pages);
+ vm = vm_create(mode, pages, O_RDWR);
+
+ kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+
+#ifdef __x86_64__
+ vm_create_irqchip(vm);
+#endif
+
+ for (i = 0; i < nr_vcpus; ++i) {
+ uint32_t vcpuid = vcpuids ? vcpuids[i] : i;
+
+ vm_vcpu_add_default(vm, vcpuid, guest_code);
+
+#ifdef __x86_64__
+ vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
+#endif
+ }
+
+ return vm;
+}
+
+struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
+ uint32_t num_percpu_pages, void *guest_code,
+ uint32_t vcpuids[])
{
- return _vm_create(mode, phy_pages, perm);
+ return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
+ num_percpu_pages, guest_code, vcpuids);
+}
+
+struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
+ void *guest_code)
+{
+ return vm_create_default_with_vcpus(1, extra_mem_pages, 0, guest_code,
+ (uint32_t []){ vcpuid });
}
/*
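As a sketch of the resulting call pattern (not part of the diff; guest_code is a placeholder), a multi-vCPU test can now be set up in a single call instead of open-coding the ELF load, irqchip creation and per-vCPU setup:

	/* Three vCPUs with ids 0, 1, 2; no extra memory or per-vCPU pages. */
	struct kvm_vm *vm = vm_create_default_with_vcpus(3, 0, 0, guest_code, NULL);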
@@ -305,6 +396,11 @@ void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
__func__, strerror(-ret));
}
+uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
+{
+ return ioctl(vm->fd, KVM_RESET_DIRTY_RINGS);
+}
+
/*
* Userspace Memory Region Find
*
@@ -409,10 +505,17 @@ struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
*
* Removes a vCPU from a VM and frees its resources.
*/
-static void vm_vcpu_rm(struct vcpu *vcpu)
+static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
{
int ret;
+ if (vcpu->dirty_gfns) {
+ ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
+ TEST_ASSERT(ret == 0, "munmap of VCPU dirty ring failed, "
+ "rc: %i errno: %i", ret, errno);
+ vcpu->dirty_gfns = NULL;
+ }
+
ret = munmap(vcpu->state, sizeof(*vcpu->state));
TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
"errno: %i", ret, errno);
@@ -430,7 +533,7 @@ void kvm_vm_release(struct kvm_vm *vmp)
int ret;
list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
- vm_vcpu_rm(vcpu);
+ vm_vcpu_rm(vmp, vcpu);
ret = close(vmp->fd);
TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
@@ -1204,9 +1307,21 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
do {
rc = ioctl(vcpu->fd, KVM_RUN, NULL);
} while (rc == -1 && errno == EINTR);
+
+ assert_on_unhandled_exception(vm, vcpuid);
+
return rc;
}
+int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+ return vcpu->fd;
+}
+
void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
@@ -1261,6 +1376,35 @@ void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
}
/*
+ * VM VCPU Get Reg List
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ *
+ * Output Args:
+ * None
+ *
+ * Return:
+ * A pointer to an allocated struct kvm_reg_list
+ *
+ * Get the list of guest registers which are supported for
+ * KVM_GET_ONE_REG/KVM_SET_ONE_REG calls
+ */
+struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
+ int ret;
+
+ ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n);
+ TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
+ reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
+ reg_list->n = reg_list_n.n;
+ vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list);
+ return reg_list;
+}
+
+/*
* VM VCPU Regs Get
*
* Input Args:
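A sketch of how a caller might consume the returned list (the struct kvm_reg_list layout comes from the KVM uapi; vm and VCPU_ID are placeholders, error handling omitted):

	struct kvm_reg_list *list = vcpu_get_reg_list(vm, VCPU_ID);
	__u64 i;

	for (i = 0; i < list->n; i++)
		printf("supported reg: 0x%llx\n", (unsigned long long)list->reg[i]);

	free(list);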
@@ -1506,6 +1650,42 @@ int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
return ret;
}
+void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct vcpu *vcpu;
+ uint32_t size = vm->dirty_ring_size;
+
+ TEST_ASSERT(size > 0, "Should enable dirty ring first");
+
+ vcpu = vcpu_find(vm, vcpuid);
+
+ TEST_ASSERT(vcpu, "Cannot find vcpu %u", vcpuid);
+
+ if (!vcpu->dirty_gfns) {
+ void *addr;
+
+ addr = mmap(NULL, size, PROT_READ,
+ MAP_PRIVATE, vcpu->fd,
+ vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+ TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");
+
+ addr = mmap(NULL, size, PROT_READ | PROT_EXEC,
+ MAP_PRIVATE, vcpu->fd,
+ vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+ TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");
+
+ addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, vcpu->fd,
+ vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+ TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
+
+ vcpu->dirty_gfns = addr;
+ vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
+ }
+
+ return vcpu->dirty_gfns;
+}
+
/*
* VM Ioctl
*
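A rough harness-side harvesting sketch, assuming the kvm_dirty_gfn layout and the KVM_DIRTY_GFN_F_DIRTY/KVM_DIRTY_GFN_F_RESET flag bits from the KVM uapi; ring_entries, the fetch index and collect_dirty_page() are test-specific placeholders:

	struct kvm_dirty_gfn *ring = vcpu_map_dirty_ring(vm, VCPU_ID);
	struct kvm_dirty_gfn *cur;
	uint32_t fetch = 0;	/* a real test keeps this across calls */

	while ((cur = &ring[fetch % ring_entries])->flags & KVM_DIRTY_GFN_F_DIRTY) {
		collect_dirty_page(cur->slot, cur->offset);	/* placeholder */
		cur->flags = KVM_DIRTY_GFN_F_RESET;		/* mark entry as collected */
		fetch++;
	}

	kvm_vm_reset_dirty_ring(vm);	/* KVM_RESET_DIRTY_RINGS on the VM fd */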
@@ -1528,6 +1708,32 @@ void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
}
/*
+ * KVM system ioctl
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * cmd - Ioctl number
+ * arg - Argument to pass to the ioctl
+ *
+ * Return: None
+ *
+ * Issues an arbitrary ioctl on a KVM fd.
+ */
+void kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
+{
+ int ret;
+
+ ret = ioctl(vm->kvm_fd, cmd, arg);
+ TEST_ASSERT(ret == 0, "KVM ioctl %lu failed, rc: %i errno: %i (%s)",
+ cmd, ret, errno, strerror(errno));
+}
+
+int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
+{
+ return ioctl(vm->kvm_fd, cmd, arg);
+}
+
+/*
* VM Dump
*
* Input Args:
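A small sketch of the new raw /dev/kvm helpers (not part of the diff): querying the API version directly on the KVM fd:

	int ver = _kvm_ioctl(vm, KVM_GET_API_VERSION, NULL);

	TEST_ASSERT(ver == KVM_API_VERSION,
		    "Unexpected KVM API version: %d", ver);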
@@ -1599,6 +1805,9 @@ static struct exit_reason {
{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
{KVM_EXIT_OSI, "OSI"},
{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
+ {KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"},
+ {KVM_EXIT_X86_RDMSR, "RDMSR"},
+ {KVM_EXIT_X86_WRMSR, "WRMSR"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index 2ef446520748..34465dc562d8 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -28,6 +28,9 @@ struct vcpu {
uint32_t id;
int fd;
struct kvm_run *state;
+ struct kvm_dirty_gfn *dirty_gfns;
+ uint32_t fetch_index;
+ uint32_t dirty_gfns_count;
};
struct kvm_vm {
@@ -50,6 +53,9 @@ struct kvm_vm {
vm_paddr_t pgd;
vm_vaddr_t gdt;
vm_vaddr_t tss;
+ vm_vaddr_t idt;
+ vm_vaddr_t handlers;
+ uint32_t dirty_ring_size;
};
struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid);
diff --git a/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c b/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c
new file mode 100644
index 000000000000..86b9e611ad87
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test handler for the s390x DIAGNOSE 0x0318 instruction.
+ *
+ * Copyright (C) 2020, IBM
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+
+#define VCPU_ID 6
+
+#define ICPT_INSTRUCTION 0x04
+#define IPA0_DIAG 0x8300
+
+static void guest_code(void)
+{
+ uint64_t diag318_info = 0x12345678;
+
+ asm volatile ("diag %0,0,0x318\n" : : "d" (diag318_info));
+}
+
+/*
+ * The DIAGNOSE 0x0318 instruction call must be handled via userspace. As such,
+ * we create an ad-hoc VM here to handle the instruction then extract the
+ * necessary data. It is up to the caller to decide what to do with that data.
+ */
+static uint64_t diag318_handler(void)
+{
+ struct kvm_vm *vm;
+ struct kvm_run *run;
+ uint64_t reg;
+ uint64_t diag318_info;
+
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vcpu_run(vm, VCPU_ID);
+ run = vcpu_state(vm, VCPU_ID);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
+ "DIAGNOSE 0x0318 instruction was not intercepted");
+ TEST_ASSERT(run->s390_sieic.icptcode == ICPT_INSTRUCTION,
+ "Unexpected intercept code: 0x%x", run->s390_sieic.icptcode);
+ TEST_ASSERT((run->s390_sieic.ipa & 0xff00) == IPA0_DIAG,
+ "Unexpected IPA0 code: 0x%x", (run->s390_sieic.ipa & 0xff00));
+
+ reg = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ diag318_info = run->s.regs.gprs[reg];
+
+ TEST_ASSERT(diag318_info != 0, "DIAGNOSE 0x0318 info not set");
+
+ kvm_vm_free(vm);
+
+ return diag318_info;
+}
+
+uint64_t get_diag318_info(void)
+{
+ static uint64_t diag318_info;
+ static bool printed_skip;
+
+ /*
+ * If KVM does not support diag318, then return 0 to
+ * ensure tests do not break.
+ */
+ if (!kvm_check_cap(KVM_CAP_S390_DIAG318)) {
+ if (!printed_skip) {
+ fprintf(stdout, "KVM_CAP_S390_DIAG318 not supported. "
+ "Skipping diag318 test.\n");
+ printed_skip = true;
+ }
+ return 0;
+ }
+
+ /*
+ * If a test has previously requested the diag318 info,
+ * then don't bother spinning up a temporary VM again.
+ */
+ if (!diag318_info)
+ diag318_info = diag318_handler();
+
+ return diag318_info;
+}
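A sketch of how a test might consume this helper (illustrative only; a return value of 0 simply means KVM_CAP_S390_DIAG318 is unavailable and the check should be skipped):

	uint64_t info = get_diag318_info();

	if (!info)
		return;		/* capability not supported, nothing to verify */

	/* feed 'info' into the register or sync-regs field the test exercises */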
diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c
index a88c5d665725..0152f356c099 100644
--- a/tools/testing/selftests/kvm/lib/s390x/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -5,8 +5,6 @@
* Copyright (C) 2019, Red Hat, Inc.
*/
-#define _GNU_SOURCE /* for program_invocation_name */
-
#include "processor.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
@@ -160,26 +158,6 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
virt_dump_region(stream, vm, indent, vm->pgd);
}
-struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
- void *guest_code)
-{
- /*
- * The additional amount of pages required for the page tables is:
- * 1 * n / 256 + 4 * (n / 256) / 2048 + 4 * (n / 256) / 2048^2 + ...
- * which is definitely smaller than (n / 256) * 2.
- */
- uint64_t extra_pg_pages = extra_mem_pages / 256 * 2;
- struct kvm_vm *vm;
-
- vm = vm_create(VM_MODE_DEFAULT,
- DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
-
- kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
- vm_vcpu_add_default(vm, vcpuid, guest_code);
-
- return vm;
-}
-
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
@@ -241,3 +219,7 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
indent, "", vcpu->state->psw_mask, vcpu->state->psw_addr);
}
+
+void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+{
+}
diff --git a/tools/testing/selftests/kvm/lib/s390x/ucall.c b/tools/testing/selftests/kvm/lib/s390x/ucall.c
index fd589dc9bfab..9d3b0f15249a 100644
--- a/tools/testing/selftests/kvm/lib/s390x/ucall.c
+++ b/tools/testing/selftests/kvm/lib/s390x/ucall.c
@@ -38,6 +38,9 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
struct kvm_run *run = vcpu_state(vm, vcpu_id);
struct ucall ucall = {};
+ if (uc)
+ memset(uc, 0, sizeof(*uc));
+
if (run->exit_reason == KVM_EXIT_S390_SIEIC &&
run->s390_sieic.icptcode == 4 &&
(run->s390_sieic.ipa >> 8) == 0x83 && /* 0x83 means DIAGNOSE */
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 689e97c27ee2..8e04c0b1608e 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -4,10 +4,13 @@
*
* Copyright (C) 2020, Google LLC.
*/
-#include <stdlib.h>
+
+#include <assert.h>
#include <ctype.h>
#include <limits.h>
-#include <assert.h>
+#include <stdlib.h>
+#include <time.h>
+
#include "test_util.h"
/*
@@ -81,6 +84,21 @@ struct timespec timespec_sub(struct timespec ts1, struct timespec ts2)
return timespec_add_ns((struct timespec){0}, ns1 - ns2);
}
+struct timespec timespec_diff_now(struct timespec start)
+{
+ struct timespec end;
+
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ return timespec_sub(end, start);
+}
+
+struct timespec timespec_div(struct timespec ts, int divisor)
+{
+ int64_t ns = timespec_to_ns(ts) / divisor;
+
+ return timespec_add_ns((struct timespec){0}, ns);
+}
+
void print_skip(const char *fmt, ...)
{
va_list ap;
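A sketch of the timing pattern these helpers enable (vm, VCPU_ID and the iteration count are placeholders):

	struct timespec start, avg;
	int i, iterations = 10;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < iterations; i++)
		vcpu_run(vm, VCPU_ID);
	avg = timespec_div(timespec_diff_now(start), iterations);
	pr_info("avg per iteration: %ld.%.9lds\n", avg.tv_sec, avg.tv_nsec);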
diff --git a/tools/testing/selftests/kvm/lib/x86_64/handlers.S b/tools/testing/selftests/kvm/lib/x86_64/handlers.S
new file mode 100644
index 000000000000..aaf7bc7d2ce1
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/x86_64/handlers.S
@@ -0,0 +1,81 @@
+handle_exception:
+ push %r15
+ push %r14
+ push %r13
+ push %r12
+ push %r11
+ push %r10
+ push %r9
+ push %r8
+
+ push %rdi
+ push %rsi
+ push %rbp
+ push %rbx
+ push %rdx
+ push %rcx
+ push %rax
+ mov %rsp, %rdi
+
+ call route_exception
+
+ pop %rax
+ pop %rcx
+ pop %rdx
+ pop %rbx
+ pop %rbp
+ pop %rsi
+ pop %rdi
+ pop %r8
+ pop %r9
+ pop %r10
+ pop %r11
+ pop %r12
+ pop %r13
+ pop %r14
+ pop %r15
+
+ /* Discard vector and error code. */
+ add $16, %rsp
+ iretq
+
+/*
+ * Build the handle_exception wrappers which push the vector/error code on the
+ * stack and an array of pointers to those wrappers.
+ */
+.pushsection .rodata
+.globl idt_handlers
+idt_handlers:
+.popsection
+
+.macro HANDLERS has_error from to
+ vector = \from
+ .rept \to - \from + 1
+ .align 8
+
+ /* Fetch current address and append it to idt_handlers. */
+ current_handler = .
+.pushsection .rodata
+.quad current_handler
+.popsection
+
+ .if ! \has_error
+ pushq $0
+ .endif
+ pushq $vector
+ jmp handle_exception
+ vector = vector + 1
+ .endr
+.endm
+
+.global idt_handler_code
+idt_handler_code:
+ HANDLERS has_error=0 from=0 to=7
+ HANDLERS has_error=1 from=8 to=8
+ HANDLERS has_error=0 from=9 to=9
+ HANDLERS has_error=1 from=10 to=14
+ HANDLERS has_error=0 from=15 to=16
+ HANDLERS has_error=1 from=17 to=17
+ HANDLERS has_error=0 from=18 to=255
+
+.section .note.GNU-stack, "", %progbits
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index f6eb34eaa0d2..95e1a757c629 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -5,16 +5,23 @@
* Copyright (C) 2018, Google LLC.
*/
-#define _GNU_SOURCE /* for program_invocation_name */
-
#include "test_util.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"
+#ifndef NUM_INTERRUPTS
+#define NUM_INTERRUPTS 256
+#endif
+
+#define DEFAULT_CODE_SELECTOR 0x8
+#define DEFAULT_DATA_SELECTOR 0x10
+
/* Minimum physical address used for virtual translation tables. */
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
+vm_vaddr_t exception_handlers;
+
/* Virtual translation table structure declarations */
struct pageMapL4Entry {
uint64_t present:1;
@@ -392,11 +399,12 @@ static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
desc->limit0 = segp->limit & 0xFFFF;
desc->base0 = segp->base & 0xFFFF;
desc->base1 = segp->base >> 16;
- desc->s = segp->s;
desc->type = segp->type;
+ desc->s = segp->s;
desc->dpl = segp->dpl;
desc->p = segp->present;
desc->limit1 = segp->limit >> 16;
+ desc->avl = segp->avl;
desc->l = segp->l;
desc->db = segp->db;
desc->g = segp->g;
@@ -556,9 +564,9 @@ static void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_m
sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
kvm_seg_set_unusable(&sregs.ldt);
- kvm_seg_set_kernel_code_64bit(vm, 0x8, &sregs.cs);
- kvm_seg_set_kernel_data_64bit(vm, 0x10, &sregs.ds);
- kvm_seg_set_kernel_data_64bit(vm, 0x10, &sregs.es);
+ kvm_seg_set_kernel_code_64bit(vm, DEFAULT_CODE_SELECTOR, &sregs.cs);
+ kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.ds);
+ kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.es);
kvm_setup_tss_64bit(vm, &sregs.tr, 0x18, gdt_memslot, pgd_memslot);
break;
@@ -721,36 +729,6 @@ void vcpu_set_cpuid(struct kvm_vm *vm,
}
-struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
- void *guest_code)
-{
- struct kvm_vm *vm;
- /*
- * For x86 the maximum page table size for a memory region
- * will be when only 4K pages are used. In that case the
- * total extra size for page tables (for extra N pages) will
- * be: N/512+N/512^2+N/512^3+... which is definitely smaller
- * than N/512*2.
- */
- uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
-
- /* Create VM */
- vm = vm_create(VM_MODE_DEFAULT,
- DEFAULT_GUEST_PHY_PAGES + extra_pg_pages,
- O_RDWR);
-
- /* Setup guest code */
- kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
-
- /* Setup IRQ Chip */
- vm_create_irqchip(vm);
-
- /* Add the first vCPU. */
- vm_vcpu_add_default(vm, vcpuid, guest_code);
-
- return vm;
-}
-
/*
* VCPU Get MSR
*
@@ -1118,3 +1096,131 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
*va_bits = (entry->eax >> 8) & 0xff;
}
}
+
+struct idt_entry {
+ uint16_t offset0;
+ uint16_t selector;
+ uint16_t ist : 3;
+ uint16_t : 5;
+ uint16_t type : 4;
+ uint16_t : 1;
+ uint16_t dpl : 2;
+ uint16_t p : 1;
+ uint16_t offset1;
+ uint32_t offset2; uint32_t reserved;
+};
+
+static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
+ int dpl, unsigned short selector)
+{
+ struct idt_entry *base =
+ (struct idt_entry *)addr_gva2hva(vm, vm->idt);
+ struct idt_entry *e = &base[vector];
+
+ memset(e, 0, sizeof(*e));
+ e->offset0 = addr;
+ e->selector = selector;
+ e->ist = 0;
+ e->type = 14;
+ e->dpl = dpl;
+ e->p = 1;
+ e->offset1 = addr >> 16;
+ e->offset2 = addr >> 32;
+}
+
+void kvm_exit_unexpected_vector(uint32_t value)
+{
+ outl(UNEXPECTED_VECTOR_PORT, value);
+}
+
+void route_exception(struct ex_regs *regs)
+{
+ typedef void(*handler)(struct ex_regs *);
+ handler *handlers = (handler *)exception_handlers;
+
+ if (handlers && handlers[regs->vector]) {
+ handlers[regs->vector](regs);
+ return;
+ }
+
+ kvm_exit_unexpected_vector(regs->vector);
+}
+
+void vm_init_descriptor_tables(struct kvm_vm *vm)
+{
+ extern void *idt_handlers;
+ int i;
+
+ vm->idt = vm_vaddr_alloc(vm, getpagesize(), 0x2000, 0, 0);
+ vm->handlers = vm_vaddr_alloc(vm, 256 * sizeof(void *), 0x2000, 0, 0);
+ /* Handlers have the same address in both address spaces.*/
+ for (i = 0; i < NUM_INTERRUPTS; i++)
+ set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0,
+ DEFAULT_CODE_SELECTOR);
+}
+
+void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct kvm_sregs sregs;
+
+ vcpu_sregs_get(vm, vcpuid, &sregs);
+ sregs.idt.base = vm->idt;
+ sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
+ sregs.gdt.base = vm->gdt;
+ sregs.gdt.limit = getpagesize() - 1;
+ kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs);
+ vcpu_sregs_set(vm, vcpuid, &sregs);
+ *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
+}
+
+void vm_handle_exception(struct kvm_vm *vm, int vector,
+ void (*handler)(struct ex_regs *))
+{
+ vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);
+
+ handlers[vector] = (vm_vaddr_t)handler;
+}
+
+void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ if (vcpu_state(vm, vcpuid)->exit_reason == KVM_EXIT_IO
+ && vcpu_state(vm, vcpuid)->io.port == UNEXPECTED_VECTOR_PORT
+ && vcpu_state(vm, vcpuid)->io.size == 4) {
+ /* Grab pointer to io data */
+ uint32_t *data = (void *)vcpu_state(vm, vcpuid)
+ + vcpu_state(vm, vcpuid)->io.data_offset;
+
+ TEST_ASSERT(false,
+ "Unexpected vectored event in guest (vector:0x%x)",
+ *data);
+ }
+}
+
+bool set_cpuid(struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 *ent)
+{
+ int i;
+
+ for (i = 0; i < cpuid->nent; i++) {
+ struct kvm_cpuid_entry2 *cur = &cpuid->entries[i];
+
+ if (cur->function != ent->function || cur->index != ent->index)
+ continue;
+
+ memcpy(cur, ent, sizeof(struct kvm_cpuid_entry2));
+ return true;
+ }
+
+ return false;
+}
+
+uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
+ uint64_t a3)
+{
+ uint64_t r;
+
+ asm volatile("vmcall"
+ : "=a"(r)
+ : "b"(a0), "c"(a1), "d"(a2), "S"(a3));
+ return r;
+}
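A sketch of wiring a guest exception handler through the new descriptor-table helpers (VCPU_ID is a placeholder, vector 13 is #GP, and the handler body is illustrative):

	static void guest_gp_handler(struct ex_regs *regs)
	{
		/* Illustrative: skip a 2-byte faulting instruction such as wrmsr. */
		regs->rip += 2;
	}

	static void run_guest_with_gp_handler(void *guest_code)
	{
		struct kvm_vm *vm = vm_create_default(VCPU_ID, 0, guest_code);

		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vm, VCPU_ID);
		vm_handle_exception(vm, 13 /* #GP */, guest_gp_handler);

		/* Unhandled vectors now trip assert_on_unhandled_exception(). */
		vcpu_run(vm, VCPU_ID);
		kvm_vm_free(vm);
	}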
diff --git a/tools/testing/selftests/kvm/lib/x86_64/ucall.c b/tools/testing/selftests/kvm/lib/x86_64/ucall.c
index da4d89ad5419..a3489973e290 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/ucall.c
@@ -40,6 +40,9 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
struct kvm_run *run = vcpu_state(vm, vcpu_id);
struct ucall ucall = {};
+ if (uc)
+ memset(uc, 0, sizeof(*uc));
+
if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) {
struct kvm_regs regs;