Diffstat (limited to 'tools/testing/selftests/kvm/lib')
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/ucall.c     |  6
-rw-r--r--  tools/testing/selftests/kvm/lib/elf.c               |  2
-rw-r--r--  tools/testing/selftests/kvm/lib/guest_modes.c       |  2
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c          | 17
-rw-r--r--  tools/testing/selftests/kvm/lib/ucall_common.c      | 16
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/processor.c  | 48
6 files changed, 54 insertions(+), 37 deletions(-)
diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
index 562c16dfbb00..f212bd8ab93d 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -14,11 +14,13 @@ static vm_vaddr_t *ucall_exit_mmio_addr;
void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
- virt_pg_map(vm, mmio_gpa, mmio_gpa);
+ vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
+
+ virt_map(vm, mmio_gva, mmio_gpa, 1);
vm->ucall_mmio_addr = mmio_gpa;
- write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gpa);
+ write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);
}
void ucall_arch_do_ucall(vm_vaddr_t uc)
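Note: the hunk above stops identity-mapping the ucall MMIO hole into the guest's virtual address space; an unused virtual-address gap is reserved and mapped to the MMIO GPA instead, so the ucall page can no longer collide with other virtual allocations. For context, the guest-side write that consumes ucall_exit_mmio_addr is roughly the sketch below; the single-store body is an assumption based on the declaration visible in the context line, not part of this diff.

void ucall_arch_do_ucall(vm_vaddr_t uc)
{
	/*
	 * Store the ucall struct's GVA to the MMIO page; the access is
	 * handled as MMIO and exits to the host, which decodes it in
	 * get_ucall().
	 */
	WRITE_ONCE(*ucall_exit_mmio_addr, uc);
}
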
diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c
index 820ac2d08c98..266f3876e10a 100644
--- a/tools/testing/selftests/kvm/lib/elf.c
+++ b/tools/testing/selftests/kvm/lib/elf.c
@@ -90,6 +90,7 @@ static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp)
" hdrp->e_shentsize: %x\n"
" expected: %zx",
hdrp->e_shentsize, sizeof(Elf64_Shdr));
+ close(fd);
}
/* VM ELF Load
@@ -190,4 +191,5 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
phdr.p_filesz);
}
}
+ close(fd);
}
diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c
index 99a575bbbc52..1df3ce4b16fd 100644
--- a/tools/testing/selftests/kvm/lib/guest_modes.c
+++ b/tools/testing/selftests/kvm/lib/guest_modes.c
@@ -127,7 +127,7 @@ void guest_modes_cmdline(const char *arg)
mode_selected = true;
}
- mode = strtoul(optarg, NULL, 10);
+ mode = atoi_non_negative("Guest mode ID", arg);
TEST_ASSERT(mode < NUM_VM_MODES, "Guest mode ID %d too big", mode);
guest_modes[mode].enabled = true;
}
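Note: besides switching to a checked parser, the hunk above also fixes which string is parsed: the old code read the getopt global optarg rather than the arg parameter passed to guest_modes_cmdline(). atoi_non_negative() is a selftests helper whose implementation is not shown in this diff; a minimal sketch under that assumption (using <stdlib.h> and the selftests' TEST_ASSERT), which may differ from the in-tree version:

uint32_t atoi_non_negative(const char *name, const char *num_str)
{
	char *end;
	long num = strtol(num_str, &end, 0);

	/* Reject empty or non-numeric input as well as negative values. */
	TEST_ASSERT(*num_str && !*end, "%s is not a valid number: '%s'",
		    name, num_str);
	TEST_ASSERT(num >= 0, "%s cannot be negative: %ld", name, num);
	return num;
}
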
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index c88c3ace16d2..3ea24a5f4c43 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -186,6 +186,15 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
+/*
+ * Initializes vm->vpages_valid to match the canonical VA space of the
+ * architecture.
+ *
+ * The default implementation is valid for architectures which split the
+ * range addressed by a single page table into a low and high region
+ * based on the MSB of the VA. On architectures with this behavior
+ * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1)), -1].
+ */
__weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
sparsebit_set_num(vm->vpages_valid,
@@ -1416,10 +1425,10 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
while (npages--) {
virt_pg_map(vm, vaddr, paddr);
+ sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
+
vaddr += page_size;
paddr += page_size;
-
- sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
}
}
@@ -1835,6 +1844,7 @@ static struct exit_reason {
{KVM_EXIT_X86_RDMSR, "RDMSR"},
{KVM_EXIT_X86_WRMSR, "WRMSR"},
{KVM_EXIT_XEN, "XEN"},
+ {KVM_EXIT_HYPERV, "HYPERV"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
@@ -1932,9 +1942,6 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}
-/* Arbitrary minimum physical address used for virtual translation tables. */
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
-
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
{
return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
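Note: the virt_map() hunk above is a straight off-by-one fix: the vpages_mapped bit is now set for the page that was just mapped, before vaddr is advanced to the next page. The new comment documents vm_vaddr_populate_bitmap(), of which only the first statement is visible in the hunk; a hedged sketch of the complete default implementation under that description (the second sparsebit_set_num() call and its exact arithmetic are assumptions, not shown in this diff):

__weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
	/* Low canonical half: [0, 2^(va_bits - 1)), in page-sized units. */
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);

	/*
	 * High canonical half: [-(2^(va_bits - 1)), -1], i.e. the top
	 * 2^(va_bits - 1) bytes of the 64-bit space.
	 */
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);
}
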
diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c
index 0cc0971ce60e..2f0e2ea941cc 100644
--- a/tools/testing/selftests/kvm/lib/ucall_common.c
+++ b/tools/testing/selftests/kvm/lib/ucall_common.c
@@ -4,6 +4,8 @@
#include "linux/bitmap.h"
#include "linux/atomic.h"
+#define GUEST_UCALL_FAILED -1
+
struct ucall_header {
DECLARE_BITMAP(in_use, KVM_MAX_VCPUS);
struct ucall ucalls[KVM_MAX_VCPUS];
@@ -41,7 +43,8 @@ static struct ucall *ucall_alloc(void)
struct ucall *uc;
int i;
- GUEST_ASSERT(ucall_pool);
+ if (!ucall_pool)
+ goto ucall_failed;
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (!test_and_set_bit(i, ucall_pool->in_use)) {
@@ -51,7 +54,13 @@ static struct ucall *ucall_alloc(void)
}
}
- GUEST_ASSERT(0);
+ucall_failed:
+ /*
+ * If the vCPU cannot grab a ucall structure, make a bare ucall with a
+ * magic value to signal to get_ucall() that things went sideways.
+ * GUEST_ASSERT() depends on ucall_alloc() and so cannot be used here.
+ */
+ ucall_arch_do_ucall(GUEST_UCALL_FAILED);
return NULL;
}
@@ -93,6 +102,9 @@ uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
addr = ucall_arch_get_ucall(vcpu);
if (addr) {
+ TEST_ASSERT(addr != (void *)GUEST_UCALL_FAILED,
+ "Guest failed to allocate ucall struct");
+
memcpy(uc, addr, sizeof(*uc));
vcpu_run_complete_io(vcpu);
} else {
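Note: on the host side the new GUEST_UCALL_FAILED sentinel surfaces through get_ucall(), so a guest that cannot grab a ucall slot now kills the test with a clear assertion instead of hanging or reporting a confusing exit. Purely illustrative consumer below, using the usual selftest helpers inside a test's vCPU loop; the loop itself is hypothetical test code, not part of this diff.

	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);

		/*
		 * get_ucall() TEST_ASSERTs here if the guest reported
		 * GUEST_UCALL_FAILED from ucall_alloc().
		 */
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			continue;	/* guest checkpoint, keep running */
		case UCALL_DONE:
			return;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
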
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index c4d368d56cfe..ae1e573d94ce 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -19,6 +19,8 @@
#define MAX_NR_CPUID_ENTRIES 100
vm_vaddr_t exception_handlers;
+bool host_cpu_is_amd;
+bool host_cpu_is_intel;
static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
{
@@ -113,7 +115,7 @@ static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
bool kvm_is_tdp_enabled(void)
{
- if (is_intel_cpu())
+ if (host_cpu_is_intel)
return get_kvm_intel_param_bool("ept");
else
return get_kvm_amd_param_bool("npt");
@@ -555,6 +557,8 @@ static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
void kvm_arch_vm_post_create(struct kvm_vm *vm)
{
vm_create_irqchip(vm);
+ sync_global_to_guest(vm, host_cpu_is_intel);
+ sync_global_to_guest(vm, host_cpu_is_amd);
}
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
@@ -1006,32 +1010,10 @@ void kvm_x86_state_cleanup(struct kvm_x86_state *state)
free(state);
}
-static bool cpu_vendor_string_is(const char *vendor)
-{
- const uint32_t *chunk = (const uint32_t *)vendor;
- uint32_t eax, ebx, ecx, edx;
-
- cpuid(0, &eax, &ebx, &ecx, &edx);
- return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
-}
-
-bool is_intel_cpu(void)
-{
- return cpu_vendor_string_is("GenuineIntel");
-}
-
-/*
- * Exclude early K5 samples with a vendor string of "AMDisbetter!"
- */
-bool is_amd_cpu(void)
-{
- return cpu_vendor_string_is("AuthenticAMD");
-}
-
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
{
if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) {
- *pa_bits == kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32;
+ *pa_bits = kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32;
*va_bits = 32;
} else {
*pa_bits = kvm_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
@@ -1162,9 +1144,15 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
{
uint64_t r;
- asm volatile("vmcall"
+ asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t"
+ "jnz 1f\n\t"
+ "vmcall\n\t"
+ "jmp 2f\n\t"
+ "1: vmmcall\n\t"
+ "2:"
: "=a"(r)
- : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
+ : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3),
+ [use_vmmcall] "r" (host_cpu_is_amd));
return r;
}
@@ -1236,7 +1224,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
/* Avoid reserved HyperTransport region on AMD processors. */
- if (!is_amd_cpu())
+ if (!host_cpu_is_amd)
return max_gfn;
/* On parts with <40 physical address bits, the area is fully hidden */
@@ -1276,3 +1264,9 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm)
return get_kvm_intel_param_bool("unrestricted_guest");
}
+
+void kvm_selftest_arch_init(void)
+{
+ host_cpu_is_intel = this_cpu_is_intel();
+ host_cpu_is_amd = this_cpu_is_amd();
+}
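
Note: host_cpu_is_intel and host_cpu_is_amd cache the vendor once on the host and are synced into the guest by kvm_arch_vm_post_create(), which is what lets kvm_hypercall() pick VMCALL or VMMCALL from guest code without re-executing CPUID, presumably so the guest always uses the native instruction rather than relying on KVM to handle the other vendor's opcode. The this_cpu_is_intel()/this_cpu_is_amd() helpers called by kvm_selftest_arch_init() are not part of this diff; a hedged sketch, assuming they keep the CPUID.0 vendor-string check that the deleted cpu_vendor_string_is() performed:

static bool this_cpu_vendor_string_is(const char *vendor)
{
	const uint32_t *chunk = (const uint32_t *)vendor;
	uint32_t eax, ebx, ecx, edx;

	/* CPUID.0: the 12-byte vendor string is returned in EBX:EDX:ECX. */
	cpuid(0, &eax, &ebx, &ecx, &edx);
	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}

bool this_cpu_is_intel(void)
{
	return this_cpu_vendor_string_is("GenuineIntel");
}

/*
 * Matching "AuthenticAMD" exactly also excludes early K5 samples that
 * reported "AMDisbetter!", as the deleted comment noted.
 */
bool this_cpu_is_amd(void)
{
	return this_cpu_vendor_string_is("AuthenticAMD");
}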