Diffstat (limited to 'tools/testing/selftests/kvm/dirty_log_test.c')
-rw-r--r--  tools/testing/selftests/kvm/dirty_log_test.c  140
1 file changed, 74 insertions(+), 66 deletions(-)
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index ceb52b952637..5614222a6628 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -19,15 +19,13 @@
#include "kvm_util.h"
#include "processor.h"
-#define DEBUG printf
-
#define VCPU_ID 1
/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX 1
-/* Default guest test memory offset, 1G */
-#define DEFAULT_GUEST_TEST_MEM 0x40000000
+/* Default guest test virtual memory offset */
+#define DEFAULT_GUEST_TEST_MEM 0xc0000000
/* How many pages to dirty for each guest loop */
#define TEST_PAGES_PER_LOOP 1024
@@ -38,6 +36,27 @@
/* Interval for each host loop (ms) */
#define TEST_HOST_LOOP_INTERVAL 10UL
+/* Dirty bitmaps are always little endian, so we need to swap on big endian */
+#if defined(__s390x__)
+# define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
+# define test_bit_le(nr, addr) \
+ test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define set_bit_le(nr, addr) \
+ set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define clear_bit_le(nr, addr) \
+ clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define test_and_set_bit_le(nr, addr) \
+ test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define test_and_clear_bit_le(nr, addr) \
+ test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+#else
+# define test_bit_le test_bit
+# define set_bit_le set_bit
+# define clear_bit_le clear_bit
+# define test_and_set_bit_le test_and_set_bit
+# define test_and_clear_bit_le test_and_clear_bit
+#endif
+
/*
* Guest/Host shared variables. Ensure addr_gva2hva() and/or
* sync_global_to/from_guest() are used when accessing from
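For clarity, the BITOP_LE_SWIZZLE trick above exploits the fact that endianness reverses byte order but not bit order within a byte, so XOR-ing a little-endian bit number with 56 (on a 64-bit machine) yields the equivalent bit position inside a native big-endian long. A minimal standalone sketch, assuming 64-bit longs (not part of the patch):

#include <stdio.h>

#define BITS_PER_LONG 64
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG - 1) & ~0x7)	/* 56 on 64-bit */

int main(void)
{
	/*
	 * Memory byte 0 holds LE bits 0-7, but in a big-endian long it
	 * is the most significant byte (native bits 56-63), so the XOR
	 * remaps each LE bit number to its byte-mirrored position.
	 */
	for (unsigned int nr = 0; nr < 16; nr++)
		printf("LE bit %2u -> native bit %2u\n",
		       nr, nr ^ BITOP_LE_SWIZZLE);
	return 0;
}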
@@ -69,11 +88,23 @@ static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
*/
static void guest_code(void)
{
+ uint64_t addr;
int i;
+ /*
+ * On s390x, all pages of a 1M segment are initially marked as dirty
+ * when a page of the segment is written to for the very first time.
+ * To compensate for this peculiarity in this test, we need to
+ * touch all pages during the first iteration.
+ */
+ for (i = 0; i < guest_num_pages; i++) {
+ addr = guest_test_virt_mem + i * guest_page_size;
+ *(uint64_t *)addr = READ_ONCE(iteration);
+ }
+
while (true) {
for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
- uint64_t addr = guest_test_virt_mem;
+ addr = guest_test_virt_mem;
addr += (READ_ONCE(random_array[i]) % guest_num_pages)
* guest_page_size;
addr &= ~(host_page_size - 1);
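The final mask in the loop above rounds the randomly chosen address down to a host page boundary, so the host-side verifier knows exactly which host page each write dirties. A tiny standalone illustration of the align-down idiom, with hypothetical values (not from the patch):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint64_t host_page_size = 0x1000;	/* assume 4K host pages */
	uint64_t addr = 0xc0012345;		/* arbitrary example address */

	/*
	 * Clearing the low log2(page size) bits rounds down to the
	 * start of the containing host page.
	 */
	addr &= ~(host_page_size - 1);
	printf("aligned: 0x%" PRIx64 "\n", addr);	/* 0xc0012000 */
	return 0;
}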
@@ -158,15 +189,15 @@ static void vm_dirty_log_verify(unsigned long *bmap)
value_ptr = host_test_mem + page * host_page_size;
/* If this is a special page that we were tracking... */
- if (test_and_clear_bit(page, host_bmap_track)) {
+ if (test_and_clear_bit_le(page, host_bmap_track)) {
host_track_next_count++;
- TEST_ASSERT(test_bit(page, bmap),
+ TEST_ASSERT(test_bit_le(page, bmap),
"Page %"PRIu64" should have its dirty bit "
"set in this iteration but it is missing",
page);
}
- if (test_bit(page, bmap)) {
+ if (test_bit_le(page, bmap)) {
host_dirty_count++;
/*
* If the bit is set, the value written onto
@@ -209,21 +240,19 @@ static void vm_dirty_log_verify(unsigned long *bmap)
* should report its dirtiness in the
* next run
*/
- set_bit(page, host_bmap_track);
+ set_bit_le(page, host_bmap_track);
}
}
}
}
static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
- uint64_t extra_mem_pages, void *guest_code,
- unsigned long type)
+ uint64_t extra_mem_pages, void *guest_code)
{
struct kvm_vm *vm;
uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
- vm = _vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages,
- O_RDWR, type);
+ vm = _vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
#ifdef __x86_64__
vm_create_irqchip(vm);
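As a side note on the extra_pg_pages heuristic above: with 4K page-table pages holding 512 8-byte entries, one table page maps 512 data pages, so the division by 512 counts leaf table pages, and the doubling presumably leaves slack for the higher-level tables. A rough standalone sketch with a hypothetical input:

#include <stdio.h>

int main(void)
{
	unsigned long extra_mem_pages = 524288;	/* e.g. 2G of 4K pages */

	/* One leaf page-table page per 512 data pages, times 2. */
	unsigned long extra_pg_pages = extra_mem_pages / 512 * 2;

	printf("%lu data pages -> %lu page-table pages\n",
	       extra_mem_pages, extra_pg_pages);	/* -> 2048 */
	return 0;
}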
@@ -232,85 +261,61 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
return vm;
}
+#define DIRTY_MEM_BITS 30 /* 1G */
+#define PAGE_SHIFT_4K 12
+
static void run_test(enum vm_guest_mode mode, unsigned long iterations,
unsigned long interval, uint64_t phys_offset)
{
- unsigned int guest_pa_bits, guest_page_shift;
pthread_t vcpu_thread;
struct kvm_vm *vm;
- uint64_t max_gfn;
unsigned long *bmap;
- unsigned long type = 0;
-
- switch (mode) {
- case VM_MODE_P52V48_4K:
- guest_pa_bits = 52;
- guest_page_shift = 12;
- break;
- case VM_MODE_P52V48_64K:
- guest_pa_bits = 52;
- guest_page_shift = 16;
- break;
- case VM_MODE_P48V48_4K:
- guest_pa_bits = 48;
- guest_page_shift = 12;
- break;
- case VM_MODE_P48V48_64K:
- guest_pa_bits = 48;
- guest_page_shift = 16;
- break;
- case VM_MODE_P40V48_4K:
- guest_pa_bits = 40;
- guest_page_shift = 12;
- break;
- case VM_MODE_P40V48_64K:
- guest_pa_bits = 40;
- guest_page_shift = 16;
- break;
- default:
- TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
- }
- DEBUG("Testing guest mode: %s\n", vm_guest_mode_string(mode));
-
-#ifdef __x86_64__
/*
- * FIXME
- * The x86_64 kvm selftests framework currently only supports a
- * single PML4 which restricts the number of physical address
- * bits we can change to 39.
+ * We reserve page tables for twice the amount of extra dirty
+ * memory, which is definitely enough to cover the original
+ * (1G+) test range.  The calculation is done with the 4K page
+ * size, the smallest supported, so the page count suffices for
+ * all archs (e.g., a guest using 64K pages needs even less
+ * memory for page tables).
*/
- guest_pa_bits = 39;
-#endif
-#ifdef __aarch64__
- if (guest_pa_bits != 40)
- type = KVM_VM_TYPE_ARM_IPA_SIZE(guest_pa_bits);
-#endif
- max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
- guest_page_size = (1ul << guest_page_shift);
+ vm = create_vm(mode, VCPU_ID,
+ 2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
+ guest_code);
+
+ guest_page_size = vm_get_page_size(vm);
/*
* A little more than 1G of guest page sized pages. Cover the
* case where the size is not aligned to 64 pages.
*/
- guest_num_pages = (1ul << (30 - guest_page_shift)) + 16;
+ guest_num_pages = (1ul << (DIRTY_MEM_BITS -
+ vm_get_page_shift(vm))) + 16;
+#ifdef __s390x__
+ /* Round up to multiple of 1M (segment size) */
+ guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL;
+#endif
host_page_size = getpagesize();
host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
!!((guest_num_pages * guest_page_size) % host_page_size);
if (!phys_offset) {
- guest_test_phys_mem = (max_gfn - guest_num_pages) * guest_page_size;
+ guest_test_phys_mem = (vm_get_max_gfn(vm) -
+ guest_num_pages) * guest_page_size;
guest_test_phys_mem &= ~(host_page_size - 1);
} else {
guest_test_phys_mem = phys_offset;
}
+#ifdef __s390x__
+ /* Align to 1M (segment size) */
+ guest_test_phys_mem &= ~((1 << 20) - 1);
+#endif
+
DEBUG("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
bmap = bitmap_alloc(host_num_pages);
host_bmap_track = bitmap_alloc(host_num_pages);
- vm = create_vm(mode, VCPU_ID, guest_num_pages, guest_code, type);
-
#ifdef USE_CLEAR_DIRTY_LOG
struct kvm_enable_cap cap = {};
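Putting the sizing logic above together, here is a worked standalone example, assuming 4K guest and host pages (the numbers are illustrative only, not from the patch):

#include <stdio.h>

#define DIRTY_MEM_BITS 30	/* 1G */
#define PAGE_SHIFT_4K  12

int main(void)
{
	unsigned long guest_page_size = 1ul << PAGE_SHIFT_4K;
	unsigned long host_page_size = 4096;

	/* A little more than 1G of guest pages: 262144 + 16 = 262160 */
	unsigned long guest_num_pages =
		(1ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K)) + 16;

	/* s390x: round up to a multiple of 256 pages (1M segment) */
	guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL;	/* 262400 */

	/* Host pages needed, rounded up with the a/b + !!(a%b) idiom */
	unsigned long bytes = guest_num_pages * guest_page_size;
	unsigned long host_num_pages =
		bytes / host_page_size + !!(bytes % host_page_size);

	printf("guest pages: %lu, host pages: %lu\n",
	       guest_num_pages, host_num_pages);
	return 0;
}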
@@ -337,7 +342,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
#endif
#ifdef __aarch64__
- ucall_init(vm, UCALL_MMIO, NULL);
+ ucall_init(vm, NULL);
#endif
/* Export the shared variables to the guest */
@@ -440,7 +445,7 @@ int main(int argc, char *argv[])
#endif
#ifdef __x86_64__
- vm_guest_mode_params_init(VM_MODE_P52V48_4K, true, true);
+ vm_guest_mode_params_init(VM_MODE_PXXV48_4K, true, true);
#endif
#ifdef __aarch64__
vm_guest_mode_params_init(VM_MODE_P40V48_4K, true, true);
@@ -454,6 +459,9 @@ int main(int argc, char *argv[])
vm_guest_mode_params_init(VM_MODE_P48V48_64K, true, true);
}
#endif
+#ifdef __s390x__
+ vm_guest_mode_params_init(VM_MODE_P40V48_4K, true, true);
+#endif
while ((opt = getopt(argc, argv, "hi:I:p:m:")) != -1) {
switch (opt) {