Diffstat (limited to 'tools/testing/selftests/kvm/lib')
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/gic.c          |  66
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/gic_private.h  |  11
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/gic_v3.c       | 206
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/gic_v3.h       |  70
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/processor.c    |  82
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/vgic.c         | 103
-rw-r--r--  tools/testing/selftests/kvm/lib/guest_modes.c          |  59
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c             | 126
-rw-r--r--  tools/testing/selftests/kvm/lib/riscv/processor.c      | 362
-rw-r--r--  tools/testing/selftests/kvm/lib/riscv/ucall.c          |  87
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/processor.c     |  95
11 files changed, 1134 insertions, 133 deletions
diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic.c b/tools/testing/selftests/kvm/lib/aarch64/gic.c
index fff4fc27504d..55668631d546 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/gic.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/gic.c
@@ -93,3 +93,69 @@ void gic_set_eoi(unsigned int intid)
 	GUEST_ASSERT(gic_common_ops);
 	gic_common_ops->gic_write_eoir(intid);
 }
+
+void gic_set_dir(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_write_dir(intid);
+}
+
+void gic_set_eoi_split(bool split)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_set_eoi_split(split);
+}
+
+void gic_set_priority_mask(uint64_t pmr)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_set_priority_mask(pmr);
+}
+
+void gic_set_priority(unsigned int intid, unsigned int prio)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_set_priority(intid, prio);
+}
+
+void gic_irq_set_active(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_set_active(intid);
+}
+
+void gic_irq_clear_active(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_clear_active(intid);
+}
+
+bool gic_irq_get_active(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	return gic_common_ops->gic_irq_get_active(intid);
+}
+
+void gic_irq_set_pending(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_set_pending(intid);
+}
+
+void gic_irq_clear_pending(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_clear_pending(intid);
+}
+
+bool gic_irq_get_pending(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	return gic_common_ops->gic_irq_get_pending(intid);
+}
+
+void gic_irq_set_config(unsigned int intid, bool is_edge)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_set_config(intid, is_edge);
+}
diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_private.h b/tools/testing/selftests/kvm/lib/aarch64/gic_private.h
index d81d739433dc..75d07313c893 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/gic_private.h
+++ b/tools/testing/selftests/kvm/lib/aarch64/gic_private.h
@@ -14,6 +14,17 @@ struct gic_common_ops {
 	void (*gic_irq_disable)(unsigned int intid);
 	uint64_t (*gic_read_iar)(void);
 	void (*gic_write_eoir)(uint32_t irq);
+	void (*gic_write_dir)(uint32_t irq);
+	void (*gic_set_eoi_split)(bool split);
+	void (*gic_set_priority_mask)(uint64_t mask);
+	void (*gic_set_priority)(uint32_t intid, uint32_t prio);
+	void (*gic_irq_set_active)(uint32_t intid);
+	void (*gic_irq_clear_active)(uint32_t intid);
+	bool (*gic_irq_get_active)(uint32_t intid);
+	void (*gic_irq_set_pending)(uint32_t intid);
+	void (*gic_irq_clear_pending)(uint32_t intid);
+	bool (*gic_irq_get_pending)(uint32_t intid);
+	void (*gic_irq_set_config)(uint32_t intid, bool is_edge);
 };
 
 extern const struct gic_common_ops gicv3_ops;
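Every helper added to gic.c above is a thin guard-and-dispatch wrapper around the gic_common_ops table extended in gic_private.h, so guest code stays GIC-version agnostic. A minimal guest-side sketch of the intended calling sequence (the SPI number and priority values are made up for illustration, and a prior gic_init() is assumed):

	static void guest_irq_setup(void)
	{
		unsigned int intid = 42;		/* some SPI, for illustration */

		gic_set_priority_mask(0xf0);		/* ICC_PMR_EL1: allow prio < 0xf0 */
		gic_set_priority(intid, 0);		/* highest priority */
		gic_irq_set_config(intid, false);	/* level-sensitive */
		gic_set_eoi_split(true);		/* EOIR drops priority, DIR deactivates */
		gic_irq_enable(intid);
	}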
diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c
index 2dbf3339b62e..00f613c0583c 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c
@@ -19,7 +19,8 @@ struct gicv3_data {
 	unsigned int nr_spis;
 };
 
-#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K)
+#define sgi_base_from_redist(redist_base) 	(redist_base + SZ_64K)
+#define DIST_BIT				(1U << 31)
 
 enum gicv3_intid_range {
 	SGI_RANGE,
@@ -50,6 +51,14 @@ static void gicv3_gicr_wait_for_rwp(void *redist_base)
 	}
 }
 
+static void gicv3_wait_for_rwp(uint32_t cpu_or_dist)
+{
+	if (cpu_or_dist & DIST_BIT)
+		gicv3_gicd_wait_for_rwp();
+	else
+		gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu_or_dist]);
+}
+
 static enum gicv3_intid_range get_intid_range(unsigned int intid)
 {
 	switch (intid) {
@@ -81,39 +90,175 @@ static void gicv3_write_eoir(uint32_t irq)
 	isb();
 }
 
-static void
-gicv3_config_irq(unsigned int intid, unsigned int offset)
+static void gicv3_write_dir(uint32_t irq)
+{
+	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
+	isb();
+}
+
+static void gicv3_set_priority_mask(uint64_t mask)
+{
+	write_sysreg_s(mask, SYS_ICC_PMR_EL1);
+}
+
+static void gicv3_set_eoi_split(bool split)
+{
+	uint32_t val;
+
+	/* All other fields are read-only, so no need to read CTLR first. In
+	 * fact, the kernel does the same.
+	 */
+	val = split ? (1U << 1) : 0;
+	write_sysreg_s(val, SYS_ICC_CTLR_EL1);
+	isb();
+}
+
+uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset)
+{
+	void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
+		: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
+	return readl(base + offset);
+}
+
+void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val)
+{
+	void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
+		: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
+	writel(reg_val, base + offset);
+}
+
+uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask)
+{
+	return gicv3_reg_readl(cpu_or_dist, offset) & mask;
+}
+
+void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
+		uint32_t mask, uint32_t reg_val)
+{
+	uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;
+
+	tmp |= (reg_val & mask);
+	gicv3_reg_writel(cpu_or_dist, offset, tmp);
+}
+
+/*
+ * We use a single offset for the distributor and redistributor maps as they
+ * have the same value in both. The only exceptions are registers that only
+ * exist in one and not the other, like GICR_WAKER that doesn't exist in the
+ * distributor map. Such registers are conveniently marked as reserved in the
+ * map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being
+ * marked as "Reserved" in the Distributor map.
+ */
+static void gicv3_access_reg(uint32_t intid, uint64_t offset,
+		uint32_t reg_bits, uint32_t bits_per_field,
+		bool write, uint32_t *val)
 {
 	uint32_t cpu = guest_get_vcpuid();
-	uint32_t mask = 1 << (intid % 32);
 	enum gicv3_intid_range intid_range = get_intid_range(intid);
-	void *reg;
-
-	/* We care about 'cpu' only for SGIs or PPIs */
-	if (intid_range == SGI_RANGE || intid_range == PPI_RANGE) {
-		GUEST_ASSERT(cpu < gicv3_data.nr_cpus);
-
-		reg = sgi_base_from_redist(gicv3_data.redist_base[cpu]) +
-			offset;
-		writel(mask, reg);
-		gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu]);
-	} else if (intid_range == SPI_RANGE) {
-		reg = gicv3_data.dist_base + offset + (intid / 32) * 4;
-		writel(mask, reg);
-		gicv3_gicd_wait_for_rwp();
-	} else {
-		GUEST_ASSERT(0);
-	}
+	uint32_t fields_per_reg, index, mask, shift;
+	uint32_t cpu_or_dist;
+
+	GUEST_ASSERT(bits_per_field <= reg_bits);
+	GUEST_ASSERT(*val < (1U << bits_per_field));
+	/* Some registers like IROUTER are 64 bit long. Those are currently not
+	 * supported by readl nor writel, so just asserting here until then.
+	 */
+	GUEST_ASSERT(reg_bits == 32);
+
+	fields_per_reg = reg_bits / bits_per_field;
+	index = intid % fields_per_reg;
+	shift = index * bits_per_field;
+	mask = ((1U << bits_per_field) - 1) << shift;
+
+	/* Set offset to the actual register holding intid's config. */
+	offset += (intid / fields_per_reg) * (reg_bits / 8);
+
+	cpu_or_dist = (intid_range == SPI_RANGE) ? DIST_BIT : cpu;
+
+	if (write)
+		gicv3_setl_fields(cpu_or_dist, offset, mask, *val << shift);
+	*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
+}
+
+static void gicv3_write_reg(uint32_t intid, uint64_t offset,
+		uint32_t reg_bits, uint32_t bits_per_field, uint32_t val)
+{
+	gicv3_access_reg(intid, offset, reg_bits,
+			bits_per_field, true, &val);
+}
+
+static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset,
+		uint32_t reg_bits, uint32_t bits_per_field)
+{
+	uint32_t val;
+
+	gicv3_access_reg(intid, offset, reg_bits,
+			bits_per_field, false, &val);
+	return val;
+}
+
+static void gicv3_set_priority(uint32_t intid, uint32_t prio)
+{
+	gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
+}
+
+/* Sets the intid to be level-sensitive or edge-triggered. */
+static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
+{
+	uint32_t val;
+
+	/* N/A for private interrupts. */
+	GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
+	val = is_edge ? 2 : 0;
+	gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
+}
+
+static void gicv3_irq_enable(uint32_t intid)
+{
+	bool is_spi = get_intid_range(intid) == SPI_RANGE;
+	uint32_t cpu = guest_get_vcpuid();
+
+	gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
+	gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
+}
+
+static void gicv3_irq_disable(uint32_t intid)
+{
+	bool is_spi = get_intid_range(intid) == SPI_RANGE;
+	uint32_t cpu = guest_get_vcpuid();
+
+	gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
+	gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
+}
+
+static void gicv3_irq_set_active(uint32_t intid)
+{
+	gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
+}
+
+static void gicv3_irq_clear_active(uint32_t intid)
+{
+	gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
+}
+
+static bool gicv3_irq_get_active(uint32_t intid)
+{
+	return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
+}
+
+static void gicv3_irq_set_pending(uint32_t intid)
+{
+	gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
 }
 
-static void gicv3_irq_enable(unsigned int intid)
+static void gicv3_irq_clear_pending(uint32_t intid)
 {
-	gicv3_config_irq(intid, GICD_ISENABLER);
+	gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
 }
 
-static void gicv3_irq_disable(unsigned int intid)
+static bool gicv3_irq_get_pending(uint32_t intid)
 {
-	gicv3_config_irq(intid, GICD_ICENABLER);
+	return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
 }
 
 static void gicv3_enable_redist(void *redist_base)
@@ -237,4 +382,15 @@ const struct gic_common_ops gicv3_ops = {
 	.gic_irq_disable = gicv3_irq_disable,
 	.gic_read_iar = gicv3_read_iar,
 	.gic_write_eoir = gicv3_write_eoir,
+	.gic_write_dir = gicv3_write_dir,
+	.gic_set_priority_mask = gicv3_set_priority_mask,
+	.gic_set_eoi_split = gicv3_set_eoi_split,
+	.gic_set_priority = gicv3_set_priority,
+	.gic_irq_set_active = gicv3_irq_set_active,
+	.gic_irq_clear_active = gicv3_irq_clear_active,
+	.gic_irq_get_active = gicv3_irq_get_active,
+	.gic_irq_set_pending = gicv3_irq_set_pending,
+	.gic_irq_clear_pending = gicv3_irq_clear_pending,
+	.gic_irq_get_pending = gicv3_irq_get_pending,
+	.gic_irq_set_config = gicv3_irq_set_config,
 };
diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.h b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.h
deleted file mode 100644
index b51536d469a6..000000000000
--- a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ARM Generic Interrupt Controller (GIC) v3 specific defines
- */
-
-#ifndef SELFTEST_KVM_GICV3_H
-#define SELFTEST_KVM_GICV3_H
-
-#include <asm/sysreg.h>
-
-/*
- * Distributor registers
- */
-#define GICD_CTLR			0x0000
-#define GICD_TYPER			0x0004
-#define GICD_IGROUPR			0x0080
-#define GICD_ISENABLER			0x0100
-#define GICD_ICENABLER			0x0180
-#define GICD_ICACTIVER			0x0380
-#define GICD_IPRIORITYR			0x0400
-
-/*
- * The assumption is that the guest runs in a non-secure mode.
- * The following bits of GICD_CTLR are defined accordingly.
- */
-#define GICD_CTLR_RWP			(1U << 31)
-#define GICD_CTLR_nASSGIreq		(1U << 8)
-#define GICD_CTLR_ARE_NS		(1U << 4)
-#define GICD_CTLR_ENABLE_G1A		(1U << 1)
-#define GICD_CTLR_ENABLE_G1		(1U << 0)
-
-#define GICD_TYPER_SPIS(typer)		((((typer) & 0x1f) + 1) * 32)
-#define GICD_INT_DEF_PRI_X4		0xa0a0a0a0
-
-/*
- * Redistributor registers
- */
-#define GICR_CTLR			0x000
-#define GICR_WAKER			0x014
-
-#define GICR_CTLR_RWP			(1U << 3)
-
-#define GICR_WAKER_ProcessorSleep	(1U << 1)
-#define GICR_WAKER_ChildrenAsleep	(1U << 2)
-
-/*
- * Redistributor registers, offsets from SGI base
- */
-#define GICR_IGROUPR0			GICD_IGROUPR
-#define GICR_ISENABLER0			GICD_ISENABLER
-#define GICR_ICENABLER0			GICD_ICENABLER
-#define GICR_ICACTIVER0			GICD_ICACTIVER
-#define GICR_IPRIORITYR0		GICD_IPRIORITYR
-
-/* CPU interface registers */
-#define SYS_ICC_PMR_EL1			sys_reg(3, 0, 4, 6, 0)
-#define SYS_ICC_IAR1_EL1		sys_reg(3, 0, 12, 12, 0)
-#define SYS_ICC_EOIR1_EL1		sys_reg(3, 0, 12, 12, 1)
-#define SYS_ICC_SRE_EL1			sys_reg(3, 0, 12, 12, 5)
-#define SYS_ICC_GRPEN1_EL1		sys_reg(3, 0, 12, 12, 7)
-
-#define ICC_PMR_DEF_PRIO		0xf0
-
-#define ICC_SRE_EL1_SRE			(1U << 0)
-
-#define ICC_IGRPEN1_EL1_ENABLE		(1U << 0)
-
-#define GICV3_MAX_CPUS			512
-
-#endif /* SELFTEST_KVM_GICV3_H */
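The index math in gicv3_access_reg() is easiest to see with a worked example (not part of the patch): a priority write via gicv3_set_priority(42, 0xa0) runs with reg_bits = 32 and bits_per_field = 8, so:

	fields_per_reg = 32 / 8;	/* 4 priority fields per 32-bit register */
	index = 42 % 4;			/* field 2 within that register */
	shift = 2 * 8;			/* the field occupies bits [23:16] */
	mask  = 0xff << 16;		/* 0x00ff0000 */
	offset = GICD_IPRIORITYR + (42 / 4) * 4;	/* 0x400 + 0x28 */

Since intid 42 is a SPI, cpu_or_dist becomes DIST_BIT and the read-modify-write lands on the distributor mapping rather than a redistributor's SGI frame.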
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index b4eeeafd2a70..9343d82519b4 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -8,6 +8,7 @@
 #include <linux/compiler.h>
 #include <assert.h>
 
+#include "guest_modes.h"
 #include "kvm_util.h"
 #include "../kvm_util_internal.h"
 #include "processor.h"
@@ -237,6 +238,7 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init
 	get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
 	get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
 
+	/* Configure base granule size */
 	switch (vm->mode) {
 	case VM_MODE_P52V48_4K:
 		TEST_FAIL("AArch64 does not support 4K sized pages "
@@ -245,25 +247,47 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init
 		TEST_FAIL("AArch64 does not support 4K sized pages "
 			  "with ANY-bit physical address ranges");
 	case VM_MODE_P52V48_64K:
+	case VM_MODE_P48V48_64K:
+	case VM_MODE_P40V48_64K:
+	case VM_MODE_P36V48_64K:
 		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
-		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
+		break;
+	case VM_MODE_P48V48_16K:
+	case VM_MODE_P40V48_16K:
+	case VM_MODE_P36V48_16K:
+	case VM_MODE_P36V47_16K:
+		tcr_el1 |= 2ul << 14; /* TG0 = 16KB */
 		break;
 	case VM_MODE_P48V48_4K:
+	case VM_MODE_P40V48_4K:
+	case VM_MODE_P36V48_4K:
 		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
-		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
 		break;
+	default:
+		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
+	}
+
+	/* Configure output size */
+	switch (vm->mode) {
+	case VM_MODE_P52V48_64K:
+		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
+		break;
+	case VM_MODE_P48V48_4K:
+	case VM_MODE_P48V48_16K:
 	case VM_MODE_P48V48_64K:
-		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
 		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
 		break;
 	case VM_MODE_P40V48_4K:
-		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
-		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
-		break;
+	case VM_MODE_P40V48_16K:
 	case VM_MODE_P40V48_64K:
-		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
 		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
 		break;
+	case VM_MODE_P36V48_4K:
+	case VM_MODE_P36V48_16K:
+	case VM_MODE_P36V48_64K:
+	case VM_MODE_P36V47_16K:
+		tcr_el1 |= 1ul << 32; /* IPS = 36 bits */
+		break;
 	default:
 		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
 	}
@@ -432,3 +456,47 @@ uint32_t guest_get_vcpuid(void)
 {
 	return read_sysreg(tpidr_el1);
 }
+
+void aarch64_get_supported_page_sizes(uint32_t ipa,
+				      bool *ps4k, bool *ps16k, bool *ps64k)
+{
+	struct kvm_vcpu_init preferred_init;
+	int kvm_fd, vm_fd, vcpu_fd, err;
+	uint64_t val;
+	struct kvm_one_reg reg = {
+		.id	= KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
+		.addr	= (uint64_t)&val,
+	};
+
+	kvm_fd = open_kvm_dev_path_or_exit();
+	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, ipa);
+	TEST_ASSERT(vm_fd >= 0, "Can't create VM");
+
+	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
+	TEST_ASSERT(vcpu_fd >= 0, "Can't create vcpu");
+
+	err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
+	TEST_ASSERT(err == 0, "Can't get target");
+	err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
+	TEST_ASSERT(err == 0, "Can't get init vcpu");
+
+	err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
+	TEST_ASSERT(err == 0, "Can't get MMFR0");
+
+	*ps4k = ((val >> 28) & 0xf) != 0xf;
+	*ps64k = ((val >> 24) & 0xf) == 0;
+	*ps16k = ((val >> 20) & 0xf) != 0;
+
+	close(vcpu_fd);
+	close(vm_fd);
+	close(kvm_fd);
+}
+
+/*
+ * arm64 doesn't have a true default mode, so start by computing the
+ * available IPA space and page sizes early.
+ */
+void __attribute__((constructor)) init_guest_modes(void)
+{
+       guest_modes_append_default();
+}
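The split into two switches makes the granule (TG0, TCR_EL1[15:14]) and the output size (IPS, TCR_EL1[34:32]) independent choices. A hedged distillation of the encodings the patch uses (the helper name is ours; the field values come straight from the hunks above):

	static uint64_t tcr_granule_and_ips(unsigned int granule_kb, unsigned int pa_bits)
	{
		uint64_t tcr = 0;

		/* TG0: 0 = 4KB, 2 = 16KB, 1 = 64KB */
		tcr |= (granule_kb == 64 ? 1ul : granule_kb == 16 ? 2ul : 0ul) << 14;
		/* IPS: 6 = 52, 5 = 48, 2 = 40, 1 = 36 bits */
		tcr |= (pa_bits == 52 ? 6ul :
			pa_bits == 48 ? 5ul :
			pa_bits == 40 ? 2ul : 1ul) << 32;
		return tcr;
	}

For example, VM_MODE_P36V48_16K ends up with TG0 = 2 and IPS = 1.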
diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
index b9b271ff520d..b3a0fca0d780 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/vgic.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
@@ -5,11 +5,14 @@
 
 #include <linux/kvm.h>
 #include <linux/sizes.h>
+#include <asm/kvm_para.h>
 #include <asm/kvm.h>
 
 #include "kvm_util.h"
 #include "../kvm_util_internal.h"
 #include "vgic.h"
+#include "gic.h"
+#include "gic_v3.h"
 
 /*
  * vGIC-v3 default host setup
@@ -28,7 +31,7 @@
  * redistributor regions of the guest. Since it depends on the number of
  * vCPUs for the VM, it must be called after all the vCPUs have been created.
  */
-int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
 		uint64_t gicd_base_gpa, uint64_t gicr_base_gpa)
 {
 	int gic_fd;
@@ -50,6 +53,13 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
 
 	/* Distributor setup */
 	gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+
+	kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
+			0, &nr_irqs, true);
+
+	kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+			KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+
 	kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
 			KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa, true);
 	nr_gic_pages = vm_calc_num_guest_pages(vm->mode, KVM_VGIC_V3_DIST_SIZE);
@@ -68,3 +78,94 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
 
 	return gic_fd;
 }
+
+/* should only work for level sensitive interrupts */
+int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
+{
+	uint64_t attr = 32 * (intid / 32);
+	uint64_t index = intid % 32;
+	uint64_t val;
+	int ret;
+
+	ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
+				 attr, &val, false);
+	if (ret != 0)
+		return ret;
+
+	val |= 1U << index;
+	ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
+				 attr, &val, true);
+	return ret;
+}
+
+void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
+{
+	int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
+
+	TEST_ASSERT(ret == 0, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO failed, "
+			"rc: %i errno: %i", ret, errno);
+}
+
+int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
+{
+	uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK;
+
+	TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself "
+		"doesn't allow injecting SGIs. There's no mask for it.");
+
+	if (INTID_IS_PPI(intid))
+		irq |= KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT;
+	else
+		irq |= KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT;
+
+	return _kvm_irq_line(vm, irq, level);
+}
+
+void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
+{
+	int ret = _kvm_arm_irq_line(vm, intid, level);
+
+	TEST_ASSERT(ret == 0, "KVM_IRQ_LINE failed, rc: %i errno: %i",
+			ret, errno);
+}
+
+static void vgic_poke_irq(int gic_fd, uint32_t intid,
+		uint32_t vcpu, uint64_t reg_off)
+{
+	uint64_t reg = intid / 32;
+	uint64_t index = intid % 32;
+	uint64_t attr = reg_off + reg * 4;
+	uint64_t val;
+	bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid);
+
+	/* Check that the addr part of the attr is within 32 bits. */
+	assert(attr <= KVM_DEV_ARM_VGIC_OFFSET_MASK);
+
+	uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
+					  : KVM_DEV_ARM_VGIC_GRP_DIST_REGS;
+
+	if (intid_is_private) {
+		/* TODO: only vcpu 0 implemented for now. */
+		assert(vcpu == 0);
+		attr += SZ_64K;
+	}
+
+	/* All calls will succeed, even with invalid intid's, as long as the
+	 * addr part of the attr is within 32 bits (checked above). An invalid
+	 * intid will just make the read/writes point to above the intended
+	 * register space (i.e., ICPENDR after ISPENDR).
+	 */
+	kvm_device_access(gic_fd, group, attr, &val, false);
+	val |= 1ULL << index;
+	kvm_device_access(gic_fd, group, attr, &val, true);
+}
+
+void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu)
+{
+	vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR);
+}
+
+void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu)
+{
+	vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER);
+}
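Taken together, the vgic helpers give a test three host-side ways to fire an interrupt: KVM_IRQ_LINE via kvm_arm_irq_line(), direct ISPENDR/ISACTIVER pokes through the device-attribute API, and level-info updates for level-sensitive IRQs. A hypothetical usage sketch (the GPAs, vCPU count, IRQ count, and intid are placeholders):

	int gic_fd = vgic_v3_setup(vm, 1 /* nr_vcpus */, 64 /* nr_irqs */,
				   0x08000000ULL, 0x080a0000ULL);

	kvm_arm_irq_line(vm, 42, 1);		/* assert SPI 42 */
	kvm_arm_irq_line(vm, 42, 0);		/* ...and deassert it */
	kvm_irq_write_ispendr(gic_fd, 42, 0);	/* or mark it pending directly */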
diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c
index c330f414ef96..8784013b747c 100644
--- a/tools/testing/selftests/kvm/lib/guest_modes.c
+++ b/tools/testing/selftests/kvm/lib/guest_modes.c
@@ -4,22 +4,59 @@
  */
 #include "guest_modes.h"
 
+#ifdef __aarch64__
+#include "processor.h"
+enum vm_guest_mode vm_mode_default;
+#endif
+
 struct guest_mode guest_modes[NUM_VM_MODES];
 
 void guest_modes_append_default(void)
 {
+#ifndef __aarch64__
 	guest_mode_append(VM_MODE_DEFAULT, true, true);
-
-#ifdef __aarch64__
-	guest_mode_append(VM_MODE_P40V48_64K, true, true);
+#else
 	{
 		unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
+		bool ps4k, ps16k, ps64k;
+		int i;
+
+		aarch64_get_supported_page_sizes(limit, &ps4k, &ps16k, &ps64k);
+
+		vm_mode_default = NUM_VM_MODES;
+
 		if (limit >= 52)
-			guest_mode_append(VM_MODE_P52V48_64K, true, true);
+			guest_mode_append(VM_MODE_P52V48_64K, ps64k, ps64k);
 		if (limit >= 48) {
-			guest_mode_append(VM_MODE_P48V48_4K, true, true);
-			guest_mode_append(VM_MODE_P48V48_64K, true, true);
+			guest_mode_append(VM_MODE_P48V48_4K, ps4k, ps4k);
+			guest_mode_append(VM_MODE_P48V48_16K, ps16k, ps16k);
+			guest_mode_append(VM_MODE_P48V48_64K, ps64k, ps64k);
+		}
+		if (limit >= 40) {
+			guest_mode_append(VM_MODE_P40V48_4K, ps4k, ps4k);
+			guest_mode_append(VM_MODE_P40V48_16K, ps16k, ps16k);
+			guest_mode_append(VM_MODE_P40V48_64K, ps64k, ps64k);
+			if (ps4k)
+				vm_mode_default = VM_MODE_P40V48_4K;
 		}
+		if (limit >= 36) {
+			guest_mode_append(VM_MODE_P36V48_4K, ps4k, ps4k);
+			guest_mode_append(VM_MODE_P36V48_16K, ps16k, ps16k);
+			guest_mode_append(VM_MODE_P36V48_64K, ps64k, ps64k);
+			guest_mode_append(VM_MODE_P36V47_16K, ps16k, ps16k);
+		}
+
+		/*
+		 * Pick the first supported IPA size if the default
+		 * isn't available.
+		 */
+		for (i = 0; vm_mode_default == NUM_VM_MODES && i < NUM_VM_MODES; i++) {
+			if (guest_modes[i].supported && guest_modes[i].enabled)
+				vm_mode_default = i;
+		}
+
+		TEST_ASSERT(vm_mode_default != NUM_VM_MODES,
+			    "No supported mode!");
 	}
 #endif
 #ifdef __s390x__
@@ -38,6 +75,16 @@ void guest_modes_append_default(void)
 			guest_mode_append(VM_MODE_P47V64_4K, true, true);
 	}
 #endif
+#ifdef __riscv
+	{
+		unsigned int sz = kvm_check_cap(KVM_CAP_VM_GPA_BITS);
+
+		if (sz >= 52)
+			guest_mode_append(VM_MODE_P52V48_4K, true, true);
+		if (sz >= 48)
+			guest_mode_append(VM_MODE_P48V48_4K, true, true);
+	}
+#endif
 }
 
 void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg)
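With modes now registered according to what the host actually supports, a typical test simply iterates whatever ended up enabled. A minimal usage sketch under the existing iteration API (the test body and main() shape are ours):

	static void run_test(enum vm_guest_mode mode, void *arg)
	{
		pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
		/* ... create a VM in 'mode' and exercise it ... */
	}

	int main(int argc, char *argv[])
	{
		guest_modes_append_default();
		for_each_guest_mode(run_test, NULL);
		return 0;
	}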
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 53d2b5d04b82..4a645dc77f34 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -85,6 +85,33 @@ int kvm_check_cap(long cap)
 	return ret;
 }
 
+/* VM Check Capability
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   cap - Capability
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   On success, the Value corresponding to the capability (KVM_CAP_*)
+ *   specified by the value of cap.  On failure a TEST_ASSERT failure
+ *   is produced.
+ *
+ * Looks up and returns the value corresponding to the capability
+ * (KVM_CAP_*) given by cap.
+ */
+int vm_check_cap(struct kvm_vm *vm, long cap)
+{
+	int ret;
+
+	ret = ioctl(vm->fd, KVM_CHECK_EXTENSION, cap);
+	TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION VM IOCTL failed,\n"
+		"  rc: %i errno: %i", ret, errno);
+
+	return ret;
+}
+
 /* VM Enable Capability
  *
  * Input Args:
@@ -166,12 +193,18 @@ const char *vm_guest_mode_string(uint32_t i)
 		[VM_MODE_P52V48_4K]	= "PA-bits:52,  VA-bits:48,  4K pages",
 		[VM_MODE_P52V48_64K]	= "PA-bits:52,  VA-bits:48, 64K pages",
 		[VM_MODE_P48V48_4K]	= "PA-bits:48,  VA-bits:48,  4K pages",
+		[VM_MODE_P48V48_16K]	= "PA-bits:48,  VA-bits:48, 16K pages",
 		[VM_MODE_P48V48_64K]	= "PA-bits:48,  VA-bits:48, 64K pages",
 		[VM_MODE_P40V48_4K]	= "PA-bits:40,  VA-bits:48,  4K pages",
+		[VM_MODE_P40V48_16K]	= "PA-bits:40,  VA-bits:48, 16K pages",
 		[VM_MODE_P40V48_64K]	= "PA-bits:40,  VA-bits:48, 64K pages",
 		[VM_MODE_PXXV48_4K]	= "PA-bits:ANY, VA-bits:48,  4K pages",
 		[VM_MODE_P47V64_4K]	= "PA-bits:47,  VA-bits:64,  4K pages",
 		[VM_MODE_P44V64_4K]	= "PA-bits:44,  VA-bits:64,  4K pages",
+		[VM_MODE_P36V48_4K]	= "PA-bits:36,  VA-bits:48,  4K pages",
+		[VM_MODE_P36V48_16K]	= "PA-bits:36,  VA-bits:48, 16K pages",
+		[VM_MODE_P36V48_64K]	= "PA-bits:36,  VA-bits:48, 64K pages",
+		[VM_MODE_P36V47_16K]	= "PA-bits:36,  VA-bits:47, 16K pages",
 	};
 	_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
 		       "Missing new mode strings?");
@@ -185,12 +218,18 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
 	[VM_MODE_P52V48_4K]	= { 52, 48,  0x1000, 12 },
 	[VM_MODE_P52V48_64K]	= { 52, 48, 0x10000, 16 },
 	[VM_MODE_P48V48_4K]	= { 48, 48,  0x1000, 12 },
+	[VM_MODE_P48V48_16K]	= { 48, 48,  0x4000, 14 },
 	[VM_MODE_P48V48_64K]	= { 48, 48, 0x10000, 16 },
 	[VM_MODE_P40V48_4K]	= { 40, 48,  0x1000, 12 },
+	[VM_MODE_P40V48_16K]	= { 40, 48,  0x4000, 14 },
 	[VM_MODE_P40V48_64K]	= { 40, 48, 0x10000, 16 },
 	[VM_MODE_PXXV48_4K]	= {  0,  0,  0x1000, 12 },
 	[VM_MODE_P47V64_4K]	= { 47, 64,  0x1000, 12 },
 	[VM_MODE_P44V64_4K]	= { 44, 64,  0x1000, 12 },
+	[VM_MODE_P36V48_4K]	= { 36, 48,  0x1000, 12 },
+	[VM_MODE_P36V48_16K]	= { 36, 48,  0x4000, 14 },
+	[VM_MODE_P36V48_64K]	= { 36, 48, 0x10000, 16 },
+	[VM_MODE_P36V47_16K]	= { 36, 47,  0x4000, 14 },
 };
 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
 	       "Missing new mode params?");
@@ -252,9 +291,19 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
 		vm->pgtable_levels = 3;
 		break;
 	case VM_MODE_P40V48_4K:
+	case VM_MODE_P36V48_4K:
 		vm->pgtable_levels = 4;
 		break;
 	case VM_MODE_P40V48_64K:
+	case VM_MODE_P36V48_64K:
+		vm->pgtable_levels = 3;
+		break;
+	case VM_MODE_P48V48_16K:
+	case VM_MODE_P40V48_16K:
+	case VM_MODE_P36V48_16K:
+		vm->pgtable_levels = 4;
+		break;
+	case VM_MODE_P36V47_16K:
 		vm->pgtable_levels = 3;
 		break;
 	case VM_MODE_PXXV48_4K:
@@ -344,6 +393,11 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
 	struct kvm_vm *vm;
 	int i;
 
+	/*
+	 * Permission needs to be requested before KVM_SET_CPUID2.
+	 */
+	vm_xsave_req_perm();
+
 	/* Force slot0 memory size not small than DEFAULT_GUEST_PHY_PAGES */
 	if (slot0_mem_pages < DEFAULT_GUEST_PHY_PAGES)
 		slot0_mem_pages = DEFAULT_GUEST_PHY_PAGES;
@@ -2087,6 +2141,78 @@ int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
 }
 
 /*
+ * IRQ related functions.
+ */
+
+int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
+{
+	struct kvm_irq_level irq_level = {
+		.irq    = irq,
+		.level  = level,
+	};
+
+	return _vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
+}
+
+void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
+{
+	int ret = _kvm_irq_line(vm, irq, level);
+
+	TEST_ASSERT(ret >= 0, "KVM_IRQ_LINE failed, rc: %i errno: %i", ret, errno);
+}
+
+struct kvm_irq_routing *kvm_gsi_routing_create(void)
+{
+	struct kvm_irq_routing *routing;
+	size_t size;
+
+	size = sizeof(struct kvm_irq_routing);
+	/* Allocate space for the max number of entries: this wastes 196 KBs. */
+	size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
+	routing = calloc(1, size);
+	assert(routing);
+
+	return routing;
+}
+
+void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
+		uint32_t gsi, uint32_t pin)
+{
+	int i;
+
+	assert(routing);
+	assert(routing->nr < KVM_MAX_IRQ_ROUTES);
+
+	i = routing->nr;
+	routing->entries[i].gsi = gsi;
+	routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
+	routing->entries[i].flags = 0;
+	routing->entries[i].u.irqchip.irqchip = 0;
+	routing->entries[i].u.irqchip.pin = pin;
+	routing->nr++;
+}
+
+int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
+{
+	int ret;
+
+	assert(routing);
+	ret = ioctl(vm_get_fd(vm), KVM_SET_GSI_ROUTING, routing);
+	free(routing);
+
+	return ret;
+}
+
+void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
+{
+	int ret;
+
+	ret = _kvm_gsi_routing_write(vm, routing);
+	TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING failed, rc: %i errno: %i",
+				ret, errno);
+}
+
+/*
  * VM Dump
  *
  * Input Args:
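The GSI routing helpers are meant to be used as a create/add/write triple; note that _kvm_gsi_routing_write() frees the table, so the caller must not reuse it afterwards. A hedged sketch (the GSI and pin numbers are arbitrary):

	struct kvm_irq_routing *routing = kvm_gsi_routing_create();

	kvm_gsi_routing_irqchip_add(routing, 0 /* gsi */, 0 /* irqchip pin */);
	kvm_gsi_routing_irqchip_add(routing, 1, 1);
	kvm_gsi_routing_write(vm, routing);	/* writes the table and frees it */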
diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c
new file mode 100644
index 000000000000..d377f2603d98
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/riscv/processor.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RISC-V code
+ *
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/compiler.h>
+#include <assert.h>
+
+#include "kvm_util.h"
+#include "../kvm_util_internal.h"
+#include "processor.h"
+
+#define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN	0xac0000
+
+static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
+{
+	return (v + vm->page_size) & ~(vm->page_size - 1);
+}
+
+static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
+{
+	return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) <<
+		PGTBL_PAGE_SIZE_SHIFT;
+}
+
+static uint64_t ptrs_per_pte(struct kvm_vm *vm)
+{
+	return PGTBL_PAGE_SIZE / sizeof(uint64_t);
+}
+
+static uint64_t pte_index_mask[] = {
+	PGTBL_L0_INDEX_MASK,
+	PGTBL_L1_INDEX_MASK,
+	PGTBL_L2_INDEX_MASK,
+	PGTBL_L3_INDEX_MASK,
+};
+
+static uint32_t pte_index_shift[] = {
+	PGTBL_L0_INDEX_SHIFT,
+	PGTBL_L1_INDEX_SHIFT,
+	PGTBL_L2_INDEX_SHIFT,
+	PGTBL_L3_INDEX_SHIFT,
+};
+
+static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
+{
+	TEST_ASSERT(level > -1,
+		"Negative page table level (%d) not possible", level);
+	TEST_ASSERT(level < vm->pgtable_levels,
+		"Invalid page table level (%d)", level);
+
+	return (gva & pte_index_mask[level]) >> pte_index_shift[level];
+}
+
+void virt_pgd_alloc(struct kvm_vm *vm)
+{
+	if (!vm->pgd_created) {
+		vm_paddr_t paddr = vm_phy_pages_alloc(vm,
+			page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size,
+			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+		vm->pgd = paddr;
+		vm->pgd_created = true;
+	}
+}
+
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+{
+	uint64_t *ptep, next_ppn;
+	int level = vm->pgtable_levels - 1;
+
+	TEST_ASSERT((vaddr % vm->page_size) == 0,
+		"Virtual address not on page boundary,\n"
+		"  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
+	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
+		(vaddr >> vm->page_shift)),
+		"Invalid virtual address, vaddr: 0x%lx", vaddr);
+	TEST_ASSERT((paddr % vm->page_size) == 0,
+		"Physical address not on page boundary,\n"
+		"  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
+	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+		"Physical address beyond maximum supported,\n"
+		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		paddr, vm->max_gfn, vm->page_size);
+
+	ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, vaddr, level) * 8;
+	if (!*ptep) {
+		next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT;
+		*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
+			PGTBL_PTE_VALID_MASK;
+	}
+	level--;
+
+	while (level > -1) {
+		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
+		       pte_index(vm, vaddr, level) * 8;
+		if (!*ptep && level > 0) {
+			next_ppn = vm_alloc_page_table(vm) >>
+				   PGTBL_PAGE_SIZE_SHIFT;
+			*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
+				PGTBL_PTE_VALID_MASK;
+		}
+		level--;
+	}
+
+	paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT;
+	*ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) |
+		PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;
+}
+
+vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+	uint64_t *ptep;
+	int level = vm->pgtable_levels - 1;
+
+	if (!vm->pgd_created)
+		goto unmapped_gva;
+
+	ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8;
+	if (!ptep)
+		goto unmapped_gva;
+	level--;
+
+	while (level > -1) {
+		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
+		       pte_index(vm, gva, level) * 8;
+		if (!ptep)
+			goto unmapped_gva;
+		level--;
+	}
+
+	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
+
+unmapped_gva:
+	TEST_FAIL("No mapping for vm virtual address gva: 0x%lx level: %d",
+		  gva, level);
+	exit(1);
+}
+
+static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
+		     uint64_t page, int level)
+{
+#ifdef DEBUG
+	static const char *const type[] = { "pte", "pmd", "pud", "p4d"};
+	uint64_t pte, *ptep;
+
+	if (level < 0)
+		return;
+
+	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
+		ptep = addr_gpa2hva(vm, pte);
+		if (!*ptep)
+			continue;
+		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "",
+			type[level], pte, *ptep, ptep);
+		pte_dump(stream, vm, indent + 1,
+			 pte_addr(vm, *ptep), level - 1);
+	}
+#endif
+}
+
+void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+{
+	int level = vm->pgtable_levels - 1;
+	uint64_t pgd, *ptep;
+
+	if (!vm->pgd_created)
+		return;
+
+	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pte(vm) * 8; pgd += 8) {
+		ptep = addr_gpa2hva(vm, pgd);
+		if (!*ptep)
+			continue;
+		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "",
+			pgd, *ptep, ptep);
+		pte_dump(stream, vm, indent + 1,
+			 pte_addr(vm, *ptep), level - 1);
+	}
+}
+
+void riscv_vcpu_mmu_setup(struct kvm_vm *vm, int vcpuid)
+{
+	unsigned long satp;
+
+	/*
+	 * The RISC-V Sv48 MMU mode supports 56-bit physical address
+	 * for 48-bit virtual address with 4KB last level page size.
+	 */
+	switch (vm->mode) {
+	case VM_MODE_P52V48_4K:
+	case VM_MODE_P48V48_4K:
+	case VM_MODE_P40V48_4K:
+		break;
+	default:
+		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
+	}
+
+	satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
+	satp |= SATP_MODE_48;
+
+	set_reg(vm, vcpuid, RISCV_CSR_REG(satp), satp);
+}
+
+void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+{
+	struct kvm_riscv_core core;
+
+	get_reg(vm, vcpuid, RISCV_CORE_REG(mode), &core.mode);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc), &core.regs.pc);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.ra), &core.regs.ra);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp), &core.regs.sp);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), &core.regs.gp);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.tp), &core.regs.tp);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t0), &core.regs.t0);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t1), &core.regs.t1);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t2), &core.regs.t2);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s0), &core.regs.s0);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s1), &core.regs.s1);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a0), &core.regs.a0);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a1), &core.regs.a1);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a2), &core.regs.a2);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a3), &core.regs.a3);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a4), &core.regs.a4);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a5), &core.regs.a5);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a6), &core.regs.a6);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a7), &core.regs.a7);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s2), &core.regs.s2);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s3), &core.regs.s3);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s4), &core.regs.s4);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s5), &core.regs.s5);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s6), &core.regs.s6);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s7), &core.regs.s7);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s8), &core.regs.s8);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s9), &core.regs.s9);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s10), &core.regs.s10);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s11), &core.regs.s11);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t3), &core.regs.t3);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t4), &core.regs.t4);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t5), &core.regs.t5);
+	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t6), &core.regs.t6);
+
+	fprintf(stream,
+		" MODE:  0x%lx\n", core.mode);
+	fprintf(stream,
+		" PC: 0x%016lx   RA: 0x%016lx SP: 0x%016lx GP: 0x%016lx\n",
+		core.regs.pc, core.regs.ra, core.regs.sp, core.regs.gp);
+	fprintf(stream,
+		" TP: 0x%016lx   T0: 0x%016lx T1: 0x%016lx T2: 0x%016lx\n",
+		core.regs.tp, core.regs.t0, core.regs.t1, core.regs.t2);
+	fprintf(stream,
+		" S0: 0x%016lx   S1: 0x%016lx A0: 0x%016lx A1: 0x%016lx\n",
+		core.regs.s0, core.regs.s1, core.regs.a0, core.regs.a1);
+	fprintf(stream,
+		" A2: 0x%016lx   A3: 0x%016lx A4: 0x%016lx A5: 0x%016lx\n",
+		core.regs.a2, core.regs.a3, core.regs.a4, core.regs.a5);
+	fprintf(stream,
+		" A6: 0x%016lx   A7: 0x%016lx S2: 0x%016lx S3: 0x%016lx\n",
+		core.regs.a6, core.regs.a7, core.regs.s2, core.regs.s3);
+	fprintf(stream,
+		" S4: 0x%016lx   S5: 0x%016lx S6: 0x%016lx S7: 0x%016lx\n",
+		core.regs.s4, core.regs.s5, core.regs.s6, core.regs.s7);
+	fprintf(stream,
+		" S8: 0x%016lx   S9: 0x%016lx S10: 0x%016lx S11: 0x%016lx\n",
+		core.regs.s8, core.regs.s9, core.regs.s10, core.regs.s11);
+	fprintf(stream,
+		" T3: 0x%016lx   T4: 0x%016lx T5: 0x%016lx T6: 0x%016lx\n",
+		core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
+}
+
+static void guest_hang(void)
+{
+	while (1)
+		;
+}
+
+void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
+{
+	int r;
+	size_t stack_size = vm->page_size == 4096 ?
+					DEFAULT_STACK_PGS * vm->page_size :
+					vm->page_size;
+	unsigned long stack_vaddr = vm_vaddr_alloc(vm, stack_size,
+					DEFAULT_RISCV_GUEST_STACK_VADDR_MIN);
+	unsigned long current_gp = 0;
+	struct kvm_mp_state mps;
+
+	vm_vcpu_add(vm, vcpuid);
+	riscv_vcpu_mmu_setup(vm, vcpuid);
+
+	/*
+	 * With SBI HSM support in KVM RISC-V, all secondary VCPUs are
+	 * powered-off by default so we ensure that all secondary VCPUs
+	 * are powered-on using KVM_SET_MP_STATE ioctl().
+	 */
+	mps.mp_state = KVM_MP_STATE_RUNNABLE;
+	r = _vcpu_ioctl(vm, vcpuid, KVM_SET_MP_STATE, &mps);
+	TEST_ASSERT(!r, "IOCTL KVM_SET_MP_STATE failed (error %d)", r);
+
+	/* Setup global pointer of guest to be same as the host */
+	asm volatile (
+		"add %0, gp, zero" : "=r" (current_gp) : : "memory");
+	set_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), current_gp);
+
+	/* Setup stack pointer and program counter of guest */
+	set_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp),
+		stack_vaddr + stack_size);
+	set_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc),
+		(unsigned long)guest_code);
+
+	/* Setup default exception vector of guest */
+	set_reg(vm, vcpuid, RISCV_CSR_REG(stvec),
+		(unsigned long)guest_hang);
+}
+
+void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+{
+	va_list ap;
+	uint64_t id = RISCV_CORE_REG(regs.a0);
+	int i;
+
+	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
+		    "  num: %u\n", num);
+
+	va_start(ap, num);
+
+	for (i = 0; i < num; i++) {
+		switch (i) {
+		case 0:
+			id = RISCV_CORE_REG(regs.a0);
+			break;
+		case 1:
+			id = RISCV_CORE_REG(regs.a1);
+			break;
+		case 2:
+			id = RISCV_CORE_REG(regs.a2);
+			break;
+		case 3:
+			id = RISCV_CORE_REG(regs.a3);
+			break;
+		case 4:
+			id = RISCV_CORE_REG(regs.a4);
+			break;
+		case 5:
+			id = RISCV_CORE_REG(regs.a5);
+			break;
+		case 6:
+			id = RISCV_CORE_REG(regs.a6);
+			break;
+		case 7:
+			id = RISCV_CORE_REG(regs.a7);
+			break;
+		};
+		set_reg(vm, vcpuid, id, va_arg(ap, uint64_t));
+	}
+
+	va_end(ap);
+}
+
+void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+{
+}
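The Sv48 walk above resolves four 9-bit index fields below the 12-bit page offset. A hand-worked example (assuming the usual Sv48 layout for the PGTBL_Lx constants, which live in the RISC-V processor.h header not shown here), for gva = 0x0000008000201000:

	/* VPN[3] = (gva >> 39) & 0x1ff = 1
	 * VPN[2] = (gva >> 30) & 0x1ff = 0
	 * VPN[1] = (gva >> 21) & 0x1ff = 1
	 * VPN[0] = (gva >> 12) & 0x1ff = 1
	 */

virt_pg_map() allocates a fresh table page for any level whose PTE is still zero, then writes the leaf PTE with PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK.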
diff --git a/tools/testing/selftests/kvm/lib/riscv/ucall.c b/tools/testing/selftests/kvm/lib/riscv/ucall.c
new file mode 100644
index 000000000000..9e42d8248fa6
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/riscv/ucall.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ucall support. A ucall is a "hypercall to userspace".
+ *
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/kvm.h>
+
+#include "kvm_util.h"
+#include "../kvm_util_internal.h"
+#include "processor.h"
+
+void ucall_init(struct kvm_vm *vm, void *arg)
+{
+}
+
+void ucall_uninit(struct kvm_vm *vm)
+{
+}
+
+struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
+			unsigned long arg1, unsigned long arg2,
+			unsigned long arg3, unsigned long arg4,
+			unsigned long arg5)
+{
+	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
+	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
+	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
+	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
+	register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
+	register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
+	register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
+	register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
+	struct sbiret ret;
+
+	asm volatile (
+		"ecall"
+		: "+r" (a0), "+r" (a1)
+		: "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
+		: "memory");
+	ret.error = a0;
+	ret.value = a1;
+
+	return ret;
+}
+
+void ucall(uint64_t cmd, int nargs, ...)
+{
+	struct ucall uc = {
+		.cmd = cmd,
+	};
+	va_list va;
+	int i;
+
+	nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
+
+	va_start(va, nargs);
+	for (i = 0; i < nargs; ++i)
+		uc.args[i] = va_arg(va, uint64_t);
+	va_end(va);
+
+	sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, 0, (vm_vaddr_t)&uc,
+		  0, 0, 0, 0, 0);
+}
+
+uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+{
+	struct kvm_run *run = vcpu_state(vm, vcpu_id);
+	struct ucall ucall = {};
+
+	if (uc)
+		memset(uc, 0, sizeof(*uc));
+
+	if (run->exit_reason == KVM_EXIT_RISCV_SBI &&
+	    run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT &&
+	    run->riscv_sbi.function_id == 0) {
+		memcpy(&ucall, addr_gva2hva(vm, run->riscv_sbi.args[0]),
+			sizeof(ucall));
+
+		vcpu_run_complete_io(vm, vcpu_id);
+		if (uc)
+			memcpy(uc, &ucall, sizeof(ucall));
+	}
+
+	return ucall.cmd;
+}
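On RISC-V a ucall is just an SBI ecall using a selftest-specific extension ID that the host recognizes from the KVM_EXIT_RISCV_SBI exit. The guest/host pairing then follows the same pattern as the other architectures; a hedged sketch using the framework's standard macros (GUEST_SYNC, UCALL_SYNC, UCALL_ABORT):

	/* Guest side: GUEST_SYNC(n) expands to a ucall(UCALL_SYNC, ...) */
	GUEST_SYNC(1);

	/* Host side */
	struct ucall uc;

	vcpu_run(vm, 0);
	switch (get_ucall(vm, 0, &uc)) {
	case UCALL_SYNC:
		/* the guest reached the sync point; uc.args[] carry its values */
		break;
	case UCALL_ABORT:
		TEST_FAIL("guest assert: %s", (const char *)uc.args[0]);
		break;
	}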
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index eef7b34756d5..babb0f28575c 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -650,6 +650,45 @@ static void vcpu_setup(struct kvm_vm *vm, int vcpuid)
 	vcpu_sregs_set(vm, vcpuid, &sregs);
 }
 
+#define CPUID_XFD_BIT (1 << 4)
+static bool is_xfd_supported(void)
+{
+	int eax, ebx, ecx, edx;
+	const int leaf = 0xd, subleaf = 0x1;
+
+	__asm__ __volatile__(
+		"cpuid"
+		: /* output */ "=a"(eax), "=b"(ebx),
+		  "=c"(ecx), "=d"(edx)
+		: /* input */ "0"(leaf), "2"(subleaf));
+
+	return !!(eax & CPUID_XFD_BIT);
+}
+
+void vm_xsave_req_perm(void)
+{
+	unsigned long bitmask;
+	long rc;
+
+	if (!is_xfd_supported())
+		return;
+
+	rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
+		     XSTATE_XTILE_DATA_BIT);
+	/*
+	 * The older kernel version(<5.15) can't support
+	 * ARCH_REQ_XCOMP_GUEST_PERM and directly return.
+	 */
+	if (rc)
+		return;
+
+	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
+	TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
+	TEST_ASSERT(bitmask & XFEATURE_XTILE_MASK,
+		    "prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure bitmask=0x%lx",
+		    bitmask);
+}
+
 void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
 {
 	struct kvm_mp_state mp_state;
@@ -1017,21 +1056,6 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
 	sregs_dump(stream, &sregs, indent + 4);
 }
 
-struct kvm_x86_state {
-	struct kvm_vcpu_events events;
-	struct kvm_mp_state mp_state;
-	struct kvm_regs regs;
-	struct kvm_xsave xsave;
-	struct kvm_xcrs xcrs;
-	struct kvm_sregs sregs;
-	struct kvm_debugregs debugregs;
-	union {
-		struct kvm_nested_state nested;
-		char nested_[16384];
-	};
-	struct kvm_msrs msrs;
-};
-
 static int kvm_get_num_msrs_fd(int kvm_fd)
 {
 	struct kvm_msr_list nmsrs;
@@ -1069,6 +1093,22 @@ struct kvm_msr_list *kvm_get_msr_index_list(void)
 	return list;
 }
 
+static int vcpu_save_xsave_state(struct kvm_vm *vm, struct vcpu *vcpu,
+				 struct kvm_x86_state *state)
+{
+	int size;
+
+	size = vm_check_cap(vm, KVM_CAP_XSAVE2);
+	if (!size)
+		size = sizeof(struct kvm_xsave);
+
+	state->xsave = malloc(size);
+	if (size == sizeof(struct kvm_xsave))
+		return ioctl(vcpu->fd, KVM_GET_XSAVE, state->xsave);
+	else
+		return ioctl(vcpu->fd, KVM_GET_XSAVE2, state->xsave);
+}
+
 struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
 {
 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
@@ -1112,7 +1152,7 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
         TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_REGS, r: %i",
                 r);
 
-	r = ioctl(vcpu->fd, KVM_GET_XSAVE, &state->xsave);
+	r = vcpu_save_xsave_state(vm, vcpu, state);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
                 r);
 
@@ -1157,24 +1197,25 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
 	int r;
 
-	r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
-        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
+	r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
+	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
                 r);
 
+	r = ioctl(vcpu->fd, KVM_SET_MSRS, &state->msrs);
+	TEST_ASSERT(r == state->msrs.nmsrs,
+		"Unexpected result from KVM_SET_MSRS, r: %i (failed at %x)",
+		r, r == state->msrs.nmsrs ? -1 : state->msrs.entries[r].index);
+
 	if (kvm_check_cap(KVM_CAP_XCRS)) {
 		r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
 		TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
 			    r);
 	}
 
-	r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
-        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
+	r = ioctl(vcpu->fd, KVM_SET_XSAVE, state->xsave);
+	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
                 r);
 
-	r = ioctl(vcpu->fd, KVM_SET_MSRS, &state->msrs);
-        TEST_ASSERT(r == state->msrs.nmsrs, "Unexpected result from KVM_SET_MSRS, r: %i (failed at %x)",
-                r, r == state->msrs.nmsrs ? -1 : state->msrs.entries[r].index);
-
 	r = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, &state->events);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_VCPU_EVENTS, r: %i",
                 r);
@@ -1198,6 +1239,12 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
 	}
 }
 
+void kvm_x86_state_cleanup(struct kvm_x86_state *state)
+{
+	free(state->xsave);
+	free(state);
+}
+
 bool is_intel_cpu(void)
 {
 	int eax, ebx, ecx, edx;
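Because the xsave area is now variable-sized and heap-allocated, state handed out by vcpu_save_state() must be released with the new kvm_x86_state_cleanup() rather than a bare free(). A hedged save/restore sketch (VCPU_ID is a placeholder macro of the kind individual tests define):

	struct kvm_x86_state *state;

	state = vcpu_save_state(vm, VCPU_ID);	/* uses KVM_GET_XSAVE2 when available */
	kvm_vm_release(vm);

	/* ... recreate the VM and vCPU from the saved state ... */
	vcpu_load_state(vm, VCPU_ID, state);	/* restores in the reordered, safe sequence */
	kvm_x86_state_cleanup(state);		/* frees state->xsave and then state */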