Diffstat (limited to 'arch/riscv/kvm/aia.c')
| -rw-r--r-- | arch/riscv/kvm/aia.c | 274 | 
1 file changed, 272 insertions(+), 2 deletions(-)
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index 4f1286fc7f17..585a3b42c52c 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -8,11 +8,49 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/kvm_host.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
 #include <asm/hwcap.h>
+#include <asm/kvm_aia_imsic.h>
 
+struct aia_hgei_control {
+	raw_spinlock_t lock;
+	unsigned long free_bitmap;
+	struct kvm_vcpu *owners[BITS_PER_LONG];
+};
+static DEFINE_PER_CPU(struct aia_hgei_control, aia_hgei);
+static int hgei_parent_irq;
+
+unsigned int kvm_riscv_aia_nr_hgei;
+unsigned int kvm_riscv_aia_max_ids;
 DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
 
+static int aia_find_hgei(struct kvm_vcpu *owner)
+{
+	int i, hgei;
+	unsigned long flags;
+	struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
+
+	raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+	hgei = -1;
+	for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
+		if (hgctrl->owners[i] == owner) {
+			hgei = i;
+			break;
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+	put_cpu_ptr(&aia_hgei);
+	return hgei;
+}
+
 static void aia_set_hvictl(bool ext_irq_pending)
 {
 	unsigned long hvictl;
@@ -56,6 +94,7 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
 
 bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
 {
+	int hgei;
 	unsigned long seip;
 
 	if (!kvm_riscv_aia_available())
@@ -74,6 +113,10 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
 	if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
 		return false;
 
+	hgei = aia_find_hgei(vcpu);
+	if (hgei > 0)
+		return !!(csr_read(CSR_HGEIP) & BIT(hgei));
+
 	return false;
 }
 
@@ -323,8 +366,6 @@ static int aia_rmw_iprio(struct kvm_vcpu *vcpu, unsigned int isel,
 	return KVM_INSN_CONTINUE_NEXT_SEPC;
 }
 
-#define IMSIC_FIRST	0x70
-#define IMSIC_LAST	0xff
 int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
 				unsigned long *val, unsigned long new_val,
 				unsigned long wr_mask)
@@ -348,6 +389,143 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
 	return KVM_INSN_EXIT_TO_USER_SPACE;
 }
 
+int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
+			     void __iomem **hgei_va, phys_addr_t *hgei_pa)
+{
+	int ret = -ENOENT;
+	unsigned long flags;
+	struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
+
+	if (!kvm_riscv_aia_available() || !hgctrl)
+		return -ENODEV;
+
+	raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+	if (hgctrl->free_bitmap) {
+		ret = __ffs(hgctrl->free_bitmap);
+		hgctrl->free_bitmap &= ~BIT(ret);
+		hgctrl->owners[ret] = owner;
+	}
+
+	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+	/* TODO: To be updated later by AIA IMSIC HW guest file support */
+	if (hgei_va)
+		*hgei_va = NULL;
+	if (hgei_pa)
+		*hgei_pa = 0;
+
+	return ret;
+}
+
+void kvm_riscv_aia_free_hgei(int cpu, int hgei)
+{
+	unsigned long flags;
+	struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
+
+	if (!kvm_riscv_aia_available() || !hgctrl)
+		return;
+
+	raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+	if (hgei > 0 && hgei <= kvm_riscv_aia_nr_hgei) {
+		if (!(hgctrl->free_bitmap & BIT(hgei))) {
+			hgctrl->free_bitmap |= BIT(hgei);
+			hgctrl->owners[hgei] = NULL;
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+}
+
+void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable)
+{
+	int hgei;
+
+	if (!kvm_riscv_aia_available())
+		return;
+
+	hgei = aia_find_hgei(owner);
+	if (hgei > 0) {
+		if (enable)
+			csr_set(CSR_HGEIE, BIT(hgei));
+		else
+			csr_clear(CSR_HGEIE, BIT(hgei));
+	}
+}
+
+static irqreturn_t hgei_interrupt(int irq, void *dev_id)
+{
+	int i;
+	unsigned long hgei_mask, flags;
+	struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
+
+	hgei_mask = csr_read(CSR_HGEIP) & csr_read(CSR_HGEIE);
+	csr_clear(CSR_HGEIE, hgei_mask);
+
+	raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+	for_each_set_bit(i, &hgei_mask, BITS_PER_LONG) {
+		if (hgctrl->owners[i])
+			kvm_vcpu_kick(hgctrl->owners[i]);
+	}
+
+	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+	put_cpu_ptr(&aia_hgei);
+	return IRQ_HANDLED;
+}
+
+static int aia_hgei_init(void)
+{
+	int cpu, rc;
+	struct irq_domain *domain;
+	struct aia_hgei_control *hgctrl;
+
+	/* Initialize per-CPU guest external interrupt line management */
+	for_each_possible_cpu(cpu) {
+		hgctrl = per_cpu_ptr(&aia_hgei, cpu);
+		raw_spin_lock_init(&hgctrl->lock);
+		if (kvm_riscv_aia_nr_hgei) {
+			hgctrl->free_bitmap =
+				BIT(kvm_riscv_aia_nr_hgei + 1) - 1;
+			hgctrl->free_bitmap &= ~BIT(0);
+		} else
+			hgctrl->free_bitmap = 0;
+	}
+
+	/* Find INTC irq domain */
+	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
+					  DOMAIN_BUS_ANY);
+	if (!domain) {
+		kvm_err("unable to find INTC domain\n");
+		return -ENOENT;
+	}
+
+	/* Map per-CPU SGEI interrupt from INTC domain */
+	hgei_parent_irq = irq_create_mapping(domain, IRQ_S_GEXT);
+	if (!hgei_parent_irq) {
+		kvm_err("unable to map SGEI IRQ\n");
+		return -ENOMEM;
+	}
+
+	/* Request per-CPU SGEI interrupt */
+	rc = request_percpu_irq(hgei_parent_irq, hgei_interrupt,
+				"riscv-kvm", &aia_hgei);
+	if (rc) {
+		kvm_err("failed to request SGEI IRQ\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static void aia_hgei_exit(void)
+{
+	/* Free per-CPU SGEI interrupt */
+	free_percpu_irq(hgei_parent_irq, &aia_hgei);
+}
+
 void kvm_riscv_aia_enable(void)
 {
 	if (!kvm_riscv_aia_available())
@@ -362,21 +540,105 @@ void kvm_riscv_aia_enable(void)
 	csr_write(CSR_HVIPRIO1H, 0x0);
 	csr_write(CSR_HVIPRIO2H, 0x0);
 #endif
+
+	/* Enable per-CPU SGEI interrupt */
+	enable_percpu_irq(hgei_parent_irq,
+			  irq_get_trigger_type(hgei_parent_irq));
+	csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
 }
 
 void kvm_riscv_aia_disable(void)
 {
+	int i;
+	unsigned long flags;
+	struct kvm_vcpu *vcpu;
+	struct aia_hgei_control *hgctrl;
+
 	if (!kvm_riscv_aia_available())
 		return;
+	hgctrl = get_cpu_ptr(&aia_hgei);
+
+	/* Disable per-CPU SGEI interrupt */
+	csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
+	disable_percpu_irq(hgei_parent_irq);
 
 	aia_set_hvictl(false);
+
+	raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+	for (i = 0; i <= kvm_riscv_aia_nr_hgei; i++) {
+		vcpu = hgctrl->owners[i];
+		if (!vcpu)
+			continue;
+
+		/*
+		 * We release hgctrl->lock before notifying IMSIC
+		 * so that we don't have lock ordering issues.
+		 */
+		raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+		/* Notify IMSIC */
+		kvm_riscv_vcpu_aia_imsic_release(vcpu);
+
+		/*
+		 * Wakeup VCPU if it was blocked so that it can
+		 * run on other HARTs
+		 */
+		if (csr_read(CSR_HGEIE) & BIT(i)) {
+			csr_clear(CSR_HGEIE, BIT(i));
+			kvm_vcpu_kick(vcpu);
+		}
+
+		raw_spin_lock_irqsave(&hgctrl->lock, flags);
+	}
+
+	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+	put_cpu_ptr(&aia_hgei);
 }
 
 int kvm_riscv_aia_init(void)
 {
+	int rc;
+
 	if (!riscv_isa_extension_available(NULL, SxAIA))
 		return -ENODEV;
 
+	/* Figure-out number of bits in HGEIE */
+	csr_write(CSR_HGEIE, -1UL);
+	kvm_riscv_aia_nr_hgei = fls_long(csr_read(CSR_HGEIE));
+	csr_write(CSR_HGEIE, 0);
+	if (kvm_riscv_aia_nr_hgei)
+		kvm_riscv_aia_nr_hgei--;
+
+	/*
+	 * Number of usable HGEI lines should be minimum of per-HART
+	 * IMSIC guest files and number of bits in HGEIE
+	 *
+	 * TODO: To be updated later by AIA IMSIC HW guest file support
+	 */
+	kvm_riscv_aia_nr_hgei = 0;
+
+	/*
+	 * Find number of guest MSI IDs
+	 *
+	 * TODO: To be updated later by AIA IMSIC HW guest file support
+	 */
+	kvm_riscv_aia_max_ids = IMSIC_MAX_ID;
+
+	/* Initialize guest external interrupt line management */
+	rc = aia_hgei_init();
+	if (rc)
+		return rc;
+
+	/* Register device operations */
+	rc = kvm_register_device_ops(&kvm_riscv_aia_device_ops,
+				     KVM_DEV_TYPE_RISCV_AIA);
+	if (rc) {
+		aia_hgei_exit();
+		return rc;
+	}
+
 	/* Enable KVM AIA support */
 	static_branch_enable(&kvm_riscv_aia_available);
 
@@ -385,4 +647,12 @@ int kvm_riscv_aia_init(void)
 
 void kvm_riscv_aia_exit(void)
 {
+	if (!kvm_riscv_aia_available())
+		return;
+
+	/* Unregister device operations */
+	kvm_unregister_device_ops(KVM_DEV_TYPE_RISCV_AIA);
+
+	/* Cleanup the HGEI state */
+	aia_hgei_exit();
 }
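The per-CPU free_bitmap in aia_hgei_init() is seeded so that bits 1 through kvm_riscv_aia_nr_hgei are set while bit 0 stays reserved (guest external interrupt line 0 is never handed out), and kvm_riscv_aia_alloc_hgei() then grabs the lowest set bit with __ffs(). The following standalone userspace sketch (not part of the patch) illustrates just that bitmap arithmetic, assuming a hypothetical nr_hgei value of 5 and using the GCC builtin __builtin_ctzl() as a stand-in for the kernel's __ffs():

#include <stdio.h>

#define BIT(n)	(1UL << (n))

int main(void)
{
	unsigned long nr_hgei = 5;	/* hypothetical HGEIE width */
	unsigned long free_bitmap;

	/* Seed the free list with bits 1..nr_hgei set, bit 0 reserved */
	free_bitmap = BIT(nr_hgei + 1) - 1;
	free_bitmap &= ~BIT(0);
	printf("free_bitmap = 0x%lx\n", free_bitmap);	/* prints 0x3e */

	/* Allocate the lowest free line, as __ffs() does in the patch */
	int hgei = __builtin_ctzl(free_bitmap);
	free_bitmap &= ~BIT(hgei);
	printf("allocated hgei %d, free_bitmap = 0x%lx\n", hgei, free_bitmap);

	return 0;
}

Running this prints hgei 1 first, mirroring how the patch always allocates the lowest-numbered free guest external interrupt line on a given CPU.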