author     Paolo Bonzini <pbonzini@redhat.com>    2022-01-18 04:45:32 -0500
committer  Paolo Bonzini <pbonzini@redhat.com>    2022-01-19 12:14:02 -0500
commit     4f5a884fc212d99654e4fb36ba98d5354f0dd18e (patch)
tree       f4ac26f5acd43be75cb9d3b2f89fa92c40dbc8ee /arch/x86/kvm
parent     e09fccb5435d7b9ab3fd5dfeada8ae40cfa56e08 (diff)
parent     5f02ef741a785678930f3ff0a8b6b2b0ef1bb402 (diff)
Merge branch 'kvm-pi-raw-spinlock' into HEAD
Bring in fix for VT-d posted interrupts before further changing the code in 5.17.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--   arch/x86/kvm/debugfs.c           3
-rw-r--r--   arch/x86/kvm/svm/sev.c           2
-rw-r--r--   arch/x86/kvm/vmx/posted_intr.c  16
3 files changed, 12 insertions, 9 deletions
diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c
index 543a8c04025c..9240b3b7f8dd 100644
--- a/arch/x86/kvm/debugfs.c
+++ b/arch/x86/kvm/debugfs.c
@@ -95,6 +95,9 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v)
unsigned int *log[KVM_NR_PAGE_SIZES], *cur;
int i, j, k, l, ret;
+ if (!kvm_memslots_have_rmaps(kvm))
+ return 0;
+
ret = -ENOMEM;
memset(log, 0, sizeof(log));
for (i = 0; i < KVM_NR_PAGE_SIZES; i++) {
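The hunk above adds an early-out guard before the rmap statistics are walked. A minimal, hypothetical sketch of the same pattern in a seq_file "show" callback (the name rmaps_stat_show_sketch and the use of m->private are illustrative, not taken from the kernel source):

/*
 * Hypothetical sketch of the guard added above: bail out of a debugfs
 * "show" callback when the data it would dump does not exist, so the
 * file simply reads as empty instead of walking unallocated state.
 */
static int rmaps_stat_show_sketch(struct seq_file *m, void *v)
{
        struct kvm *kvm = m->private;

        if (!kvm_memslots_have_rmaps(kvm))
                return 0;               /* nothing to report */

        seq_puts(m, "rmap statistics would be dumped here\n");
        return 0;
}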
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 322553322202..6a22798eaaee 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1566,7 +1566,7 @@ static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
r = -EINTR;
if (mutex_lock_killable(&dst_kvm->lock))
goto release_src;
- if (mutex_lock_killable(&src_kvm->lock))
+ if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))
goto unlock_dst;
return 0;
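The _nested variant above is a lockdep annotation: both mutexes belong to the same lock class (two struct kvm ->lock mutexes), so the second acquisition is marked with SINGLE_DEPTH_NESTING to avoid a false recursive-locking report. A hedged sketch of the pattern, with the helper name lock_two_vms_sketch invented for illustration:

/*
 * Sketch: taking two mutexes of the same lock class. Without the
 * SINGLE_DEPTH_NESTING annotation on the second lock, lockdep would
 * flag the acquisition as a possible self-deadlock.
 */
static int lock_two_vms_sketch(struct kvm *dst, struct kvm *src)
{
        if (mutex_lock_killable(&dst->lock))
                return -EINTR;
        if (mutex_lock_killable_nested(&src->lock, SINGLE_DEPTH_NESTING)) {
                mutex_unlock(&dst->lock);
                return -EINTR;
        }
        return 0;       /* caller unlocks both when done */
}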
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index 88c53c521094..a82a15aa1982 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -27,7 +27,7 @@ static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
* CPU. IRQs must be disabled when taking this lock, otherwise deadlock will
* occur if a wakeup IRQ arrives and attempts to acquire the lock.
*/
-static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
+static DEFINE_PER_CPU(raw_spinlock_t, blocked_vcpu_on_cpu_lock);
static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
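The comment in the hunk above is the reason for the raw_spinlock_t conversion: the lock is taken from the posted-interrupt wakeup IRQ path, and on PREEMPT_RT an ordinary spinlock_t becomes a sleeping lock that must not be acquired with interrupts disabled. A small sketch of the resulting pattern, using invented per-CPU names (example_lock, example_list); the per-CPU variables would still need raw_spin_lock_init()/INIT_LIST_HEAD() at CPU init, as pi_init_cpu() does below:

/*
 * Sketch of a per-CPU list protected by a raw spinlock that is also taken
 * from an interrupt handler: IRQs are disabled around the lock so the
 * handler cannot interrupt the lock holder on the same CPU and deadlock.
 */
static DEFINE_PER_CPU(raw_spinlock_t, example_lock);
static DEFINE_PER_CPU(struct list_head, example_list);

static void example_add(struct list_head *entry)
{
        unsigned long flags;
        int cpu;

        local_irq_save(flags);          /* keep the wakeup IRQ out */
        cpu = smp_processor_id();
        raw_spin_lock(&per_cpu(example_lock, cpu));
        list_add_tail(entry, &per_cpu(example_list, cpu));
        raw_spin_unlock(&per_cpu(example_lock, cpu));
        local_irq_restore(flags);
}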
@@ -133,9 +133,9 @@ static void __pi_post_block(struct kvm_vcpu *vcpu)
* Remove the vCPU from the wakeup list of the _previous_ pCPU, which
* will not be the same as the current pCPU if the task was migrated.
*/
- spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
list_del(&vcpu->blocked_vcpu_list);
- spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
dest = cpu_physical_id(vcpu->cpu);
if (!x2apic_mode)
@@ -181,10 +181,10 @@ int pi_pre_block(struct kvm_vcpu *vcpu)
local_irq_save(flags);
vcpu->pre_pcpu = vcpu->cpu;
- spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu));
+ raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu));
list_add_tail(&vcpu->blocked_vcpu_list,
&per_cpu(blocked_vcpu_on_cpu, vcpu->cpu));
- spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu));
+ raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu));
WARN(pi_desc->sn == 1,
"Posted Interrupt Suppress Notification set before blocking");
@@ -224,7 +224,7 @@ void pi_wakeup_handler(void)
struct kvm_vcpu *vcpu;
int cpu = smp_processor_id();
- spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+ raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
blocked_vcpu_list) {
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
@@ -232,13 +232,13 @@ void pi_wakeup_handler(void)
if (pi_test_on(pi_desc))
kvm_vcpu_kick(vcpu);
}
- spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+ raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
}
void __init pi_init_cpu(int cpu)
{
INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
- spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+ raw_spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
}
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)