author     Claudio Imbrenda <[email protected]>   2022-06-28 15:56:10 +0200
committer  Janosch Frank <[email protected]>       2022-07-19 12:05:57 +0000
commit     ca2fd0609b5ddd15fe57c917a41508a7a9fc17e1 (patch)
tree       9dd07a7d5886f6a00ded48d6653477ee0aad59a5 /arch/s390/kvm/pv.c
parent     be48d86f77f0cbceecadf10fda6330d82a0a77b7 (diff)
KVM: s390: pv: add mmu_notifier
Add an mmu_notifier for protected VMs. The callback function is triggered when the mm is torn down, and will attempt to convert all protected vCPUs to non-protected. This allows the mm teardown to use the destroy page UVC instead of export.

Also make KVM select CONFIG_MMU_NOTIFIER, needed to use mmu_notifiers.

Signed-off-by: Claudio Imbrenda <[email protected]>
Acked-by: Janosch Frank <[email protected]>
Reviewed-by: Nico Boehr <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Message-Id: <[email protected]>
[[email protected]: Conflict resolution for mmu_notifier.h include and struct kvm_s390_pv]
Signed-off-by: Janosch Frank <[email protected]>
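The mechanism used here is the stock mmu_notifier pattern from <linux/mmu_notifier.h>: embed a struct mmu_notifier in a long-lived object, point its ops at a table whose .release callback runs when the address space is torn down, and register it against the mm. A minimal sketch of that generic pattern, for orientation only (my_ctx, my_release and my_ctx_attach are illustrative names, not part of this patch):

#include <linux/mmu_notifier.h>

struct my_ctx {
        struct mmu_notifier notifier;
        /* owner state that must be cleaned up before the mm goes away */
};

/* Invoked by the last user of the mm when the address space is torn down. */
static void my_release(struct mmu_notifier *subscription, struct mm_struct *mm)
{
        struct my_ctx *ctx = container_of(subscription, struct my_ctx, notifier);

        /* undo anything that still relies on the guest/user address space */
}

static const struct mmu_notifier_ops my_notifier_ops = {
        .release = my_release,
};

static int my_ctx_attach(struct my_ctx *ctx, struct mm_struct *mm)
{
        ctx->notifier.ops = &my_notifier_ops;
        /* can fail (e.g. -ENOMEM); callers are expected to check the result */
        return mmu_notifier_register(&ctx->notifier, mm);
}

In the patch below, the struct kvm itself plays the role of the owning object, and the release callback converts all protected vCPUs back to non-protected.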
Diffstat (limited to 'arch/s390/kvm/pv.c')
-rw-r--r--   arch/s390/kvm/pv.c   26
1 file changed, 26 insertions, 0 deletions
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index d18c5ccfa5dc..c063d1a9cf04 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -15,6 +15,7 @@
 #include <asm/mman.h>
 #include <linux/pagewalk.h>
 #include <linux/sched/mm.h>
+#include <linux/mmu_notifier.h>
 #include "kvm-s390.h"
 
 static void kvm_s390_clear_pv_state(struct kvm *kvm)
@@ -188,6 +189,26 @@ int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 	return -EIO;
 }
 
+static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
+					     struct mm_struct *mm)
+{
+	struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
+	u16 dummy;
+
+	/*
+	 * No locking is needed since this is the last thread of the last user of this
+	 * struct mm.
+	 * When the struct kvm gets deinitialized, this notifier is also
+	 * unregistered. This means that if this notifier runs, then the
+	 * struct kvm is still valid.
+	 */
+	kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
+}
+
+static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
+	.release = kvm_s390_pv_mmu_notifier_release,
+};
+
 int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 {
 	struct uv_cb_cgc uvcb = {
@@ -229,6 +250,11 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 		return -EIO;
 	}
 	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
+	/* Add the notifier only once. No races because we hold kvm->lock */
+	if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
+		kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
+		mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
+	}
 	return 0;
 }
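The comment in kvm_s390_pv_mmu_notifier_release() relies on the notifier being unregistered when the struct kvm is deinitialized; that teardown side is not part of this file's diff. A hedged sketch of what such a pairing typically looks like — the check on .ops mirrors the registration guard above, and the function name and its placement are assumptions, not code from this series:

/* Illustrative only: matching unregister on VM destruction (not in this diff). */
static void example_pv_detach_notifier(struct kvm *kvm)
{
        /* .ops is only set once the notifier was registered in kvm_s390_pv_init_vm() */
        if (kvm->arch.pv.mmu_notifier.ops)
                mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
}

Unregistering before the struct kvm is freed is what makes the "if this notifier runs, then the struct kvm is still valid" reasoning in the release callback hold.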