author     Dexuan Cui <[email protected]>    2023-08-24 01:07:10 -0700
committer  Wei Liu <[email protected]>        2023-08-25 00:04:57 +0000
commit     b9b4fe3a72b60c8d74a9ffb61aa778f04eaddd87 (patch)
tree       85b3853e1490683d3f814092b5b8acd7e87e5374    /arch/x86/hyperv/ivm.c
parent     23378295042a4bcaeec350733a4771678e7a1f3a (diff)
x86/hyperv: Use TDX GHCI to access some MSRs in a TDX VM with the paravisor
When the paravisor is present, a SNP VM must use GHCB to access some special MSRs, including HV_X64_MSR_GUEST_OS_ID and some SynIC MSRs. Similarly, when the paravisor is present, a TDX VM must use TDX GHCI to access the same MSRs.

Implement hv_tdx_msr_write() and hv_tdx_msr_read(), and use the helper functions hv_ivm_msr_read() and hv_ivm_msr_write() to access the MSRs in a unified way for SNP/TDX VMs with the paravisor.

Do not export hv_tdx_msr_write() and hv_tdx_msr_read(), because we never really used hv_ghcb_msr_write() and hv_ghcb_msr_read() in any module.

Update arch/x86/include/asm/mshyperv.h so that the kernel can still build if CONFIG_AMD_MEM_ENCRYPT or CONFIG_INTEL_TDX_GUEST is not set, or neither is set.

Signed-off-by: Dexuan Cui <[email protected]>
Reviewed-by: Tianyu Lan <[email protected]>
Reviewed-by: Michael Kelley <[email protected]>
Signed-off-by: Wei Liu <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
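The arch/x86/include/asm/mshyperv.h half of the change is outside the diffstat below (which is limited to ivm.c), so the following is only an illustrative sketch of the shape the commit message describes: real prototypes when CONFIG_AMD_MEM_ENCRYPT or CONFIG_INTEL_TDX_GUEST is enabled, and empty inline stubs otherwise so that common code builds unconditionally.

/*
 * Illustrative sketch (not taken from this diff) of the mshyperv.h
 * declarations described above: the unified accessors are real
 * functions when either isolation config is enabled, and no-op inline
 * stubs otherwise so the kernel still builds when both
 * CONFIG_AMD_MEM_ENCRYPT and CONFIG_INTEL_TDX_GUEST are unset.
 */
#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void hv_ivm_msr_write(u64 msr, u64 value);
void hv_ivm_msr_read(u64 msr, u64 *value);
#else
static inline void hv_ivm_msr_write(u64 msr, u64 value) {}
static inline void hv_ivm_msr_read(u64 msr, u64 *value) {}
#endif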
Diffstat (limited to 'arch/x86/hyperv/ivm.c')
-rw-r--r--  arch/x86/hyperv/ivm.c  69
1 file changed, 65 insertions, 4 deletions
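As a usage illustration (the caller below is hypothetical and not part of this patch), common Hyper-V code that writes one of the affected MSRs, such as HV_X64_MSR_GUEST_OS_ID, can route the access through the new helper when a paravisor is present and fall back to a plain MSR write otherwise:

/*
 * Hypothetical caller, for illustration only.  With the paravisor
 * present, hv_ivm_msr_write() dispatches to the TDX GHCI or SNP GHCB
 * path added in ivm.c below; without a paravisor the helper returns
 * early, so the plain wrmsrl() branch is used here instead.
 */
static void example_set_guest_os_id(u64 guest_id)
{
	if (ms_hyperv.paravisor_present)
		hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
	else
		wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
}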
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 7bd0359d5e38..fbc07493fcb4 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -24,6 +24,7 @@
#include <asm/realmode.h>
#include <asm/e820/api.h>
#include <asm/desc.h>
+#include <uapi/asm/vmx.h>
#ifdef CONFIG_AMD_MEM_ENCRYPT
@@ -186,7 +187,7 @@ bool hv_ghcb_negotiate_protocol(void)
return true;
}
-void hv_ghcb_msr_write(u64 msr, u64 value)
+static void hv_ghcb_msr_write(u64 msr, u64 value)
{
union hv_ghcb *hv_ghcb;
void **ghcb_base;
@@ -214,9 +215,8 @@ void hv_ghcb_msr_write(u64 msr, u64 value)
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(hv_ghcb_msr_write);
-void hv_ghcb_msr_read(u64 msr, u64 *value)
+static void hv_ghcb_msr_read(u64 msr, u64 *value)
{
union hv_ghcb *hv_ghcb;
void **ghcb_base;
@@ -246,10 +246,71 @@ void hv_ghcb_msr_read(u64 msr, u64 *value)
| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
+#else
+static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
+static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_AMD_MEM_ENCRYPT */
+#ifdef CONFIG_INTEL_TDX_GUEST
+static void hv_tdx_msr_write(u64 msr, u64 val)
+{
+ struct tdx_hypercall_args args = {
+ .r10 = TDX_HYPERCALL_STANDARD,
+ .r11 = EXIT_REASON_MSR_WRITE,
+ .r12 = msr,
+ .r13 = val,
+ };
+
+ u64 ret = __tdx_hypercall(&args);
+
+ WARN_ONCE(ret, "Failed to emulate MSR write: %lld\n", ret);
+}
+
+static void hv_tdx_msr_read(u64 msr, u64 *val)
+{
+ struct tdx_hypercall_args args = {
+ .r10 = TDX_HYPERCALL_STANDARD,
+ .r11 = EXIT_REASON_MSR_READ,
+ .r12 = msr,
+ };
+
+ u64 ret = __tdx_hypercall_ret(&args);
+
+ if (WARN_ONCE(ret, "Failed to emulate MSR read: %lld\n", ret))
+ *val = 0;
+ else
+ *val = args.r11;
+}
+#else
+static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
+static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
+#endif /* CONFIG_INTEL_TDX_GUEST */
+
+#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
+void hv_ivm_msr_write(u64 msr, u64 value)
+{
+ if (!ms_hyperv.paravisor_present)
+ return;
+
+ if (hv_isolation_type_tdx())
+ hv_tdx_msr_write(msr, value);
+ else if (hv_isolation_type_snp())
+ hv_ghcb_msr_write(msr, value);
+}
+
+void hv_ivm_msr_read(u64 msr, u64 *value)
+{
+ if (!ms_hyperv.paravisor_present)
+ return;
+
+ if (hv_isolation_type_tdx())
+ hv_tdx_msr_read(msr, value);
+ else if (hv_isolation_type_snp())
+ hv_ghcb_msr_read(msr, value);
+}
+#endif
+
#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
/*
* hv_mark_gpa_visibility - Set pages visible to host via hvcall.