 arch/x86/include/asm/mtrr.h        |  8 +
 arch/x86/kernel/cpu/mtrr/generic.c | 60 ++-
 arch/x86/kernel/cpu/mtrr/mtrr.c    | 14 +-
 arch/x86/kernel/setup.c            |  2 +
 4 files changed, 82 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 1421fe811401..1bae790a553a 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -58,6 +58,8 @@ struct mtrr_state_type {
  */
 # ifdef CONFIG_MTRR
 void mtrr_bp_init(void);
+void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
+                          mtrr_type def_type);
 extern u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform);
 extern void mtrr_save_fixed_ranges(void *);
 extern void mtrr_save_state(void);
@@ -75,6 +77,12 @@ void mtrr_disable(void);
 void mtrr_enable(void);
 void mtrr_generic_set_state(void);
 # else
+static inline void mtrr_overwrite_state(struct mtrr_var_range *var,
+                                        unsigned int num_var,
+                                        mtrr_type def_type)
+{
+}
+
 static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
 {
         /*
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 08ba55832182..e81d832475a1 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -8,10 +8,12 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/mm.h>
-
+#include <linux/cc_platform.h>
 #include <asm/processor-flags.h>
 #include <asm/cacheinfo.h>
 #include <asm/cpufeature.h>
+#include <asm/hypervisor.h>
+#include <asm/mshyperv.h>
 #include <asm/tlbflush.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
@@ -243,6 +245,62 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
 }
 
 /**
+ * mtrr_overwrite_state - set static MTRR state
+ *
+ * Used to set MTRR state via different means (e.g. with data obtained from
+ * a hypervisor).
+ * Is allowed only for special cases when running virtualized. Must be called
+ * from the x86_init.hyper.init_platform() hook. It can be called only once.
+ * The MTRR state can't be changed afterwards. To ensure that, X86_FEATURE_MTRR
+ * is cleared.
+ */
+void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
+                          mtrr_type def_type)
+{
+        unsigned int i;
+
+        /* Only allowed to be called once before mtrr_bp_init(). */
+        if (WARN_ON_ONCE(mtrr_state_set))
+                return;
+
+        /* Only allowed when running virtualized. */
+        if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+                return;
+
+        /*
+         * Only allowed for special virtualization cases:
+         * - when running as Hyper-V, SEV-SNP guest using vTOM
+         * - when running as Xen PV guest
+         * - when running as SEV-SNP or TDX guest to avoid unnecessary
+         *   VMM communication/Virtualization exceptions (#VC, #VE)
+         */
+        if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP) &&
+            !hv_is_isolation_supported() &&
+            !cpu_feature_enabled(X86_FEATURE_XENPV) &&
+            !cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
+                return;
+
+        /* Disable MTRR in order to disable MTRR modifications. */
+        setup_clear_cpu_cap(X86_FEATURE_MTRR);
+
+        if (var) {
+                if (num_var > MTRR_MAX_VAR_RANGES) {
+                        pr_warn("Trying to overwrite MTRR state with %u variable entries\n",
+                                num_var);
+                        num_var = MTRR_MAX_VAR_RANGES;
+                }
+                for (i = 0; i < num_var; i++)
+                        mtrr_state.var_ranges[i] = var[i];
+                num_var_ranges = num_var;
+        }
+
+        mtrr_state.def_type = def_type;
+        mtrr_state.enabled |= MTRR_STATE_MTRR_ENABLED;
+
+        mtrr_state_set = 1;
+}
+
+/**
  * mtrr_type_lookup - look up memory type in MTRR
  *
  * Return Values:
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 1067f128bded..be35a0b09604 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -625,11 +625,23 @@ int __initdata changed_by_mtrr_cleanup;
  */
 void __init mtrr_bp_init(void)
 {
+        bool generic_mtrrs = cpu_feature_enabled(X86_FEATURE_MTRR);
         const char *why = "(not available)";
 
         phys_hi_rsvd = GENMASK(31, boot_cpu_data.x86_phys_bits - 32);
 
-        if (cpu_feature_enabled(X86_FEATURE_MTRR)) {
+        if (!generic_mtrrs && mtrr_state.enabled) {
+                /*
+                 * Software overwrite of MTRR state, only for generic case.
+                 * Note that X86_FEATURE_MTRR has been reset in this case.
+                 */
+                init_table();
+                pr_info("MTRRs set to read-only\n");
+
+                return;
+        }
+
+        if (generic_mtrrs) {
                 mtrr_if = &generic_mtrr_ops;
         } else {
                 switch (boot_cpu_data.x86_vendor) {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 16babff771bd..0cccfeb67c3a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1037,6 +1037,8 @@ void __init setup_arch(char **cmdline_p)
         /*
          * VMware detection requires dmi to be available, so this
          * needs to be done after dmi_setup(), for the boot CPU.
+         * For some guest types (Xen PV, SEV-SNP, TDX) it is required to be
+         * called before cache_bp_init() for setting up MTRR state.
          */
         init_hypervisor_platform();
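Usage note (not part of the commit): below is a minimal, hypothetical sketch of how a guest platform hook registered as x86_init.hyper.init_platform could hand hypervisor-provided MTRR data to the new mtrr_overwrite_state() interface. The function name example_guest_init_platform and the range values are made up for illustration; real callers (Xen PV, TDX, Hyper-V vTOM setup) derive them from the hypervisor or hard-code the layout they need.

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/mtrr.h>

/* Hypothetical example only - not taken from the patch above. */
static void __init example_guest_init_platform(void)
{
        struct mtrr_var_range var[1] = {};

        /*
         * One variable range: first 1 GB as write-back.  Per the MTRR MSR
         * layout, base[7:0] holds the type and mask bit 11 is the valid bit;
         * the mask values assume 36 physical address bits and are purely
         * illustrative.
         */
        var[0].base_lo = 0x00000000 | MTRR_TYPE_WRBACK;
        var[0].base_hi = 0;
        var[0].mask_lo = 0xc0000000 | (1 << 11);
        var[0].mask_hi = 0xf;

        /* Everything not covered by a variable range defaults to uncachable. */
        mtrr_overwrite_state(var, ARRAY_SIZE(var), MTRR_TYPE_UNCACHABLE);
}

As required by the kernel-doc added in generic.c, such a call may happen only once, from the init_platform() hook, before mtrr_bp_init() runs on the boot CPU.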