diff options
Diffstat (limited to 'arch/x86')
| -rw-r--r-- | arch/x86/Makefile | 5 | ||||
| -rw-r--r-- | arch/x86/crypto/Makefile | 7 | ||||
| -rw-r--r-- | arch/x86/include/asm/io_bitmap.h | 9 | ||||
| -rw-r--r-- | arch/x86/include/asm/paravirt.h | 7 | ||||
| -rw-r--r-- | arch/x86/include/asm/paravirt_types.h | 4 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/common.c | 2 | ||||
| -rw-r--r-- | arch/x86/kernel/kvm.c | 65 | ||||
| -rw-r--r-- | arch/x86/kernel/paravirt.c | 5 | ||||
| -rw-r--r-- | arch/x86/kernel/process.c | 2 | ||||
| -rw-r--r-- | arch/x86/kvm/Kconfig | 13 | ||||
| -rw-r--r-- | arch/x86/kvm/Makefile | 1 | ||||
| -rw-r--r-- | arch/x86/kvm/svm.c | 5 | ||||
| -rw-r--r-- | arch/x86/kvm/vmx/vmx.c | 17 | ||||
| -rw-r--r-- | arch/x86/kvm/x86.c | 14 | ||||
| -rw-r--r-- | arch/x86/mm/dump_pagetables.c | 7 | ||||
| -rw-r--r-- | arch/x86/platform/efi/efi_64.c | 151 | ||||
| -rw-r--r-- | arch/x86/xen/enlighten_pv.c | 25 | 
17 files changed, 199 insertions, 140 deletions
| diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 94df0868804b..513a55562d75 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -194,9 +194,10 @@ avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)  avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1)  sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1)  sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1) +adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1) -KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) -KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) +KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) +KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr)  KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE) diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index b69e00bf20b8..8c2e9eadee8a 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -11,6 +11,7 @@ avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\  avx512_supported :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,yes,no)  sha1_ni_supported :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,yes,no)  sha256_ni_supported :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,yes,no) +adx_supported := $(call as-instr,adox %r10$(comma)%r10,yes,no)  obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o @@ -39,7 +40,11 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o  obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o  
obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o -obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o + +# These modules require the assembler to support ADX. +ifeq ($(adx_supported),yes) +	obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o +endif  # These modules require assembler to support AVX.  ifeq ($(avx_supported),yes) diff --git a/arch/x86/include/asm/io_bitmap.h b/arch/x86/include/asm/io_bitmap.h index 02c6ef8f7667..07344d82e88e 100644 --- a/arch/x86/include/asm/io_bitmap.h +++ b/arch/x86/include/asm/io_bitmap.h @@ -19,7 +19,14 @@ struct task_struct;  void io_bitmap_share(struct task_struct *tsk);  void io_bitmap_exit(void); -void tss_update_io_bitmap(void); +void native_tss_update_io_bitmap(void); + +#ifdef CONFIG_PARAVIRT_XXL +#include <asm/paravirt.h> +#else +#define tss_update_io_bitmap native_tss_update_io_bitmap +#endif +  #else  static inline void io_bitmap_share(struct task_struct *tsk) { }  static inline void io_bitmap_exit(void) { } diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 86e7317eb31f..694d8daf4983 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -295,6 +295,13 @@ static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)  	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);  } +#ifdef CONFIG_X86_IOPL_IOPERM +static inline void tss_update_io_bitmap(void) +{ +	PVOP_VCALL0(cpu.update_io_bitmap); +} +#endif +  static inline void paravirt_activate_mm(struct mm_struct *prev,  					struct mm_struct *next)  { diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 84812964d3dd..732f62e04ddb 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -140,6 +140,10 @@ struct pv_cpu_ops {  	void (*load_sp0)(unsigned long sp0); +#ifdef CONFIG_X86_IOPL_IOPERM +	void (*update_io_bitmap)(void); +#endif +  	void (*wbinvd)(void);  	/* cpuid emulation, mostly so 
that caps bits can be disabled */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 52c9bfbbdb2a..4cdb123ff66a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -445,7 +445,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)  	 * cpuid bit to be set.  We need to ensure that we  	 * update that bit in this CPU's "cpu_info".  	 */ -	get_cpu_cap(c); +	set_cpu_cap(c, X86_FEATURE_OSPKE);  }  #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index d817f255aed8..6efe0410fb72 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -425,7 +425,29 @@ static void __init sev_map_percpu_data(void)  	}  } +static bool pv_tlb_flush_supported(void) +{ +	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && +		!kvm_para_has_hint(KVM_HINTS_REALTIME) && +		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)); +} + +static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask); +  #ifdef CONFIG_SMP + +static bool pv_ipi_supported(void) +{ +	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI); +} + +static bool pv_sched_yield_supported(void) +{ +	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) && +		!kvm_para_has_hint(KVM_HINTS_REALTIME) && +	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)); +} +  #define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)  static void __send_ipi_mask(const struct cpumask *mask, int vector) @@ -490,12 +512,12 @@ static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)  static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)  {  	unsigned int this_cpu = smp_processor_id(); -	struct cpumask new_mask; +	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);  	const struct cpumask *local_mask; -	cpumask_copy(&new_mask, mask); -	cpumask_clear_cpu(this_cpu, &new_mask); -	local_mask = &new_mask; +	cpumask_copy(new_mask, mask); +	cpumask_clear_cpu(this_cpu, new_mask); +	local_mask = 
new_mask;  	__send_ipi_mask(local_mask, vector);  } @@ -575,7 +597,6 @@ static void __init kvm_apf_trap_init(void)  	update_intr_gate(X86_TRAP_PF, async_page_fault);  } -static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);  static void kvm_flush_tlb_others(const struct cpumask *cpumask,  			const struct flush_tlb_info *info) @@ -583,7 +604,7 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,  	u8 state;  	int cpu;  	struct kvm_steal_time *src; -	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask); +	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);  	cpumask_copy(flushmask, cpumask);  	/* @@ -619,11 +640,10 @@ static void __init kvm_guest_init(void)  		pv_ops.time.steal_clock = kvm_steal_clock;  	} -	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && -	    !kvm_para_has_hint(KVM_HINTS_REALTIME) && -	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { +	if (pv_tlb_flush_supported()) {  		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;  		pv_ops.mmu.tlb_remove_table = tlb_remove_table; +		pr_info("KVM setup pv remote TLB flush\n");  	}  	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) @@ -632,9 +652,7 @@ static void __init kvm_guest_init(void)  #ifdef CONFIG_SMP  	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;  	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; -	if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) && -	    !kvm_para_has_hint(KVM_HINTS_REALTIME) && -	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { +	if (pv_sched_yield_supported()) {  		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;  		pr_info("KVM setup pv sched yield\n");  	} @@ -700,7 +718,7 @@ static uint32_t __init kvm_detect(void)  static void __init kvm_apic_init(void)  {  #if defined(CONFIG_SMP) -	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI)) +	if (pv_ipi_supported())  		kvm_setup_pv_ipi();  #endif  } @@ -732,26 +750,31 @@ static __init int activate_jump_labels(void)  }  arch_initcall(activate_jump_labels); 
-static __init int kvm_setup_pv_tlb_flush(void) +static __init int kvm_alloc_cpumask(void)  {  	int cpu; +	bool alloc = false;  	if (!kvm_para_available() || nopv)  		return 0; -	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && -	    !kvm_para_has_hint(KVM_HINTS_REALTIME) && -	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { +	if (pv_tlb_flush_supported()) +		alloc = true; + +#if defined(CONFIG_SMP) +	if (pv_ipi_supported()) +		alloc = true; +#endif + +	if (alloc)  		for_each_possible_cpu(cpu) { -			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), +			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),  				GFP_KERNEL, cpu_to_node(cpu));  		} -		pr_info("KVM setup pv remote TLB flush\n"); -	}  	return 0;  } -arch_initcall(kvm_setup_pv_tlb_flush); +arch_initcall(kvm_alloc_cpumask);  #ifdef CONFIG_PARAVIRT_SPINLOCKS diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 789f5e4f89de..c131ba4e70ef 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -30,6 +30,7 @@  #include <asm/timer.h>  #include <asm/special_insns.h>  #include <asm/tlb.h> +#include <asm/io_bitmap.h>  /*   * nop stub, which must not clobber anything *including the stack* to @@ -341,6 +342,10 @@ struct paravirt_patch_template pv_ops = {  	.cpu.iret		= native_iret,  	.cpu.swapgs		= native_swapgs, +#ifdef CONFIG_X86_IOPL_IOPERM +	.cpu.update_io_bitmap	= native_tss_update_io_bitmap, +#endif +  	.cpu.start_context_switch	= paravirt_nop,  	.cpu.end_context_switch		= paravirt_nop, diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 839b5244e3b7..3053c85e0e42 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -374,7 +374,7 @@ static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)  /**   * tss_update_io_bitmap - Update I/O bitmap before exiting to usermode   */ -void tss_update_io_bitmap(void) +void native_tss_update_io_bitmap(void)  {  	struct tss_struct *tss = 
this_cpu_ptr(&cpu_tss_rw);  struct thread_struct *t = &current->thread; diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 991019d5eee1..1bb4927030af 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -59,6 +59,19 @@ config KVM  	  If unsure, say N. +config KVM_WERROR +	bool "Compile KVM with -Werror" +	# KASAN may cause the build to fail due to larger frames +	default y if X86_64 && !KASAN +	# We use the dependency on !COMPILE_TEST to not be enabled +	# blindly in allmodconfig or allyesconfig configurations +	depends on (X86_64 && !KASAN) || !COMPILE_TEST +	depends on EXPERT +	help +	  Add -Werror to the build flags for (and only for) KVM. + +	  If in doubt, say "N". +  config KVM_INTEL  	tristate "KVM for Intel (and compatible) processors support"  	depends on KVM && IA32_FEAT_CTL diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index b19ef421084d..e553f0fdd87d 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -1,6 +1,7 @@  # SPDX-License-Identifier: GPL-2.0  ccflags-y += -Iarch/x86/kvm +ccflags-$(CONFIG_KVM_WERROR) += -Werror  KVM := ../../../virt/kvm diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index ad3f5b178a03..24c0b2ba8fb9 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -57,11 +57,13 @@  MODULE_AUTHOR("Qumranet");  MODULE_LICENSE("GPL"); +#ifdef MODULE  static const struct x86_cpu_id svm_cpu_id[] = {  X86_FEATURE_MATCH(X86_FEATURE_SVM),  {}  };  MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id); +#endif  #define IOPM_ALLOC_ORDER 2  #define MSRPM_ALLOC_ORDER 1 @@ -2194,8 +2196,9 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)  static int avic_init_vcpu(struct vcpu_svm *svm)  {  	int ret; +	struct kvm_vcpu *vcpu = &svm->vcpu; -	if (!kvm_vcpu_apicv_active(&svm->vcpu)) +	if (!avic || !irqchip_in_kernel(vcpu->kvm))  		return 0;  	ret = avic_init_backing_page(&svm->vcpu); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 63aaf44edd1f..40b1e6138cd5 100644 --- 
a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -64,11 +64,13 @@  MODULE_AUTHOR("Qumranet");  MODULE_LICENSE("GPL"); +#ifdef MODULE  static const struct x86_cpu_id vmx_cpu_id[] = {  	X86_FEATURE_MATCH(X86_FEATURE_VMX),  	{}  };  MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id); +#endif  bool __read_mostly enable_vpid = 1;  module_param_named(vpid, enable_vpid, bool, 0444); @@ -7175,6 +7177,7 @@ static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,  	else  		intercept = nested_vmx_check_io_bitmaps(vcpu, port, size); +	/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED.  */  	return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;  } @@ -7204,6 +7207,20 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,  	case x86_intercept_outs:  		return vmx_check_intercept_io(vcpu, info); +	case x86_intercept_lgdt: +	case x86_intercept_lidt: +	case x86_intercept_lldt: +	case x86_intercept_ltr: +	case x86_intercept_sgdt: +	case x86_intercept_sidt: +	case x86_intercept_sldt: +	case x86_intercept_str: +		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC)) +			return X86EMUL_CONTINUE; + +		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED.  */ +		break; +  	/* TODO: check more intercepts... 
*/  	default:  		break; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 359fcd395132..5de200663f51 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7190,15 +7190,15 @@ static void kvm_timer_init(void)  	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {  #ifdef CONFIG_CPU_FREQ -		struct cpufreq_policy policy; +		struct cpufreq_policy *policy;  		int cpu; -		memset(&policy, 0, sizeof(policy));  		cpu = get_cpu(); -		cpufreq_get_policy(&policy, cpu); -		if (policy.cpuinfo.max_freq) -			max_tsc_khz = policy.cpuinfo.max_freq; +		policy = cpufreq_cpu_get(cpu); +		if (policy && policy->cpuinfo.max_freq) +			max_tsc_khz = policy->cpuinfo.max_freq;  		put_cpu(); +		cpufreq_cpu_put(policy);  #endif  		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,  					  CPUFREQ_TRANSITION_NOTIFIER); @@ -7308,12 +7308,12 @@ int kvm_arch_init(void *opaque)  	}  	if (!ops->cpu_has_kvm_support()) { -		printk(KERN_ERR "kvm: no hardware support\n"); +		pr_err_ratelimited("kvm: no hardware support\n");  		r = -EOPNOTSUPP;  		goto out;  	}  	if (ops->disabled_by_bios()) { -		printk(KERN_ERR "kvm: disabled by bios\n"); +		pr_err_ratelimited("kvm: disabled by bios\n");  		r = -EOPNOTSUPP;  		goto out;  	} diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 64229dad7eab..69309cd56fdf 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -363,13 +363,8 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m,  {  	const struct ptdump_range ptdump_ranges[] = {  #ifdef CONFIG_X86_64 - -#define normalize_addr_shift (64 - (__VIRTUAL_MASK_SHIFT + 1)) -#define normalize_addr(u) ((signed long)((u) << normalize_addr_shift) >> \ -			   normalize_addr_shift) -  	{0, PTRS_PER_PGD * PGD_LEVEL_MULT / 2}, -	{normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT / 2), ~0UL}, +	{GUARD_HOLE_END_ADDR, ~0UL},  #else  	{0, ~0UL},  #endif diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 
fa8506e76bbe..d19a2edd63cb 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -180,7 +180,7 @@ void efi_sync_low_kernel_mappings(void)  static inline phys_addr_t  virt_to_phys_or_null_size(void *va, unsigned long size)  { -	bool bad_size; +	phys_addr_t pa;  	if (!va)  		return 0; @@ -188,16 +188,13 @@ virt_to_phys_or_null_size(void *va, unsigned long size)  	if (virt_addr_valid(va))  		return virt_to_phys(va); -	/* -	 * A fully aligned variable on the stack is guaranteed not to -	 * cross a page bounary. Try to catch strings on the stack by -	 * checking that 'size' is a power of two. -	 */ -	bad_size = size > PAGE_SIZE || !is_power_of_2(size); +	pa = slow_virt_to_phys(va); -	WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size); +	/* check if the object crosses a page boundary */ +	if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK)) +		return 0; -	return slow_virt_to_phys(va); +	return pa;  }  #define virt_to_phys_or_null(addr)				\ @@ -568,85 +565,25 @@ efi_thunk_set_virtual_address_map(unsigned long memory_map_size,  static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)  { -	efi_status_t status; -	u32 phys_tm, phys_tc; -	unsigned long flags; - -	spin_lock(&rtc_lock); -	spin_lock_irqsave(&efi_runtime_lock, flags); - -	phys_tm = virt_to_phys_or_null(tm); -	phys_tc = virt_to_phys_or_null(tc); - -	status = efi_thunk(get_time, phys_tm, phys_tc); - -	spin_unlock_irqrestore(&efi_runtime_lock, flags); -	spin_unlock(&rtc_lock); - -	return status; +	return EFI_UNSUPPORTED;  }  static efi_status_t efi_thunk_set_time(efi_time_t *tm)  { -	efi_status_t status; -	u32 phys_tm; -	unsigned long flags; - -	spin_lock(&rtc_lock); -	spin_lock_irqsave(&efi_runtime_lock, flags); - -	phys_tm = virt_to_phys_or_null(tm); - -	status = efi_thunk(set_time, phys_tm); - -	spin_unlock_irqrestore(&efi_runtime_lock, flags); -	spin_unlock(&rtc_lock); - -	return status; +	return EFI_UNSUPPORTED;  }  static efi_status_t  
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,  			  efi_time_t *tm)  { -	efi_status_t status; -	u32 phys_enabled, phys_pending, phys_tm; -	unsigned long flags; - -	spin_lock(&rtc_lock); -	spin_lock_irqsave(&efi_runtime_lock, flags); - -	phys_enabled = virt_to_phys_or_null(enabled); -	phys_pending = virt_to_phys_or_null(pending); -	phys_tm = virt_to_phys_or_null(tm); - -	status = efi_thunk(get_wakeup_time, phys_enabled, -			     phys_pending, phys_tm); - -	spin_unlock_irqrestore(&efi_runtime_lock, flags); -	spin_unlock(&rtc_lock); - -	return status; +	return EFI_UNSUPPORTED;  }  static efi_status_t  efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)  { -	efi_status_t status; -	u32 phys_tm; -	unsigned long flags; - -	spin_lock(&rtc_lock); -	spin_lock_irqsave(&efi_runtime_lock, flags); - -	phys_tm = virt_to_phys_or_null(tm); - -	status = efi_thunk(set_wakeup_time, enabled, phys_tm); - -	spin_unlock_irqrestore(&efi_runtime_lock, flags); -	spin_unlock(&rtc_lock); - -	return status; +	return EFI_UNSUPPORTED;  }  static unsigned long efi_name_size(efi_char16_t *name) @@ -658,6 +595,8 @@ static efi_status_t  efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,  		       u32 *attr, unsigned long *data_size, void *data)  { +	u8 buf[24] __aligned(8); +	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));  	efi_status_t status;  	u32 phys_name, phys_vendor, phys_attr;  	u32 phys_data_size, phys_data; @@ -665,14 +604,19 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,  	spin_lock_irqsave(&efi_runtime_lock, flags); +	*vnd = *vendor; +  	phys_data_size = virt_to_phys_or_null(data_size); -	phys_vendor = virt_to_phys_or_null(vendor); +	phys_vendor = virt_to_phys_or_null(vnd);  	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));  	phys_attr = virt_to_phys_or_null(attr);  	phys_data = virt_to_phys_or_null_size(data, *data_size); -	status = efi_thunk(get_variable, phys_name, phys_vendor, -			   
phys_attr, phys_data_size, phys_data); +	if (!phys_name || (data && !phys_data)) +		status = EFI_INVALID_PARAMETER; +	else +		status = efi_thunk(get_variable, phys_name, phys_vendor, +				   phys_attr, phys_data_size, phys_data);  	spin_unlock_irqrestore(&efi_runtime_lock, flags); @@ -683,19 +627,25 @@ static efi_status_t  efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,  		       u32 attr, unsigned long data_size, void *data)  { +	u8 buf[24] __aligned(8); +	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));  	u32 phys_name, phys_vendor, phys_data;  	efi_status_t status;  	unsigned long flags;  	spin_lock_irqsave(&efi_runtime_lock, flags); +	*vnd = *vendor; +  	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); -	phys_vendor = virt_to_phys_or_null(vendor); +	phys_vendor = virt_to_phys_or_null(vnd);  	phys_data = virt_to_phys_or_null_size(data, data_size); -	/* If data_size is > sizeof(u32) we've got problems */ -	status = efi_thunk(set_variable, phys_name, phys_vendor, -			   attr, data_size, phys_data); +	if (!phys_name || !phys_data) +		status = EFI_INVALID_PARAMETER; +	else +		status = efi_thunk(set_variable, phys_name, phys_vendor, +				   attr, data_size, phys_data);  	spin_unlock_irqrestore(&efi_runtime_lock, flags); @@ -707,6 +657,8 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,  				   u32 attr, unsigned long data_size,  				   void *data)  { +	u8 buf[24] __aligned(8); +	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));  	u32 phys_name, phys_vendor, phys_data;  	efi_status_t status;  	unsigned long flags; @@ -714,13 +666,17 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,  	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))  		return EFI_NOT_READY; +	*vnd = *vendor; +  	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); -	phys_vendor = virt_to_phys_or_null(vendor); +	phys_vendor = virt_to_phys_or_null(vnd);  	phys_data = 
virt_to_phys_or_null_size(data, data_size); -	/* If data_size is > sizeof(u32) we've got problems */ -	status = efi_thunk(set_variable, phys_name, phys_vendor, -			   attr, data_size, phys_data); +	if (!phys_name || !phys_data) +		status = EFI_INVALID_PARAMETER; +	else +		status = efi_thunk(set_variable, phys_name, phys_vendor, +				   attr, data_size, phys_data);  	spin_unlock_irqrestore(&efi_runtime_lock, flags); @@ -732,39 +688,36 @@ efi_thunk_get_next_variable(unsigned long *name_size,  			    efi_char16_t *name,  			    efi_guid_t *vendor)  { +	u8 buf[24] __aligned(8); +	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));  	efi_status_t status;  	u32 phys_name_size, phys_name, phys_vendor;  	unsigned long flags;  	spin_lock_irqsave(&efi_runtime_lock, flags); +	*vnd = *vendor; +  	phys_name_size = virt_to_phys_or_null(name_size); -	phys_vendor = virt_to_phys_or_null(vendor); +	phys_vendor = virt_to_phys_or_null(vnd);  	phys_name = virt_to_phys_or_null_size(name, *name_size); -	status = efi_thunk(get_next_variable, phys_name_size, -			   phys_name, phys_vendor); +	if (!phys_name) +		status = EFI_INVALID_PARAMETER; +	else +		status = efi_thunk(get_next_variable, phys_name_size, +				   phys_name, phys_vendor);  	spin_unlock_irqrestore(&efi_runtime_lock, flags); +	*vendor = *vnd;  	return status;  }  static efi_status_t  efi_thunk_get_next_high_mono_count(u32 *count)  { -	efi_status_t status; -	u32 phys_count; -	unsigned long flags; - -	spin_lock_irqsave(&efi_runtime_lock, flags); - -	phys_count = virt_to_phys_or_null(count); -	status = efi_thunk(get_next_high_mono_count, phys_count); - -	spin_unlock_irqrestore(&efi_runtime_lock, flags); - -	return status; +	return EFI_UNSUPPORTED;  }  static void diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 79409120a603..507f4fb88fa7 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -72,6 +72,9 @@  #include <asm/mwait.h>  #include <asm/pci_x86.h>  #include 
<asm/cpu.h> +#ifdef CONFIG_X86_IOPL_IOPERM +#include <asm/io_bitmap.h> +#endif  #ifdef CONFIG_ACPI  #include <linux/acpi.h> @@ -837,6 +840,25 @@ static void xen_load_sp0(unsigned long sp0)  	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);  } +#ifdef CONFIG_X86_IOPL_IOPERM +static void xen_update_io_bitmap(void) +{ +	struct physdev_set_iobitmap iobitmap; +	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); + +	native_tss_update_io_bitmap(); + +	iobitmap.bitmap = (uint8_t *)(&tss->x86_tss) + +			  tss->x86_tss.io_bitmap_base; +	if (tss->x86_tss.io_bitmap_base == IO_BITMAP_OFFSET_INVALID) +		iobitmap.nr_ports = 0; +	else +		iobitmap.nr_ports = IO_BITMAP_BITS; + +	HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap); +} +#endif +  static void xen_io_delay(void)  {  } @@ -1047,6 +1069,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {  	.write_idt_entry = xen_write_idt_entry,  	.load_sp0 = xen_load_sp0, +#ifdef CONFIG_X86_IOPL_IOPERM +	.update_io_bitmap = xen_update_io_bitmap, +#endif  	.io_delay = xen_io_delay,  	/* Xen takes care of %gs when switching to usermode for us */ |