Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/Kconfig        |  11
 arch/arm/mm/alignment.c    |   4
 arch/arm/mm/fault.c        |  24
 arch/arm/mm/ioremap.c      |  16
 arch/arm/mm/mmu.c          |   2
 arch/arm/mm/pageattr.c     |  42
 arch/arm/mm/proc-v7-bugs.c | 208
 arch/arm/mm/proc-v7m.S     |  20
 8 files changed, 268 insertions(+), 59 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 58afba346729..9724c16e9076 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -830,6 +830,7 @@ config CPU_BPREDICT_DISABLE
config CPU_SPECTRE
bool
+ select GENERIC_CPU_VULNERABILITIES
config HARDEN_BRANCH_PREDICTOR
bool "Harden the branch predictor against aliasing attacks" if EXPERT
@@ -850,6 +851,16 @@ config HARDEN_BRANCH_PREDICTOR
If unsure, say Y.
+config HARDEN_BRANCH_HISTORY
+ bool "Harden Spectre style attacks against branch history" if EXPERT
+ depends on CPU_SPECTRE
+ default y
+ help
+ Speculation attacks against some high-performance processors can
+ make use of branch history to influence future speculation. When
+ taking an exception, a sequence of branches overwrites the branch
+ history, or branch history is invalidated.
+
config TLS_REG_EMUL
bool
select NEED_KUSER_HELPERS
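The new HARDEN_BRANCH_HISTORY option gates the Spectre-BHB mitigation wired up in proc-v7-bugs.c below. The "loop" flavour of the workaround deliberately executes a run of taken branches on exception entry so that user-trained branch history is pushed out of the branch history buffer. A rough illustration of the idea as inline assembly; this is not the kernel's actual entry sequence, which lives in the vector assembly:

	/*
	 * Illustration only: execute eight taken branches, then
	 * synchronise, so that prior (potentially attacker-trained)
	 * branch history no longer influences speculation.
	 */
	static inline void bhb_overwrite_sketch(void)
	{
		asm volatile(
		"	mov	r0, #8		\n"
		"1:	b	2f		\n"
		"2:	subs	r0, r0, #1	\n"
		"	bne	1b		\n"
		"	dsb			\n"
		"	isb			\n"
		: : : "r0", "cc");
	}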
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index ea81e89e7740..6f499559d193 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -990,7 +990,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* there is no work pending for this thread.
*/
raw_local_irq_disable();
- if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+ if (!(read_thread_flags() & _TIF_WORK_MASK))
set_cr(cr_no_alignment);
}
@@ -1005,7 +1005,7 @@ static int __init noalign_setup(char *__unused)
__setup("noalign", noalign_setup);
/*
- * This needs to be done after sysctl_init, otherwise sys/ will be
+ * This needs to be done after sysctl_init_bases(), otherwise sys/ will be
* overwritten. Actually, this shouldn't be in sys/ at all since
* it isn't a sysctl, and it doesn't contain sysctl information.
* We now locate it in /proc/cpu/alignment instead.
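Two small cleanups in alignment.c: do_alignment() now reads the thread flags through read_thread_flags() instead of dereferencing current_thread_info() directly, and the stale sysctl_init reference in the comment is updated to sysctl_init_bases(). Per my reading of the generic header, the new helper is essentially a READ_ONCE() wrapper:

	/* Sketch of the generic helper this hunk adopts (illustrative;
	 * the real definition is in include/linux/thread_info.h): */
	static inline unsigned long read_thread_flags(void)
	{
		return READ_ONCE(current_thread_info()->flags);
	}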
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index bc8779d54a64..a062e07516dd 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -17,6 +17,7 @@
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
+#include <linux/kfence.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
@@ -99,6 +100,11 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{ }
#endif /* CONFIG_MMU */
+static inline bool is_write_fault(unsigned int fsr)
+{
+ return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
+}
+
static void die_kernel_fault(const char *msg, struct mm_struct *mm,
unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
@@ -111,7 +117,7 @@ static void die_kernel_fault(const char *msg, struct mm_struct *mm,
show_pte(KERN_ALERT, mm, addr);
die("Oops", regs, fsr);
bust_spinlocks(0);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
}
/*
@@ -131,10 +137,14 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
/*
* No handler, we'll have to terminate things with extreme prejudice.
*/
- if (addr < PAGE_SIZE)
+ if (addr < PAGE_SIZE) {
msg = "NULL pointer dereference";
- else
+ } else {
+ if (kfence_handle_page_fault(addr, is_write_fault(fsr), regs))
+ return;
+
msg = "paging request";
+ }
die_kernel_fault(msg, mm, addr, fsr, regs);
}
@@ -191,8 +201,8 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
}
#ifdef CONFIG_MMU
-#define VM_FAULT_BADMAP 0x010000
-#define VM_FAULT_BADACCESS 0x020000
+#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000)
+#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000)
static inline bool is_permission_fault(unsigned int fsr)
{
@@ -261,7 +271,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
- if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) {
+ if (is_write_fault(fsr)) {
flags |= FAULT_FLAG_WRITE;
vm_flags = VM_WRITE;
}
@@ -312,7 +322,7 @@ retry:
return 0;
}
- if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (!(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
goto retry;
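The fault.c changes hook KFENCE into the kernel-fault path (kfence_handle_page_fault() returns true when it owned the fault, so the oops is skipped), factor the write test into is_write_fault(), and drop the FAULT_FLAG_ALLOW_RETRY check, which as I read it is redundant because the flag is part of FAULT_FLAG_DEFAULT and therefore always set here. The FSR_CM masking matters because cache maintenance operations report as writes in the DFSR; a decoding sketch, assuming the bit assignments I believe arch/arm/mm/fault.h uses:

	#define FSR_WRITE	(1 << 11)	/* WnR: write, not read */
	#define FSR_CM		(1 << 13)	/* cache maintenance op */

	static inline bool is_write_fault(unsigned int fsr)
	{
		/* CM operations set WnR but must not require VM_WRITE. */
		return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
	}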
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 6e830b9418c9..197f8eb3a775 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -459,16 +459,20 @@ void pci_ioremap_set_mem_type(int mem_type)
pci_ioremap_mem_type = mem_type;
}
-int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
- BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
+ unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
- return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
- PCI_IO_VIRT_BASE + offset + SZ_64K,
- phys_addr,
+ if (!(res->flags & IORESOURCE_IO))
+ return -EINVAL;
+
+ if (res->end > IO_SPACE_LIMIT)
+ return -EINVAL;
+
+ return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
__pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
-EXPORT_SYMBOL_GPL(pci_ioremap_io);
+EXPORT_SYMBOL(pci_remap_iospace);
void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
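ioremap.c replaces the ARM-private pci_ioremap_io() with the cross-arch pci_remap_iospace() interface, trading a BUG_ON for -EINVAL on bad input. A minimal caller sketch; the function name, resource values, and io_phys_base parameter are hypothetical:

	/* Map 64K of I/O-port space for a host bridge (illustrative). */
	static int example_map_io(phys_addr_t io_phys_base)
	{
		struct resource io_res = {
			.name	= "PCI I/O",
			.start	= 0,
			.end	= SZ_64K - 1,
			.flags	= IORESOURCE_IO,
		};

		return pci_remap_iospace(&io_res, io_phys_base);
	}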
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 274e4f73fd33..5e2be37a198e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -212,12 +212,14 @@ early_param("ecc", early_ecc);
static int __init early_cachepolicy(char *p)
{
pr_warn("cachepolicy kernel parameter not supported without cp15\n");
+ return 0;
}
early_param("cachepolicy", early_cachepolicy);
static int __init noalign_setup(char *__unused)
{
pr_warn("noalign kernel parameter not supported without cp15\n");
+ return 1;
}
__setup("noalign", noalign_setup);
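Both nommu stubs were falling off the end of non-void functions; the fix supplies the conventional return values. As I understand the option-parsing convention, early_param() handlers return 0 on success (non-zero triggers a malformed-option warning), while __setup() handlers return 1 to mark the argument as consumed. For example:

	static int __init example_setup(char *str)	/* hypothetical */
	{
		pr_info("example=%s\n", str);
		return 1;	/* consumed: not passed on to init */
	}
	__setup("example=", example_setup);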
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index 9790ae3a8c68..c3c34fe714b0 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -32,14 +32,31 @@ static bool in_range(unsigned long start, unsigned long size,
size <= range_end - start;
}
+/*
+ * This function assumes that the range is mapped with PAGE_SIZE pages.
+ */
+static int __change_memory_common(unsigned long start, unsigned long size,
+ pgprot_t set_mask, pgprot_t clear_mask)
+{
+ struct page_change_data data;
+ int ret;
+
+ data.set_mask = set_mask;
+ data.clear_mask = clear_mask;
+
+ ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+ &data);
+
+ flush_tlb_kernel_range(start, start + size);
+ return ret;
+}
+
static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr & PAGE_MASK;
unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
unsigned long size = end - start;
- int ret;
- struct page_change_data data;
WARN_ON_ONCE(start != addr);
@@ -50,14 +67,7 @@ static int change_memory_common(unsigned long addr, int numpages,
!in_range(start, size, VMALLOC_START, VMALLOC_END))
return -EINVAL;
- data.set_mask = set_mask;
- data.clear_mask = clear_mask;
-
- ret = apply_to_page_range(&init_mm, start, size, change_page_range,
- &data);
-
- flush_tlb_kernel_range(start, end);
- return ret;
+ return __change_memory_common(start, size, set_mask, clear_mask);
}
int set_memory_ro(unsigned long addr, int numpages)
@@ -87,3 +97,15 @@ int set_memory_x(unsigned long addr, int numpages)
__pgprot(0),
__pgprot(L_PTE_XN));
}
+
+int set_memory_valid(unsigned long addr, int numpages, int enable)
+{
+ if (enable)
+ return __change_memory_common(addr, PAGE_SIZE * numpages,
+ __pgprot(L_PTE_VALID),
+ __pgprot(0));
+ else
+ return __change_memory_common(addr, PAGE_SIZE * numpages,
+ __pgprot(0),
+ __pgprot(L_PTE_VALID));
+}
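pageattr.c splits the page-table walk out into __change_memory_common() so that the new set_memory_valid() can reuse it without the vmalloc/module range checks, since callers such as KFENCE operate on linear-map guard pages. A hedged sketch of the kind of caller this enables; the helper name mirrors the arm64 KFENCE hook but is illustrative here:

	/* Toggle the valid bit on a single guard page (illustrative). */
	static bool kfence_protect_page_sketch(unsigned long addr, bool protect)
	{
		return set_memory_valid(addr, 1, !protect) == 0;
	}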
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index 114c05ab4dd9..06dbfb968182 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -6,8 +6,35 @@
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/proc-fns.h>
+#include <asm/spectre.h>
#include <asm/system_misc.h>
+#ifdef CONFIG_ARM_PSCI
+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+
+ switch ((int)res.a0) {
+ case SMCCC_RET_SUCCESS:
+ return SPECTRE_MITIGATED;
+
+ case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+ return SPECTRE_UNAFFECTED;
+
+ default:
+ return SPECTRE_VULNERABLE;
+ }
+}
+#else
+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+ return SPECTRE_VULNERABLE;
+}
+#endif
+
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
@@ -36,13 +63,61 @@ static void __maybe_unused call_hvc_arch_workaround_1(void)
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
-static void cpu_v7_spectre_init(void)
+static unsigned int spectre_v2_install_workaround(unsigned int method)
{
const char *spectre_v2_method = NULL;
int cpu = smp_processor_id();
if (per_cpu(harden_branch_predictor_fn, cpu))
- return;
+ return SPECTRE_MITIGATED;
+
+ switch (method) {
+ case SPECTRE_V2_METHOD_BPIALL:
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ harden_branch_predictor_bpiall;
+ spectre_v2_method = "BPIALL";
+ break;
+
+ case SPECTRE_V2_METHOD_ICIALLU:
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ harden_branch_predictor_iciallu;
+ spectre_v2_method = "ICIALLU";
+ break;
+
+ case SPECTRE_V2_METHOD_HVC:
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ call_hvc_arch_workaround_1;
+ cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+ spectre_v2_method = "hypervisor";
+ break;
+
+ case SPECTRE_V2_METHOD_SMC:
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ call_smc_arch_workaround_1;
+ cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+ spectre_v2_method = "firmware";
+ break;
+ }
+
+ if (spectre_v2_method)
+ pr_info("CPU%u: Spectre v2: using %s workaround\n",
+ smp_processor_id(), spectre_v2_method);
+
+ return SPECTRE_MITIGATED;
+}
+#else
+static unsigned int spectre_v2_install_workaround(unsigned int method)
+{
+ pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n",
+ smp_processor_id());
+
+ return SPECTRE_VULNERABLE;
+}
+#endif
+
+static void cpu_v7_spectre_v2_init(void)
+{
+ unsigned int state, method = 0;
switch (read_cpuid_part()) {
case ARM_CPU_PART_CORTEX_A8:
@@ -51,69 +126,133 @@ static void cpu_v7_spectre_init(void)
case ARM_CPU_PART_CORTEX_A17:
case ARM_CPU_PART_CORTEX_A73:
case ARM_CPU_PART_CORTEX_A75:
- per_cpu(harden_branch_predictor_fn, cpu) =
- harden_branch_predictor_bpiall;
- spectre_v2_method = "BPIALL";
+ state = SPECTRE_MITIGATED;
+ method = SPECTRE_V2_METHOD_BPIALL;
break;
case ARM_CPU_PART_CORTEX_A15:
case ARM_CPU_PART_BRAHMA_B15:
- per_cpu(harden_branch_predictor_fn, cpu) =
- harden_branch_predictor_iciallu;
- spectre_v2_method = "ICIALLU";
+ state = SPECTRE_MITIGATED;
+ method = SPECTRE_V2_METHOD_ICIALLU;
break;
-#ifdef CONFIG_ARM_PSCI
case ARM_CPU_PART_BRAHMA_B53:
/* Requires no workaround */
+ state = SPECTRE_UNAFFECTED;
break;
+
default:
/* Other ARM CPUs require no workaround */
- if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
+ if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
+ state = SPECTRE_UNAFFECTED;
break;
+ }
+
fallthrough;
- /* Cortex A57/A72 require firmware workaround */
- case ARM_CPU_PART_CORTEX_A57:
- case ARM_CPU_PART_CORTEX_A72: {
- struct arm_smccc_res res;
- arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
- ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if ((int)res.a0 != 0)
- return;
+ /* Cortex A57/A72 require firmware workaround */
+ case ARM_CPU_PART_CORTEX_A57:
+ case ARM_CPU_PART_CORTEX_A72:
+ state = spectre_v2_get_cpu_fw_mitigation_state();
+ if (state != SPECTRE_MITIGATED)
+ break;
switch (arm_smccc_1_1_get_conduit()) {
case SMCCC_CONDUIT_HVC:
- per_cpu(harden_branch_predictor_fn, cpu) =
- call_hvc_arch_workaround_1;
- cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
- spectre_v2_method = "hypervisor";
+ method = SPECTRE_V2_METHOD_HVC;
break;
case SMCCC_CONDUIT_SMC:
- per_cpu(harden_branch_predictor_fn, cpu) =
- call_smc_arch_workaround_1;
- cpu_do_switch_mm = cpu_v7_smc_switch_mm;
- spectre_v2_method = "firmware";
+ method = SPECTRE_V2_METHOD_SMC;
break;
default:
+ state = SPECTRE_VULNERABLE;
break;
}
}
-#endif
+
+ if (state == SPECTRE_MITIGATED)
+ state = spectre_v2_install_workaround(method);
+
+ spectre_v2_update_state(state, method);
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+static int spectre_bhb_method;
+
+static const char *spectre_bhb_method_name(int method)
+{
+ switch (method) {
+ case SPECTRE_V2_METHOD_LOOP8:
+ return "loop";
+
+ case SPECTRE_V2_METHOD_BPIALL:
+ return "BPIALL";
+
+ default:
+ return "unknown";
}
+}
- if (spectre_v2_method)
- pr_info("CPU%u: Spectre v2: using %s workaround\n",
- smp_processor_id(), spectre_v2_method);
+static int spectre_bhb_install_workaround(int method)
+{
+ if (spectre_bhb_method != method) {
+ if (spectre_bhb_method) {
+ pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
+ smp_processor_id());
+
+ return SPECTRE_VULNERABLE;
+ }
+
+ if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
+ return SPECTRE_VULNERABLE;
+
+ spectre_bhb_method = method;
+ }
+
+ pr_info("CPU%u: Spectre BHB: using %s workaround\n",
+ smp_processor_id(), spectre_bhb_method_name(method));
+
+ return SPECTRE_MITIGATED;
}
#else
-static void cpu_v7_spectre_init(void)
+static int spectre_bhb_install_workaround(int method)
{
+ return SPECTRE_VULNERABLE;
}
#endif
+static void cpu_v7_spectre_bhb_init(void)
+{
+ unsigned int state, method = 0;
+
+ switch (read_cpuid_part()) {
+ case ARM_CPU_PART_CORTEX_A15:
+ case ARM_CPU_PART_BRAHMA_B15:
+ case ARM_CPU_PART_CORTEX_A57:
+ case ARM_CPU_PART_CORTEX_A72:
+ state = SPECTRE_MITIGATED;
+ method = SPECTRE_V2_METHOD_LOOP8;
+ break;
+
+ case ARM_CPU_PART_CORTEX_A73:
+ case ARM_CPU_PART_CORTEX_A75:
+ state = SPECTRE_MITIGATED;
+ method = SPECTRE_V2_METHOD_BPIALL;
+ break;
+
+ default:
+ state = SPECTRE_UNAFFECTED;
+ break;
+ }
+
+ if (state == SPECTRE_MITIGATED)
+ state = spectre_bhb_install_workaround(method);
+
+ spectre_v2_update_state(state, method);
+}
+
static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
u32 mask, const char *msg)
{
@@ -142,16 +281,17 @@ static bool check_spectre_auxcr(bool *warned, u32 bit)
void cpu_v7_ca8_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
- cpu_v7_spectre_init();
+ cpu_v7_spectre_v2_init();
}
void cpu_v7_ca15_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
- cpu_v7_spectre_init();
+ cpu_v7_spectre_v2_init();
}
void cpu_v7_bugs_init(void)
{
- cpu_v7_spectre_init();
+ cpu_v7_spectre_v2_init();
+ cpu_v7_spectre_bhb_init();
}
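proc-v7-bugs.c is restructured so that each mitigation reports a SPECTRE_UNAFFECTED / SPECTRE_MITIGATED / SPECTRE_VULNERABLE state through spectre_v2_update_state(), which, together with the new GENERIC_CPU_VULNERABILITIES select in Kconfig, feeds the sysfs vulnerability files. A sketch of how such a state typically surfaces; the variable and string names here are assumptions, not the actual arch/arm/kernel/spectre.c code:

	/* Illustrative cpu_show_* implementation backing
	 * /sys/devices/system/cpu/vulnerabilities/spectre_v2. */
	ssize_t cpu_show_spectre_v2(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		switch (spectre_v2_state) {	/* assumed variable */
		case SPECTRE_UNAFFECTED:
			return sprintf(buf, "Not affected\n");
		case SPECTRE_MITIGATED:
			return sprintf(buf, "Mitigation: %s\n", method_name);
		default:
			return sprintf(buf, "Vulnerable\n");
		}
	}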
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 84459c1d31b8..335144d50134 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -194,6 +194,26 @@ ENDPROC(__v7m_setup)
.endm
/*
+ * Match ARM Cortex-M55 processor.
+ */
+ .type __v7m_cm55_proc_info, #object
+__v7m_cm55_proc_info:
+ .long 0x410fd220 /* ARM Cortex-M55 0xD22 */
+ .long 0xff0ffff0 /* Mask off revision, patch release */
+ __v7m_proc __v7m_cm55_proc_info, __v7m_cm7_setup, hwcaps = HWCAP_EDSP, cache_fns = v7m_cache_fns, proc_fns = cm7_processor_functions
+ .size __v7m_cm55_proc_info, . - __v7m_cm55_proc_info
+
+ /*
+ * Match ARM Cortex-M33 processor.
+ */
+ .type __v7m_cm33_proc_info, #object
+__v7m_cm33_proc_info:
+ .long 0x410fd210 /* ARM Cortex-M33 0xD21 */
+ .long 0xff0ffff0 /* Mask off revision, patch release */
+ __v7m_proc __v7m_cm33_proc_info, __v7m_setup, hwcaps = HWCAP_EDSP
+ .size __v7m_cm33_proc_info, . - __v7m_cm33_proc_info
+
+ /*
* Match ARM Cortex-M7 processor.
*/
.type __v7m_cm7_proc_info, #object
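The new Cortex-M33 and Cortex-M55 proc_info entries match on the masked CPUID value: 0x410fd220 encodes implementer 0x41 (ARM) and part number 0xD22 (Cortex-M55), and the 0xff0ffff0 mask discards the variant and revision fields so any stepping matches. The comparison the lookup performs, expressed in C as a sketch of the assembly logic:

	/* A proc_info entry matches when the masked CPUID equals the
	 * masked match value (sketch of the lookup in head-common code). */
	static bool proc_info_matches(u32 cpuid, u32 val, u32 mask)
	{
		return (cpuid & mask) == (val & mask);
	}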