Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/alternative.h    |  1
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h     |  1
-rw-r--r--  arch/arm64/include/asm/assembler.h      | 11
-rw-r--r--  arch/arm64/include/asm/barrier.h        | 16
-rw-r--r--  arch/arm64/include/asm/cacheflush.h     |  1
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h        |  1
-rw-r--r--  arch/arm64/include/asm/compat.h         |  3
-rw-r--r--  arch/arm64/include/asm/cpufeature.h     | 25
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h    | 13
-rw-r--r--  arch/arm64/include/asm/efi.h            |  9
-rw-r--r--  arch/arm64/include/asm/ftrace.h         |  2
-rw-r--r--  arch/arm64/include/asm/hugetlb.h        | 44
-rw-r--r--  arch/arm64/include/asm/hw_breakpoint.h  |  6
-rw-r--r--  arch/arm64/include/asm/irq.h            | 50
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h        |  3
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h        | 76
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h    | 19
-rw-r--r--  arch/arm64/include/asm/kvm_host.h       | 87
-rw-r--r--  arch/arm64/include/asm/kvm_mmio.h       |  1
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h        |  9
-rw-r--r--  arch/arm64/include/asm/mmu_context.h    |  2
-rw-r--r--  arch/arm64/include/asm/paravirt.h       | 20
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h  | 18
-rw-r--r--  arch/arm64/include/asm/pgtable.h        | 36
-rw-r--r--  arch/arm64/include/asm/shmparam.h       |  2
-rw-r--r--  arch/arm64/include/asm/spinlock.h       | 23
-rw-r--r--  arch/arm64/include/asm/stacktrace.h     |  9
-rw-r--r--  arch/arm64/include/asm/sysreg.h         | 21
-rw-r--r--  arch/arm64/include/asm/thread_info.h    | 10
29 files changed, 364 insertions(+), 155 deletions(-)
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index d56ec0715157..e4962f04201e 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -19,7 +19,6 @@ struct alt_instr {
void __init apply_alternatives_all(void);
void apply_alternatives(void *start, size_t length);
-void free_alternatives_memory(void);
#define ALTINSTR_ENTRY(feature) \
" .word 661b - .\n" /* label */ \
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 030cdcb46c6b..2731d3b25ed2 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -77,6 +77,7 @@
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
+#include <asm/barrier.h>
/*
* Low-level accessors
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 12eff928ef8b..bb7b72734c24 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -193,6 +193,17 @@ lr .req x30 // link register
str \src, [\tmp, :lo12:\sym]
.endm
+ /*
+ * @sym: The name of the per-cpu variable
+ * @reg: Result of per_cpu(sym, smp_processor_id())
+ * @tmp: scratch register
+ */
+ .macro this_cpu_ptr, sym, reg, tmp
+ adr_l \reg, \sym
+ mrs \tmp, tpidr_el1
+ add \reg, \reg, \tmp
+ .endm
+
/*
* Annotate a function as position independent, i.e., safe to be called before
* the kernel virtual mapping is activated.
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 624f9679f4b0..9622eb48f894 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -64,27 +64,31 @@ do { \
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1; \
+ union { typeof(*p) __val; char __c[1]; } __u; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
asm volatile ("ldarb %w0, %1" \
- : "=r" (___p1) : "Q" (*p) : "memory"); \
+ : "=r" (*(__u8 *)__u.__c) \
+ : "Q" (*p) : "memory"); \
break; \
case 2: \
asm volatile ("ldarh %w0, %1" \
- : "=r" (___p1) : "Q" (*p) : "memory"); \
+ : "=r" (*(__u16 *)__u.__c) \
+ : "Q" (*p) : "memory"); \
break; \
case 4: \
asm volatile ("ldar %w0, %1" \
- : "=r" (___p1) : "Q" (*p) : "memory"); \
+ : "=r" (*(__u32 *)__u.__c) \
+ : "Q" (*p) : "memory"); \
break; \
case 8: \
asm volatile ("ldar %0, %1" \
- : "=r" (___p1) : "Q" (*p) : "memory"); \
+ : "=r" (*(__u64 *)__u.__c) \
+ : "Q" (*p) : "memory"); \
break; \
} \
- ___p1; \
+ __u.__val; \
})
#define read_barrier_depends() do { } while(0)
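
The union-based rewrite above lets each asm case target a sized integer while the macro still yields a value of the pointee's type. A minimal sketch of the acquire/release pairing this macro supports (smp_store_release() is the standard counterpart; the names here are illustrative):

static int payload;
static int flag;

static void producer(void)
{
        payload = 42;                   /* ordinary store ... */
        smp_store_release(&flag, 1);    /* ... ordered before the release */
}

static int consumer(void)
{
        if (smp_load_acquire(&flag))    /* later loads ordered after this */
                return payload;         /* guaranteed to observe 42 */
        return -1;                      /* flag not set yet */
}
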
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 54efedaf331f..7fc294c3bc5b 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -68,6 +68,7 @@
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
+extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
static inline void flush_cache_mm(struct mm_struct *mm)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 9ea611ea69df..510c7b404454 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -19,7 +19,6 @@
#define __ASM_CMPXCHG_H
#include <linux/bug.h>
-#include <linux/mmdebug.h>
#include <asm/atomic.h>
#include <asm/barrier.h>
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 7fbed6919b54..eb8432bb82b8 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -23,7 +23,6 @@
*/
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/ptrace.h>
#define COMPAT_USER_HZ 100
#ifdef __AARCH64EB__
@@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
-#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
+#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
static inline void __user *arch_compat_alloc_user_space(long len)
{
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 11d5bb0fdd54..8f271b83f910 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -29,8 +29,9 @@
#define ARM64_HAS_PAN 4
#define ARM64_HAS_LSE_ATOMICS 5
#define ARM64_WORKAROUND_CAVIUM_23154 6
+#define ARM64_WORKAROUND_834220 7
-#define ARM64_NCAPS 7
+#define ARM64_NCAPS 8
#ifndef __ASSEMBLY__
@@ -46,8 +47,12 @@ enum ftr_type {
#define FTR_STRICT true /* SANITY check strict matching required */
#define FTR_NONSTRICT false /* SANITY check ignored */
+#define FTR_SIGNED true /* Value should be treated as signed */
+#define FTR_UNSIGNED false /* Value should be treated as unsigned */
+
struct arm64_ftr_bits {
- bool strict; /* CPU Sanity check: strict matching required ? */
+ bool sign; /* Value is signed ? */
+ bool strict; /* CPU Sanity check: strict matching required ? */
enum ftr_type type;
u8 shift;
u8 width;
@@ -123,6 +128,18 @@ cpuid_feature_extract_field(u64 features, int field)
return cpuid_feature_extract_field_width(features, field, 4);
}
+static inline unsigned int __attribute_const__
+cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
+{
+ return (u64)(features << (64 - width - field)) >> (64 - width);
+}
+
+static inline unsigned int __attribute_const__
+cpuid_feature_extract_unsigned_field(u64 features, int field)
+{
+ return cpuid_feature_extract_unsigned_field_width(features, field, 4);
+}
+
static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
{
return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
@@ -130,7 +147,9 @@ static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val)
{
- return cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width);
+ return ftrp->sign ?
+ cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width) :
+ cpuid_feature_extract_unsigned_field_width(val, ftrp->shift, ftrp->width);
}
static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
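
To see why the new sign distinction matters (an illustration, not part of the patch): a 4-bit field holding 0xf sign-extends to -1 through the signed extractor but reads as 15 through the unsigned one, which is what counter-like fields such as BRPS/WRPS in ID_AA64DFR0_EL1 need:

static void ftr_extract_demo(void)
{
        u64 features = 0xf0000000;      /* 4-bit field at shift 28, value 0xf */

        /* Signed: (s64)(features << 32) >> 60 sign-extends to -1 */
        int s = cpuid_feature_extract_field_width(features, 28, 4);

        /* Unsigned: (u64)(features << 32) >> 60 yields 15 */
        unsigned int u = cpuid_feature_extract_unsigned_field_width(features, 28, 4);

        pr_info("signed=%d unsigned=%u\n", s, u);       /* signed=-1 unsigned=15 */
}
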
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 54d0ead41afc..61e08f360e31 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -18,7 +18,6 @@
#ifdef __KERNEL__
-#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
@@ -26,22 +25,16 @@
#include <asm/xen/hypervisor.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0)
-extern struct dma_map_ops *dma_ops;
extern struct dma_map_ops dummy_dma_ops;
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
- if (unlikely(!dev))
- return dma_ops;
- else if (dev->archdata.dma_ops)
+ if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
- else if (acpi_disabled)
- return dma_ops;
/*
- * When ACPI is enabled, if arch_set_dma_ops is not called,
- * we will disable device DMA capability by setting it
- * to dummy_dma_ops.
+ * We expect no ISA devices, and all other DMA masters are expected to
+ * have someone call arch_setup_dma_ops at device creation time.
*/
return &dummy_dma_ops;
}
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index ef572206f1c3..8e88a696c9cb 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -2,7 +2,9 @@
#define _ASM_EFI_H
#include <asm/io.h>
+#include <asm/mmu_context.h>
#include <asm/neon.h>
+#include <asm/tlbflush.h>
#ifdef CONFIG_EFI
extern void efi_init(void);
@@ -10,6 +12,8 @@ extern void efi_init(void);
#define efi_init()
#endif
+int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+
#define efi_call_virt(f, ...) \
({ \
efi_##f##_t *__f; \
@@ -63,6 +67,11 @@ extern void efi_init(void);
* Services are enabled and the EFI_RUNTIME_SERVICES bit set.
*/
+static inline void efi_set_pgd(struct mm_struct *mm)
+{
+ switch_mm(NULL, mm, NULL);
+}
+
void efi_virtmap_load(void);
void efi_virtmap_unload(void);
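
efi_set_pgd() simply switches TTBR0 via switch_mm(). A plausible sketch of the load/unload pair built on it, assuming an efi_mm mm_struct holding the EFI runtime mappings (which this header only implies; the companion efi.c would define it):

extern struct mm_struct efi_mm;         /* assumed: provided by the EFI setup code */

void efi_virtmap_load(void)
{
        preempt_disable();              /* the EFI pgd is per-call, not per-task */
        efi_set_pgd(&efi_mm);
}

void efi_virtmap_unload(void)
{
        efi_set_pgd(current->active_mm);        /* restore the interrupted mm */
        preempt_enable();
}
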
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index c5534facf941..3c60f37e48ab 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -28,6 +28,8 @@ struct dyn_arch_ftrace {
extern unsigned long ftrace_graph_call;
+extern void return_to_handler(void);
+
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/*
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index bb4052e85dba..bbc1e35aa601 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -26,36 +26,7 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return *ptep;
}
-static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- set_pte_at(mm, addr, ptep, pte);
-}
-
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- ptep_clear_flush(vma, addr, ptep);
-}
-
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- ptep_set_wrprotect(mm, addr, ptep);
-}
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- return ptep_get_and_clear(mm, addr, ptep);
-}
-
-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty)
-{
- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
@@ -97,4 +68,19 @@ static inline void arch_clear_hugepage_flags(struct page *page)
clear_bit(PG_dcache_clean, &page->flags);
}
+extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+ struct page *page, int writable);
+#define arch_make_huge_pte arch_make_huge_pte
+extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty);
+extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+extern void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+
#endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index e54415ec6935..9732908bfc8a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -138,16 +138,18 @@ extern struct pmu perf_ops_bp;
/* Determine number of BRP registers available. */
static inline int get_num_brps(void)
{
+ u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
return 1 +
- cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+ cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_BRPS_SHIFT);
}
/* Determine number of WRP registers available. */
static inline int get_num_wrps(void)
{
+ u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
return 1 +
- cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+ cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_WRPS_SHIFT);
}
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 23eb450b820b..b77197d941fc 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -1,10 +1,60 @@
#ifndef __ASM_IRQ_H
#define __ASM_IRQ_H
+#define IRQ_STACK_SIZE THREAD_SIZE
+#define IRQ_STACK_START_SP THREAD_START_SP
+
+#ifndef __ASSEMBLER__
+
+#include <linux/percpu.h>
+
#include <asm-generic/irq.h>
+#include <asm/thread_info.h>
struct pt_regs;
+DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
+
+/*
+ * The highest address on the stack, and the first to be used. Used to
+ * find the dummy-stack frame put down by el?_irq() in entry.S, which
+ * is structured as follows:
+ *
+ * ------------
+ * | | <- irq_stack_ptr
+ * top ------------
+ * | x19 | <- irq_stack_ptr - 0x08
+ * ------------
+ * | x29 | <- irq_stack_ptr - 0x10
+ * ------------
+ *
+ * where x19 holds a copy of the task stack pointer where the struct pt_regs
+ * from kernel_entry can be found.
+ *
+ */
+#define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
+
+/*
+ * The offset from irq_stack_ptr where entry.S will store the original
+ * stack pointer. Used by unwind_frame() and dump_backtrace().
+ */
+#define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
+
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
+static inline int nr_legacy_irqs(void)
+{
+ return 0;
+}
+
+static inline bool on_irq_stack(unsigned long sp, int cpu)
+{
+ /* variable names the same as kernel/stacktrace.c */
+ unsigned long low = (unsigned long)per_cpu(irq_stack, cpu);
+ unsigned long high = low + IRQ_STACK_START_SP;
+
+ return (low <= sp && sp <= high);
+}
+
+#endif /* !__ASSEMBLER__ */
#endif
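
A sketch of how an unwinder might use these helpers (simplified; the real logic lives in unwind_frame()): when a backtrace reaches the dummy frame at the top of a CPU's IRQ stack, it recovers the interrupted task's stack pointer and continues walking there:

static unsigned long continue_on_task_stack(unsigned long sp, int cpu)
{
        unsigned long irq_stack_ptr = IRQ_STACK_PTR(cpu);

        /* At the top of the IRQ stack, the saved x19 holds the task SP */
        if (on_irq_stack(sp, cpu) && sp == irq_stack_ptr)
                return IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);

        return sp;      /* not at the IRQ stack top: nothing to do */
}
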
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 5e6857b6bdc4..738a95f93e49 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -125,6 +125,7 @@
#define VTCR_EL2_SL0_LVL1 (1 << 6)
#define VTCR_EL2_T0SZ_MASK 0x3f
#define VTCR_EL2_T0SZ_40B 24
+#define VTCR_EL2_VS 19
/*
* We configure the Stage-2 page tables to always restrict the IPA space to be
@@ -169,7 +170,7 @@
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
#define VTTBR_VMID_SHIFT (UL(48))
-#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
+#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
/* Hyp System Trap Register */
#define HSTR_EL2_T(x) (1 << x)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 5e377101f919..52b777b7d407 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -20,82 +20,6 @@
#include <asm/virt.h>
-/*
- * 0 is reserved as an invalid value.
- * Order *must* be kept in sync with the hyp switch code.
- */
-#define MPIDR_EL1 1 /* MultiProcessor Affinity Register */
-#define CSSELR_EL1 2 /* Cache Size Selection Register */
-#define SCTLR_EL1 3 /* System Control Register */
-#define ACTLR_EL1 4 /* Auxiliary Control Register */
-#define CPACR_EL1 5 /* Coprocessor Access Control */
-#define TTBR0_EL1 6 /* Translation Table Base Register 0 */
-#define TTBR1_EL1 7 /* Translation Table Base Register 1 */
-#define TCR_EL1 8 /* Translation Control Register */
-#define ESR_EL1 9 /* Exception Syndrome Register */
-#define AFSR0_EL1 10 /* Auxilary Fault Status Register 0 */
-#define AFSR1_EL1 11 /* Auxilary Fault Status Register 1 */
-#define FAR_EL1 12 /* Fault Address Register */
-#define MAIR_EL1 13 /* Memory Attribute Indirection Register */
-#define VBAR_EL1 14 /* Vector Base Address Register */
-#define CONTEXTIDR_EL1 15 /* Context ID Register */
-#define TPIDR_EL0 16 /* Thread ID, User R/W */
-#define TPIDRRO_EL0 17 /* Thread ID, User R/O */
-#define TPIDR_EL1 18 /* Thread ID, Privileged */
-#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
-#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
-#define PAR_EL1 21 /* Physical Address Register */
-#define MDSCR_EL1 22 /* Monitor Debug System Control Register */
-#define MDCCINT_EL1 23 /* Monitor Debug Comms Channel Interrupt Enable Reg */
-
-/* 32bit specific registers. Keep them at the end of the range */
-#define DACR32_EL2 24 /* Domain Access Control Register */
-#define IFSR32_EL2 25 /* Instruction Fault Status Register */
-#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */
-#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */
-#define NR_SYS_REGS 28
-
-/* 32bit mapping */
-#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
-#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */
-#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */
-#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */
-#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */
-#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
-#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */
-#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
-#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
-#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
-#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
-#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
-#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
-#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
-#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
-#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
-#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
-#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
-#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
-#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
-#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
-#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
-#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */
-#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
-#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
-#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
-#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
-#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
-#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
-
-#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
-#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
-#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
-#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
-#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
-#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
-#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
-
-#define NR_COPRO_REGS (NR_SYS_REGS * 2)
-
#define ARM_EXCEPTION_IRQ 0
#define ARM_EXCEPTION_TRAP 1
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 17e92f05b1fe..3066328cd86b 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -26,7 +26,6 @@
#include <asm/esr.h>
#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
@@ -99,12 +98,22 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}
-static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+/*
+ * vcpu_get_reg and vcpu_set_reg should always be passed a register number
+ * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
+ * AArch32 with banked registers.
+ */
+static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+ u8 reg_num)
{
- if (vcpu_mode_is_32bit(vcpu))
- return vcpu_reg32(vcpu, reg_num);
+ return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+}
- return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+ unsigned long val)
+{
+ if (reg_num != 31)
+ vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
/* Get vcpu SPSR for current mode */
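
A short sketch of the intended call pattern: 'rt' must be the transfer register decoded from ESR_EL2, per the comment above, so register 31 behaves as XZR/WZR (reads as zero, writes discarded):

/* Completing an emulated MMIO read: write the data back to Rt */
static void mmio_read_complete(struct kvm_vcpu *vcpu, u8 rt, u64 data)
{
        vcpu_set_reg(vcpu, rt, data);   /* silently dropped if rt == 31 */
}

/* Fetching the data for an emulated MMIO write */
static u64 mmio_write_data(struct kvm_vcpu *vcpu, u8 rt)
{
        return vcpu_get_reg(vcpu, rt);  /* reads as 0 if rt == 31 */
}
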
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a35ce7266aac..689d4c95e12f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -25,7 +25,6 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -85,6 +84,86 @@ struct kvm_vcpu_fault_info {
u64 hpfar_el2; /* Hyp IPA Fault Address Register */
};
+/*
+ * 0 is reserved as an invalid value.
+ * Order should be kept in sync with the save/restore code.
+ */
+enum vcpu_sysreg {
+ __INVALID_SYSREG__,
+ MPIDR_EL1, /* MultiProcessor Affinity Register */
+ CSSELR_EL1, /* Cache Size Selection Register */
+ SCTLR_EL1, /* System Control Register */
+ ACTLR_EL1, /* Auxiliary Control Register */
+ CPACR_EL1, /* Coprocessor Access Control */
+ TTBR0_EL1, /* Translation Table Base Register 0 */
+ TTBR1_EL1, /* Translation Table Base Register 1 */
+ TCR_EL1, /* Translation Control Register */
+ ESR_EL1, /* Exception Syndrome Register */
+ AFSR0_EL1, /* Auxilary Fault Status Register 0 */
+ AFSR1_EL1, /* Auxilary Fault Status Register 1 */
+ FAR_EL1, /* Fault Address Register */
+ MAIR_EL1, /* Memory Attribute Indirection Register */
+ VBAR_EL1, /* Vector Base Address Register */
+ CONTEXTIDR_EL1, /* Context ID Register */
+ TPIDR_EL0, /* Thread ID, User R/W */
+ TPIDRRO_EL0, /* Thread ID, User R/O */
+ TPIDR_EL1, /* Thread ID, Privileged */
+ AMAIR_EL1, /* Aux Memory Attribute Indirection Register */
+ CNTKCTL_EL1, /* Timer Control Register (EL1) */
+ PAR_EL1, /* Physical Address Register */
+ MDSCR_EL1, /* Monitor Debug System Control Register */
+ MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */
+
+ /* 32bit specific registers. Keep them at the end of the range */
+ DACR32_EL2, /* Domain Access Control Register */
+ IFSR32_EL2, /* Instruction Fault Status Register */
+ FPEXC32_EL2, /* Floating-Point Exception Control Register */
+ DBGVCR32_EL2, /* Debug Vector Catch Register */
+
+ NR_SYS_REGS /* Nothing after this line! */
+};
+
+/* 32bit mapping */
+#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
+#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */
+#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */
+#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */
+#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */
+#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
+#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */
+#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
+#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
+#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
+#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
+#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
+#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
+#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
+#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
+#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
+#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
+#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
+#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
+#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
+#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
+#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
+#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */
+#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
+#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
+#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
+#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
+#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
+#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
+
+#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
+#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
+#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
+#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
+#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
+#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
+#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
+
+#define NR_COPRO_REGS (NR_SYS_REGS * 2)
+
struct kvm_cpu_context {
struct kvm_regs gp_regs;
union {
@@ -197,6 +276,12 @@ struct kvm_vcpu_stat {
u32 halt_successful_poll;
u32 halt_attempted_poll;
u32 halt_wakeup;
+ u32 hvc_exit_stat;
+ u64 wfe_exit_stat;
+ u64 wfi_exit_stat;
+ u64 mmio_exit_user;
+ u64 mmio_exit_kernel;
+ u64 exits;
};
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
index 889c908ee631..fe612a962576 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -19,7 +19,6 @@
#define __ARM64_KVM_MMIO_H__
#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
/*
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 61505676d085..0bf8b4320a91 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -20,6 +20,7 @@
#include <asm/page.h>
#include <asm/memory.h>
+#include <asm/cpufeature.h>
/*
* As we only have the TTBR0_EL2 register, we cannot express
@@ -158,7 +159,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
#define PTRS_PER_S2_PGD_SHIFT (KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT)
-#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
@@ -302,5 +302,12 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}
+static inline unsigned int kvm_get_vmid_bits(void)
+{
+ int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);
+
+ return (cpuid_feature_extract_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
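
This pairs with the new VTTBR_VMID_MASK(size) in kvm_arm.h above; a hedged sketch of how a VTTBR value could be composed with the detected VMID width:

static u64 make_vttbr(u64 baddr, u64 vmid)
{
        unsigned int vmid_bits = kvm_get_vmid_bits();   /* 8 or 16 */

        return (baddr & VTTBR_BADDR_MASK) |
               ((vmid << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(vmid_bits));
}
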
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c0e87898ba96..24165784b803 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void)
#define destroy_context(mm) do { } while(0)
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
-#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
/*
* This is called when "tsk" is about to enter lazy TLB mode.
diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
new file mode 100644
index 000000000000..fd5f42886251
--- /dev/null
+++ b/arch/arm64/include/asm/paravirt.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_ARM64_PARAVIRT_H
+#define _ASM_ARM64_PARAVIRT_H
+
+#ifdef CONFIG_PARAVIRT
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+struct pv_time_ops {
+ unsigned long long (*steal_clock)(int cpu);
+};
+extern struct pv_time_ops pv_time_ops;
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return pv_time_ops.steal_clock(cpu);
+}
+#endif
+
+#endif
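
A sketch of how a backend might wire this up (the handler name and body are hypothetical; pv_time_ops, paravirt_steal_enabled and static_key_slow_inc() are the real interfaces used here):

static unsigned long long hyp_steal_clock(int cpu)
{
        return 0;       /* placeholder: would query the hypervisor's runstate */
}

static int __init init_stolen_time(void)
{
        pv_time_ops.steal_clock = hyp_steal_clock;
        static_key_slow_inc(&paravirt_steal_enabled);
        return 0;
}
early_initcall(init_stolen_time);
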
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index d6739e836f7b..5c25b831273d 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -90,7 +90,23 @@
/*
* Contiguous page definitions.
*/
-#define CONT_PTES (_AC(1, UL) << CONT_SHIFT)
+#ifdef CONFIG_ARM64_64K_PAGES
+#define CONT_PTE_SHIFT 5
+#define CONT_PMD_SHIFT 5
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define CONT_PTE_SHIFT 7
+#define CONT_PMD_SHIFT 5
+#else
+#define CONT_PTE_SHIFT 4
+#define CONT_PMD_SHIFT 4
+#endif
+
+#define CONT_PTES (1 << CONT_PTE_SHIFT)
+#define CONT_PTE_SIZE (CONT_PTES * PAGE_SIZE)
+#define CONT_PTE_MASK (~(CONT_PTE_SIZE - 1))
+#define CONT_PMDS (1 << CONT_PMD_SHIFT)
+#define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
+#define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1))
/* the numerical offset of the PTE within a range of CONT_PTES */
#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))
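
Expanding the new definitions for each page size gives (arithmetic only, using the PMD_SIZE each configuration implies):

/*
 *  4K pages:  CONT_PTES = 1 << 4 = 16   -> CONT_PTE_SIZE = 16  *  4KB  =  64KB
 *             CONT_PMDS = 1 << 4 = 16   -> CONT_PMD_SIZE = 16  *  2MB  =  32MB
 * 16K pages:  CONT_PTES = 1 << 7 = 128  -> CONT_PTE_SIZE = 128 * 16KB  =   2MB
 *             CONT_PMDS = 1 << 5 = 32   -> CONT_PMD_SIZE = 32  * 32MB  =   1GB
 * 64K pages:  CONT_PTES = 1 << 5 = 32   -> CONT_PTE_SIZE = 32  * 64KB  =   2MB
 *             CONT_PMDS = 1 << 5 = 32   -> CONT_PMD_SIZE = 32  * 512MB =  16GB
 */
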
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9819a9426b69..69d2e2f86bce 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
@@ -166,6 +167,16 @@ extern struct page *empty_zero_page;
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+#define pte_valid_young(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+
+/*
+ * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
+ * so that we don't erroneously return false for pages that have been
+ * remapped as PROT_NONE but are yet to be flushed from the TLB.
+ */
+#define pte_accessible(mm, pte) \
+ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
@@ -216,7 +227,8 @@ static inline pte_t pte_mkspecial(pte_t pte)
static inline pte_t pte_mkcont(pte_t pte)
{
- return set_pte_bit(pte, __pgprot(PTE_CONT));
+ pte = set_pte_bit(pte, __pgprot(PTE_CONT));
+ return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}
static inline pte_t pte_mknoncont(pte_t pte)
@@ -224,6 +236,11 @@ static inline pte_t pte_mknoncont(pte_t pte)
return clear_pte_bit(pte, __pgprot(PTE_CONT));
}
+static inline pmd_t pmd_mkcont(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
+}
+
static inline void set_pte(pte_t *ptep, pte_t pte)
{
*ptep = pte;
@@ -275,10 +292,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
* hardware updates of the pte (ptep_set_access_flags safely changes
* valid ptes without going through an invalid entry).
*/
- if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
- pte_valid(*ptep)) {
- BUG_ON(!pte_young(pte));
- BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
+ if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
+ pte_valid(*ptep) && pte_valid(pte)) {
+ VM_WARN_ONCE(!pte_young(pte),
+ "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
+ __func__, pte_val(*ptep), pte_val(pte));
+ VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
+ "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
+ __func__, pte_val(*ptep), pte_val(pte));
}
set_pte(ptep, pte);
@@ -293,7 +314,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
/*
* Hugetlb definitions.
*/
-#define HUGE_MAX_HSTATE 2
+#define HUGE_MAX_HSTATE 4
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
@@ -659,7 +680,8 @@ extern int kern_addr_valid(unsigned long addr);
#include <asm-generic/pgtable.h>
-#define pgtable_cache_init() do { } while (0)
+void pgd_cache_init(void);
+#define pgtable_cache_init pgd_cache_init
/*
* On AArch64, the cache coherency is handled via the set_pte_at() function.
diff --git a/arch/arm64/include/asm/shmparam.h b/arch/arm64/include/asm/shmparam.h
index 4df608a8459e..e368a55ebd22 100644
--- a/arch/arm64/include/asm/shmparam.h
+++ b/arch/arm64/include/asm/shmparam.h
@@ -21,7 +21,7 @@
* alignment value. Since we don't have aliasing D-caches, the rest of
* the time we can safely use PAGE_SIZE.
*/
-#define COMPAT_SHMLBA 0x4000
+#define COMPAT_SHMLBA (4 * PAGE_SIZE)
#include <asm-generic/shmparam.h>
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index c85e96d174a5..fc9682bfe002 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -26,9 +26,28 @@
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
*/
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	unsigned int tmp;
+	arch_spinlock_t lockval;
+
+ asm volatile(
+" sevl\n"
+"1: wfe\n"
+"2: ldaxr %w0, %2\n"
+" eor %w1, %w0, %w0, ror #16\n"
+" cbnz %w1, 1b\n"
+ ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+" stxr %w1, %w0, %2\n"
+" cbnz %w1, 2b\n", /* Serialise against any concurrent lockers */
+ /* LSE atomics */
+" nop\n"
+" nop\n")
+ : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
+ :
+ : "memory");
+}
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
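
For reference, a C-level sketch of the condition the 'eor ..., ror #16' sequence tests: arm64 ticket locks pack 16-bit owner/next halves into one 32-bit word, and the lock is free exactly when they match (field layout assumed from the ticket-lock design, not shown in this hunk):

static inline bool ticket_is_unlocked(u32 lockval)
{
        u16 owner = lockval & 0xffff;   /* ticket currently being served */
        u16 next  = lockval >> 16;      /* next ticket to be handed out */

        return owner == next;           /* eor w, w ror #16 == 0 */
}
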
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 7318f6d54aa9..801a16dbbdf6 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -16,14 +16,19 @@
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H
+struct task_struct;
+
struct stackframe {
unsigned long fp;
unsigned long sp;
unsigned long pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ unsigned int graph;
+#endif
};
-extern int unwind_frame(struct stackframe *frame);
-extern void walk_stackframe(struct stackframe *frame,
+extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
+extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index d48ab5b41f52..4aeebec3d882 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,6 +20,8 @@
#ifndef __ASM_SYSREG_H
#define __ASM_SYSREG_H
+#include <linux/stringify.h>
+
#include <asm/opcodes.h>
/*
@@ -208,6 +210,8 @@
#else
+#include <linux/types.h>
+
asm(
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
" .equ __reg_num_x\\num, \\num\n"
@@ -232,6 +236,23 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
val |= set;
asm volatile("msr sctlr_el1, %0" : : "r" (val));
}
+
+/*
+ * Unlike read_cpuid, calls to read_sysreg are never expected to be
+ * optimized away or replaced with synthetic values.
+ */
+#define read_sysreg(r) ({ \
+ u64 __val; \
+ asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
+ __val; \
+})
+
+#define write_sysreg(v, r) do { \
+ u64 __val = (u64)v; \
+ asm volatile("msr " __stringify(r) ", %0" \
+ : : "r" (__val)); \
+} while (0)
+
#endif
#endif /* __ASM_SYSREG_H */
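
With the new accessors, config_sctlr_el1() above could be written more directly; a small sketch (behaviourally equivalent, not part of the patch):

static inline void config_sctlr_el1_alt(u32 clear, u32 set)
{
        u32 val = read_sysreg(sctlr_el1);       /* truncation is fine: upper SCTLR_EL1 bits are RES0 here */

        val &= ~clear;
        val |= set;
        write_sysreg(val, sctlr_el1);
}
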
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 90c7ff233735..abd64bd1f6d9 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -73,10 +73,16 @@ register unsigned long current_stack_pointer asm ("sp");
*/
static inline struct thread_info *current_thread_info(void) __attribute_const__;
+/*
+ * struct thread_info can be accessed directly via sp_el0.
+ */
static inline struct thread_info *current_thread_info(void)
{
- return (struct thread_info *)
- (current_stack_pointer & ~(THREAD_SIZE - 1));
+ unsigned long sp_el0;
+
+ asm ("mrs %0, sp_el0" : "=r" (sp_el0));
+
+ return (struct thread_info *)sp_el0;
}
#define thread_saved_pc(tsk) \