-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 12
-rw-r--r--  Documentation/arm64/booting.rst | 10
-rw-r--r--  Documentation/devicetree/bindings/perf/arm,cmn.yaml | 2
-rw-r--r--  arch/Kconfig | 7
-rw-r--r--  arch/arm64/Kconfig | 108
-rw-r--r--  arch/arm64/Kconfig.platforms | 2
-rw-r--r--  arch/arm64/include/asm/cputype.h | 2
-rw-r--r--  arch/arm64/include/asm/ftrace.h | 7
-rw-r--r--  arch/arm64/include/asm/hugetlb.h | 2
-rw-r--r--  arch/arm64/include/asm/mte.h | 1
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 4
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 3
-rw-r--r--  arch/arm64/include/asm/stacktrace.h | 32
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 15
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 2
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 17
-rw-r--r--  arch/arm64/kernel/ftrace.c | 17
-rw-r--r--  arch/arm64/kernel/machine_kexec.c | 9
-rw-r--r--  arch/arm64/kernel/machine_kexec_file.c | 12
-rw-r--r--  arch/arm64/kernel/mte.c | 30
-rw-r--r--  arch/arm64/kernel/setup.c | 17
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 124
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 21
-rw-r--r--  arch/arm64/lib/mte.S | 4
-rw-r--r--  arch/arm64/mm/copypage.c | 4
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 46
-rw-r--r--  arch/arm64/mm/init.c | 71
-rw-r--r--  arch/arm64/mm/trans_pgd.c | 2
-rw-r--r--  arch/x86/kernel/ftrace.c | 17
-rw-r--r--  drivers/of/fdt.c | 33
-rw-r--r--  drivers/of/kexec.c | 9
-rw-r--r--  drivers/perf/arm-cmn.c | 609
-rw-r--r--  drivers/perf/arm_pmu_acpi.c | 4
-rw-r--r--  drivers/perf/arm_spe_pmu.c | 3
-rw-r--r--  drivers/perf/hisilicon/Makefile | 2
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c | 409
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pa_pmu.c | 18
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pmu.c | 4
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pmu.h | 1
-rw-r--r--  drivers/perf/marvell_cn10k_tad_pmu.c | 6
-rw-r--r--  drivers/perf/riscv_pmu_sbi.c | 8
-rw-r--r--  fs/btrfs/ioctl.c | 7
-rw-r--r--  include/linux/cpuhotplug.h | 1
-rw-r--r--  include/linux/pagemap.h | 1
-rw-r--r--  include/linux/uaccess.h | 22
-rw-r--r--  kernel/crash_core.c | 3
-rw-r--r--  kernel/trace/fgraph.c | 18
-rw-r--r--  mm/gup.c | 29
48 files changed, 1398 insertions, 389 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 3f1cc5e317ed..1b543c3109f4 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -808,7 +808,7 @@
Documentation/admin-guide/kdump/kdump.rst for an example.
crashkernel=size[KMG],high
- [KNL, X86-64] range could be above 4G. Allow kernel
+ [KNL, X86-64, ARM64] range could be above 4G. Allow kernel
to allocate physical memory region from top, so could
be above 4G if system have more than 4G ram installed.
Otherwise memory region will be allocated below 4G, if
@@ -821,14 +821,20 @@
that require some amount of low memory, e.g. swiotlb
requires at least 64M+32K low memory, also enough extra
low memory is needed to make sure DMA buffers for 32-bit
- devices won't run out. Kernel would try to allocate at
+ devices won't run out. Kernel would try to allocate
at least 256M below 4G automatically.
- This one let user to specify own low range under 4G
+ This one lets the user specify own low range under 4G
for second kernel instead.
0: to disable low allocation.
It will be ignored when crashkernel=X,high is not used
or memory reserved is below 4G.
+ [KNL, ARM64] range in low memory.
+ This one lets the user specify a low range in the
+ DMA zone for the crash dump kernel.
+ It will be ignored when crashkernel=X,high is not used
+ or memory reserved is located in the DMA zones.
+
cryptomgr.notests
[KNL] Disable crypto self-tests
diff --git a/Documentation/arm64/booting.rst b/Documentation/arm64/booting.rst
index 29884b261aa9..8aefa1001ae5 100644
--- a/Documentation/arm64/booting.rst
+++ b/Documentation/arm64/booting.rst
@@ -350,6 +350,16 @@ Before jumping into the kernel, the following conditions must be met:
- SMCR_EL2.FA64 (bit 31) must be initialised to 0b1.
+ For CPUs with the Memory Tagging Extension feature (FEAT_MTE2):
+
+ - If EL3 is present:
+
+ - SCR_EL3.ATA (bit 26) must be initialised to 0b1.
+
+ - If the kernel is entered at EL1 and EL2 is present:
+
+ - HCR_EL2.ATA (bit 56) must be initialised to 0b1.
+
The requirements described above for CPU mode, caches, MMUs, architected
timers, coherency and system registers apply to all CPUs. All CPUs must
enter the kernel in the same exception level. Where the values documented
diff --git a/Documentation/devicetree/bindings/perf/arm,cmn.yaml b/Documentation/devicetree/bindings/perf/arm,cmn.yaml
index 2d4219ec7eda..2e51072e794a 100644
--- a/Documentation/devicetree/bindings/perf/arm,cmn.yaml
+++ b/Documentation/devicetree/bindings/perf/arm,cmn.yaml
@@ -14,6 +14,8 @@ properties:
compatible:
enum:
- arm,cmn-600
+ - arm,cmn-650
+ - arm,cmn-700
- arm,ci-700
reg:
diff --git a/arch/Kconfig b/arch/Kconfig
index 29b0167c088b..b34032279926 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -24,6 +24,13 @@ config KEXEC_ELF
config HAVE_IMA_KEXEC
bool
+config ARCH_HAS_SUBPAGE_FAULTS
+ bool
+ help
+ Select if the architecture can check permissions at sub-page
+ granularity (e.g. arm64 MTE). The probe_user_*() functions
+ must be implemented.
+
config HOTPLUG_SMT
bool
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0897984918e8..1b3e961ffb00 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -253,31 +253,31 @@ config ARM64_CONT_PMD_SHIFT
default 4
config ARCH_MMAP_RND_BITS_MIN
- default 14 if ARM64_64K_PAGES
- default 16 if ARM64_16K_PAGES
- default 18
+ default 14 if ARM64_64K_PAGES
+ default 16 if ARM64_16K_PAGES
+ default 18
# max bits determined by the following formula:
# VA_BITS - PAGE_SHIFT - 3
config ARCH_MMAP_RND_BITS_MAX
- default 19 if ARM64_VA_BITS=36
- default 24 if ARM64_VA_BITS=39
- default 27 if ARM64_VA_BITS=42
- default 30 if ARM64_VA_BITS=47
- default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES
- default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES
- default 33 if ARM64_VA_BITS=48
- default 14 if ARM64_64K_PAGES
- default 16 if ARM64_16K_PAGES
- default 18
+ default 19 if ARM64_VA_BITS=36
+ default 24 if ARM64_VA_BITS=39
+ default 27 if ARM64_VA_BITS=42
+ default 30 if ARM64_VA_BITS=47
+ default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES
+ default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES
+ default 33 if ARM64_VA_BITS=48
+ default 14 if ARM64_64K_PAGES
+ default 16 if ARM64_16K_PAGES
+ default 18
config ARCH_MMAP_RND_COMPAT_BITS_MIN
- default 7 if ARM64_64K_PAGES
- default 9 if ARM64_16K_PAGES
- default 11
+ default 7 if ARM64_64K_PAGES
+ default 9 if ARM64_16K_PAGES
+ default 11
config ARCH_MMAP_RND_COMPAT_BITS_MAX
- default 16
+ default 16
config NO_IOPORT_MAP
def_bool y if !PCI
@@ -304,7 +304,7 @@ config GENERIC_HWEIGHT
def_bool y
config GENERIC_CSUM
- def_bool y
+ def_bool y
config GENERIC_CALIBRATE_DELAY
def_bool y
@@ -1037,8 +1037,7 @@ config SOCIONEXT_SYNQUACER_PREITS
If unsure, say Y.
-endmenu
-
+endmenu # "ARM errata workarounds via the alternatives framework"
choice
prompt "Page size"
@@ -1566,9 +1565,9 @@ config SETEND_EMULATION
be unexpected results in the applications.
If unsure, say Y
-endif
+endif # ARMV8_DEPRECATED
-endif
+endif # COMPAT
menu "ARMv8.1 architectural features"
@@ -1593,15 +1592,15 @@ config ARM64_PAN
bool "Enable support for Privileged Access Never (PAN)"
default y
help
- Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
- prevents the kernel or hypervisor from accessing user-space (EL0)
- memory directly.
+ Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
+ prevents the kernel or hypervisor from accessing user-space (EL0)
+ memory directly.
- Choosing this option will cause any unprotected (not using
- copy_to_user et al) memory access to fail with a permission fault.
+ Choosing this option will cause any unprotected (not using
+ copy_to_user et al) memory access to fail with a permission fault.
- The feature is detected at runtime, and will remain as a 'nop'
- instruction if the cpu does not implement the feature.
+ The feature is detected at runtime, and will remain as a 'nop'
+ instruction if the cpu does not implement the feature.
config AS_HAS_LDAPR
def_bool $(as-instr,.arch_extension rcpc)
@@ -1629,15 +1628,15 @@ config ARM64_USE_LSE_ATOMICS
built with binutils >= 2.25 in order for the new instructions
to be used.
-endmenu
+endmenu # "ARMv8.1 architectural features"
menu "ARMv8.2 architectural features"
config AS_HAS_ARMV8_2
- def_bool $(cc-option,-Wa$(comma)-march=armv8.2-a)
+ def_bool $(cc-option,-Wa$(comma)-march=armv8.2-a)
config AS_HAS_SHA3
- def_bool $(as-instr,.arch armv8.2-a+sha3)
+ def_bool $(as-instr,.arch armv8.2-a+sha3)
config ARM64_PMEM
bool "Enable support for persistent memory"
@@ -1681,7 +1680,7 @@ config ARM64_CNP
at runtime, and does not affect PEs that do not implement
this feature.
-endmenu
+endmenu # "ARMv8.2 architectural features"
menu "ARMv8.3 architectural features"
@@ -1744,7 +1743,7 @@ config AS_HAS_PAC
config AS_HAS_CFI_NEGATE_RA_STATE
def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
-endmenu
+endmenu # "ARMv8.3 architectural features"
menu "ARMv8.4 architectural features"
@@ -1785,7 +1784,7 @@ config ARM64_TLB_RANGE
The feature introduces new assembly instructions, and they were
support when binutils >= 2.30.
-endmenu
+endmenu # "ARMv8.4 architectural features"
menu "ARMv8.5 architectural features"
@@ -1871,6 +1870,7 @@ config ARM64_MTE
depends on AS_HAS_LSE_ATOMICS
# Required for tag checking in the uaccess routines
depends on ARM64_PAN
+ select ARCH_HAS_SUBPAGE_FAULTS
select ARCH_USES_HIGH_VMA_FLAGS
help
Memory Tagging (part of the ARMv8.5 Extensions) provides
@@ -1892,7 +1892,7 @@ config ARM64_MTE
Documentation/arm64/memory-tagging-extension.rst.
-endmenu
+endmenu # "ARMv8.5 architectural features"
menu "ARMv8.7 architectural features"
@@ -1901,12 +1901,12 @@ config ARM64_EPAN
default y
depends on ARM64_PAN
help
- Enhanced Privileged Access Never (EPAN) allows Privileged
- Access Never to be used with Execute-only mappings.
+ Enhanced Privileged Access Never (EPAN) allows Privileged
+ Access Never to be used with Execute-only mappings.
- The feature is detected at runtime, and will remain disabled
- if the cpu does not implement the feature.
-endmenu
+ The feature is detected at runtime, and will remain disabled
+ if the cpu does not implement the feature.
+endmenu # "ARMv8.7 architectural features"
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
@@ -1993,7 +1993,7 @@ config ARM64_DEBUG_PRIORITY_MASKING
the validity of ICC_PMR_EL1 when calling concerned functions.
If unsure, say N
-endif
+endif # ARM64_PSEUDO_NMI
config RELOCATABLE
bool "Build a relocatable kernel image" if EXPERT
@@ -2052,7 +2052,19 @@ config STACKPROTECTOR_PER_TASK
def_bool y
depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_SYSREG
-endmenu
+# The GPIO number here must be sorted by descending number. In case of
+# a multiplatform kernel, we just want the highest value required by the
+# selected platforms.
+config ARCH_NR_GPIO
+ int
+ default 2048 if ARCH_APPLE
+ default 0
+ help
+ Maximum number of GPIOs in the system.
+
+ If unsure, leave the default value.
+
+endmenu # "Kernel Features"
menu "Boot options"
@@ -2116,7 +2128,7 @@ config EFI
help
This option provides support for runtime services provided
by UEFI firmware (such as non-volatile variables, realtime
- clock, and platform reset). A UEFI stub is also provided to
+ clock, and platform reset). A UEFI stub is also provided to
allow the kernel to be booted as an EFI application. This
is only useful on systems that have UEFI firmware.
@@ -2131,7 +2143,7 @@ config DMI
However, even with this option, the resultant kernel should
continue to boot on existing non-UEFI platforms.
-endmenu
+endmenu # "Boot options"
config SYSVIPC_COMPAT
def_bool y
@@ -2152,7 +2164,7 @@ config ARCH_HIBERNATION_HEADER
config ARCH_SUSPEND_POSSIBLE
def_bool y
-endmenu
+endmenu # "Power management options"
menu "CPU Power Management"
@@ -2160,7 +2172,7 @@ source "drivers/cpuidle/Kconfig"
source "drivers/cpufreq/Kconfig"
-endmenu
+endmenu # "CPU Power Management"
source "drivers/acpi/Kconfig"
@@ -2168,4 +2180,4 @@ source "arch/arm64/kvm/Kconfig"
if CRYPTO
source "arch/arm64/crypto/Kconfig"
-endif
+endif # CRYPTO
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 30b123cde02c..de9a18d3026f 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -325,4 +325,4 @@ config ARCH_ZYNQMP
help
This enables support for Xilinx ZynqMP Family
-endmenu
+endmenu # "Platform selection"
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index ff8f4511df71..92331c07c2d1 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -36,7 +36,7 @@
#define MIDR_VARIANT(midr) \
(((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT)
#define MIDR_IMPLEMENTOR_SHIFT 24
-#define MIDR_IMPLEMENTOR_MASK (0xff << MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_IMPLEMENTOR_MASK (0xffU << MIDR_IMPLEMENTOR_SHIFT)
#define MIDR_IMPLEMENTOR(midr) \
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 1494cfa8639b..dbc45a4157fa 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -80,8 +80,15 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
struct dyn_ftrace;
+struct ftrace_ops;
+struct ftrace_regs;
+
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop
+
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+#define ftrace_graph_func ftrace_graph_func
#endif
#define ftrace_return_address(n) return_address(n)
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 1242f71937f8..d656822b13f1 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -44,6 +44,8 @@ extern void huge_ptep_clear_flush(struct vm_area_struct *vma,
#define __HAVE_ARCH_HUGE_PTE_CLEAR
extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz);
+#define __HAVE_ARCH_HUGE_PTEP_GET
+extern pte_t huge_ptep_get(pte_t *ptep);
extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned long sz);
#define set_huge_swap_pte_at set_huge_swap_pte_at
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index adcb937342f1..aa523591a44e 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -47,6 +47,7 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg);
long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
+size_t mte_probe_user_range(const char __user *uaddr, size_t size);
#else /* CONFIG_ARM64_MTE */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 66671ff05183..dd3d12bce07b 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -49,7 +49,7 @@
#define PMD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PTRS_PER_PMD PTRS_PER_PTE
+#define PTRS_PER_PMD (1 << (PAGE_SHIFT - 3))
#endif
/*
@@ -59,7 +59,7 @@
#define PUD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
-#define PTRS_PER_PUD PTRS_PER_PTE
+#define PTRS_PER_PUD (1 << (PAGE_SHIFT - 3))
#endif
/*
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 94e147e5456c..9c0a9bfd6b07 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1001,7 +1001,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
*/
static inline bool arch_faults_on_old_pte(void)
{
- WARN_ON(preemptible());
+ /* The register read below requires a stable CPU to make any sense */
+ cant_migrate();
return !cpu_has_hw_af();
}
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index e77cdef9ca29..aec9315bf156 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -31,38 +31,6 @@ struct stack_info {
enum stack_type type;
};
-/*
- * A snapshot of a frame record or fp/lr register values, along with some
- * accounting information necessary for robust unwinding.
- *
- * @fp: The fp value in the frame record (or the real fp)
- * @pc: The lr value in the frame record (or the real lr)
- *
- * @stacks_done: Stacks which have been entirely unwound, for which it is no
- * longer valid to unwind to.
- *
- * @prev_fp: The fp that pointed to this frame record, or a synthetic value
- * of 0. This is used to ensure that within a stack, each
- * subsequent frame record is at an increasing address.
- * @prev_type: The type of stack this frame record was on, or a synthetic
- * value of STACK_TYPE_UNKNOWN. This is used to detect a
- * transition from one stack to another.
- *
- * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
- * associated with the most recently encountered replacement lr
- * value.
- */
-struct stackframe {
- unsigned long fp;
- unsigned long pc;
- DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
- unsigned long prev_fp;
- enum stack_type prev_type;
-#ifdef CONFIG_KRETPROBES
- struct llist_node *kr_cur;
-#endif
-};
-
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
const char *loglvl);
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index e8dce0cc5eaa..63f9c828f1a7 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -460,4 +460,19 @@ static inline int __copy_from_user_flushcache(void *dst, const void __user *src,
}
#endif
+#ifdef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
+
+/*
+ * Return 0 on success, the number of bytes not probed otherwise.
+ */
+static inline size_t probe_subpage_writeable(const char __user *uaddr,
+ size_t size)
+{
+ if (!system_supports_mte())
+ return 0;
+ return mte_probe_user_range(uaddr, size);
+}
+
+#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
+
#endif /* __ASM_UACCESS_H */
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 4c9b5b4b7a0b..49f4863c6c56 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -215,7 +215,7 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
-const struct midr_range cavium_erratum_23154_cpus[] = {
+static const struct midr_range cavium_erratum_23154_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_THUNDERX),
MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index e535480a4069..d42a205ef625 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -97,12 +97,6 @@ SYM_CODE_START(ftrace_common)
SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
bl ftrace_stub
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
- nop // If enabled, this will be replaced
- // "b ftrace_graph_caller"
-#endif
-
/*
* At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
* x19-x29 per the AAPCS, and we created frame records upon entry, so we need
@@ -127,17 +121,6 @@ ftrace_common_return:
ret x9
SYM_CODE_END(ftrace_common)
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-SYM_CODE_START(ftrace_graph_caller)
- ldr x0, [sp, #S_PC]
- sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
- add x1, sp, #S_LR // parent_ip (callsite's LR)
- ldr x2, [sp, #PT_REGS_SIZE] // parent fp (callsite's FP)
- bl prepare_ftrace_return
- b ftrace_common_return
-SYM_CODE_END(ftrace_graph_caller)
-#endif
-
#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
/*
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 4506c4a90ac1..f447c4a36f69 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -268,6 +268,22 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
}
#ifdef CONFIG_DYNAMIC_FTRACE
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
+{
+ /*
+ * When DYNAMIC_FTRACE_WITH_REGS is selected, `fregs` can never be NULL
+ * and arch_ftrace_get_regs(fregs) will always give a non-NULL pt_regs
+ * in which we can safely modify the LR.
+ */
+ struct pt_regs *regs = arch_ftrace_get_regs(fregs);
+ unsigned long *parent = (unsigned long *)&procedure_link_pointer(regs);
+
+ prepare_ftrace_return(ip, parent, frame_pointer(regs));
+}
+#else
/*
* Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
* depending on @enable.
@@ -297,5 +313,6 @@ int ftrace_disable_ftrace_graph_caller(void)
{
return ftrace_modify_graph_caller(false);
}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index e16b248699d5..19c2d487cb08 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -329,8 +329,13 @@ bool crash_is_nosave(unsigned long pfn)
/* in reserved memory? */
addr = __pfn_to_phys(pfn);
- if ((addr < crashk_res.start) || (crashk_res.end < addr))
- return false;
+ if ((addr < crashk_res.start) || (crashk_res.end < addr)) {
+ if (!crashk_low_res.end)
+ return false;
+
+ if ((addr < crashk_low_res.start) || (crashk_low_res.end < addr))
+ return false;
+ }
if (!kexec_crash_image)
return true;
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 59c648d51848..889951291cc0 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -65,10 +65,18 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
/* Exclude crashkernel region */
ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+ if (ret)
+ goto out;
+
+ if (crashk_low_res.end) {
+ ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
+ if (ret)
+ goto out;
+ }
- if (!ret)
- ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+ ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+out:
kfree(cmem);
return ret;
}
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 78b3e0f8e997..35697a09926f 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -15,6 +15,7 @@
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
+#include <linux/uaccess.h>
#include <linux/uio.h>
#include <asm/barrier.h>
@@ -543,3 +544,32 @@ static int register_mte_tcf_preferred_sysctl(void)
return 0;
}
subsys_initcall(register_mte_tcf_preferred_sysctl);
+
+/*
+ * Return 0 on success, the number of bytes not probed otherwise.
+ */
+size_t mte_probe_user_range(const char __user *uaddr, size_t size)
+{
+ const char __user *end = uaddr + size;
+ int err = 0;
+ char val;
+
+ __raw_get_user(val, uaddr, err);
+ if (err)
+ return size;
+
+ uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE);
+ while (uaddr < end) {
+ /*
+ * A read is sufficient for mte, the caller should have probed
+ * for the pte write permission if required.
+ */
+ __raw_get_user(val, uaddr, err);
+ if (err)
+ return end - uaddr;
+ uaddr += MTE_GRANULE_SIZE;
+ }
+ (void)val;
+
+ return 0;
+}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 3505789cf4bd..fea3223704b6 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -225,6 +225,8 @@ static void __init request_standard_resources(void)
kernel_code.end = __pa_symbol(__init_begin - 1);
kernel_data.start = __pa_symbol(_sdata);
kernel_data.end = __pa_symbol(_end - 1);
+ insert_resource(&iomem_resource, &kernel_code);
+ insert_resource(&iomem_resource, &kernel_data);
num_standard_resources = memblock.memory.cnt;
res_size = num_standard_resources * sizeof(*standard_resources);
@@ -246,20 +248,7 @@ static void __init request_standard_resources(void)
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
}
- request_resource(&iomem_resource, res);
-
- if (kernel_code.start >= res->start &&
- kernel_code.end <= res->end)
- request_resource(res, &kernel_code);
- if (kernel_data.start >= res->start &&
- kernel_data.end <= res->end)
- request_resource(res, &kernel_data);
-#ifdef CONFIG_KEXEC_CORE
- /* Userspace will find "Crash kernel" region in /proc/iomem. */
- if (crashk_res.end && crashk_res.start >= res->start &&
- crashk_res.end <= res->end)
- request_resource(res, &crashk_res);
-#endif
+ insert_resource(&iomem_resource, res);
}
}
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index e4103e085681..0467cb79f080 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -19,43 +19,60 @@
#include <asm/stacktrace.h>
/*
- * AArch64 PCS assigns the frame pointer to x29.
+ * A snapshot of a frame record or fp/lr register values, along with some
+ * accounting information necessary for robust unwinding.
*
- * A simple function prologue looks like this:
- * sub sp, sp, #0x10
- * stp x29, x30, [sp]
- * mov x29, sp
+ * @fp: The fp value in the frame record (or the real fp)
+ * @pc: The lr value in the frame record (or the real lr)
*
- * A simple function epilogue looks like this:
- * mov sp, x29
- * ldp x29, x30, [sp]
- * add sp, sp, #0x10
+ * @stacks_done: Stacks which have been entirely unwound, for which it is no
+ * longer valid to unwind to.
+ *
+ * @prev_fp: The fp that pointed to this frame record, or a synthetic value
+ * of 0. This is used to ensure that within a stack, each
+ * subsequent frame record is at an increasing address.
+ * @prev_type: The type of stack this frame record was on, or a synthetic
+ * value of STACK_TYPE_UNKNOWN. This is used to detect a
+ * transition from one stack to another.
+ *
+ * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
+ * associated with the most recently encountered replacement lr
+ * value.
*/
+struct unwind_state {
+ unsigned long fp;
+ unsigned long pc;
+ DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
+ unsigned long prev_fp;
+ enum stack_type prev_type;
+#ifdef CONFIG_KRETPROBES
+ struct llist_node *kr_cur;
+#endif
+};
-
-static notrace void start_backtrace(struct stackframe *frame, unsigned long fp,
- unsigned long pc)
+static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
+ unsigned long pc)
{
- frame->fp = fp;
- frame->pc = pc;
+ state->fp = fp;
+ state->pc = pc;
#ifdef CONFIG_KRETPROBES
- frame->kr_cur = NULL;
+ state->kr_cur = NULL;
#endif
/*
* Prime the first unwind.
*
- * In unwind_frame() we'll check that the FP points to a valid stack,
+ * In unwind_next() we'll check that the FP points to a valid stack,
* which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
* treated as a transition to whichever stack that happens to be. The
* prev_fp value won't be used, but we set it to 0 such that it is
* definitely not an accessible stack address.
*/
- bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
- frame->prev_fp = 0;
- frame->prev_type = STACK_TYPE_UNKNOWN;
+ bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
+ state->prev_fp = 0;
+ state->prev_type = STACK_TYPE_UNKNOWN;
}
-NOKPROBE_SYMBOL(start_backtrace);
+NOKPROBE_SYMBOL(unwind_init);
/*
* Unwind from one frame record (A) to the next frame record (B).
@@ -64,15 +81,12 @@ NOKPROBE_SYMBOL(start_backtrace);
* records (e.g. a cycle), determined based on the location and fp value of A
* and the location (but not the fp value) of B.
*/
-static int notrace unwind_frame(struct task_struct *tsk,
- struct stackframe *frame)
+static int notrace unwind_next(struct task_struct *tsk,
+ struct unwind_state *state)
{
- unsigned long fp = frame->fp;
+ unsigned long fp = state->fp;
struct stack_info info;
- if (!tsk)
- tsk = current;
-
/* Final frame; nothing to unwind */
if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
return -ENOENT;
@@ -83,7 +97,7 @@ static int notrace unwind_frame(struct task_struct *tsk,
if (!on_accessible_stack(tsk, fp, 16, &info))
return -EINVAL;
- if (test_bit(info.type, frame->stacks_done))
+ if (test_bit(info.type, state->stacks_done))
return -EINVAL;
/*
@@ -99,27 +113,27 @@ static int notrace unwind_frame(struct task_struct *tsk,
* stack to another, it's never valid to unwind back to that first
* stack.
*/
- if (info.type == frame->prev_type) {
- if (fp <= frame->prev_fp)
+ if (info.type == state->prev_type) {
+ if (fp <= state->prev_fp)
return -EINVAL;
} else {
- set_bit(frame->prev_type, frame->stacks_done);
+ set_bit(state->prev_type, state->stacks_done);
}
/*
* Record this frame record's values and location. The prev_fp and
- * prev_type are only meaningful to the next unwind_frame() invocation.
+ * prev_type are only meaningful to the next unwind_next() invocation.
*/
- frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
- frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
- frame->prev_fp = fp;
- frame->prev_type = info.type;
+ state->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
+ state->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+ state->prev_fp = fp;
+ state->prev_type = info.type;
- frame->pc = ptrauth_strip_insn_pac(frame->pc);
+ state->pc = ptrauth_strip_insn_pac(state->pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
- (frame->pc == (unsigned long)return_to_handler)) {
+ (state->pc == (unsigned long)return_to_handler)) {
unsigned long orig_pc;
/*
* This is a case where function graph tracer has
@@ -127,37 +141,37 @@ static int notrace unwind_frame(struct task_struct *tsk,
* to hook a function return.
* So replace it to an original value.
*/
- orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
- (void *)frame->fp);
- if (WARN_ON_ONCE(frame->pc == orig_pc))
+ orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
+ (void *)state->fp);
+ if (WARN_ON_ONCE(state->pc == orig_pc))
return -EINVAL;
- frame->pc = orig_pc;
+ state->pc = orig_pc;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
- if (is_kretprobe_trampoline(frame->pc))
- frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
+ if (is_kretprobe_trampoline(state->pc))
+ state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif
return 0;
}
-NOKPROBE_SYMBOL(unwind_frame);
+NOKPROBE_SYMBOL(unwind_next);
-static void notrace walk_stackframe(struct task_struct *tsk,
- struct stackframe *frame,
- bool (*fn)(void *, unsigned long), void *data)
+static void notrace unwind(struct task_struct *tsk,
+ struct unwind_state *state,
+ stack_trace_consume_fn consume_entry, void *cookie)
{
while (1) {
int ret;
- if (!fn(data, frame->pc))
+ if (!consume_entry(cookie, state->pc))
break;
- ret = unwind_frame(tsk, frame);
+ ret = unwind_next(tsk, state);
if (ret < 0)
break;
}
}
-NOKPROBE_SYMBOL(walk_stackframe);
+NOKPROBE_SYMBOL(unwind);
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
@@ -196,17 +210,17 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
{
- struct stackframe frame;
+ struct unwind_state state;
if (regs)
- start_backtrace(&frame, regs->regs[29], regs->pc);
+ unwind_init(&state, regs->regs[29], regs->pc);
else if (task == current)
- start_backtrace(&frame,
+ unwind_init(&state,
(unsigned long)__builtin_frame_address(1),
(unsigned long)__builtin_return_address(0));
else
- start_backtrace(&frame, thread_saved_fp(task),
+ unwind_init(&state, thread_saved_fp(task),
thread_saved_pc(task));
- walk_stackframe(task, &frame, consume_entry, cookie);
+ unwind(task, &state, consume_entry, cookie);
}
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index edaf0faf766f..2d4a8f995175 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -93,7 +93,6 @@ jiffies = jiffies_64;
#ifdef CONFIG_HIBERNATION
#define HIBERNATE_TEXT \
- . = ALIGN(SZ_4K); \
__hibernate_exit_text_start = .; \
*(.hibernate_exit.text) \
__hibernate_exit_text_end = .;
@@ -103,7 +102,6 @@ jiffies = jiffies_64;
#ifdef CONFIG_KEXEC_CORE
#define KEXEC_TEXT \
- . = ALIGN(SZ_4K); \
__relocate_new_kernel_start = .; \
*(.kexec_relocate.text) \
__relocate_new_kernel_end = .;
@@ -170,9 +168,6 @@ SECTIONS
KPROBES_TEXT
HYPERVISOR_TEXT
IDMAP_TEXT
- HIBERNATE_TEXT
- KEXEC_TEXT
- TRAMP_TEXT
*(.gnu.warning)
. = ALIGN(16);
*(.got) /* Global offset table */
@@ -194,6 +189,14 @@ SECTIONS
HYPERVISOR_DATA_SECTIONS
+ /* code sections that are never executed via the kernel mapping */
+ .rodata.text : {
+ TRAMP_TEXT
+ HIBERNATE_TEXT
+ KEXEC_TEXT
+ . = ALIGN(PAGE_SIZE);
+ }
+
idmap_pg_dir = .;
. += IDMAP_DIR_SIZE;
idmap_pg_end = .;
@@ -337,8 +340,8 @@ ASSERT(__hyp_idmap_text_end - __hyp_idmap_text_start <= PAGE_SIZE,
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
"ID map text too big or misaligned")
#ifdef CONFIG_HIBERNATION
-ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
- <= SZ_4K, "Hibernate exit text too big or misaligned")
+ASSERT(__hibernate_exit_text_end - __hibernate_exit_text_start <= SZ_4K,
+ "Hibernate exit text is bigger than 4 KiB")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
@@ -362,7 +365,7 @@ ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
#ifdef CONFIG_KEXEC_CORE
/* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */
-ASSERT(__relocate_new_kernel_end - (__relocate_new_kernel_start & ~(SZ_4K - 1))
- <= SZ_4K, "kexec relocation code is too big or misaligned")
+ASSERT(__relocate_new_kernel_end - __relocate_new_kernel_start <= SZ_4K,
+ "kexec relocation code is bigger than 4 KiB")
ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken")
#endif
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 8590af3c98c0..eeb9e45bcce8 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -93,7 +93,7 @@ SYM_FUNC_START(mte_copy_tags_from_user)
mov x3, x1
cbz x2, 2f
1:
- user_ldst 2f, ldtrb, w4, x1, 0
+USER(2f, ldtrb w4, [x1])
lsl x4, x4, #MTE_TAG_SHIFT
stg x4, [x0], #MTE_GRANULE_SIZE
add x1, x1, #1
@@ -120,7 +120,7 @@ SYM_FUNC_START(mte_copy_tags_to_user)
1:
ldg x4, [x1]
ubfx x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE
- user_ldst 2f, sttrb, w4, x0, 0
+USER(2f, sttrb w4, [x0])
add x0, x0, #1
add x1, x1, #MTE_GRANULE_SIZE
subs x2, x2, #1
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index b5447e53cd73..0dea80bf6de4 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -16,8 +16,8 @@
void copy_highpage(struct page *to, struct page *from)
{
- struct page *kto = page_address(to);
- struct page *kfrom = page_address(from);
+ void *kto = page_address(to);
+ void *kfrom = page_address(from);
copy_page(kto, kfrom);
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index cbace1c9e137..64bb078e2e7b 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -158,6 +158,28 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
return contig_ptes;
}
+pte_t huge_ptep_get(pte_t *ptep)
+{
+ int ncontig, i;
+ size_t pgsize;
+ pte_t orig_pte = ptep_get(ptep);
+
+ if (!pte_present(orig_pte) || !pte_cont(orig_pte))
+ return orig_pte;
+
+ ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
+ for (i = 0; i < ncontig; i++, ptep++) {
+ pte_t pte = ptep_get(ptep);
+
+ if (pte_dirty(pte))
+ orig_pte = pte_mkdirty(orig_pte);
+
+ if (pte_young(pte))
+ orig_pte = pte_mkyoung(orig_pte);
+ }
+ return orig_pte;
+}
+
/*
* Changing some bits of contiguous entries requires us to follow a
* Break-Before-Make approach, breaking the whole contiguous set
@@ -166,15 +188,14 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
*
* This helper performs the break step.
*/
-static pte_t get_clear_flush(struct mm_struct *mm,
+static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
unsigned long pgsize,
unsigned long ncontig)
{
- pte_t orig_pte = huge_ptep_get(ptep);
- bool valid = pte_valid(orig_pte);
- unsigned long i, saddr = addr;
+ pte_t orig_pte = ptep_get(ptep);
+ unsigned long i;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
@@ -190,11 +211,6 @@ static pte_t get_clear_flush(struct mm_struct *mm,
if (pte_young(pte))
orig_pte = pte_mkyoung(orig_pte);
}
-
- if (valid) {
- struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
- flush_tlb_range(&vma, saddr, addr);
- }
return orig_pte;
}
@@ -385,14 +401,14 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
{
int ncontig;
size_t pgsize;
- pte_t orig_pte = huge_ptep_get(ptep);
+ pte_t orig_pte = ptep_get(ptep);
if (!pte_cont(orig_pte))
return ptep_get_and_clear(mm, addr, ptep);
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
- return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
+ return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
/*
@@ -408,11 +424,11 @@ static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
int i;
- if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+ if (pte_write(pte) != pte_write(ptep_get(ptep)))
return 1;
for (i = 0; i < ncontig; i++) {
- pte_t orig_pte = huge_ptep_get(ptep + i);
+ pte_t orig_pte = ptep_get(ptep + i);
if (pte_dirty(pte) != pte_dirty(orig_pte))
return 1;
@@ -443,7 +459,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
if (!__cont_access_flags_changed(ptep, pte, ncontig))
return 0;
- orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
+ orig_pte = get_clear_contig(vma->vm_mm, addr, ptep, pgsize, ncontig);
/* Make sure we don't lose the dirty or young state */
if (pte_dirty(orig_pte))
@@ -476,7 +492,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
dpfn = pgsize >> PAGE_SHIFT;
- pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
+ pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
pte = pte_wrprotect(pte);
hugeprot = pte_pgprot(pte);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 1e7b1550e2fc..a1410143ea62 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -90,6 +90,32 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
#endif
+/* Current arm64 boot protocol requires 2MB alignment */
+#define CRASH_ALIGN SZ_2M
+
+#define CRASH_ADDR_LOW_MAX arm64_dma_phys_limit
+#define CRASH_ADDR_HIGH_MAX (PHYS_MASK + 1)
+
+static int __init reserve_crashkernel_low(unsigned long long low_size)
+{
+ unsigned long long low_base;
+
+ low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
+ if (!low_base) {
+ pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
+ return -ENOMEM;
+ }
+
+ pr_info("crashkernel low memory reserved: 0x%08llx - 0x%08llx (%lld MB)\n",
+ low_base, low_base + low_size, low_size >> 20);
+
+ crashk_low_res.start = low_base;
+ crashk_low_res.end = low_base + low_size - 1;
+ insert_resource(&iomem_resource, &crashk_low_res);
+
+ return 0;
+}
+
/*
* reserve_crashkernel() - reserves memory for crash kernel
*
@@ -100,17 +126,35 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
static void __init reserve_crashkernel(void)
{
unsigned long long crash_base, crash_size;
- unsigned long long crash_max = arm64_dma_phys_limit;
+ unsigned long long crash_low_size = 0;
+ unsigned long long crash_max = CRASH_ADDR_LOW_MAX;
+ char *cmdline = boot_command_line;
int ret;
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ /* crashkernel=X[@offset] */
+ ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
&crash_size, &crash_base);
- /* no crashkernel= or invalid value specified */
- if (ret || !crash_size)
+ if (ret == -ENOENT) {
+ ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
+ if (ret || !crash_size)
+ return;
+
+ /*
+ * crashkernel=Y,low can be specified or not, but invalid value
+ * is not allowed.
+ */
+ ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
+ if (ret && (ret != -ENOENT))
+ return;
+
+ crash_max = CRASH_ADDR_HIGH_MAX;
+ } else if (ret || !crash_size) {
+ /* The specified value is invalid */
return;
+ }
crash_size = PAGE_ALIGN(crash_size);
@@ -118,8 +162,7 @@ static void __init reserve_crashkernel(void)
if (crash_base)
crash_max = crash_base + crash_size;
- /* Current arm64 boot protocol requires 2MB alignment */
- crash_base = memblock_phys_alloc_range(crash_size, SZ_2M,
+ crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
crash_base, crash_max);
if (!crash_base) {
pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@@ -127,6 +170,12 @@ static void __init reserve_crashkernel(void)
return;
}
+ if ((crash_base >= CRASH_ADDR_LOW_MAX) &&
+ crash_low_size && reserve_crashkernel_low(crash_low_size)) {
+ memblock_phys_free(crash_base, crash_size);
+ return;
+ }
+
pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
crash_base, crash_base + crash_size, crash_size >> 20);
@@ -135,8 +184,12 @@ static void __init reserve_crashkernel(void)
* map. Inform kmemleak so that it won't try to access it.
*/
kmemleak_ignore_phys(crash_base);
+ if (crashk_low_res.end)
+ kmemleak_ignore_phys(crashk_low_res.start);
+
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
+ insert_resource(&iomem_resource, &crashk_res);
}
/*
@@ -157,7 +210,7 @@ static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}
-static void __init zone_sizes_init(unsigned long min, unsigned long max)
+static void __init zone_sizes_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
unsigned int __maybe_unused acpi_zone_dma_bits;
@@ -176,7 +229,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
if (!arm64_dma_phys_limit)
arm64_dma_phys_limit = dma32_phys_limit;
#endif
- max_zone_pfns[ZONE_NORMAL] = max;
+ max_zone_pfns[ZONE_NORMAL] = max_pfn;
free_area_init(max_zone_pfns);
}
@@ -374,7 +427,7 @@ void __init bootmem_init(void)
* done after the fixed reservations
*/
sparse_init();
- zone_sizes_init(min, max);
+ zone_sizes_init();
/*
* Reserve the CMA area after arm64_dma_phys_limit was initialised.
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index d7da8ca40d2e..4ea2eefbc053 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -238,7 +238,7 @@ int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
int this_level, index, level_lsb, level_msb;
dst_addr &= PAGE_MASK;
- prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_ROX));
for (this_level = 3; this_level >= 0; this_level--) {
levels[this_level] = trans_alloc(info);
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 1e31c7d21597..b09d73c2ba89 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -579,9 +579,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
@@ -610,18 +608,7 @@ int ftrace_disable_ftrace_graph_caller(void)
return ftrace_mod_jmp(ip, &ftrace_stub);
}
-#else /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
-int ftrace_enable_ftrace_graph_caller(void)
-{
- return 0;
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
- return 0;
-}
-#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
-#endif /* !CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
/*
* Hook the return address and push it in the stack of return addrs
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index ec315b060cd5..2f248d0acc04 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -973,16 +973,24 @@ static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;
+/*
+ * The main usage of linux,usable-memory-range is for crash dump kernel.
+ * Originally, the number of usable-memory regions is one. Now there may
+ * be two regions, low region and high region.
+ * To make compatibility with existing user-space and older kdump, the low
+ * region is always the last range of linux,usable-memory-range if exist.
+ */
+#define MAX_USABLE_RANGES 2
+
/**
* early_init_dt_check_for_usable_mem_range - Decode usable memory range
* location from flat tree
*/
void __init early_init_dt_check_for_usable_mem_range(void)
{
- const __be32 *prop;
- int len;
- phys_addr_t cap_mem_addr;
- phys_addr_t cap_mem_size;
+ struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
+ const __be32 *prop, *endp;
+ int len, i;
unsigned long node = chosen_node_offset;
if ((long)node < 0)
@@ -991,16 +999,21 @@ void __init early_init_dt_check_for_usable_mem_range(void)
pr_debug("Looking for usable-memory-range property... ");
prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
- if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
+ if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
return;
- cap_mem_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
- cap_mem_size = dt_mem_next_cell(dt_root_size_cells, &prop);
+ endp = prop + (len / sizeof(__be32));
+ for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
+ rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);
- pr_debug("cap_mem_start=%pa cap_mem_size=%pa\n", &cap_mem_addr,
- &cap_mem_size);
+ pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
+ i, &rgn[i].base, &rgn[i].size);
+ }
- memblock_cap_memory_range(cap_mem_addr, cap_mem_size);
+ memblock_cap_memory_range(rgn[0].base, rgn[0].size);
+ for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
+ memblock_add(rgn[i].base, rgn[i].size);
}
#ifdef CONFIG_SERIAL_EARLYCON
diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
index b9bd1cff1793..8d374cc552be 100644
--- a/drivers/of/kexec.c
+++ b/drivers/of/kexec.c
@@ -386,6 +386,15 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
crashk_res.end - crashk_res.start + 1);
if (ret)
goto out;
+
+ if (crashk_low_res.end) {
+ ret = fdt_appendprop_addrrange(fdt, 0, chosen_node,
+ "linux,usable-memory-range",
+ crashk_low_res.start,
+ crashk_low_res.end - crashk_low_res.start + 1);
+ if (ret)
+ goto out;
+ }
}
/* add bootargs */
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 9c1d82be7a2f..80d8309652a4 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -39,7 +39,7 @@
#define CMN_CHILD_NODE_ADDR GENMASK(27, 0)
#define CMN_CHILD_NODE_EXTERNAL BIT(31)
-#define CMN_MAX_DIMENSION 8
+#define CMN_MAX_DIMENSION 12
#define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
#define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
@@ -52,6 +52,10 @@
#define CMN_INFO_RSP_VC_NUM GENMASK_ULL(53, 52)
#define CMN_INFO_DAT_VC_NUM GENMASK_ULL(51, 50)
+#define CMN_CFGM_INFO_GLOBAL_1 0x908
+#define CMN_INFO_SNP_VC_NUM GENMASK_ULL(3, 2)
+#define CMN_INFO_REQ_VC_NUM GENMASK_ULL(1, 0)
+
/* XPs also have some local topology info which has uses too */
#define CMN_MXP__CONNECT_INFO_P0 0x0008
#define CMN_MXP__CONNECT_INFO_P1 0x0010
@@ -59,18 +63,26 @@
#define CMN_MXP__CONNECT_INFO_P3 0x0030
#define CMN_MXP__CONNECT_INFO_P4 0x0038
#define CMN_MXP__CONNECT_INFO_P5 0x0040
+#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(4, 0)
/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET 0x2000
/* For most nodes, this is all there is */
#define CMN_PMU_EVENT_SEL 0x000
-#define CMN_PMU_EVENTn_ID_SHIFT(n) ((n) * 8)
+#define CMN__PMU_CBUSY_SNTHROTTLE_SEL GENMASK_ULL(44, 42)
+#define CMN__PMU_CLASS_OCCUP_ID GENMASK_ULL(36, 35)
+/* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
+#define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32)
+
+/* HN-Ps are weird... */
+#define CMN_HNP_PMU_EVENT_SEL 0x008
/* DTMs live in the PMU space of XP registers */
#define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18)
#define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00)
-#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18,17)
+#define CMN_DTM_WPn_CONFIG_WP_CHN_NUM GENMASK_ULL(20, 19)
+#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18, 17)
#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(9)
#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(8)
#define CMN600_WPn_CONFIG_WP_COMBINE BIT(6)
@@ -143,8 +155,8 @@
/* Event attributes */
#define CMN_CONFIG_TYPE GENMASK_ULL(15, 0)
-#define CMN_CONFIG_EVENTID GENMASK_ULL(23, 16)
-#define CMN_CONFIG_OCCUPID GENMASK_ULL(27, 24)
+#define CMN_CONFIG_EVENTID GENMASK_ULL(26, 16)
+#define CMN_CONFIG_OCCUPID GENMASK_ULL(30, 27)
#define CMN_CONFIG_BYNODEID BIT_ULL(31)
#define CMN_CONFIG_NODEID GENMASK_ULL(47, 32)
@@ -177,9 +189,14 @@
enum cmn_model {
- CMN_ANY = -1,
CMN600 = 1,
- CI700 = 2,
+ CMN650 = 2,
+ CMN700 = 4,
+ CI700 = 8,
+ /* ...and then we can use bitmap tricks for commonality */
+ CMN_ANY = -1,
+ NOT_CMN600 = -2,
+ CMN_650ON = CMN650 | CMN700,
};
/* CMN-600 r0px shouldn't exist in silicon, thankfully */
@@ -191,6 +208,14 @@ enum cmn_revision {
CMN600_R2P0,
CMN600_R3P0,
CMN600_R3P1,
+ CMN650_R0P0 = 0,
+ CMN650_R1P0,
+ CMN650_R1P1,
+ CMN650_R2P0,
+ CMN650_R1P2,
+ CMN700_R0P0 = 0,
+ CMN700_R1P0,
+ CMN700_R2P0,
CI700_R0P0 = 0,
CI700_R1P0,
CI700_R2P0,
@@ -211,13 +236,26 @@ enum cmn_node_type {
CMN_TYPE_RND = 0xd,
CMN_TYPE_RNSAM = 0xf,
CMN_TYPE_MTSX,
+ CMN_TYPE_HNP,
CMN_TYPE_CXRA = 0x100,
- CMN_TYPE_CXHA = 0x101,
- CMN_TYPE_CXLA = 0x102,
+ CMN_TYPE_CXHA,
+ CMN_TYPE_CXLA,
+ CMN_TYPE_CCRA,
+ CMN_TYPE_CCHA,
+ CMN_TYPE_CCLA,
+ CMN_TYPE_CCLA_RNI,
/* Not a real node type */
CMN_TYPE_WP = 0x7770
};
+enum cmn_filter_select {
+ SEL_NONE = -1,
+ SEL_OCCUP1ID,
+ SEL_CLASS_OCCUP_ID,
+ SEL_CBUSY_SNTHROTTLE_SEL,
+ SEL_MAX
+};
+
struct arm_cmn_node {
void __iomem *pmu_base;
u16 id, logid;
@@ -227,15 +265,17 @@ struct arm_cmn_node {
union {
/* DN/HN-F/CXHA */
struct {
- u8 occupid_val;
- u8 occupid_count;
- };
+ u8 val : 4;
+ u8 count : 4;
+ } occupid[SEL_MAX];
/* XP */
u8 dtc;
};
union {
u8 event[4];
__le32 event_sel;
+ u16 event_w[4];
+ __le64 event_sel_w;
};
};
@@ -278,6 +318,8 @@ struct arm_cmn {
struct {
unsigned int rsp_vc_num : 2;
unsigned int dat_vc_num : 2;
+ unsigned int snp_vc_num : 2;
+ unsigned int req_vc_num : 2;
};
struct arm_cmn_node *xps;
@@ -307,9 +349,7 @@ struct arm_cmn_nodeid {
static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
{
- int dim = max(cmn->mesh_x, cmn->mesh_y);
-
- return dim > 4 ? 3 : 2;
+ return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2);
}
static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id)
@@ -361,7 +401,8 @@ static struct dentry *arm_cmn_debugfs;
#ifdef CONFIG_DEBUG_FS
static const char *arm_cmn_device_type(u8 type)
{
- switch(type) {
+ switch(FIELD_GET(CMN__CONNECT_INFO_DEVICE_TYPE, type)) {
+ case 0x00: return " |";
case 0x01: return " RN-I |";
case 0x02: return " RN-D |";
case 0x04: return " RN-F_B |";
@@ -371,6 +412,7 @@ static const char *arm_cmn_device_type(u8 type)
case 0x08: return " HN-T |";
case 0x09: return " HN-I |";
case 0x0a: return " HN-D |";
+ case 0x0b: return " HN-P |";
case 0x0c: return " SN-F |";
case 0x0d: return " SBSX |";
case 0x0e: return " HN-F |";
@@ -383,8 +425,12 @@ static const char *arm_cmn_device_type(u8 type)
case 0x15: return "RN-F_D_E|";
case 0x16: return " RN-F_C |";
case 0x17: return "RN-F_C_E|";
+ case 0x18: return " RN-F_E |";
+ case 0x19: return "RN-F_E_E|";
case 0x1c: return " MTSX |";
- default: return " |";
+ case 0x1d: return " HN-V |";
+ case 0x1e: return " CCG |";
+ default: return " ???? |";
}
}
@@ -492,11 +538,13 @@ static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
struct arm_cmn_hw_event {
struct arm_cmn_node *dn;
- u64 dtm_idx[2];
+ u64 dtm_idx[4];
unsigned int dtc_idx;
u8 dtcs_used;
u8 num_dns;
u8 dtm_offset;
+ bool wide_sel;
+ enum cmn_filter_select filter_sel;
};
#define for_each_hw_dn(hw, dn, i) \
@@ -522,7 +570,8 @@ struct arm_cmn_event_attr {
struct device_attribute attr;
enum cmn_model model;
enum cmn_node_type type;
- u8 eventid;
+ enum cmn_filter_select fsel;
+ u16 eventid;
u8 occupid;
};
@@ -532,23 +581,17 @@ struct arm_cmn_format_attr {
int config;
};
-#define CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid) \
+#define _CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid, _fsel)\
(&((struct arm_cmn_event_attr[]) {{ \
.attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL), \
.model = _model, \
.type = _type, \
.eventid = _eventid, \
.occupid = _occupid, \
+ .fsel = _fsel, \
}})[0].attr.attr)
-
-static bool arm_cmn_is_occup_event(enum cmn_model model,
- enum cmn_node_type type, unsigned int id)
-{
- if (type == CMN_TYPE_DVM)
- return (model == CMN600 && id == 0x05) ||
- (model == CI700 && id == 0x0c);
- return type == CMN_TYPE_HNF && id == 0x0f;
-}
+#define CMN_EVENT_ATTR(_model, _name, _type, _eventid) \
+ _CMN_EVENT_ATTR(_model, _name, _type, _eventid, 0, SEL_NONE)
static ssize_t arm_cmn_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -565,7 +608,7 @@ static ssize_t arm_cmn_event_show(struct device *dev,
"type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
eattr->type, eattr->eventid);
- if (arm_cmn_is_occup_event(eattr->model, eattr->type, eattr->eventid))
+ if (eattr->fsel > SEL_NONE)
return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
eattr->type, eattr->eventid, eattr->occupid);
@@ -580,20 +623,25 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
struct arm_cmn_event_attr *eattr;
+ enum cmn_node_type type;
+ u16 eventid;
eattr = container_of(attr, typeof(*eattr), attr.attr);
if (!(eattr->model & cmn->model))
return 0;
+ type = eattr->type;
+ eventid = eattr->eventid;
+
/* Watchpoints aren't nodes, so avoid confusion */
- if (eattr->type == CMN_TYPE_WP)
+ if (type == CMN_TYPE_WP)
return attr->mode;
/* Hide XP events for unused interfaces/channels */
- if (eattr->type == CMN_TYPE_XP) {
- unsigned int intf = (eattr->eventid >> 2) & 7;
- unsigned int chan = eattr->eventid >> 5;
+ if (type == CMN_TYPE_XP) {
+ unsigned int intf = (eventid >> 2) & 7;
+ unsigned int chan = eventid >> 5;
if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
return 0;
@@ -602,43 +650,107 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
return 0;
if ((chan == 5 && cmn->rsp_vc_num < 2) ||
- (chan == 6 && cmn->dat_vc_num < 2))
+ (chan == 6 && cmn->dat_vc_num < 2) ||
+ (chan == 7 && cmn->snp_vc_num < 2) ||
+ (chan == 8 && cmn->req_vc_num < 2))
return 0;
}
/* Revision-specific differences */
- if (cmn->model == CMN600 && cmn->rev < CMN600_R1P2) {
- if (eattr->type == CMN_TYPE_HNF && eattr->eventid == 0x1b)
- return 0;
+ if (cmn->model == CMN600) {
+ if (cmn->rev < CMN600_R1P3) {
+ if (type == CMN_TYPE_CXRA && eventid > 0x10)
+ return 0;
+ }
+ if (cmn->rev < CMN600_R1P2) {
+ if (type == CMN_TYPE_HNF && eventid == 0x1b)
+ return 0;
+ if (type == CMN_TYPE_CXRA || type == CMN_TYPE_CXHA)
+ return 0;
+ }
+ } else if (cmn->model == CMN650) {
+ if (cmn->rev < CMN650_R2P0 || cmn->rev == CMN650_R1P2) {
+ if (type == CMN_TYPE_HNF && eventid > 0x22)
+ return 0;
+ if (type == CMN_TYPE_SBSX && eventid == 0x17)
+ return 0;
+ if (type == CMN_TYPE_RNI && eventid > 0x10)
+ return 0;
+ }
+ } else if (cmn->model == CMN700) {
+ if (cmn->rev < CMN700_R2P0) {
+ if (type == CMN_TYPE_HNF && eventid > 0x2c)
+ return 0;
+ if (type == CMN_TYPE_CCHA && eventid > 0x74)
+ return 0;
+ if (type == CMN_TYPE_CCLA && eventid > 0x27)
+ return 0;
+ }
+ if (cmn->rev < CMN700_R1P0) {
+ if (type == CMN_TYPE_HNF && eventid > 0x2b)
+ return 0;
+ }
}
- if (!arm_cmn_node(cmn, eattr->type))
+ if (!arm_cmn_node(cmn, type))
return 0;
return attr->mode;
}
-#define _CMN_EVENT_DVM(_model, _name, _event, _occup) \
- CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup)
+#define _CMN_EVENT_DVM(_model, _name, _event, _occup, _fsel) \
+ _CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup, _fsel)
#define CMN_EVENT_DTC(_name) \
- CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0, 0)
-#define _CMN_EVENT_HNF(_model, _name, _event, _occup) \
- CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event, _occup)
+ CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0)
+#define _CMN_EVENT_HNF(_model, _name, _event, _occup, _fsel) \
+ _CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event, _occup, _fsel)
#define CMN_EVENT_HNI(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event, 0)
+ CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event)
+#define CMN_EVENT_HNP(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, hnp_##_name, CMN_TYPE_HNP, _event)
#define __CMN_EVENT_XP(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event, 0)
+ CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event)
#define CMN_EVENT_SBSX(_model, _name, _event) \
- CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event, 0)
+ CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event)
#define CMN_EVENT_RNID(_model, _name, _event) \
- CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event, 0)
+ CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event)
#define CMN_EVENT_MTSX(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event, 0)
+ CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event)
+#define CMN_EVENT_CXRA(_model, _name, _event) \
+ CMN_EVENT_ATTR(_model, cxra_##_name, CMN_TYPE_CXRA, _event)
+#define CMN_EVENT_CXHA(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event)
+#define CMN_EVENT_CCRA(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, ccra_##_name, CMN_TYPE_CCRA, _event)
+#define CMN_EVENT_CCHA(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, ccha_##_name, CMN_TYPE_CCHA, _event)
+#define CMN_EVENT_CCLA(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
+#define CMN_EVENT_CCLA_RNI(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event)
#define CMN_EVENT_DVM(_model, _name, _event) \
- _CMN_EVENT_DVM(_model, _name, _event, 0)
+ _CMN_EVENT_DVM(_model, _name, _event, 0, SEL_NONE)
+#define CMN_EVENT_DVM_OCC(_model, _name, _event) \
+ _CMN_EVENT_DVM(_model, _name##_all, _event, 0, SEL_OCCUP1ID), \
+ _CMN_EVENT_DVM(_model, _name##_dvmop, _event, 1, SEL_OCCUP1ID), \
+ _CMN_EVENT_DVM(_model, _name##_dvmsync, _event, 2, SEL_OCCUP1ID)
#define CMN_EVENT_HNF(_model, _name, _event) \
- _CMN_EVENT_HNF(_model, _name, _event, 0)
+ _CMN_EVENT_HNF(_model, _name, _event, 0, SEL_NONE)
+#define CMN_EVENT_HNF_CLS(_model, _name, _event) \
+ _CMN_EVENT_HNF(_model, _name##_class0, _event, 0, SEL_CLASS_OCCUP_ID), \
+ _CMN_EVENT_HNF(_model, _name##_class1, _event, 1, SEL_CLASS_OCCUP_ID), \
+ _CMN_EVENT_HNF(_model, _name##_class2, _event, 2, SEL_CLASS_OCCUP_ID), \
+ _CMN_EVENT_HNF(_model, _name##_class3, _event, 3, SEL_CLASS_OCCUP_ID)
+#define CMN_EVENT_HNF_SNT(_model, _name, _event) \
+ _CMN_EVENT_HNF(_model, _name##_all, _event, 0, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_HNF(_model, _name##_group0_read, _event, 1, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_HNF(_model, _name##_group0_write, _event, 2, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_HNF(_model, _name##_group1_read, _event, 3, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_HNF(_model, _name##_group1_write, _event, 4, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_HNF(_model, _name##_read, _event, 5, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_HNF(_model, _name##_write, _event, 6, SEL_CBUSY_SNTHROTTLE_SEL)
+
#define _CMN_EVENT_XP(_name, _event) \
__CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)), \
__CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)), \
@@ -657,7 +769,9 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
_CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)), \
_CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)), \
_CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)), \
- _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5))
+ _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)), \
+ _CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)), \
+ _CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5))
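As a stand-alone illustration rather than part of the patch: the XP helper macros above pack the interface select into eventid bits [4:2] and the channel select into the bits from [5] upwards, which is what arm_cmn_event_attr_is_visible() unpacks when hiding events for unused ports or for the narrower SNP/REQ VCs. A minimal sketch of that encoding for the new channel 8 (REQ2), using a hypothetical base event id of 0x01:

#include <stdio.h>

int main(void)
{
	/* hypothetical "req2" variant of an XP event with base id 0x01 */
	unsigned int eventid = 0x01 | (0 << 2) | (8 << 5);

	unsigned int intf = (eventid >> 2) & 7;	/* interface/port select */
	unsigned int chan = eventid >> 5;	/* channel select: 8 == REQ2 */

	printf("eventid=0x%x intf=%u chan=%u\n", eventid, intf, chan);
	return 0;
}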
static struct attribute *arm_cmn_event_attrs[] = {
@@ -672,23 +786,27 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_DVM(CMN600, rxreq_dvmsync, 0x02),
CMN_EVENT_DVM(CMN600, rxreq_dvmop_vmid_filtered, 0x03),
CMN_EVENT_DVM(CMN600, rxreq_retried, 0x04),
- _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_all, 0x05, 0),
- _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmop, 0x05, 1),
- _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmsync, 0x05, 2),
- CMN_EVENT_DVM(CI700, dvmop_tlbi, 0x01),
- CMN_EVENT_DVM(CI700, dvmop_bpi, 0x02),
- CMN_EVENT_DVM(CI700, dvmop_pici, 0x03),
- CMN_EVENT_DVM(CI700, dvmop_vici, 0x04),
- CMN_EVENT_DVM(CI700, dvmsync, 0x05),
- CMN_EVENT_DVM(CI700, vmid_filtered, 0x06),
- CMN_EVENT_DVM(CI700, rndop_filtered, 0x07),
- CMN_EVENT_DVM(CI700, retry, 0x08),
- CMN_EVENT_DVM(CI700, txsnp_flitv, 0x09),
- CMN_EVENT_DVM(CI700, txsnp_stall, 0x0a),
- CMN_EVENT_DVM(CI700, trkfull, 0x0b),
- _CMN_EVENT_DVM(CI700, trk_occupancy_all, 0x0c, 0),
- _CMN_EVENT_DVM(CI700, trk_occupancy_dvmop, 0x0c, 1),
- _CMN_EVENT_DVM(CI700, trk_occupancy_dvmsync, 0x0c, 2),
+ CMN_EVENT_DVM_OCC(CMN600, rxreq_trk_occupancy, 0x05),
+ CMN_EVENT_DVM(NOT_CMN600, dvmop_tlbi, 0x01),
+ CMN_EVENT_DVM(NOT_CMN600, dvmop_bpi, 0x02),
+ CMN_EVENT_DVM(NOT_CMN600, dvmop_pici, 0x03),
+ CMN_EVENT_DVM(NOT_CMN600, dvmop_vici, 0x04),
+ CMN_EVENT_DVM(NOT_CMN600, dvmsync, 0x05),
+ CMN_EVENT_DVM(NOT_CMN600, vmid_filtered, 0x06),
+ CMN_EVENT_DVM(NOT_CMN600, rndop_filtered, 0x07),
+ CMN_EVENT_DVM(NOT_CMN600, retry, 0x08),
+ CMN_EVENT_DVM(NOT_CMN600, txsnp_flitv, 0x09),
+ CMN_EVENT_DVM(NOT_CMN600, txsnp_stall, 0x0a),
+ CMN_EVENT_DVM(NOT_CMN600, trkfull, 0x0b),
+ CMN_EVENT_DVM_OCC(NOT_CMN600, trk_occupancy, 0x0c),
+ CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_cxha, 0x0d),
+ CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_pdn, 0x0e),
+ CMN_EVENT_DVM(CMN700, trk_alloc, 0x0f),
+ CMN_EVENT_DVM(CMN700, trk_cxha_alloc, 0x10),
+ CMN_EVENT_DVM(CMN700, trk_pdn_alloc, 0x11),
+ CMN_EVENT_DVM(CMN700, txsnp_stall_limit, 0x12),
+ CMN_EVENT_DVM(CMN700, rxsnp_stall_starv, 0x13),
+ CMN_EVENT_DVM(CMN700, txsnp_sync_stall_op, 0x14),
CMN_EVENT_HNF(CMN_ANY, cache_miss, 0x01),
CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access, 0x02),
@@ -704,11 +822,11 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_HNF(CMN_ANY, mc_retries, 0x0c),
CMN_EVENT_HNF(CMN_ANY, mc_reqs, 0x0d),
CMN_EVENT_HNF(CMN_ANY, qos_hh_retry, 0x0e),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_all, 0x0f, 0),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_read, 0x0f, 1),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_write, 0x0f, 2),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_atomic, 0x0f, 3),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_stash, 0x0f, 4),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_all, 0x0f, 0, SEL_OCCUP1ID),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_read, 0x0f, 1, SEL_OCCUP1ID),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_write, 0x0f, 2, SEL_OCCUP1ID),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_atomic, 0x0f, 3, SEL_OCCUP1ID),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_stash, 0x0f, 4, SEL_OCCUP1ID),
CMN_EVENT_HNF(CMN_ANY, pocq_addrhaz, 0x10),
CMN_EVENT_HNF(CMN_ANY, pocq_atomic_addrhaz, 0x11),
CMN_EVENT_HNF(CMN_ANY, ld_st_swp_adq_full, 0x12),
@@ -725,9 +843,22 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_HNF(CMN_ANY, stash_snp_sent, 0x1d),
CMN_EVENT_HNF(CMN_ANY, stash_data_pull, 0x1e),
CMN_EVENT_HNF(CMN_ANY, snp_fwded, 0x1f),
- CMN_EVENT_HNF(CI700, atomic_fwd, 0x20),
- CMN_EVENT_HNF(CI700, mpam_hardlim, 0x21),
- CMN_EVENT_HNF(CI700, mpam_softlim, 0x22),
+ CMN_EVENT_HNF(NOT_CMN600, atomic_fwd, 0x20),
+ CMN_EVENT_HNF(NOT_CMN600, mpam_hardlim, 0x21),
+ CMN_EVENT_HNF(NOT_CMN600, mpam_softlim, 0x22),
+ CMN_EVENT_HNF(CMN_650ON, snp_sent_cluster, 0x23),
+ CMN_EVENT_HNF(CMN_650ON, sf_imprecise_evict, 0x24),
+ CMN_EVENT_HNF(CMN_650ON, sf_evict_shared_line, 0x25),
+ CMN_EVENT_HNF_CLS(CMN700, pocq_class_occup, 0x26),
+ CMN_EVENT_HNF_CLS(CMN700, pocq_class_retry, 0x27),
+ CMN_EVENT_HNF_CLS(CMN700, class_mc_reqs, 0x28),
+ CMN_EVENT_HNF_CLS(CMN700, class_cgnt_cmin, 0x29),
+ CMN_EVENT_HNF_SNT(CMN700, sn_throttle, 0x2a),
+ CMN_EVENT_HNF_SNT(CMN700, sn_throttle_min, 0x2b),
+ CMN_EVENT_HNF(CMN700, sf_precise_to_imprecise, 0x2c),
+ CMN_EVENT_HNF(CMN700, snp_intv_cln, 0x2d),
+ CMN_EVENT_HNF(CMN700, nc_excl, 0x2e),
+ CMN_EVENT_HNF(CMN700, excl_mon_ovfl, 0x2f),
CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20),
CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21),
@@ -749,12 +880,33 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_HNI(nonpcie_serialization, 0x31),
CMN_EVENT_HNI(pcie_serialization, 0x32),
+ /*
+ * HN-P events squat on top of the HN-I similarly to DVM events, except
+ * for being crammed into the same physical node as well. And of course
+ * where would the fun be if the same events were in the same order...
+ */
+ CMN_EVENT_HNP(rrt_wr_occ_cnt_ovfl, 0x01),
+ CMN_EVENT_HNP(rdt_wr_occ_cnt_ovfl, 0x02),
+ CMN_EVENT_HNP(wdb_occ_cnt_ovfl, 0x03),
+ CMN_EVENT_HNP(rrt_wr_alloc, 0x04),
+ CMN_EVENT_HNP(rdt_wr_alloc, 0x05),
+ CMN_EVENT_HNP(wdb_alloc, 0x06),
+ CMN_EVENT_HNP(awvalid_no_awready, 0x07),
+ CMN_EVENT_HNP(awready_no_awvalid, 0x08),
+ CMN_EVENT_HNP(wvalid_no_wready, 0x09),
+ CMN_EVENT_HNP(rrt_rd_occ_cnt_ovfl, 0x11),
+ CMN_EVENT_HNP(rdt_rd_occ_cnt_ovfl, 0x12),
+ CMN_EVENT_HNP(rrt_rd_alloc, 0x13),
+ CMN_EVENT_HNP(rdt_rd_alloc, 0x14),
+ CMN_EVENT_HNP(arvalid_no_arready, 0x15),
+ CMN_EVENT_HNP(arready_no_arvalid, 0x16),
+
CMN_EVENT_XP(txflit_valid, 0x01),
CMN_EVENT_XP(txflit_stall, 0x02),
CMN_EVENT_XP(partial_dat_flit, 0x03),
/* We treat watchpoints as a special made-up class of XP events */
- CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP, 0),
- CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN, 0),
+ CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP),
+ CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN),
CMN_EVENT_SBSX(CMN_ANY, rd_req, 0x01),
CMN_EVENT_SBSX(CMN_ANY, wr_req, 0x02),
@@ -768,7 +920,7 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_SBSX(CMN_ANY, wdb_occ_cnt_ovfl, 0x14),
CMN_EVENT_SBSX(CMN_ANY, rd_axi_trkr_occ_cnt_ovfl, 0x15),
CMN_EVENT_SBSX(CMN_ANY, cmo_axi_trkr_occ_cnt_ovfl, 0x16),
- CMN_EVENT_SBSX(CI700, rdb_occ_cnt_ovfl, 0x17),
+ CMN_EVENT_SBSX(NOT_CMN600, rdb_occ_cnt_ovfl, 0x17),
CMN_EVENT_SBSX(CMN_ANY, arvalid_no_arready, 0x21),
CMN_EVENT_SBSX(CMN_ANY, awvalid_no_awready, 0x22),
CMN_EVENT_SBSX(CMN_ANY, wvalid_no_wready, 0x23),
@@ -795,12 +947,25 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_RNID(CMN600, rdb_replay, 0x12),
CMN_EVENT_RNID(CMN600, rdb_hybrid, 0x13),
CMN_EVENT_RNID(CMN600, rdb_ord, 0x14),
- CMN_EVENT_RNID(CI700, padb_occ_ovfl, 0x11),
- CMN_EVENT_RNID(CI700, rpdb_occ_ovfl, 0x12),
- CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice1, 0x13),
- CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice2, 0x14),
- CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice3, 0x15),
- CMN_EVENT_RNID(CI700, wrt_throttled, 0x16),
+ CMN_EVENT_RNID(NOT_CMN600, padb_occ_ovfl, 0x11),
+ CMN_EVENT_RNID(NOT_CMN600, rpdb_occ_ovfl, 0x12),
+ CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice1, 0x13),
+ CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice2, 0x14),
+ CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice3, 0x15),
+ CMN_EVENT_RNID(NOT_CMN600, wrt_throttled, 0x16),
+ CMN_EVENT_RNID(CMN700, ldb_full, 0x17),
+ CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice0, 0x18),
+ CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice1, 0x19),
+ CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice2, 0x1a),
+ CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice3, 0x1b),
+ CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice0, 0x1c),
+ CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice1, 0x1d),
+ CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice2, 0x1e),
+ CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice3, 0x1f),
+ CMN_EVENT_RNID(CMN700, rrt_burst_alloc, 0x20),
+ CMN_EVENT_RNID(CMN700, awid_hash, 0x21),
+ CMN_EVENT_RNID(CMN700, atomic_alloc, 0x22),
+ CMN_EVENT_RNID(CMN700, atomic_occ_ovfl, 0x23),
CMN_EVENT_MTSX(tc_lookup, 0x01),
CMN_EVENT_MTSX(tc_fill, 0x02),
@@ -815,6 +980,118 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_MTSX(tcq_occ_cnt_ovfl, 0x0b),
CMN_EVENT_MTSX(tdb_occ_cnt_ovfl, 0x0c),
+ CMN_EVENT_CXRA(CMN_ANY, rht_occ, 0x01),
+ CMN_EVENT_CXRA(CMN_ANY, sht_occ, 0x02),
+ CMN_EVENT_CXRA(CMN_ANY, rdb_occ, 0x03),
+ CMN_EVENT_CXRA(CMN_ANY, wdb_occ, 0x04),
+ CMN_EVENT_CXRA(CMN_ANY, ssb_occ, 0x05),
+ CMN_EVENT_CXRA(CMN_ANY, snp_bcasts, 0x06),
+ CMN_EVENT_CXRA(CMN_ANY, req_chains, 0x07),
+ CMN_EVENT_CXRA(CMN_ANY, req_chain_avglen, 0x08),
+ CMN_EVENT_CXRA(CMN_ANY, chirsp_stalls, 0x09),
+ CMN_EVENT_CXRA(CMN_ANY, chidat_stalls, 0x0a),
+ CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link0, 0x0b),
+ CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link1, 0x0c),
+ CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link2, 0x0d),
+ CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link0, 0x0e),
+ CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link1, 0x0f),
+ CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link2, 0x10),
+ CMN_EVENT_CXRA(CMN_ANY, external_chirsp_stalls, 0x11),
+ CMN_EVENT_CXRA(CMN_ANY, external_chidat_stalls, 0x12),
+ CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link0, 0x13),
+ CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link1, 0x14),
+ CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link2, 0x15),
+
+ CMN_EVENT_CXHA(rddatbyp, 0x21),
+ CMN_EVENT_CXHA(chirsp_up_stall, 0x22),
+ CMN_EVENT_CXHA(chidat_up_stall, 0x23),
+ CMN_EVENT_CXHA(snppcrd_link0_stall, 0x24),
+ CMN_EVENT_CXHA(snppcrd_link1_stall, 0x25),
+ CMN_EVENT_CXHA(snppcrd_link2_stall, 0x26),
+ CMN_EVENT_CXHA(reqtrk_occ, 0x27),
+ CMN_EVENT_CXHA(rdb_occ, 0x28),
+ CMN_EVENT_CXHA(rdbyp_occ, 0x29),
+ CMN_EVENT_CXHA(wdb_occ, 0x2a),
+ CMN_EVENT_CXHA(snptrk_occ, 0x2b),
+ CMN_EVENT_CXHA(sdb_occ, 0x2c),
+ CMN_EVENT_CXHA(snphaz_occ, 0x2d),
+
+ CMN_EVENT_CCRA(rht_occ, 0x41),
+ CMN_EVENT_CCRA(sht_occ, 0x42),
+ CMN_EVENT_CCRA(rdb_occ, 0x43),
+ CMN_EVENT_CCRA(wdb_occ, 0x44),
+ CMN_EVENT_CCRA(ssb_occ, 0x45),
+ CMN_EVENT_CCRA(snp_bcasts, 0x46),
+ CMN_EVENT_CCRA(req_chains, 0x47),
+ CMN_EVENT_CCRA(req_chain_avglen, 0x48),
+ CMN_EVENT_CCRA(chirsp_stalls, 0x49),
+ CMN_EVENT_CCRA(chidat_stalls, 0x4a),
+ CMN_EVENT_CCRA(cxreq_pcrd_stalls_link0, 0x4b),
+ CMN_EVENT_CCRA(cxreq_pcrd_stalls_link1, 0x4c),
+ CMN_EVENT_CCRA(cxreq_pcrd_stalls_link2, 0x4d),
+ CMN_EVENT_CCRA(cxdat_pcrd_stalls_link0, 0x4e),
+ CMN_EVENT_CCRA(cxdat_pcrd_stalls_link1, 0x4f),
+ CMN_EVENT_CCRA(cxdat_pcrd_stalls_link2, 0x50),
+ CMN_EVENT_CCRA(external_chirsp_stalls, 0x51),
+ CMN_EVENT_CCRA(external_chidat_stalls, 0x52),
+ CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link0, 0x53),
+ CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link1, 0x54),
+ CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link2, 0x55),
+ CMN_EVENT_CCRA(rht_alloc, 0x56),
+ CMN_EVENT_CCRA(sht_alloc, 0x57),
+ CMN_EVENT_CCRA(rdb_alloc, 0x58),
+ CMN_EVENT_CCRA(wdb_alloc, 0x59),
+ CMN_EVENT_CCRA(ssb_alloc, 0x5a),
+
+ CMN_EVENT_CCHA(rddatbyp, 0x61),
+ CMN_EVENT_CCHA(chirsp_up_stall, 0x62),
+ CMN_EVENT_CCHA(chidat_up_stall, 0x63),
+ CMN_EVENT_CCHA(snppcrd_link0_stall, 0x64),
+ CMN_EVENT_CCHA(snppcrd_link1_stall, 0x65),
+ CMN_EVENT_CCHA(snppcrd_link2_stall, 0x66),
+ CMN_EVENT_CCHA(reqtrk_occ, 0x67),
+ CMN_EVENT_CCHA(rdb_occ, 0x68),
+ CMN_EVENT_CCHA(rdbyp_occ, 0x69),
+ CMN_EVENT_CCHA(wdb_occ, 0x6a),
+ CMN_EVENT_CCHA(snptrk_occ, 0x6b),
+ CMN_EVENT_CCHA(sdb_occ, 0x6c),
+ CMN_EVENT_CCHA(snphaz_occ, 0x6d),
+ CMN_EVENT_CCHA(reqtrk_alloc, 0x6e),
+ CMN_EVENT_CCHA(rdb_alloc, 0x6f),
+ CMN_EVENT_CCHA(rdbyp_alloc, 0x70),
+ CMN_EVENT_CCHA(wdb_alloc, 0x71),
+ CMN_EVENT_CCHA(snptrk_alloc, 0x72),
+ CMN_EVENT_CCHA(sdb_alloc, 0x73),
+ CMN_EVENT_CCHA(snphaz_alloc, 0x74),
+ CMN_EVENT_CCHA(pb_rhu_req_occ, 0x75),
+ CMN_EVENT_CCHA(pb_rhu_req_alloc, 0x76),
+ CMN_EVENT_CCHA(pb_rhu_pcie_req_occ, 0x77),
+ CMN_EVENT_CCHA(pb_rhu_pcie_req_alloc, 0x78),
+ CMN_EVENT_CCHA(pb_pcie_wr_req_occ, 0x79),
+ CMN_EVENT_CCHA(pb_pcie_wr_req_alloc, 0x7a),
+ CMN_EVENT_CCHA(pb_pcie_reg_req_occ, 0x7b),
+ CMN_EVENT_CCHA(pb_pcie_reg_req_alloc, 0x7c),
+ CMN_EVENT_CCHA(pb_pcie_rsvd_req_occ, 0x7d),
+ CMN_EVENT_CCHA(pb_pcie_rsvd_req_alloc, 0x7e),
+ CMN_EVENT_CCHA(pb_rhu_dat_occ, 0x7f),
+ CMN_EVENT_CCHA(pb_rhu_dat_alloc, 0x80),
+ CMN_EVENT_CCHA(pb_rhu_pcie_dat_occ, 0x81),
+ CMN_EVENT_CCHA(pb_rhu_pcie_dat_alloc, 0x82),
+ CMN_EVENT_CCHA(pb_pcie_wr_dat_occ, 0x83),
+ CMN_EVENT_CCHA(pb_pcie_wr_dat_alloc, 0x84),
+
+ CMN_EVENT_CCLA(rx_cxs, 0x21),
+ CMN_EVENT_CCLA(tx_cxs, 0x22),
+ CMN_EVENT_CCLA(rx_cxs_avg_size, 0x23),
+ CMN_EVENT_CCLA(tx_cxs_avg_size, 0x24),
+ CMN_EVENT_CCLA(tx_cxs_lcrd_backpressure, 0x25),
+ CMN_EVENT_CCLA(link_crdbuf_occ, 0x26),
+ CMN_EVENT_CCLA(link_crdbuf_alloc, 0x27),
+ CMN_EVENT_CCLA(pfwd_rcvr_cxs, 0x28),
+ CMN_EVENT_CCLA(pfwd_sndr_num_flits, 0x29),
+ CMN_EVENT_CCLA(pfwd_sndr_stalls_static_crd, 0x2a),
+ CMN_EVENT_CCLA(pfwd_sndr_stalls_dynmaic_crd, 0x2b),
+
NULL
};
@@ -1032,6 +1309,42 @@ static void arm_cmn_event_read(struct perf_event *event)
local64_add(delta, &event->count);
}
+static int arm_cmn_set_event_sel_hi(struct arm_cmn_node *dn,
+ enum cmn_filter_select fsel, u8 occupid)
+{
+ u64 reg;
+
+ if (fsel == SEL_NONE)
+ return 0;
+
+ if (!dn->occupid[fsel].count) {
+ dn->occupid[fsel].val = occupid;
+ reg = FIELD_PREP(CMN__PMU_CBUSY_SNTHROTTLE_SEL,
+ dn->occupid[SEL_CBUSY_SNTHROTTLE_SEL].val) |
+ FIELD_PREP(CMN__PMU_CLASS_OCCUP_ID,
+ dn->occupid[SEL_CLASS_OCCUP_ID].val) |
+ FIELD_PREP(CMN__PMU_OCCUP1_ID,
+ dn->occupid[SEL_OCCUP1ID].val);
+ writel_relaxed(reg >> 32, dn->pmu_base + CMN_PMU_EVENT_SEL + 4);
+ } else if (dn->occupid[fsel].val != occupid) {
+ return -EBUSY;
+ }
+ dn->occupid[fsel].count++;
+ return 0;
+}
+
+static void arm_cmn_set_event_sel_lo(struct arm_cmn_node *dn, int dtm_idx,
+ int eventid, bool wide_sel)
+{
+ if (wide_sel) {
+ dn->event_w[dtm_idx] = eventid;
+ writeq_relaxed(le64_to_cpu(dn->event_sel_w), dn->pmu_base + CMN_PMU_EVENT_SEL);
+ } else {
+ dn->event[dtm_idx] = eventid;
+ writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
+ }
+}
+
static void arm_cmn_event_start(struct perf_event *event, int flags)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
@@ -1058,8 +1371,8 @@ static void arm_cmn_event_start(struct perf_event *event, int flags)
} else for_each_hw_dn(hw, dn, i) {
int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
- dn->event[dtm_idx] = CMN_EVENT_EVENTID(event);
- writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
+ arm_cmn_set_event_sel_lo(dn, dtm_idx, CMN_EVENT_EVENTID(event),
+ hw->wide_sel);
}
}
@@ -1086,8 +1399,7 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags)
} else for_each_hw_dn(hw, dn, i) {
int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
- dn->event[dtm_idx] = 0;
- writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
+ arm_cmn_set_event_sel_lo(dn, dtm_idx, 0, hw->wide_sel);
}
arm_cmn_event_read(event);
@@ -1095,7 +1407,7 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags)
struct arm_cmn_val {
u8 dtm_count[CMN_MAX_DTMS];
- u8 occupid[CMN_MAX_DTMS];
+ u8 occupid[CMN_MAX_DTMS][SEL_MAX];
u8 wp[CMN_MAX_DTMS][4];
int dtc_count;
bool cycles;
@@ -1108,7 +1420,6 @@ static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
struct arm_cmn_node *dn;
enum cmn_node_type type;
int i;
- u8 occupid;
if (is_software_event(event))
return;
@@ -1120,16 +1431,14 @@ static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
}
val->dtc_count++;
- if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
- occupid = CMN_EVENT_OCCUPID(event) + 1;
- else
- occupid = 0;
for_each_hw_dn(hw, dn, i) {
- int wp_idx, dtm = dn->dtm;
+ int wp_idx, dtm = dn->dtm, sel = hw->filter_sel;
val->dtm_count[dtm]++;
- val->occupid[dtm] = occupid;
+
+ if (sel > SEL_NONE)
+ val->occupid[dtm][sel] = CMN_EVENT_OCCUPID(event) + 1;
if (type != CMN_TYPE_WP)
continue;
@@ -1147,7 +1456,6 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
enum cmn_node_type type;
struct arm_cmn_val *val;
int i, ret = -EINVAL;
- u8 occupid;
if (leader == event)
return 0;
@@ -1172,18 +1480,14 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
if (val->dtc_count == CMN_DT_NUM_COUNTERS)
goto done;
- if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
- occupid = CMN_EVENT_OCCUPID(event) + 1;
- else
- occupid = 0;
-
for_each_hw_dn(hw, dn, i) {
- int wp_idx, wp_cmb, dtm = dn->dtm;
+ int wp_idx, wp_cmb, dtm = dn->dtm, sel = hw->filter_sel;
if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS)
goto done;
- if (occupid && val->occupid[dtm] && occupid != val->occupid[dtm])
+ if (sel > SEL_NONE && val->occupid[dtm][sel] &&
+ val->occupid[dtm][sel] != CMN_EVENT_OCCUPID(event) + 1)
goto done;
if (type != CMN_TYPE_WP)
@@ -1204,6 +1508,22 @@ done:
return ret;
}
+static enum cmn_filter_select arm_cmn_filter_sel(enum cmn_model model,
+ enum cmn_node_type type,
+ unsigned int eventid)
+{
+ struct arm_cmn_event_attr *e;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) {
+ e = container_of(arm_cmn_event_attrs[i], typeof(*e), attr.attr);
+ if (e->model & model && e->type == type && e->eventid == eventid)
+ return e->fsel;
+ }
+ return SEL_NONE;
+}
+
+
static int arm_cmn_event_init(struct perf_event *event)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
@@ -1228,18 +1548,23 @@ static int arm_cmn_event_init(struct perf_event *event)
if (type == CMN_TYPE_DTC)
return 0;
+ eventid = CMN_EVENT_EVENTID(event);
/* For watchpoints we need the actual XP node here */
if (type == CMN_TYPE_WP) {
type = CMN_TYPE_XP;
/* ...and we need a "real" direction */
- eventid = CMN_EVENT_EVENTID(event);
if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN)
return -EINVAL;
/* ...but the DTM may depend on which port we're watching */
if (cmn->multi_dtm)
hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
+ } else if (type == CMN_TYPE_XP && cmn->model == CMN700) {
+ hw->wide_sel = true;
}
+ /* This is sufficiently annoying to recalculate, so cache it */
+ hw->filter_sel = arm_cmn_filter_sel(cmn->model, type, eventid);
+
bynodeid = CMN_EVENT_BYNODEID(event);
nodeid = CMN_EVENT_NODEID(event);
@@ -1281,8 +1606,8 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
if (type == CMN_TYPE_WP)
dtm->wp_event[arm_cmn_wp_idx(event)] = -1;
- if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
- hw->dn[i].occupid_count--;
+ if (hw->filter_sel > SEL_NONE)
+ hw->dn[i].occupid[hw->filter_sel].count--;
dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx);
writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
@@ -1362,18 +1687,8 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx +
(nid.port << 4) + (nid.dev << 2);
- if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) {
- u8 occupid = CMN_EVENT_OCCUPID(event);
-
- if (dn->occupid_count == 0) {
- dn->occupid_val = occupid;
- writel_relaxed(occupid,
- dn->pmu_base + CMN_PMU_EVENT_SEL + 4);
- } else if (dn->occupid_val != occupid) {
- goto free_dtms;
- }
- dn->occupid_count++;
- }
+ if (arm_cmn_set_event_sel_hi(dn, hw->filter_sel, CMN_EVENT_OCCUPID(event)))
+ goto free_dtms;
}
arm_cmn_set_index(hw->dtm_idx, i, dtm_idx);
@@ -1622,6 +1937,10 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
/* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */
if (dn->type == CMN_TYPE_RND)
dn->type = CMN_TYPE_RNI;
+
+ /* We split the RN-I off already, so let the CCLA part match CCLA events */
+ if (dn->type == CMN_TYPE_CCLA_RNI)
+ dn->type = CMN_TYPE_CCLA;
}
writel_relaxed(CMN_DT_DTC_CTL_DT_EN, cmn->dtc[0].base + CMN_DT_DTC_CTL);
@@ -1652,6 +1971,18 @@ static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_c
node->type, node->logid, offset);
}
+static enum cmn_node_type arm_cmn_subtype(enum cmn_node_type type)
+{
+ switch (type) {
+ case CMN_TYPE_HNP:
+ return CMN_TYPE_HNI;
+ case CMN_TYPE_CCLA_RNI:
+ return CMN_TYPE_RNI;
+ default:
+ return CMN_TYPE_INVALID;
+ }
+}
+
static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
{
void __iomem *cfg_region;
@@ -1676,6 +2007,10 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg);
cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg);
+ reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL_1);
+ cmn->snp_vc_num = FIELD_GET(CMN_INFO_SNP_VC_NUM, reg);
+ cmn->req_vc_num = FIELD_GET(CMN_INFO_REQ_VC_NUM, reg);
+
reg = readq_relaxed(cfg_region + CMN_CHILD_INFO);
child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);
@@ -1692,8 +2027,13 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg);
}
- /* Cheeky +1 to help terminate pointer-based iteration later */
- dn = devm_kcalloc(cmn->dev, cmn->num_dns + 1, sizeof(*dn), GFP_KERNEL);
+ /*
+ * Some nodes effectively have two separate types, which we'll handle
+ * by creating one of each internally. For a (very) safe initial upper
+ * bound, account for double the number of non-XP nodes.
+ */
+ dn = devm_kcalloc(cmn->dev, cmn->num_dns * 2 - cmn->num_xps,
+ sizeof(*dn), GFP_KERNEL);
if (!dn)
return -ENOMEM;
@@ -1794,6 +2134,9 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
case CMN_TYPE_MTSX:
case CMN_TYPE_CXRA:
case CMN_TYPE_CXHA:
+ case CMN_TYPE_CCRA:
+ case CMN_TYPE_CCHA:
+ case CMN_TYPE_CCLA:
dn++;
break;
/* Nothing to see here */
@@ -1802,6 +2145,19 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
case CMN_TYPE_RNSAM:
case CMN_TYPE_CXLA:
break;
+ /*
+ * Split "optimised" combination nodes into separate
+ * types for the different event sets. Offsetting the
+ * base address lets us handle the second pmu_event_sel
+ * register via the normal mechanism later.
+ */
+ case CMN_TYPE_HNP:
+ case CMN_TYPE_CCLA_RNI:
+ dn[1] = dn[0];
+ dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL;
+ dn[1].type = arm_cmn_subtype(dn->type);
+ dn += 2;
+ break;
/* Something has gone horribly wrong */
default:
dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type);
@@ -1810,9 +2166,10 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
}
}
- /* Correct for any nodes we skipped */
+ /* Correct for any nodes we added or skipped */
cmn->num_dns = dn - cmn->dns;
+ /* Cheeky +1 to help terminate pointer-based iteration later */
sz = (void *)(dn + 1) - (void *)cmn->dns;
dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL);
if (dn)
@@ -1970,6 +2327,8 @@ static int arm_cmn_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id arm_cmn_of_match[] = {
{ .compatible = "arm,cmn-600", .data = (void *)CMN600 },
+ { .compatible = "arm,cmn-650", .data = (void *)CMN650 },
+ { .compatible = "arm,cmn-700", .data = (void *)CMN700 },
{ .compatible = "arm,ci-700", .data = (void *)CI700 },
{}
};
@@ -1979,6 +2338,8 @@ MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id arm_cmn_acpi_match[] = {
{ "ARMHC600", CMN600 },
+ { "ARMHC650", CMN650 },
+ { "ARMHC700", CMN700 },
{}
};
MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index f5c7a845cd7b..96ffadd654ff 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -159,7 +159,9 @@ static int arm_pmu_acpi_parse_irqs(void)
* them with their PMUs.
*/
per_cpu(pmu_irqs, cpu) = irq;
- armpmu_request_irq(irq, cpu);
+ err = armpmu_request_irq(irq, cpu);
+ if (err)
+ goto out_err;
}
return 0;
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index d44bcc29d99c..db670b265897 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1035,6 +1035,9 @@ static void __arm_spe_pmu_dev_probe(void *info)
fallthrough;
case 2:
spe_pmu->counter_sz = 12;
+ break;
+ case 3:
+ spe_pmu->counter_sz = 16;
}
dev_info(dev,
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
index 506ed39e3266..6be83517acaa 100644
--- a/drivers/perf/hisilicon/Makefile
+++ b/drivers/perf/hisilicon/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \
- hisi_uncore_pa_pmu.o
+ hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o
obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
new file mode 100644
index 000000000000..a9bb73f76be4
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon SoC CPA (Coherency Protocol Agent) hardware event counters support
+ *
+ * Copyright (C) 2022 HiSilicon Limited
+ * Author: Qi Liu <[email protected]>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ */
+
+#define pr_fmt(fmt) "cpa pmu: " fmt
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* CPA register definition */
+#define CPA_PERF_CTRL 0x1c00
+#define CPA_EVENT_CTRL 0x1c04
+#define CPA_INT_MASK 0x1c70
+#define CPA_INT_STATUS 0x1c78
+#define CPA_INT_CLEAR 0x1c7c
+#define CPA_EVENT_TYPE0 0x1c80
+#define CPA_VERSION 0x1cf0
+#define CPA_CNT0_LOWER 0x1d00
+#define CPA_CFG_REG 0x0534
+
+/* CPA operation command */
+#define CPA_PERF_CTRL_EN BIT_ULL(0)
+#define CPA_EVTYPE_MASK 0xffUL
+#define CPA_PM_CTRL BIT_ULL(9)
+
+/* CPA has 8 counters */
+#define CPA_NR_COUNTERS 0x8
+#define CPA_COUNTER_BITS 64
+#define CPA_NR_EVENTS 0xff
+#define CPA_REG_OFFSET 0x8
+
+static u32 hisi_cpa_pmu_get_counter_offset(int idx)
+{
+ return (CPA_CNT0_LOWER + idx * CPA_REG_OFFSET);
+}
+
+static u64 hisi_cpa_pmu_read_counter(struct hisi_pmu *cpa_pmu,
+ struct hw_perf_event *hwc)
+{
+ return readq(cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx));
+}
+
+static void hisi_cpa_pmu_write_counter(struct hisi_pmu *cpa_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ writeq(val, cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx));
+}
+
+static void hisi_cpa_pmu_write_evtype(struct hisi_pmu *cpa_pmu, int idx,
+ u32 type)
+{
+ u32 reg, reg_idx, shift, val;
+
+ /*
+	 * Select the appropriate event select register (CPA_EVENT_TYPE0/1).
+	 * There are 2 event select registers for the 8 hardware counters.
+	 * The event code is 8 bits wide: the first 4 hardware counters use
+	 * CPA_EVENT_TYPE0 and the last 4 use CPA_EVENT_TYPE1.
+ */
+ reg = CPA_EVENT_TYPE0 + (idx / 4) * 4;
+ reg_idx = idx % 4;
+ shift = CPA_REG_OFFSET * reg_idx;
+
+ /* Write event code to CPA_EVENT_TYPEx Register */
+ val = readl(cpa_pmu->base + reg);
+ val &= ~(CPA_EVTYPE_MASK << shift);
+ val |= type << shift;
+ writel(val, cpa_pmu->base + reg);
+}
+
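To make the TYPE0/TYPE1 split described in the comment above concrete, here is a stand-alone sketch (not part of the patch) that reproduces the register/bit-field arithmetic for all eight counters, reusing the CPA_EVENT_TYPE0 and CPA_REG_OFFSET values defined earlier in this file:

#include <stdio.h>

#define CPA_EVENT_TYPE0	0x1c80
#define CPA_REG_OFFSET	0x8

int main(void)
{
	for (int idx = 0; idx < 8; idx++) {
		/* counters 0-3 -> CPA_EVENT_TYPE0, counters 4-7 -> CPA_EVENT_TYPE1 */
		unsigned int reg   = CPA_EVENT_TYPE0 + (idx / 4) * 4;
		unsigned int shift = CPA_REG_OFFSET * (idx % 4);

		printf("idx %d -> reg 0x%x, event bits [%u:%u]\n",
		       idx, reg, shift + 7, shift);
	}
	return 0;
}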
+static void hisi_cpa_pmu_start_counters(struct hisi_pmu *cpa_pmu)
+{
+ u32 val;
+
+ val = readl(cpa_pmu->base + CPA_PERF_CTRL);
+ val |= CPA_PERF_CTRL_EN;
+ writel(val, cpa_pmu->base + CPA_PERF_CTRL);
+}
+
+static void hisi_cpa_pmu_stop_counters(struct hisi_pmu *cpa_pmu)
+{
+ u32 val;
+
+ val = readl(cpa_pmu->base + CPA_PERF_CTRL);
+ val &= ~(CPA_PERF_CTRL_EN);
+ writel(val, cpa_pmu->base + CPA_PERF_CTRL);
+}
+
+static void hisi_cpa_pmu_disable_pm(struct hisi_pmu *cpa_pmu)
+{
+ u32 val;
+
+ val = readl(cpa_pmu->base + CPA_CFG_REG);
+ val |= CPA_PM_CTRL;
+ writel(val, cpa_pmu->base + CPA_CFG_REG);
+}
+
+static void hisi_cpa_pmu_enable_pm(struct hisi_pmu *cpa_pmu)
+{
+ u32 val;
+
+ val = readl(cpa_pmu->base + CPA_CFG_REG);
+ val &= ~(CPA_PM_CTRL);
+ writel(val, cpa_pmu->base + CPA_CFG_REG);
+}
+
+static void hisi_cpa_pmu_enable_counter(struct hisi_pmu *cpa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Enable counter index in CPA_EVENT_CTRL register */
+ val = readl(cpa_pmu->base + CPA_EVENT_CTRL);
+ val |= 1 << hwc->idx;
+ writel(val, cpa_pmu->base + CPA_EVENT_CTRL);
+}
+
+static void hisi_cpa_pmu_disable_counter(struct hisi_pmu *cpa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Clear counter index in CPA_EVENT_CTRL register */
+ val = readl(cpa_pmu->base + CPA_EVENT_CTRL);
+ val &= ~(1UL << hwc->idx);
+ writel(val, cpa_pmu->base + CPA_EVENT_CTRL);
+}
+
+static void hisi_cpa_pmu_enable_counter_int(struct hisi_pmu *cpa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 0 to enable interrupt */
+ val = readl(cpa_pmu->base + CPA_INT_MASK);
+ val &= ~(1UL << hwc->idx);
+ writel(val, cpa_pmu->base + CPA_INT_MASK);
+}
+
+static void hisi_cpa_pmu_disable_counter_int(struct hisi_pmu *cpa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 1 to mask interrupt */
+ val = readl(cpa_pmu->base + CPA_INT_MASK);
+ val |= 1 << hwc->idx;
+ writel(val, cpa_pmu->base + CPA_INT_MASK);
+}
+
+static u32 hisi_cpa_pmu_get_int_status(struct hisi_pmu *cpa_pmu)
+{
+ return readl(cpa_pmu->base + CPA_INT_STATUS);
+}
+
+static void hisi_cpa_pmu_clear_int_status(struct hisi_pmu *cpa_pmu, int idx)
+{
+ writel(1 << idx, cpa_pmu->base + CPA_INT_CLEAR);
+}
+
+static const struct acpi_device_id hisi_cpa_pmu_acpi_match[] = {
+ { "HISI0281", },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_cpa_pmu_acpi_match);
+
+static int hisi_cpa_pmu_init_data(struct platform_device *pdev,
+ struct hisi_pmu *cpa_pmu)
+{
+ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+ &cpa_pmu->sicl_id)) {
+ dev_err(&pdev->dev, "Can not read sicl-id\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
+ &cpa_pmu->index_id)) {
+ dev_err(&pdev->dev, "Cannot read idx-id\n");
+ return -EINVAL;
+ }
+
+ cpa_pmu->ccl_id = -1;
+ cpa_pmu->sccl_id = -1;
+ cpa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(cpa_pmu->base))
+ return PTR_ERR(cpa_pmu->base);
+
+ cpa_pmu->identifier = readl(cpa_pmu->base + CPA_VERSION);
+
+ return 0;
+}
+
+static struct attribute *hisi_cpa_pmu_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-15"),
+ NULL
+};
+
+static const struct attribute_group hisi_cpa_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_cpa_pmu_format_attr,
+};
+
+static struct attribute *hisi_cpa_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(cpa_cycles, 0x00),
+ HISI_PMU_EVENT_ATTR(cpa_p1_wr_dat, 0x61),
+ HISI_PMU_EVENT_ATTR(cpa_p1_rd_dat, 0x62),
+ HISI_PMU_EVENT_ATTR(cpa_p0_wr_dat, 0xE1),
+ HISI_PMU_EVENT_ATTR(cpa_p0_rd_dat, 0xE2),
+ NULL
+};
+
+static const struct attribute_group hisi_cpa_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_cpa_pmu_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_cpa_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_cpa_pmu_cpumask_attr_group = {
+ .attrs = hisi_cpa_pmu_cpumask_attrs,
+};
+
+static struct device_attribute hisi_cpa_pmu_identifier_attr =
+ __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_cpa_pmu_identifier_attrs[] = {
+ &hisi_cpa_pmu_identifier_attr.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_cpa_pmu_identifier_group = {
+ .attrs = hisi_cpa_pmu_identifier_attrs,
+};
+
+static const struct attribute_group *hisi_cpa_pmu_attr_groups[] = {
+ &hisi_cpa_pmu_format_group,
+ &hisi_cpa_pmu_events_group,
+ &hisi_cpa_pmu_cpumask_attr_group,
+ &hisi_cpa_pmu_identifier_group,
+ NULL
+};
+
+static const struct hisi_uncore_ops hisi_uncore_cpa_pmu_ops = {
+ .write_evtype = hisi_cpa_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_cpa_pmu_start_counters,
+ .stop_counters = hisi_cpa_pmu_stop_counters,
+ .enable_counter = hisi_cpa_pmu_enable_counter,
+ .disable_counter = hisi_cpa_pmu_disable_counter,
+ .enable_counter_int = hisi_cpa_pmu_enable_counter_int,
+ .disable_counter_int = hisi_cpa_pmu_disable_counter_int,
+ .write_counter = hisi_cpa_pmu_write_counter,
+ .read_counter = hisi_cpa_pmu_read_counter,
+ .get_int_status = hisi_cpa_pmu_get_int_status,
+ .clear_int_status = hisi_cpa_pmu_clear_int_status,
+};
+
+static int hisi_cpa_pmu_dev_probe(struct platform_device *pdev,
+ struct hisi_pmu *cpa_pmu)
+{
+ int ret;
+
+ ret = hisi_cpa_pmu_init_data(pdev, cpa_pmu);
+ if (ret)
+ return ret;
+
+ ret = hisi_uncore_pmu_init_irq(cpa_pmu, pdev);
+ if (ret)
+ return ret;
+
+ cpa_pmu->counter_bits = CPA_COUNTER_BITS;
+ cpa_pmu->check_event = CPA_NR_EVENTS;
+ cpa_pmu->pmu_events.attr_groups = hisi_cpa_pmu_attr_groups;
+ cpa_pmu->ops = &hisi_uncore_cpa_pmu_ops;
+ cpa_pmu->num_counters = CPA_NR_COUNTERS;
+ cpa_pmu->dev = &pdev->dev;
+ cpa_pmu->on_cpu = -1;
+
+ return 0;
+}
+
+static int hisi_cpa_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *cpa_pmu;
+ char *name;
+ int ret;
+
+ cpa_pmu = devm_kzalloc(&pdev->dev, sizeof(*cpa_pmu), GFP_KERNEL);
+ if (!cpa_pmu)
+ return -ENOMEM;
+
+ ret = hisi_cpa_pmu_dev_probe(pdev, cpa_pmu);
+ if (ret)
+ return ret;
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%u",
+ cpa_pmu->sicl_id, cpa_pmu->index_id);
+ if (!name)
+ return -ENOMEM;
+
+ cpa_pmu->pmu = (struct pmu) {
+ .name = name,
+ .module = THIS_MODULE,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = hisi_uncore_pmu_event_init,
+ .pmu_enable = hisi_uncore_pmu_enable,
+ .pmu_disable = hisi_uncore_pmu_disable,
+ .add = hisi_uncore_pmu_add,
+ .del = hisi_uncore_pmu_del,
+ .start = hisi_uncore_pmu_start,
+ .stop = hisi_uncore_pmu_stop,
+ .read = hisi_uncore_pmu_read,
+ .attr_groups = cpa_pmu->pmu_events.attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ };
+
+ /* Power Management should be disabled before using CPA PMU. */
+ hisi_cpa_pmu_disable_pm(cpa_pmu);
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
+ &cpa_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ hisi_cpa_pmu_enable_pm(cpa_pmu);
+ return ret;
+ }
+
+ ret = perf_pmu_register(&cpa_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(cpa_pmu->dev, "PMU register failed\n");
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, &cpa_pmu->node);
+ hisi_cpa_pmu_enable_pm(cpa_pmu);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, cpa_pmu);
+ return ret;
+}
+
+static int hisi_cpa_pmu_remove(struct platform_device *pdev)
+{
+ struct hisi_pmu *cpa_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&cpa_pmu->pmu);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
+ &cpa_pmu->node);
+ hisi_cpa_pmu_enable_pm(cpa_pmu);
+ return 0;
+}
+
+static struct platform_driver hisi_cpa_pmu_driver = {
+ .driver = {
+ .name = "hisi_cpa_pmu",
+ .acpi_match_table = ACPI_PTR(hisi_cpa_pmu_acpi_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = hisi_cpa_pmu_probe,
+ .remove = hisi_cpa_pmu_remove,
+};
+
+static int __init hisi_cpa_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
+ "AP_PERF_ARM_HISI_CPA_ONLINE",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret) {
+ pr_err("setup hotplug failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&hisi_cpa_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE);
+
+ return ret;
+}
+module_init(hisi_cpa_pmu_module_init);
+
+static void __exit hisi_cpa_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_cpa_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE);
+}
+module_exit(hisi_cpa_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SoC CPA PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Qi Liu <[email protected]>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index bad99d149172..a0ee84d97c41 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -258,13 +258,12 @@ static int hisi_pa_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *pa_pmu)
{
/*
- * Use the SCCL_ID and the index ID to identify the PA PMU,
- * while SCCL_ID is the nearst SCCL_ID from this SICL and
- * CPU core is chosen from this SCCL to manage this PMU.
+	 * As the PA PMU is in a SICL, use the SICL_ID and the index ID
+ * to identify the PA PMU.
*/
if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &pa_pmu->sccl_id)) {
- dev_err(&pdev->dev, "Cannot read sccl-id!\n");
+ &pa_pmu->sicl_id)) {
+ dev_err(&pdev->dev, "Cannot read sicl-id!\n");
return -EINVAL;
}
@@ -275,6 +274,7 @@ static int hisi_pa_pmu_init_data(struct platform_device *pdev,
}
pa_pmu->ccl_id = -1;
+ pa_pmu->sccl_id = -1;
pa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pa_pmu->base)) {
@@ -399,13 +399,9 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
ret = hisi_pa_pmu_dev_probe(pdev, pa_pmu);
if (ret)
return ret;
- /*
- * PA is attached in SICL and the CPU core is chosen to manage this
- * PMU which is the nearest SCCL, while its SCCL_ID is greater than
- * one with the SICL_ID.
- */
+
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%u_pa%u",
- pa_pmu->sccl_id - 1, pa_pmu->index_id);
+ pa_pmu->sicl_id, pa_pmu->index_id);
if (!name)
return -ENOMEM;
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 358e4e284a62..980b9ee6eb14 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -458,6 +458,10 @@ static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu)
{
int sccl_id, ccl_id;
+ /* If SCCL_ID is -1, the PMU is in a SICL and has no CPU affinity */
+ if (hisi_pmu->sccl_id == -1)
+ return true;
+
if (hisi_pmu->ccl_id == -1) {
/* If CCL_ID is -1, the PMU only shares the same SCCL */
hisi_read_sccl_and_ccl_id(&sccl_id, NULL);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index 7f5841d6f592..96eeddad55ff 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -81,6 +81,7 @@ struct hisi_pmu {
struct device *dev;
struct hlist_node node;
int sccl_id;
+ int sicl_id;
int ccl_id;
void __iomem *base;
/* the ID of the PMU modules */
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index ee67305f822d..282d3a071a67 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -146,12 +146,12 @@ static int tad_pmu_event_init(struct perf_event *event)
{
struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
- if (!event->attr.disabled)
- return -EINVAL;
-
if (event->attr.type != event->pmu->type)
return -ENOENT;
+ if (!event->attr.disabled)
+ return -EINVAL;
+
if (event->state != PERF_EVENT_STATE_OFF)
return -EINVAL;
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index a1317a483512..dca3537a8dcc 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -35,7 +35,7 @@ union sbi_pmu_ctr_info {
};
};
-/**
+/*
* RISC-V doesn't have hetergenous harts yet. This need to be part of
* per_cpu in case of harts with different pmu counters
*/
@@ -477,7 +477,7 @@ static int pmu_sbi_get_ctrinfo(int nctr)
static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
{
- /**
+ /*
* No need to check the error because we are disabling all the counters
* which may include counters that are not enabled yet.
*/
@@ -494,7 +494,7 @@ static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
}
-/**
+/*
* This function starts all the used counters in two step approach.
* Any counter that did not overflow can be start in a single step
* while the overflowed counters need to be started with updated initialization
@@ -563,7 +563,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
/* Overflow status register should only be read after counter are stopped */
overflow = csr_read(CSR_SSCOUNTOVF);
- /**
+ /*
* Overflow interrupt pending bit should only be cleared after stopping
* all the counters to avoid any race condition.
*/
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index be6c24577dbe..9bf0616a3069 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2565,7 +2565,12 @@ static noinline int search_ioctl(struct inode *inode,
while (1) {
ret = -EFAULT;
- if (fault_in_writeable(ubuf + sk_offset, *buf_size - sk_offset))
+ /*
+ * Ensure that the whole user buffer is faulted in at sub-page
+ * granularity, otherwise the loop may live-lock.
+ */
+ if (fault_in_subpage_writeable(ubuf + sk_offset,
+ *buf_size - sk_offset))
break;
ret = btrfs_search_forward(root, &key, path, sk->min_transid);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 82e33137f917..b66c5f389159 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -222,6 +222,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 993994cd943a..6165283bdb6f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1046,6 +1046,7 @@ void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
* Fault in userspace address range.
*/
size_t fault_in_writeable(char __user *uaddr, size_t size);
+size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 546179418ffa..5a328cf02b75 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -231,6 +231,28 @@ static inline bool pagefault_disabled(void)
*/
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
+#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
+
+/**
+ * probe_subpage_writeable: probe the user range for write faults at sub-page
+ * granularity (e.g. arm64 MTE)
+ * @uaddr: start of address range
+ * @size: size of address range
+ *
+ * Returns 0 on success, the number of bytes not probed on fault.
+ *
+ * It is expected that the caller checked for the write permission of each
+ * page in the range either by put_user() or GUP. The architecture port can
+ * implement a more efficient get_user() probing if the same sub-page faults
+ * are triggered by either a read or a write.
+ */
+static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
+
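For orientation only, and not part of this hunk: an architecture that selects CONFIG_ARCH_HAS_SUBPAGE_FAULTS supplies its own probe from asm/uaccess.h instead of the generic stub above. A rough sketch of such an override, assuming MTE-style helpers along the lines of the arm64 port (the helper names are assumptions here, not taken from this diff):

/* illustrative arch override, e.g. arm64 MTE; helper names assumed */
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
	if (!system_supports_mte())
		return 0;
	/* returns the number of bytes that could not be probed */
	return mte_probe_user_range(uaddr, size);
}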
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline __must_check unsigned long
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 256cf6db573c..4d57c03714f4 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -243,9 +243,8 @@ static int __init __parse_crashkernel(char *cmdline,
*crash_base = 0;
ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
-
if (!ck_cmdline)
- return -EINVAL;
+ return -ENOENT;
ck_cmdline += strlen(name);
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 8f4fb328133a..289311680c29 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -30,6 +30,24 @@ int ftrace_graph_active;
/* Both enabled by default (can be cleared by function_graph tracer flags */
static bool fgraph_sleep_time = true;
+/*
+ * archs can override this function if they must do something
+ * to enable hook for graph tracer.
+ */
+int __weak ftrace_enable_ftrace_graph_caller(void)
+{
+ return 0;
+}
+
+/*
+ * archs can override this function if they must do something
+ * to disable hook for graph tracer.
+ */
+int __weak ftrace_disable_ftrace_graph_caller(void)
+{
+ return 0;
+}
+
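As a sketch only, not taken from any particular port: an architecture that does need to act here typically patches the branch at its graph-caller site. arch_patch_branch() below is a made-up stand-in for the port's text-patching helper; ftrace_graph_call and ftrace_graph_caller are the conventional symbols:

/* illustrative arch override; arch_patch_branch() is hypothetical */
int ftrace_enable_ftrace_graph_caller(void)
{
	return arch_patch_branch((unsigned long)&ftrace_graph_call,
				 (unsigned long)&ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	/* restore the original nop at the call site */
	return arch_patch_branch((unsigned long)&ftrace_graph_call, 0UL);
}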
/**
* ftrace_graph_stop - set to permanently disable function graph tracing
*
diff --git a/mm/gup.c b/mm/gup.c
index f598a037eb04..501bc150792c 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1648,6 +1648,35 @@ out:
}
EXPORT_SYMBOL(fault_in_writeable);
+/**
+ * fault_in_subpage_writeable - fault in an address range for writing
+ * @uaddr: start of address range
+ * @size: size of address range
+ *
+ * Fault in a user address range for writing while checking for permissions at
+ * sub-page granularity (e.g. arm64 MTE). This function should be used when
+ * the caller cannot guarantee forward progress of a copy_to_user() loop.
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
+ */
+size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
+{
+ size_t faulted_in;
+
+ /*
+ * Attempt faulting in at page granularity first for page table
+ * permission checking. The arch-specific probe_subpage_writeable()
+ * functions may not check for this.
+ */
+ faulted_in = size - fault_in_writeable(uaddr, size);
+ if (faulted_in)
+ faulted_in -= probe_subpage_writeable(uaddr, faulted_in);
+
+ return size - faulted_in;
+}
+EXPORT_SYMBOL(fault_in_subpage_writeable);
+
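The btrfs hunk earlier in this diff is the in-tree user; as a generic illustration of the caller pattern the kernel-doc describes, the sketch below retries a nofault copy and uses the sub-page probe to avoid live-locking on e.g. an MTE tag-check fault. The surrounding function is an assumption for illustration; only fault_in_subpage_writeable() itself comes from this patch:

#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/errno.h>

/* illustrative only: retry a nofault copy, probing at sub-page granularity */
static int copy_out_with_retry(void __user *ubuf, const void *kbuf, size_t len)
{
	while (1) {
		size_t left;

		pagefault_disable();
		left = copy_to_user(ubuf, kbuf, len);
		pagefault_enable();
		if (!left)
			return 0;

		/*
		 * Page-granular fault-in alone could succeed and still leave
		 * the copy failing forever on a sub-page fault; the probe
		 * breaks that live-lock.
		 */
		if (fault_in_subpage_writeable(ubuf, len))
			return -EFAULT;
	}
}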
/*
* fault_in_safe_writeable - fault in an address range for writing
* @uaddr: start of address range