From 964db79d6c186cc2ecc6ae46f98eed7e0ea8cf71 Mon Sep 17 00:00:00 2001
From: Nicolas Saenz Julienne
Date: Thu, 19 Nov 2020 18:53:55 +0100
Subject: of/address: Introduce of_dma_get_max_cpu_address()

Introduce of_dma_get_max_cpu_address(), which provides the highest CPU
physical address addressable by all DMA masters in the system. It's
especially useful for setting memory zone sizes at early boot time.

Signed-off-by: Nicolas Saenz Julienne
Reviewed-by: Rob Herring
Link: https://lore.kernel.org/r/20201119175400.9995-4-nsaenzjulienne@suse.de
Signed-off-by: Catalin Marinas
---
 include/linux/of.h | 7 +++++++
 1 file changed, 7 insertions(+)
(limited to 'include/linux')

diff --git a/include/linux/of.h b/include/linux/of.h
index 5d51891cbf1a..9ed5b8532c30 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -558,6 +558,8 @@ int of_map_id(struct device_node *np, u32 id,
 	       const char *map_name, const char *map_mask_name,
 	       struct device_node **target, u32 *id_out);
 
+phys_addr_t of_dma_get_max_cpu_address(struct device_node *np);
+
 #else /* CONFIG_OF */
 
 static inline void of_core_init(void)
@@ -995,6 +997,11 @@ static inline int of_map_id(struct device_node *np, u32 id,
 	return -EINVAL;
 }
 
+static inline phys_addr_t of_dma_get_max_cpu_address(struct device_node *np)
+{
+	return PHYS_ADDR_MAX;
+}
+
 #define of_match_ptr(_ptr) NULL
 #define of_match_node(_matches, _node) NULL
 #endif /* CONFIG_OF */
--
cgit

From 2b8652936f0ca9ca2e6c984ae76c7bfcda1b3f22 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Thu, 19 Nov 2020 18:53:58 +0100
Subject: arm64: mm: Set ZONE_DMA size based on early IORT scan

We recently introduced a 1 GB sized ZONE_DMA to cater for platforms
incorporating masters that can address less than 32 bits of DMA, in
particular the Raspberry Pi 4, which has 4 or 8 GB of DRAM, but has
peripherals that can only address up to 1 GB (and its PCIe host bridge
can only access the bottom 3 GB).

Instructing the DMA layer about these limitations is straightforward,
even though we had to fix some issues regarding memory limits set in
the IORT for named components, and regarding the handling of ACPI _DMA
methods. However, the DMA layer also needs to be able to allocate
memory that is guaranteed to meet those DMA constraints, for bounce
buffering as well as allocating the backing for consistent mappings.
This is why the 1 GB ZONE_DMA was introduced recently.

Unfortunately, it turns out that having a 1 GB ZONE_DMA as well as a
ZONE_DMA32 causes problems with kdump, and potentially in other places
where allocations cannot cross zone boundaries. Therefore, we should
avoid having two separate DMA zones when possible.

So let's do an early scan of the IORT, and only create the ZONE_DMA if
we encounter any devices that need it. This puts the burden on the
firmware to describe such limitations in the IORT, which may be
redundant (and less precise) if _DMA methods are also being provided.
However, it should be noted that this situation is highly unusual for
arm64 ACPI machines. Also, the DMA subsystem still gives precedence to
the _DMA method if implemented, and so we will not lose the ability to
perform streaming DMA outside the ZONE_DMA if the _DMA method permits
it.
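As a quick sanity check of the arithmetic the patch below performs in
zone_sizes_init(), here is a standalone userspace sketch; fls64() and
min3() are re-implemented for illustration, and the Raspberry Pi 4
limit of 0x3fffffff is an example value, not something queried from
firmware:

	#include <stdio.h>
	#include <stdint.h>

	/* Userspace stand-ins for the kernel helpers used by zone_sizes_init(). */
	static unsigned int fls64(uint64_t x)
	{
		return x ? 64 - __builtin_clzll(x) : 0;
	}

	static unsigned int min3(unsigned int a, unsigned int b, unsigned int c)
	{
		unsigned int m = a < b ? a : b;
		return m < c ? m : c;
	}

	int main(void)
	{
		/* Raspberry Pi 4: peripherals can only address the first 1 GB. */
		uint64_t dt_limit   = 0x3fffffffULL; /* of_dma_get_max_cpu_address() */
		uint64_t acpi_limit = UINT64_MAX;    /* PHYS_ADDR_MAX: IORT unconstrained */

		unsigned int zone_dma_bits = min3(32U, fls64(dt_limit), fls64(acpi_limit));

		/* fls64(0x3fffffff) == 30, so ZONE_DMA spans 2^30 bytes = 1 GB. */
		printf("zone_dma_bits = %u -> ZONE_DMA = %llu MB\n", zone_dma_bits,
		       (unsigned long long)(1ULL << zone_dma_bits) >> 20);
		return 0;
	}

When neither DT nor IORT constrains any master, both helpers report
PHYS_ADDR_MAX, fls64() returns 64, and the min3() clamp leaves
zone_dma_bits at 32, i.e. a single 4 GB DMA zone.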
[nsaenz: unified implementation with DT's counterpart]

Signed-off-by: Ard Biesheuvel
Signed-off-by: Nicolas Saenz Julienne
Tested-by: Jeremy Linton
Acked-by: Lorenzo Pieralisi
Acked-by: Hanjun Guo
Cc: Jeremy Linton
Cc: Lorenzo Pieralisi
Cc: Nicolas Saenz Julienne
Cc: Rob Herring
Cc: Christoph Hellwig
Cc: Robin Murphy
Cc: Hanjun Guo
Cc: Sudeep Holla
Cc: Anshuman Khandual
Link: https://lore.kernel.org/r/20201119175400.9995-7-nsaenzjulienne@suse.de
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/init.c      |  5 ++++-
 drivers/acpi/arm64/iort.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/acpi_iort.h |  4 ++++
 3 files changed, 63 insertions(+), 1 deletion(-)
(limited to 'include/linux')

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index b736890ad3d3..5e534d674c9c 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include <linux/acpi_iort.h>
 
 #include
 #include
@@ -193,11 +194,13 @@ static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};
+	unsigned int __maybe_unused acpi_zone_dma_bits;
 	unsigned int __maybe_unused dt_zone_dma_bits;
 
 #ifdef CONFIG_ZONE_DMA
+	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
 	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
-	zone_dma_bits = min(32U, dt_zone_dma_bits);
+	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
 	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
 	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
 #endif

diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 9929ff50c0c0..1787406684aa 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1718,3 +1718,58 @@ void __init acpi_iort_init(void)
 
 	iort_init_platform_devices();
 }
+
+#ifdef CONFIG_ZONE_DMA
+/*
+ * Extract the highest CPU physical address accessible to all DMA masters in
+ * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
+ */
+phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
+{
+	phys_addr_t limit = PHYS_ADDR_MAX;
+	struct acpi_iort_node *node, *end;
+	struct acpi_table_iort *iort;
+	acpi_status status;
+	int i;
+
+	if (acpi_disabled)
+		return limit;
+
+	status = acpi_get_table(ACPI_SIG_IORT, 0,
+				(struct acpi_table_header **)&iort);
+	if (ACPI_FAILURE(status))
+		return limit;
+
+	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
+	end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);
+
+	for (i = 0; i < iort->node_count; i++) {
+		if (node >= end)
+			break;
+
+		switch (node->type) {
+			struct acpi_iort_named_component *ncomp;
+			struct acpi_iort_root_complex *rc;
+			phys_addr_t local_limit;
+
+		case ACPI_IORT_NODE_NAMED_COMPONENT:
+			ncomp = (struct acpi_iort_named_component *)node->node_data;
+			local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
+			limit = min_not_zero(limit, local_limit);
+			break;
+
+		case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
+			if (node->revision < 1)
+				break;
+
+			rc = (struct acpi_iort_root_complex *)node->node_data;
+			local_limit = DMA_BIT_MASK(rc->memory_address_limit);
+			limit = min_not_zero(limit, local_limit);
+			break;
+		}
+		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
+	}
+	acpi_put_table(&iort->header);
+	return limit;
+}
+#endif

diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 20a32120bb88..1a12baa58e40 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -38,6 +38,7 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
 const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
 						const u32 *id_in);
 int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
+phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
 #else
 static inline void acpi_iort_init(void) { }
 static inline u32 iort_msi_map_id(struct device *dev, u32 id)
@@ -55,6 +56,9 @@ static inline const struct iommu_ops *iort_iommu_configure_id(
 static inline int iort_iommu_msi_get_resv_regions(struct device *dev,
 						  struct list_head *head)
 { return 0; }
+
+static inline phys_addr_t acpi_iort_dma_get_max_cpu_address(void)
+{ return PHYS_ADDR_MAX; }
 #endif
 
 #endif /* __ACPI_IORT_H__ */
--
cgit

From 04435217f96869ac3a8f055ff68c5237a60bcd7e Mon Sep 17 00:00:00 2001
From: Nicolas Saenz Julienne
Date: Thu, 19 Nov 2020 18:53:59 +0100
Subject: mm: Remove examples from enum zone_type comment

We can't really list every setup in common code. On top of that, such
examples are unlikely to stay true for long as things change in the
arch trees independently of this comment.

Suggested-by: Christoph Hellwig
Signed-off-by: Nicolas Saenz Julienne
Reviewed-by: Christoph Hellwig
Link: https://lore.kernel.org/r/20201119175400.9995-8-nsaenzjulienne@suse.de
Signed-off-by: Catalin Marinas
---
 include/linux/mmzone.h | 20 --------------------
 1 file changed, 20 deletions(-)
(limited to 'include/linux')

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fb3bf696c05e..9d0c454d23cd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -354,26 +354,6 @@ enum zone_type {
 	 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
	 * platforms may need both zones as they support peripherals with
	 * different DMA addressing limitations.
-	 *
-	 * Some examples:
-	 *
-	 * - i386 and x86_64 have a fixed 16M ZONE_DMA and ZONE_DMA32 for the
-	 *   rest of the lower 4G.
-	 *
-	 * - arm only uses ZONE_DMA, the size, up to 4G, may vary depending on
-	 *   the specific device.
-	 *
-	 * - arm64 has a fixed 1G ZONE_DMA and ZONE_DMA32 for the rest of the
-	 *   lower 4G.
-	 *
-	 * - powerpc only uses ZONE_DMA, the size, up to 2G, may vary
-	 *   depending on the specific device.
-	 *
-	 * - s390 uses ZONE_DMA fixed to the lower 2G.
-	 *
-	 * - ia64 and riscv only use ZONE_DMA32.
-	 *
-	 * - parisc uses neither.
 	 */
 #ifdef CONFIG_ZONE_DMA
 	ZONE_DMA,
--
cgit

From 23acdc76f1798b090bb9dcc90671cd29d929834e Mon Sep 17 00:00:00 2001
From: Peter Collingbourne
Date: Thu, 12 Nov 2020 18:53:34 -0800
Subject: signal: clear non-uapi flag bits when passing/returning sa_flags

Previously we were not clearing non-uapi flag bits in
sigaction.sa_flags when storing the userspace-provided sa_flags or when
returning them via oldact. Start doing so.

This allows userspace to detect missing support for flag bits and
allows the kernel to use non-uapi bits internally, as we are already
doing in arch/x86 for two flag bits. Now that this change is in place,
we no longer need the code in arch/x86 that was hiding these bits from
userspace, so remove it.

This is technically a userspace-visible behavior change for sigaction,
as the unknown bits returned via oldact.sa_flags are no longer set.
However, we are free to define the behavior for unknown bits exactly
because their behavior is currently undefined, so for now we can define
the meaning of each of them to be "clear the bit in oldact.sa_flags
unless the bit becomes known in the future". Furthermore, this behavior
is consistent with OpenBSD [1], illumos [2] and XNU [3] (FreeBSD [4]
and NetBSD [5] fail the syscall if unknown bits are set). So there is
some precedent for this behavior in other kernels, and in particular in
XNU, which is probably the most popular kernel among those that I
looked at, which means that this change is less likely to be a
compatibility issue.

Link: [1] https://github.com/openbsd/src/blob/f634a6a4b5bf832e9c1de77f7894ae2625e74484/sys/kern/kern_sig.c#L278
Link: [2] https://github.com/illumos/illumos-gate/blob/76f19f5fdc974fe5be5c82a556e43a4df93f1de1/usr/src/uts/common/syscall/sigaction.c#L86
Link: [3] https://github.com/apple/darwin-xnu/blob/a449c6a3b8014d9406c2ddbdc81795da24aa7443/bsd/kern/kern_sig.c#L480
Link: [4] https://github.com/freebsd/freebsd/blob/eded70c37057857c6e23fae51f86b8f8f43cd2d0/sys/kern/kern_sig.c#L699
Link: [5] https://github.com/NetBSD/src/blob/3365779becdcedfca206091a645a0e8e22b2946e/sys/kern/sys_sig.c#L473

Signed-off-by: Peter Collingbourne
Reviewed-by: Dave Martin
Acked-by: "Eric W. Biederman"
Link: https://linux-review.googlesource.com/id/I35aab6f5be932505d90f3b3450c083b4db1eca86
Link: https://lkml.kernel.org/r/878dbcb5f47bc9b11881c81f745c0bef5c23f97f.1605235762.git.pcc@google.com
Signed-off-by: Eric W. Biederman
---
 arch/arm/include/asm/signal.h    |  2 ++
 arch/parisc/include/asm/signal.h |  2 ++
 arch/x86/kernel/signal_compat.c  |  7 -------
 include/linux/signal_types.h     | 12 ++++++++++++
 kernel/signal.c                  | 10 ++++++++++
 5 files changed, 26 insertions(+), 7 deletions(-)
(limited to 'include/linux')

diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
index 65530a042009..430be7774402 100644
--- a/arch/arm/include/asm/signal.h
+++ b/arch/arm/include/asm/signal.h
@@ -17,6 +17,8 @@ typedef struct {
 	unsigned long sig[_NSIG_WORDS];
 } sigset_t;
 
+#define __ARCH_UAPI_SA_FLAGS	(SA_THIRTYTWO | SA_RESTORER)
+
 #define __ARCH_HAS_SA_RESTORER
 
 #include

diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h
index 715c96ba2ec8..30dd1e43ef88 100644
--- a/arch/parisc/include/asm/signal.h
+++ b/arch/parisc/include/asm/signal.h
@@ -21,6 +21,8 @@ typedef struct {
 	unsigned long sig[_NSIG_WORDS];
 } sigset_t;
 
+#define __ARCH_UAPI_SA_FLAGS	_SA_SIGGFAULT
+
 #include
 
 #endif /* !__ASSEMBLY */

diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index a7f3e12cfbdb..ddfd919be46c 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -165,16 +165,9 @@ void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact)
 {
 	signal_compat_build_tests();
 
-	/* Don't leak in-kernel non-uapi flags to user-space */
-	if (oact)
-		oact->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI);
-
 	if (!act)
 		return;
 
-	/* Don't let flags to be set from userspace */
-	act->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI);
-
 	if (in_ia32_syscall())
 		act->sa.sa_flags |= SA_IA32_ABI;
 	if (in_x32_syscall())

diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index f8a90ae9c6ec..a7887ad84d36 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -68,4 +68,16 @@ struct ksignal {
 	int sig;
 };
 
+#ifndef __ARCH_UAPI_SA_FLAGS
+#ifdef SA_RESTORER
+#define __ARCH_UAPI_SA_FLAGS	SA_RESTORER
+#else
+#define __ARCH_UAPI_SA_FLAGS	0
+#endif
+#endif
+
+#define UAPI_SA_FLAGS							\
+	(SA_NOCLDSTOP | SA_NOCLDWAIT | SA_SIGINFO | SA_ONSTACK | SA_RESTART | \
+	 SA_NODEFER | SA_RESETHAND | __ARCH_UAPI_SA_FLAGS)
+
 #endif /* _LINUX_SIGNAL_TYPES_H */

diff --git a/kernel/signal.c b/kernel/signal.c
index ef8f2a28d37c..8f5bd12ee41b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3985,6 +3985,16 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 	if (oact)
 		*oact = *k;
 
+	/*
+	 * Clear unknown flag bits in order to allow userspace to detect missing
+	 * support for flag bits and to allow the kernel to use non-uapi bits
+	 * internally.
+	 */
+	if (act)
+		act->sa.sa_flags &= UAPI_SA_FLAGS;
+	if (oact)
+		oact->sa.sa_flags &= UAPI_SA_FLAGS;
+
 	sigaction_compat_abi(act, oact);
 
 	if (act) {
--
cgit

From 6ac05e832a9e96f9b1c42a8917cdd317d7b6c8fa Mon Sep 17 00:00:00 2001
From: Peter Collingbourne
Date: Fri, 20 Nov 2020 12:33:45 -0800
Subject: signal: define the SA_EXPOSE_TAGBITS bit in sa_flags

Architectures that support address tagging, such as arm64, may want to
expose fault address tag bits to the signal handler to help diagnose
memory errors. However, these bits have not been previously set, and
their presence may confuse unaware user applications. Therefore,
introduce a SA_EXPOSE_TAGBITS flag bit in sa_flags that a signal
handler may use to explicitly request that the bits are set.

The generic signal handler APIs expect to receive tagged addresses.
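As a hedged illustration of how a userspace handler might opt in to
tagged si_addr values once this series lands (SA_UNSUPPORTED and
SA_EXPOSE_TAGBITS are defined locally from the values added by these
patches, in case libc headers lack them; the detection protocol relies
on the previous patch clearing unknown bits in oldact):

	#include <signal.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Values from the patches in this series; libc headers may not have them. */
	#ifndef SA_UNSUPPORTED
	#define SA_UNSUPPORTED		0x00000400
	#endif
	#ifndef SA_EXPOSE_TAGBITS
	#define SA_EXPOSE_TAGBITS	0x00000800
	#endif

	static void handler(int sig, siginfo_t *info, void *ucontext)
	{
		/* With SA_EXPOSE_TAGBITS, si_addr retains the architecture's tag
		 * bits (e.g. MTE/TBI on arm64) instead of being untagged. */
		(void)sig; (void)info; (void)ucontext;
		_exit(0);
	}

	int main(void)
	{
		struct sigaction sa = { 0 }, old;

		sa.sa_sigaction = handler;
		sa.sa_flags = SA_SIGINFO | SA_UNSUPPORTED | SA_EXPOSE_TAGBITS;
		sigaction(SIGSEGV, &sa, NULL);

		/*
		 * Read-back check: a kernel that clears unknown bits will clear
		 * SA_UNSUPPORTED, which makes the state of SA_EXPOSE_TAGBITS in
		 * oldact meaningful; an old kernel leaves both bits set.
		 */
		sigaction(SIGSEGV, NULL, &old);
		if (old.sa_flags & SA_UNSUPPORTED)
			fprintf(stderr, "old kernel: flag support undetectable\n");
		else if (!(old.sa_flags & SA_EXPOSE_TAGBITS))
			fprintf(stderr, "kernel lacks SA_EXPOSE_TAGBITS\n");
		return 0;
	}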
Architectures may specify how to untag addresses in the case where
SA_EXPOSE_TAGBITS is clear by defining the arch_untagged_si_addr
function.

Signed-off-by: Peter Collingbourne
Acked-by: "Eric W. Biederman"
Link: https://linux-review.googlesource.com/id/I16dd0ed2081f091fce97be0190cb8caa874c26cb
Link: https://lkml.kernel.org/r/13cf24d00ebdd8e1f55caf1821c7c29d54100191.1605904350.git.pcc@google.com
Signed-off-by: Eric W. Biederman
---
 include/linux/signal.h                 | 14 ++++++++++++++
 include/linux/signal_types.h           |  2 +-
 include/uapi/asm-generic/signal-defs.h |  3 +++
 kernel/signal.c                        | 24 ++++++++++++++++++++++++
 4 files changed, 42 insertions(+), 1 deletion(-)
(limited to 'include/linux')

diff --git a/include/linux/signal.h b/include/linux/signal.h
index b256f9c65661..205526c4003a 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -469,4 +469,18 @@ struct seq_file;
 extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
 #endif
 
+#ifndef arch_untagged_si_addr
+/*
+ * Given a fault address and a signal and si_code which correspond to the
+ * _sigfault union member, returns the address that must appear in si_addr if
+ * the signal handler does not have SA_EXPOSE_TAGBITS enabled in sa_flags.
+ */
+static inline void __user *arch_untagged_si_addr(void __user *addr,
+						 unsigned long sig,
+						 unsigned long si_code)
+{
+	return addr;
+}
+#endif
+
 #endif /* _LINUX_SIGNAL_H */

diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index a7887ad84d36..68e06c75c5b2 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -78,6 +78,6 @@ struct ksignal {
 
 #define UAPI_SA_FLAGS							\
 	(SA_NOCLDSTOP | SA_NOCLDWAIT | SA_SIGINFO | SA_ONSTACK | SA_RESTART | \
-	 SA_NODEFER | SA_RESETHAND | __ARCH_UAPI_SA_FLAGS)
+	 SA_NODEFER | SA_RESETHAND | SA_EXPOSE_TAGBITS | __ARCH_UAPI_SA_FLAGS)
 
 #endif /* _LINUX_SIGNAL_TYPES_H */

diff --git a/include/uapi/asm-generic/signal-defs.h b/include/uapi/asm-generic/signal-defs.h
index c790f67304ba..fe929e7b77ca 100644
--- a/include/uapi/asm-generic/signal-defs.h
+++ b/include/uapi/asm-generic/signal-defs.h
@@ -20,6 +20,8 @@
  * so this bit allows flag bit support to be detected from userspace while
  * allowing an old kernel to be distinguished from a kernel that supports every
  * flag bit.
+ * SA_EXPOSE_TAGBITS exposes an architecture-defined set of tag bits in
+ * siginfo.si_addr.
  *
  * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
 * Unix names RESETHAND and NODEFER respectively.
@@ -41,6 +43,7 @@
 /* 0x00000100 used on sparc */
 /* 0x00000200 used on sparc */
 #define SA_UNSUPPORTED	0x00000400
+#define SA_EXPOSE_TAGBITS	0x00000800
 /* 0x00010000 used on mips */
 /* 0x01000000 used on x86 */
 /* 0x02000000 used on x86 */

diff --git a/kernel/signal.c b/kernel/signal.c
index 8f34819e80de..26018c59821d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2524,6 +2524,26 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info)
 	return signr;
 }
 
+static void hide_si_addr_tag_bits(struct ksignal *ksig)
+{
+	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
+	case SIL_FAULT:
+	case SIL_FAULT_MCEERR:
+	case SIL_FAULT_BNDERR:
+	case SIL_FAULT_PKUERR:
+		ksig->info.si_addr = arch_untagged_si_addr(
+			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
+		break;
+	case SIL_KILL:
+	case SIL_TIMER:
+	case SIL_POLL:
+	case SIL_CHLD:
+	case SIL_RT:
+	case SIL_SYS:
+		break;
+	}
+}
+
 bool get_signal(struct ksignal *ksig)
 {
 	struct sighand_struct *sighand = current->sighand;
@@ -2761,6 +2781,10 @@ relock:
 	spin_unlock_irq(&sighand->siglock);
 
 	ksig->sig = signr;
+
+	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
+		hide_si_addr_tag_bits(ksig);
+
 	return ksig->sig > 0;
 }
--
cgit

From 367c820ef08082e68df8a3bc12e62393af21e4b5 Mon Sep 17 00:00:00 2001
From: Sumit Garg
Date: Wed, 7 Oct 2020 14:21:43 +0530
Subject: arm64: Enable perf events based hard lockup detector

With the recent feature that lets perf events use pseudo NMIs as
interrupts on platforms supporting GICv3 or later, it is now possible
to enable the hard lockup detector (or NMI watchdog) on arm64
platforms. So enable the corresponding support.

One thing to note here is that the lockup detector is normally
initialized just after the early initcalls, but the PMU on arm64 comes
up much later, as a device_initcall(). So we need to re-initialize
lockup detection once the PMU has been initialized.

Signed-off-by: Sumit Garg
Acked-by: Alexandru Elisei
Link: https://lore.kernel.org/r/1602060704-10921-1-git-send-email-sumit.garg@linaro.org
Signed-off-by: Will Deacon
---
 arch/arm64/Kconfig             |  2 ++
 arch/arm64/kernel/perf_event.c | 41 +++++++++++++++++++++++++++++++++++++++--
 drivers/perf/arm_pmu.c         |  5 +++++
 include/linux/perf/arm_pmu.h   |  2 ++
 4 files changed, 48 insertions(+), 2 deletions(-)
(limited to 'include/linux')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1515f6f153a0..7d3440770b14 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -170,6 +170,8 @@ config ARM64
 	select HAVE_NMI
 	select HAVE_PATA_PLATFORM
 	select HAVE_PERF_EVENTS
+	select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI
+	select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API

diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 3605f77ad4df..38bb07eff872 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -23,6 +23,8 @@
 #include
 #include
 #include
+#include <linux/nmi.h>
+#include <linux/cpufreq.h>
 
 /* ARMv8 Cortex-A53 specific event types. */
 #define ARMV8_A53_PERFCTR_PREF_LINEFILL	0xC2

@@ -1248,10 +1250,21 @@ static struct platform_driver armv8_pmu_driver = {
 
 static int __init armv8_pmu_driver_init(void)
 {
+	int ret;
+
 	if (acpi_disabled)
-		return platform_driver_register(&armv8_pmu_driver);
+		ret = platform_driver_register(&armv8_pmu_driver);
 	else
-		return arm_pmu_acpi_probe(armv8_pmuv3_init);
+		ret = arm_pmu_acpi_probe(armv8_pmuv3_init);
+
+	/*
+	 * Try to re-initialize lockup detector after PMU init in
+	 * case PMU events are triggered via NMIs.
+	 */
+	if (ret == 0 && arm_pmu_irq_is_nmi())
+		lockup_detector_init();
+
+	return ret;
 }
 device_initcall(armv8_pmu_driver_init)
 
@@ -1309,3 +1322,27 @@ void arch_perf_update_userpage(struct perf_event *event,
 	userpg->cap_user_time_zero = 1;
 	userpg->cap_user_time_short = 1;
 }
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
+/*
+ * Safe maximum CPU frequency in case a particular platform doesn't implement
+ * cpufreq driver. Although, architecture doesn't put any restrictions on
+ * maximum frequency but 5 GHz seems to be safe maximum given the available
+ * Arm CPUs in the market which are clocked much less than 5 GHz. On the other
+ * hand, we can't make it much higher as it would lead to a large hard-lockup
+ * detection timeout on parts which are running slower (eg. 1GHz on
+ * Developerbox) and doesn't possess a cpufreq driver.
+ */
+#define SAFE_MAX_CPU_FREQ	5000000000UL // 5 GHz
+u64 hw_nmi_get_sample_period(int watchdog_thresh)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long max_cpu_freq;
+
+	max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
+	if (!max_cpu_freq)
+		max_cpu_freq = SAFE_MAX_CPU_FREQ;
+
+	return (u64)max_cpu_freq * watchdog_thresh;
+}
+#endif

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index cb2f55f450e4..794a37d50853 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -726,6 +726,11 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
 	return per_cpu(hw_events->irq, cpu);
 }
 
+bool arm_pmu_irq_is_nmi(void)
+{
+	return has_nmi;
+}
+
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are

diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 505480217cf1..bf7966776c55 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -163,6 +163,8 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 #endif
 
+bool arm_pmu_irq_is_nmi(void);
+
 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
 struct arm_pmu *armpmu_alloc_atomic(void);
--
cgit

From a2abe7cbd8fe2db5ff386c968e2273d9dc6c468d Mon Sep 17 00:00:00 2001
From: Sami Tolvanen
Date: Mon, 30 Nov 2020 15:34:41 -0800
Subject: scs: switch to vmapped shadow stacks

The kernel currently uses kmem_cache to allocate shadow call stacks,
which means an overflow may not be immediately detected and can
potentially result in another task's shadow stack being overwritten.

This change switches SCS to use virtually mapped shadow stacks for
tasks, which increases the shadow stack size to a full page and
provides more robust overflow detection, similarly to VMAP_STACK.
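To make the overflow argument concrete before the diff, here is a small
userspace analogy (not kernel code): an mprotect()ed guard page makes an
out-of-bounds store fault immediately, which is the property the
vmapped stacks gain, whereas a 1 KiB slab object would silently bleed
into its neighbour.

	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);

		/* Two pages: the second is made inaccessible, mimicking the
		 * guard holes vmalloc leaves between allocations. */
		char *scs = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (scs == MAP_FAILED)
			return 1;
		mprotect(scs + page, page, PROT_NONE);

		memset(scs, 0xaa, page);	/* fills the stack: fine */
		scs[page] = 0xbb;		/* first byte past the end: SIGSEGV */
		return 0;
	}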
Signed-off-by: Sami Tolvanen
Acked-by: Will Deacon
Link: https://lore.kernel.org/r/20201130233442.2562064-2-samitolvanen@google.com
Signed-off-by: Will Deacon
---
 include/linux/scs.h | 12 ++++-----
 kernel/scs.c        | 71 ++++++++++++++++++++++++++++++++++++++++++++---------
 2 files changed, 66 insertions(+), 17 deletions(-)
(limited to 'include/linux')

diff --git a/include/linux/scs.h b/include/linux/scs.h
index 6dec390cf154..2a506c2a16f4 100644
--- a/include/linux/scs.h
+++ b/include/linux/scs.h
@@ -15,12 +15,8 @@
 
 #ifdef CONFIG_SHADOW_CALL_STACK
 
-/*
- * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit
- * architecture) provided ~40% safety margin on stack usage while keeping
- * memory allocation overhead reasonable.
- */
-#define SCS_SIZE		SZ_1K
+#define SCS_ORDER		0
+#define SCS_SIZE		(PAGE_SIZE << SCS_ORDER)
 #define GFP_SCS			(GFP_KERNEL | __GFP_ZERO)
 
 /* An illegal pointer value to mark the end of the shadow stack. */
@@ -33,6 +29,8 @@
 #define task_scs(tsk)		(task_thread_info(tsk)->scs_base)
 #define task_scs_sp(tsk)	(task_thread_info(tsk)->scs_sp)
 
+void *scs_alloc(int node);
+void scs_free(void *s);
 void scs_init(void);
 int scs_prepare(struct task_struct *tsk, int node);
 void scs_release(struct task_struct *tsk);
@@ -61,6 +59,8 @@ static inline bool task_scs_end_corrupted(struct task_struct *tsk)
 
 #else /* CONFIG_SHADOW_CALL_STACK */
 
+static inline void *scs_alloc(int node) { return NULL; }
+static inline void scs_free(void *s) {}
 static inline void scs_init(void) {}
 static inline void scs_task_reset(struct task_struct *tsk) {}
 static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }

diff --git a/kernel/scs.c b/kernel/scs.c
index 4ff4a7ba0094..e2a71fc82fa0 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -5,26 +5,49 @@
  * Copyright (C) 2019 Google LLC
  */
 
+#include <linux/cpuhotplug.h>
 #include
 #include
 #include
-#include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include
 
-static struct kmem_cache *scs_cache;
-
 static void __scs_account(void *s, int account)
 {
-	struct page *scs_page = virt_to_page(s);
+	struct page *scs_page = vmalloc_to_page(s);
 
 	mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
 			    account * (SCS_SIZE / SZ_1K));
 }
 
-static void *scs_alloc(int node)
+/* Matches NR_CACHED_STACKS for VMAP_STACK */
+#define NR_CACHED_SCS 2
+static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);
+
+static void *__scs_alloc(int node)
 {
-	void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
+	int i;
+	void *s;
+
+	for (i = 0; i < NR_CACHED_SCS; i++) {
+		s = this_cpu_xchg(scs_cache[i], NULL);
+		if (s) {
+			kasan_unpoison_vmalloc(s, SCS_SIZE);
+			memset(s, 0, SCS_SIZE);
+			return s;
+		}
+	}
+
+	return __vmalloc_node_range(SCS_SIZE, 1, VMALLOC_START, VMALLOC_END,
+				    GFP_SCS, PAGE_KERNEL, 0, node,
+				    __builtin_return_address(0));
+}
 
+void *scs_alloc(int node)
+{
+	void *s;
+
+	s = __scs_alloc(node);
 	if (!s)
 		return NULL;
 
@@ -34,21 +57,47 @@ static void *scs_alloc(int node)
 	 * Poison the allocation to catch unintentional accesses to
 	 * the shadow stack when KASAN is enabled.
 	 */
-	kasan_poison_object_data(scs_cache, s);
+	kasan_poison_vmalloc(s, SCS_SIZE);
 	__scs_account(s, 1);
 	return s;
 }
 
-static void scs_free(void *s)
+void scs_free(void *s)
 {
+	int i;
+
 	__scs_account(s, -1);
-	kasan_unpoison_object_data(scs_cache, s);
-	kmem_cache_free(scs_cache, s);
+
+	/*
+	 * We cannot sleep as this can be called in interrupt context,
+	 * so use this_cpu_cmpxchg to update the cache, and vfree_atomic
+	 * to free the stack.
+	 */
+
+	for (i = 0; i < NR_CACHED_SCS; i++)
+		if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
+			return;
+
+	vfree_atomic(s);
+}
+
+static int scs_cleanup(unsigned int cpu)
+{
+	int i;
+	void **cache = per_cpu_ptr(scs_cache, cpu);
+
+	for (i = 0; i < NR_CACHED_SCS; i++) {
+		vfree(cache[i]);
+		cache[i] = NULL;
+	}
+
+	return 0;
 }
 
 void __init scs_init(void)
 {
-	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
+	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "scs:scs_cache", NULL,
+			  scs_cleanup);
 }
 
 int scs_prepare(struct task_struct *tsk, int node)
--
cgit

From ac20ffbb0279aae7be48567fb734eae7d050769e Mon Sep 17 00:00:00 2001
From: Sami Tolvanen
Date: Mon, 30 Nov 2020 15:34:42 -0800
Subject: arm64: scs: use vmapped IRQ and SDEI shadow stacks

Use scs_alloc() to allocate the IRQ and SDEI shadow stacks as well,
instead of using statically allocated stacks.

Signed-off-by: Sami Tolvanen
Acked-by: Will Deacon
Link: https://lore.kernel.org/r/20201130233442.2562064-3-samitolvanen@google.com
[will: Move CONFIG_SHADOW_CALL_STACK check into init_irq_scs()]
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/Makefile |  1 -
 arch/arm64/kernel/entry.S  |  6 ++--
 arch/arm64/kernel/irq.c    | 21 ++++++++++++++
 arch/arm64/kernel/scs.c    | 16 ----------
 arch/arm64/kernel/sdei.c   | 70 ++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/scs.h        |  4 ---
 6 files changed, 94 insertions(+), 24 deletions(-)
 delete mode 100644 arch/arm64/kernel/scs.c
(limited to 'include/linux')

diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index bbaf0bc4ad60..86364ab6f13f 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -58,7 +58,6 @@ obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
 obj-$(CONFIG_CRASH_CORE)		+= crash_core.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)		+= sdei.o
 obj-$(CONFIG_ARM64_PTR_AUTH)		+= pointer_auth.o
-obj-$(CONFIG_SHADOW_CALL_STACK)		+= scs.o
 obj-$(CONFIG_ARM64_MTE)			+= mte.o
 
 obj-y					+= vdso/ probes/

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index b295fb912b12..5c2ac4b5b2da 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -441,7 +441,7 @@ SYM_CODE_END(__swpan_exit_el0)
 
 #ifdef CONFIG_SHADOW_CALL_STACK
 	/* also switch to the irq shadow stack */
-	adr_this_cpu scs_sp, irq_shadow_call_stack, x26
+	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
 #endif
 
 9998:
@@ -1097,9 +1097,9 @@ SYM_CODE_START(__sdei_asm_handler)
 #ifdef CONFIG_SHADOW_CALL_STACK
 	/* Use a separate shadow call stack for normal and critical events */
 	cbnz	w4, 3f
-	adr_this_cpu	dst=scs_sp, sym=sdei_shadow_call_stack_normal, tmp=x6
+	ldr_this_cpu	dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
 	b	4f
-3:	adr_this_cpu	dst=scs_sp, sym=sdei_shadow_call_stack_critical, tmp=x6
+3:	ldr_this_cpu	dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
 4:
 #endif

diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 9cf2fb87584a..ac54c21140d4 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <linux/scs.h>
 #include
 #include
 #include
@@ -27,6 +28,25 @@ DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
 
 DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
 
+
+DECLARE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+DEFINE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
+#endif
+
+static void init_irq_scs(void)
+{
+	int cpu;
+
+	if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
+		return;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(irq_shadow_call_stack_ptr, cpu) =
+			scs_alloc(cpu_to_node(cpu));
+}
+
 #ifdef CONFIG_VMAP_STACK
 static void init_irq_stacks(void)
 {
@@ -54,6 +74,7 @@ static void init_irq_stacks(void)
 void __init init_IRQ(void)
 {
 	init_irq_stacks();
+	init_irq_scs();
 	irqchip_init();
 	if (!handle_arch_irq)
 		panic("No interrupt controller found.");

diff --git a/arch/arm64/kernel/scs.c b/arch/arm64/kernel/scs.c
deleted file mode 100644
index e8f7ff45dd8f..000000000000
--- a/arch/arm64/kernel/scs.c
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Shadow Call Stack support.
- *
- * Copyright (C) 2019 Google LLC
- */
-
-#include
-#include
-
-DEFINE_SCS(irq_shadow_call_stack);
-
-#ifdef CONFIG_ARM_SDE_INTERFACE
-DEFINE_SCS(sdei_shadow_call_stack_normal);
-DEFINE_SCS(sdei_shadow_call_stack_critical);
-#endif

diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 7689f2031c0c..d12fd786b267 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include <linux/scs.h>
 #include
 #include
 
@@ -37,6 +38,14 @@ DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
 DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
 #endif
 
+DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
+DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
+DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
+#endif
+
 static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
 {
 	unsigned long *p;
@@ -90,6 +99,59 @@ static int init_sdei_stacks(void)
 	return err;
 }
 
+static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu)
+{
+	void *s;
+
+	s = per_cpu(*ptr, cpu);
+	if (s) {
+		per_cpu(*ptr, cpu) = NULL;
+		scs_free(s);
+	}
+}
+
+static void free_sdei_scs(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		_free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
+		_free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
+	}
+}
+
+static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu)
+{
+	void *s;
+
+	s = scs_alloc(cpu_to_node(cpu));
+	if (!s)
+		return -ENOMEM;
+	per_cpu(*ptr, cpu) = s;
+
+	return 0;
+}
+
+static int init_sdei_scs(void)
+{
+	int cpu;
+	int err = 0;
+
+	for_each_possible_cpu(cpu) {
+		err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
+		if (err)
+			break;
+		err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
+		if (err)
+			break;
+	}
+
+	if (err)
+		free_sdei_scs();
+
+	return err;
+}
+
 static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
 {
 	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
@@ -138,6 +200,14 @@ unsigned long sdei_arch_get_entry_point(int conduit)
 		return 0;
 	}
 
+	if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) {
+		if (init_sdei_scs()) {
+			if (IS_ENABLED(CONFIG_VMAP_STACK))
+				free_sdei_stacks();
+			return 0;
+		}
+	}
+
 	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC :
 							  SDEI_EXIT_SMC;
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0

diff --git a/include/linux/scs.h b/include/linux/scs.h
index 2a506c2a16f4..18122d9e17ff 100644
--- a/include/linux/scs.h
+++ b/include/linux/scs.h
@@ -22,10 +22,6 @@
 
 /* An illegal pointer value to mark the end of the shadow stack. */
 #define SCS_END_MAGIC		(0x5f6UL + POISON_POINTER_DELTA)
 
-/* Allocate a static per-CPU shadow stack */
-#define DEFINE_SCS(name)						\
-	DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name)	\
-
 #define task_scs(tsk)		(task_thread_info(tsk)->scs_base)
 #define task_scs_sp(tsk)	(task_thread_info(tsk)->scs_sp)
--
cgit
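Stepping back to the hard-lockup detector patch earlier in this series,
the sample-period computation in hw_nmi_get_sample_period() is plain
multiplication; here is a hedged userspace re-computation with
illustrative frequencies (the default watchdog_thresh of 10 seconds is
assumed, and the function takes Hz directly rather than the kHz value
the kernel gets from cpufreq):

	#include <stdio.h>
	#include <stdint.h>

	#define SAFE_MAX_CPU_FREQ 5000000000ULL	/* 5 GHz fallback, from the patch */

	/*
	 * Mirrors hw_nmi_get_sample_period(): the PMU cycle counter is
	 * programmed to overflow (and fire the watchdog NMI) roughly every
	 * watchdog_thresh seconds.
	 */
	static uint64_t sample_period(uint64_t max_freq_hz, int watchdog_thresh)
	{
		if (!max_freq_hz)
			max_freq_hz = SAFE_MAX_CPU_FREQ;
		return max_freq_hz * (uint64_t)watchdog_thresh;
	}

	int main(void)
	{
		/* Developerbox: cpufreq reports 1 GHz. */
		printf("1 GHz, 10 s: %llu cycles\n",
		       (unsigned long long)sample_period(1000000000ULL, 10));
		/* No cpufreq driver: fall back to the safe 5 GHz maximum. */
		printf("no cpufreq:  %llu cycles\n",
		       (unsigned long long)sample_period(0, 10));
		return 0;
	}

On the 1 GHz part the period is 10^10 cycles; with the 5 GHz fallback it
grows to 5x10^10, which is why the comment in the patch notes that a
larger fallback would stretch the detection timeout on slower parts.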