author		Linus Torvalds <torvalds@linux-foundation.org>	2024-11-08 07:19:58 -1000
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-11-08 07:19:58 -1000
commit		9ea7edac83630a9e8a05042b8750eaa10ecb6a38 (patch)
tree		98bd60f3fffa87e09faf11328bdee20785159953 /arch
parent		51b47860ad8058ae54e4789b5f9b253fd555d2e9 (diff)
parent		81235ae0c846e1fb46a2c6fe9283fe2b2b24f7dc (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 "Here is a (hopefully) final round of arm64 fixes for 6.12 that address
  some user-visible floating point register corruption. Both of the
  Marks have been working on this for a couple of weeks and we've ended
  up in a position where SVE is solid but SME still has enough pending
  issues that the most pragmatic solution for the release and stable
  backports is to disable the feature. Yes, it's a shame, but the
  hardware is rare as hen's teeth at the moment and we're better off
  getting back to a known good state before fixing it all properly.

  We're also improving the selftests for 6.13 to help avoid merging
  broken code in the future.

  Anyway, the good news is that we're removing a lot more code than
  we're adding.

  Summary:

   - Fix handling of SVE traps from userspace on preemptible kernels
     when converting the saved floating point state into SVE state.

   - Remove broken support for the SMCCCv1.3 "SVE discard hint"
     optimisation.

   - Disable SME support, as the current support code suffers from
     numerous issues around signal delivery, ptrace access and
     context-switch which can lead to user-visible corruption of the
     register state"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Kconfig: Make SME depend on BROKEN for now
  arm64: smccc: Remove broken support for SMCCCv1.3 SVE discard hint
  arm64/sve: Discard stale CPU state when handling SVE traps
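The SME disable is purely a Kconfig change, so kernels built from this tree simply stop advertising the feature to userspace through the ELF hwcaps. As a hedged illustration (not part of this merge), a userspace program can observe that with the standard getauxval() interface; HWCAP_SVE and HWCAP2_SME come from the arm64 uapi header <asm/hwcap.h>:

/*
 * Illustrative userspace check, not part of this merge: with ARM64_SME
 * now depending on BROKEN, the kernel no longer sets HWCAP2_SME, so the
 * second line below prints "no" on kernels built from this tree.
 * arm64 Linux only; HWCAP_SVE/HWCAP2_SME are arm64-specific hwcap bits.
 */
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
        unsigned long hwcap  = getauxval(AT_HWCAP);
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        printf("SVE: %s\n", (hwcap & HWCAP_SVE) ? "yes" : "no");
        printf("SME: %s\n", (hwcap2 & HWCAP2_SME) ? "yes" : "no");
        return 0;
}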
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/Kconfig		1
-rw-r--r--	arch/arm64/kernel/fpsimd.c	1
-rw-r--r--	arch/arm64/kernel/smccc-call.S	35
3 files changed, 5 insertions, 32 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fd9df6dcc593..70d7f4f20225 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2214,6 +2214,7 @@ config ARM64_SME
bool "ARM Scalable Matrix Extension support"
default y
depends on ARM64_SVE
+ depends on BROKEN
help
The Scalable Matrix Extension (SME) is an extension to the AArch64
execution state which utilises a substantial subset of the SVE
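For context on the hunk above: BROKEN is a Kconfig symbol that nothing ever sets, so "depends on BROKEN" makes ARM64_SME unselectable without deleting the SME code. A hedged C-side sketch of the knock-on effect (setup_sme_example() is a hypothetical name, not a kernel function):

/*
 * Illustrative sketch, not taken from the kernel: with CONFIG_ARM64_SME
 * unset, IS_ENABLED(CONFIG_ARM64_SME) is constant-false and SME-only
 * paths guarded like this compile out entirely.
 */
#include <linux/kconfig.h>

static void setup_sme_example(void)
{
        if (!IS_ENABLED(CONFIG_ARM64_SME))
                return;         /* always taken while SME depends on BROKEN */

        /* SME-specific initialisation would go here. */
}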
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 77006df20a75..6d21971ae559 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1367,6 +1367,7 @@ static void sve_init_regs(void)
 	} else {
 		fpsimd_to_sve(current);
 		current->thread.fp_type = FP_STATE_SVE;
+		fpsimd_flush_task_state(current);
 	}
 }
 
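The single added line above calls fpsimd_flush_task_state() right after the FPSIMD-to-SVE conversion, so any stale copy of the task's vector state still sitting in a CPU's registers is invalidated and the freshly written in-memory state is reloaded before returning to userspace. Below is a hedged, self-contained model of that lazy ownership tracking, with made-up names; it is not the kernel implementation:

/*
 * Conceptual model only: struct task_fp and fp_owner[] are invented for
 * this sketch. The point is that "flush" just drops the ownership claim,
 * which forces the next return to userspace to reload from memory.
 */
#define MAX_CPUS 64

struct task_fp {
        int owner_cpu;            /* CPU whose registers mirror this state, or -1 */
        unsigned char regs[512];  /* in-memory copy of the FP/SVE registers */
};

static struct task_fp *fp_owner[MAX_CPUS];  /* last state loaded on each CPU */

/* Invalidate any cached register copy (the role fpsimd_flush_task_state plays). */
void flush_task_fp(struct task_fp *st)
{
        st->owner_cpu = -1;
}

/* On return to userspace: reload the registers only if the cached copy is stale. */
void restore_task_fp(int cpu, struct task_fp *st)
{
        if (st->owner_cpu != cpu || fp_owner[cpu] != st) {
                /* ...load st->regs into the hardware registers here... */
                st->owner_cpu = cpu;
                fp_owner[cpu] = st;
        }
}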
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
index 487381164ff6..2def9d0dd3dd 100644
--- a/arch/arm64/kernel/smccc-call.S
+++ b/arch/arm64/kernel/smccc-call.S
@@ -7,48 +7,19 @@
 
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
-#include <asm/thread_info.h>
-
-/*
- * If we have SMCCC v1.3 and (as is likely) no SVE state in
- * the registers then set the SMCCC hint bit to say there's no
- * need to preserve it. Do this by directly adjusting the SMCCC
- * function value which is already stored in x0 ready to be called.
- */
-SYM_FUNC_START(__arm_smccc_sve_check)
-
-	ldr_l	x16, smccc_has_sve_hint
-	cbz	x16, 2f
-
-	get_current_task x16
-	ldr	x16, [x16, #TSK_TI_FLAGS]
-	tbnz	x16, #TIF_FOREIGN_FPSTATE, 1f	// Any live FP state?
-	tbnz	x16, #TIF_SVE, 2f		// Does that state include SVE?
-
-1:	orr	x0, x0, ARM_SMCCC_1_3_SVE_HINT
-
-2:	ret
-SYM_FUNC_END(__arm_smccc_sve_check)
-EXPORT_SYMBOL(__arm_smccc_sve_check)
 
 	.macro SMCCC instr
-	stp	x29, x30, [sp, #-16]!
-	mov	x29, sp
-alternative_if ARM64_SVE
-	bl	__arm_smccc_sve_check
-alternative_else_nop_endif
 	\instr	#0
-	ldr	x4, [sp, #16]
+	ldr	x4, [sp]
 	stp	x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
 	stp	x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
-	ldr	x4, [sp, #24]
+	ldr	x4, [sp, #8]
 	cbz	x4, 1f /* no quirk structure */
 	ldr	x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS]
 	cmp	x9, #ARM_SMCCC_QUIRK_QCOM_A6
 	b.ne	1f
 	str	x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
-1:	ldp	x29, x30, [sp], #16
-	ret
+1:	ret
 	.endm
 
 /*
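With __arm_smccc_sve_check gone, the SMCCC macro above no longer needs a stack frame. The result and quirk pointers are the ninth and tenth arguments of __arm_smccc_smc(), which the AAPCS places on the caller's stack, so without the frame push they now sit at [sp] and [sp, #8] instead of [sp, #16] and [sp, #24]. Callers are unchanged. As a hedged, illustrative in-kernel usage sketch (generic arm-smccc API usage, not code from this merge; smccc_version_example() is a made-up name, and real callers would normally prefer the conduit-aware arm_smccc_1_1_invoke()):

/*
 * Illustrative only: a plain SMCCC call via the SMC conduit. The stores
 * of x0/x1 and x2/x3 in the macro above are what fill res.a0-res.a3.
 */
#include <linux/arm-smccc.h>
#include <linux/printk.h>

static void smccc_version_example(void)
{
        struct arm_smccc_res res;

        /* Expands to __arm_smccc_smc(..., &res, NULL); the asm fills res. */
        arm_smccc_smc(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0, 0, 0, 0, 0, &res);
        pr_info("SMCCC version: 0x%lx\n", res.a0);
}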