From 6251d38059ae22304ede4f3748af9f795bdbf4fd Mon Sep 17 00:00:00 2001 From: Besar Wicaksono Date: Wed, 28 Sep 2022 19:28:34 -0500 Subject: ACPI: ARM Performance Monitoring Unit Table (APMT) initial support The ARM Performance Monitoring Unit Table describes the properties of PMU support in an ARM-based system. The APMT table contains a list of nodes, each of which represents a PMU in the system that conforms to the ARM CoreSight PMU architecture. The properties of each node include information required to access the PMU (e.g. MMIO base address, interrupt number) and also identification. For more detailed information, please refer to the specifications below: * APMT: https://developer.arm.com/documentation/den0117/latest * ARM CoreSight PMU: https://developer.arm.com/documentation/ihi0091/latest The initial support adds the detection of the APMT table and generic infrastructure to create platform devices for ARM CoreSight PMUs. Similar to IORT, the root pointer of the APMT is preserved during runtime and each PMU platform device is given a pointer to the corresponding APMT node. Signed-off-by: Besar Wicaksono Acked-by: Rafael J. Wysocki Reviewed-by: Sudeep Holla Link: https://lore.kernel.org/r/20220929002834.32664-1-bwicaksono@nvidia.com Signed-off-by: Will Deacon --- include/linux/acpi_apmt.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 include/linux/acpi_apmt.h (limited to 'include/linux') diff --git a/include/linux/acpi_apmt.h b/include/linux/acpi_apmt.h new file mode 100644 index 000000000000..40bd634d082f --- /dev/null +++ b/include/linux/acpi_apmt.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * ARM CoreSight PMU driver. + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. + * + */ + +#ifndef __ACPI_APMT_H__ +#define __ACPI_APMT_H__ + +#include + +#ifdef CONFIG_ACPI_APMT +void acpi_apmt_init(void); +#else +static inline void acpi_apmt_init(void) { } +#endif /* CONFIG_ACPI_APMT */ + +#endif /* __ACPI_APMT_H__ */ -- cgit From fe40ffdb7656d1f9c42dd402740765ff8b418b17 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 30 Sep 2022 12:18:44 +0100 Subject: arm_pmu: rework ACPI probing The current ACPI PMU probing logic tries to associate PMUs with CPUs when the CPU is first brought online, in order to handle late hotplug, though PMUs are only registered during early boot, and so for late-hotplugged CPUs this can only associate the CPU with an existing PMU. We tried to be clever and have the arm_pmu_acpi_cpu_starting() callback allocate a struct arm_pmu when no matching instance is found, in order to avoid duplication of logic. However, as above, this doesn't do anything useful for late-hotplugged CPUs, and this requires us to allocate memory in an atomic context, which is especially problematic for PREEMPT_RT, as reported by Valentin and Pierre. This patch reworks the probing to detect PMUs for all online CPUs in the arm_pmu_acpi_probe() function, which is more aligned with how DT probing works. The arm_pmu_acpi_cpu_starting() callback only tries to associate CPUs with an existing arm_pmu instance, avoiding the problem of allocating in atomic context. Note that as we didn't previously register PMUs for late-hotplugged CPUs, this change doesn't result in a loss of existing functionality, though we will now warn when we cannot associate a CPU with a PMU.
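The shape of the reworked flow can be sketched outside the kernel. The stand-alone model below is purely illustrative (the arrays, the find_pmu_by_midr() helper and the MIDR-like values are invented for the example; it is not the driver code): the point it demonstrates is that allocation only ever happens on the probe path, while the hotplug path is reduced to a lookup.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct arm_pmu { unsigned long acpi_cpuid; };

static struct arm_pmu *probed_pmus[NR_CPUS];
static unsigned long cpu_midr[NR_CPUS] = { 0x410fd4f0, 0x410fd4f0, 0x410fd050, 0x410fd050 };

/* Hotplug-path helper: only ever looks up an existing PMU, never allocates. */
static struct arm_pmu *find_pmu_by_midr(unsigned long midr)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (probed_pmus[cpu] && probed_pmus[cpu]->acpi_cpuid == midr)
			return probed_pmus[cpu];
	return NULL;
}

/* Probe path: runs once over all online CPUs, in a context that may sleep. */
static void probe_all_online_cpus(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct arm_pmu *pmu;

		if (probed_pmus[cpu])
			continue;

		pmu = find_pmu_by_midr(cpu_midr[cpu]);
		if (!pmu) {
			pmu = calloc(1, sizeof(*pmu));
			if (!pmu)
				continue;
			pmu->acpi_cpuid = cpu_midr[cpu];
		}
		probed_pmus[cpu] = pmu;
	}
}

/* Hotplug path: association or a warning, no allocation. */
static void cpu_starting(int cpu)
{
	struct arm_pmu *pmu = find_pmu_by_midr(cpu_midr[cpu]);

	if (!pmu) {
		printf("Unable to associate CPU%d with a PMU\n", cpu);
		return;
	}
	probed_pmus[cpu] = pmu;
}

int main(void)
{
	probe_all_online_cpus();
	cpu_starting(2);	/* a late-hotplugged CPU only performs a lookup */
	return 0;
}

In the driver itself the lookup key is the CPU's MIDR and the warning is rate-limited, as the hunks below show.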
This change allows us to pull the hotplug callback registration into the arm_pmu_acpi_probe() function, as we no longer need the callbacks to be invoked shortly after probing the boot CPUs, and can register it without invoking the calls. For the moment the arm_pmu_acpi_init() initcall remains to register the SPE PMU, though in future this should probably be moved elsewhere (e.g. the arm64 ACPI init code), since this doesn't need to be tied to the regular CPU PMU code. Signed-off-by: Mark Rutland Reported-by: Valentin Schneider Link: https://lore.kernel.org/r/20210810134127.1394269-2-valentin.schneider@arm.com/ Reported-by: Pierre Gondois Link: https://lore.kernel.org/linux-arm-kernel/20220912155105.1443303-1-pierre.gondois@arm.com/ Cc: Pierre Gondois Cc: Valentin Schneider Cc: Will Deacon Reviewed-and-tested-by: Pierre Gondois Link: https://lore.kernel.org/r/20220930111844.1522365-4-mark.rutland@arm.com Signed-off-by: Will Deacon --- drivers/perf/arm_pmu.c | 17 ++------ drivers/perf/arm_pmu_acpi.c | 95 +++++++++++++++++++++++--------------------- include/linux/perf/arm_pmu.h | 1 - 3 files changed, 52 insertions(+), 61 deletions(-) (limited to 'include/linux') diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 3f07df5a7e95..82a6d22e8ee2 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -861,16 +861,16 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) &cpu_pmu->node); } -static struct arm_pmu *__armpmu_alloc(gfp_t flags) +struct arm_pmu *armpmu_alloc(void) { struct arm_pmu *pmu; int cpu; - pmu = kzalloc(sizeof(*pmu), flags); + pmu = kzalloc(sizeof(*pmu), GFP_KERNEL); if (!pmu) goto out; - pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags); + pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, GFP_KERNEL); if (!pmu->hw_events) { pr_info("failed to allocate per-cpu PMU data.\n"); goto out_free_pmu; @@ -916,17 +916,6 @@ out: return NULL; } -struct arm_pmu *armpmu_alloc(void) -{ - return __armpmu_alloc(GFP_KERNEL); -} - -struct arm_pmu *armpmu_alloc_atomic(void) -{ - return __armpmu_alloc(GFP_ATOMIC); -} - - void armpmu_free(struct arm_pmu *pmu) { free_percpu(pmu->hw_events); diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index 99abea3b2cc9..a085e45b509e 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c @@ -13,6 +13,7 @@ #include #include +#include #include static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus); @@ -204,26 +205,6 @@ static struct arm_pmu *arm_pmu_acpi_find_pmu(void) return NULL; } -static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void) -{ - struct arm_pmu *pmu; - - pmu = arm_pmu_acpi_find_pmu(); - if (pmu) - return pmu; - - pmu = armpmu_alloc_atomic(); - if (!pmu) { - pr_warn("Unable to allocate PMU for CPU%d\n", - smp_processor_id()); - return NULL; - } - - pmu->acpi_cpuid = read_cpuid_id(); - - return pmu; -} - /* * Check whether the new IRQ is compatible with those already associated with * the PMU (e.g. we don't have mismatched PPIs). @@ -286,26 +267,45 @@ static int arm_pmu_acpi_cpu_starting(unsigned int cpu) if (per_cpu(probed_pmus, cpu)) return 0; - pmu = arm_pmu_acpi_find_alloc_pmu(); - if (!pmu) - return -ENOMEM; + pmu = arm_pmu_acpi_find_pmu(); + if (!pmu) { + pr_warn_ratelimited("Unable to associate CPU%d with a PMU\n", + cpu); + return 0; + } arm_pmu_acpi_associate_pmu_cpu(pmu, cpu); - - /* - * Ideally, we'd probe the PMU here when we find the first matching - * CPU. We can't do that for several reasons; see the comment in - * arm_pmu_acpi_init(). 
- * - * So for the time being, we're done. - */ return 0; } +static void arm_pmu_acpi_probe_matching_cpus(struct arm_pmu *pmu, + unsigned long cpuid) +{ + int cpu; + + for_each_online_cpu(cpu) { + unsigned long cpu_cpuid = per_cpu(cpu_data, cpu).reg_midr; + + if (cpu_cpuid == cpuid) + arm_pmu_acpi_associate_pmu_cpu(pmu, cpu); + } +} + int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { int pmu_idx = 0; - int cpu, ret; + unsigned int cpu; + int ret; + + ret = arm_pmu_acpi_parse_irqs(); + if (ret) + return ret; + + ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_ACPI_STARTING, + "perf/arm/pmu_acpi:starting", + arm_pmu_acpi_cpu_starting, NULL); + if (ret) + return ret; /* * Initialise and register the set of PMUs which we know about right @@ -320,13 +320,26 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn) * For the moment, as with the platform/DT case, we need at least one * of a PMU's CPUs to be online at probe time. */ - for_each_possible_cpu(cpu) { + for_each_online_cpu(cpu) { struct arm_pmu *pmu = per_cpu(probed_pmus, cpu); + unsigned long cpuid; char *base_name; - if (!pmu || pmu->name) + /* If we've already probed this CPU, we have nothing to do */ + if (pmu) continue; + pmu = armpmu_alloc(); + if (!pmu) { + pr_warn("Unable to allocate PMU for CPU%d\n", + cpu); + } + + cpuid = per_cpu(cpu_data, cpu).reg_midr; + pmu->acpi_cpuid = cpuid; + + arm_pmu_acpi_probe_matching_cpus(pmu, cpuid); + ret = init_fn(pmu); if (ret == -ENODEV) { /* PMU not handled by this driver, or not present */ @@ -351,26 +364,16 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn) } } - return 0; + return ret; } static int arm_pmu_acpi_init(void) { - int ret; - if (acpi_disabled) return 0; arm_spe_acpi_register_device(); - ret = arm_pmu_acpi_parse_irqs(); - if (ret) - return ret; - - ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING, - "perf/arm/pmu_acpi:starting", - arm_pmu_acpi_cpu_starting, NULL); - - return ret; + return 0; } subsys_initcall(arm_pmu_acpi_init) diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 0356cb6a215d..0c15c5b7f801 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -174,7 +174,6 @@ void kvm_host_pmu_init(struct arm_pmu *pmu); /* Internal functions only for core arm_pmu code */ struct arm_pmu *armpmu_alloc(void); -struct arm_pmu *armpmu_alloc_atomic(void); void armpmu_free(struct arm_pmu *pmu); int armpmu_register(struct arm_pmu *pmu); int armpmu_request_irq(int irq, int cpu); -- cgit From 9beccca0984022a844850e32f0d7dd80d4a225de Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 27 Oct 2022 17:59:07 +0200 Subject: scs: add support for dynamic shadow call stacks In order to allow arches to use code patching to conditionally emit the shadow stack pushes and pops, rather than always taking the performance hit even on CPUs that implement alternatives such as stack pointer authentication on arm64, add a Kconfig symbol that can be set by the arch to omit the SCS codegen itself, without otherwise affecting how support code for SCS and compiler options (for register reservation, for instance) are emitted. Also, add a static key and some plumbing to omit the allocation of shadow call stack for dynamic SCS configurations if SCS is disabled at runtime. 
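The runtime gating being added can be modelled in a few lines of ordinary C. The sketch below is only an approximation (a plain boolean stands in for the dynamic_scs_enabled static key, so the code-patching aspect is lost), but it shows the intended behaviour: when SCS is disabled at runtime, scs_prepare() skips the shadow-stack allocation entirely.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define SCS_SIZE 4096

/* Stand-in for the dynamic_scs_enabled static key. */
static bool dynamic_scs_enabled;

/* With dynamic SCS, the shadow call stack is only used when enabled at runtime. */
static bool scs_is_enabled(void)
{
	return dynamic_scs_enabled;
}

static void *scs_alloc(void)
{
	return calloc(1, SCS_SIZE);
}

static int scs_prepare(void **task_scs)
{
	if (!scs_is_enabled())
		return 0;	/* no allocation at all when SCS is off */

	*task_scs = scs_alloc();
	return *task_scs ? 0 : -1;
}

int main(void)
{
	void *scs = NULL;

	/* e.g. a boot-time decision: only enable SCS where no better mitigation exists */
	dynamic_scs_enabled = false;

	scs_prepare(&scs);
	printf("shadow call stack %sallocated\n", scs ? "" : "not ");
	free(scs);
	return 0;
}

In the kernel the decision is taken once during early boot and baked in via the static key, which keeps the scs_is_enabled() checks cheap on every subsequent task creation.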
Signed-off-by: Ard Biesheuvel Reviewed-by: Nick Desaulniers Reviewed-by: Kees Cook Reviewed-by: Sami Tolvanen Tested-by: Sami Tolvanen Link: https://lore.kernel.org/r/20221027155908.1940624-3-ardb@kernel.org Signed-off-by: Will Deacon --- Makefile | 2 ++ arch/Kconfig | 7 +++++++ include/linux/scs.h | 18 ++++++++++++++++++ kernel/scs.c | 14 ++++++++++++-- 4 files changed, 39 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/Makefile b/Makefile index ac2ec990422d..69b40b629f3c 100644 --- a/Makefile +++ b/Makefile @@ -966,8 +966,10 @@ LDFLAGS_vmlinux += --gc-sections endif ifdef CONFIG_SHADOW_CALL_STACK +ifndef CONFIG_DYNAMIC_SCS CC_FLAGS_SCS := -fsanitize=shadow-call-stack KBUILD_CFLAGS += $(CC_FLAGS_SCS) +endif export CC_FLAGS_SCS endif diff --git a/arch/Kconfig b/arch/Kconfig index 8f138e580d1a..072a1b39e3af 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -651,6 +651,13 @@ config SHADOW_CALL_STACK reading and writing arbitrary memory may be able to locate them and hijack control flow by modifying the stacks. +config DYNAMIC_SCS + bool + help + Set by the arch code if it relies on code patching to insert the + shadow call stack push and pop instructions rather than on the + compiler. + config LTO bool help diff --git a/include/linux/scs.h b/include/linux/scs.h index 18122d9e17ff..4ab5bdc898cf 100644 --- a/include/linux/scs.h +++ b/include/linux/scs.h @@ -53,6 +53,22 @@ static inline bool task_scs_end_corrupted(struct task_struct *tsk) return sz >= SCS_SIZE - 1 || READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC; } +DECLARE_STATIC_KEY_FALSE(dynamic_scs_enabled); + +static inline bool scs_is_dynamic(void) +{ + if (!IS_ENABLED(CONFIG_DYNAMIC_SCS)) + return false; + return static_branch_likely(&dynamic_scs_enabled); +} + +static inline bool scs_is_enabled(void) +{ + if (!IS_ENABLED(CONFIG_DYNAMIC_SCS)) + return true; + return scs_is_dynamic(); +} + #else /* CONFIG_SHADOW_CALL_STACK */ static inline void *scs_alloc(int node) { return NULL; } @@ -62,6 +78,8 @@ static inline void scs_task_reset(struct task_struct *tsk) {} static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; } static inline void scs_release(struct task_struct *tsk) {} static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; } +static inline bool scs_is_enabled(void) { return false; } +static inline bool scs_is_dynamic(void) { return false; } #endif /* CONFIG_SHADOW_CALL_STACK */ diff --git a/kernel/scs.c b/kernel/scs.c index b7e1b096d906..d7809affe740 100644 --- a/kernel/scs.c +++ b/kernel/scs.c @@ -12,6 +12,10 @@ #include #include +#ifdef CONFIG_DYNAMIC_SCS +DEFINE_STATIC_KEY_FALSE(dynamic_scs_enabled); +#endif + static void __scs_account(void *s, int account) { struct page *scs_page = vmalloc_to_page(s); @@ -101,14 +105,20 @@ static int scs_cleanup(unsigned int cpu) void __init scs_init(void) { + if (!scs_is_enabled()) + return; cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "scs:scs_cache", NULL, scs_cleanup); } int scs_prepare(struct task_struct *tsk, int node) { - void *s = scs_alloc(node); + void *s; + if (!scs_is_enabled()) + return 0; + + s = scs_alloc(node); if (!s) return -ENOMEM; @@ -148,7 +158,7 @@ void scs_release(struct task_struct *tsk) { void *s = task_scs(tsk); - if (!s) + if (!scs_is_enabled() || !s) return; WARN(task_scs_end_corrupted(tsk), -- cgit From 9705bc70960459ae09f4a5e77283973bb3a40f57 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 3 Nov 2022 17:05:17 +0000 Subject: ftrace: pass fregs to arch_ftrace_set_direct_caller() In subsequent 
patches we'll arrange for architectures to have an ftrace_regs which is entirely distinct from pt_regs. In preparation for this, we need to minimize the use of pt_regs to where strictly necessary in the core ftrace code. This patch changes the prototype of arch_ftrace_set_direct_caller() to take ftrace_regs rather than pt_regs, and moves the extraction of the pt_regs into arch_ftrace_set_direct_caller(). On x86, arch_ftrace_set_direct_caller() can be used even when CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=n, and defines struct ftrace_regs. Due to this, it's necessary to define arch_ftrace_set_direct_caller() as a macro to avoid using an incomplete type. I've also moved the body of arch_ftrace_set_direct_caller() after the CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y definition of struct ftrace_regs. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Florent Revest Cc: Masami Hiramatsu Cc: Steven Rostedt Reviewed-by: Masami Hiramatsu (Google) Reviewed-by: Steven Rostedt (Google) Link: https://lore.kernel.org/r/20221103170520.931305-2-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/s390/include/asm/ftrace.h | 5 ++++- arch/x86/include/asm/ftrace.h | 31 ++++++++++++++++++------------- include/linux/ftrace.h | 9 ++++----- kernel/trace/ftrace.c | 3 +-- 4 files changed, 27 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 6f80ec9c04be..85bc26ccdb87 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -60,6 +60,7 @@ static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *f fregs->regs.psw.addr = ip; } +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS /* * When an ftrace registered caller is tracing a function that is * also set by a register_ftrace_direct() call, it needs to be @@ -67,10 +68,12 @@ static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *f * place the direct caller in the ORIG_GPR2 part of pt_regs. This * tells the ftrace_caller that there's a direct caller. */ -static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr) +static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr) { + struct pt_regs *regs = &fregs->regs; regs->orig_gpr2 = addr; } +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ /* * Even though the system call numbers are identical for s390/s390x a diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 908d99b127d3..d2350a8351fa 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -34,19 +34,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) return addr; } -/* - * When a ftrace registered caller is tracing a function that is - * also set by a register_ftrace_direct() call, it needs to be - * differentiated in the ftrace_caller trampoline. To do this, we - * place the direct caller in the ORIG_AX part of pt_regs. This - * tells the ftrace_caller that there's a direct caller.
- */ -static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr) -{ - /* Emulate a call */ - regs->orig_ax = addr; -} - #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS struct ftrace_regs { struct pt_regs regs; @@ -72,6 +59,24 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, #define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR #endif +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS +/* + * When a ftrace registered caller is tracing a function that is + * also set by a register_ftrace_direct() call, it needs to be + * differentiated in the ftrace_caller trampoline. To do this, we + * place the direct caller in the ORIG_AX part of pt_regs. This + * tells the ftrace_caller that there's a direct caller. + */ +static inline void +__arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr) +{ + /* Emulate a call */ + regs->orig_ax = addr; +} +#define arch_ftrace_set_direct_caller(fregs, addr) \ + __arch_ftrace_set_direct_caller(&(fregs)->regs, addr) +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ + #ifdef CONFIG_DYNAMIC_FTRACE struct dyn_arch_ftrace { diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 62557d4bffc2..f201fcbfffb0 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -37,9 +37,10 @@ extern void ftrace_boot_snapshot(void); static inline void ftrace_boot_snapshot(void) { } #endif -#ifdef CONFIG_FUNCTION_TRACER struct ftrace_ops; struct ftrace_regs; + +#ifdef CONFIG_FUNCTION_TRACER /* * If the arch's mcount caller does not support all of ftrace's * features, then it must call an indirect function that @@ -427,9 +428,7 @@ static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsi { return -ENODEV; } -#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ -#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS /* * This must be implemented by the architecture. * It is the way the ftrace direct_ops helper, when called @@ -443,9 +442,9 @@ static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsi * the return from the trampoline jump to the direct caller * instead of going back to the function it just traced. */ -static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, +static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr) { } -#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ #ifdef CONFIG_STACK_TRACER diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 7dc023641bf1..9eb42120ac44 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2487,14 +2487,13 @@ ftrace_add_rec_direct(unsigned long ip, unsigned long addr, static void call_direct_funcs(unsigned long ip, unsigned long pip, struct ftrace_ops *ops, struct ftrace_regs *fregs) { - struct pt_regs *regs = ftrace_get_regs(fregs); unsigned long addr; addr = ftrace_find_rec_direct(ip); if (!addr) return; - arch_ftrace_set_direct_caller(regs, addr); + arch_ftrace_set_direct_caller(fregs, addr); } struct ftrace_ops direct_ops = { -- cgit From 0ef86097f127d0d73e19aa4dcf86106105e7db09 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 3 Nov 2022 17:05:18 +0000 Subject: ftrace: rename ftrace_instruction_pointer_set() -> ftrace_regs_set_instruction_pointer() In subsequent patches we'll add a sew of ftrace_regs_{get,set}_*() helpers. In preparation, this patch renames ftrace_instruction_pointer_set() to ftrace_regs_set_instruction_pointer(). 
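As a purely illustrative stand-alone model of the accessor style this prepares for (struct fake_regs and its fields are invented here and are not the kernel's pt_regs), the renamed setter and a matching getter look like this:

#include <stdio.h>

/* Invented stand-in for the architecture's register structure. */
struct fake_regs { unsigned long ip; unsigned long sp; };

/* Wrapper type, analogous to ftrace_regs embedding pt_regs. */
struct ftrace_regs { struct fake_regs regs; };

/* Accessors are named after ftrace_regs, not after how the state is stored. */
#define ftrace_regs_get_instruction_pointer(fregs)	((fregs)->regs.ip)
#define ftrace_regs_set_instruction_pointer(fregs, _ip)	\
	do { (fregs)->regs.ip = (_ip); } while (0)

int main(void)
{
	struct ftrace_regs fregs = { .regs = { .ip = 0x1000, .sp = 0x2000 } };

	/* e.g. live patching: redirect execution to a replacement function */
	ftrace_regs_set_instruction_pointer(&fregs, 0x3000);
	printf("ip = %#lx\n", ftrace_regs_get_instruction_pointer(&fregs));
	return 0;
}

Naming the helpers after ftrace_regs rather than the underlying pt_regs keeps callers independent of how the registers are actually stored, which is what later allows ftrace_regs to diverge from pt_regs.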
There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Florent Revest Cc: Masami Hiramatsu Cc: Steven Rostedt Reviewed-by: Masami Hiramatsu (Google) Reviewed-by: Steven Rostedt (Google) Link: https://lore.kernel.org/r/20221103170520.931305-3-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/powerpc/include/asm/ftrace.h | 5 +++-- arch/s390/include/asm/ftrace.h | 5 +++-- arch/x86/include/asm/ftrace.h | 2 +- include/linux/ftrace.h | 9 ++++----- kernel/livepatch/patch.c | 2 +- 5 files changed, 12 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index 3cee7115441b..c3eb48f67566 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -37,8 +37,9 @@ static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs * return fregs->regs.msr ? &fregs->regs : NULL; } -static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs, - unsigned long ip) +static __always_inline void +ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, + unsigned long ip) { regs_set_return_ip(&fregs->regs, ip); } diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 85bc26ccdb87..113d194acea0 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -54,8 +54,9 @@ static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs * return NULL; } -static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs, - unsigned long ip) +static __always_inline void +ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, + unsigned long ip) { fregs->regs.psw.addr = ip; } diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index d2350a8351fa..b2925d342c65 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -48,7 +48,7 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs) return &fregs->regs; } -#define ftrace_instruction_pointer_set(fregs, _ip) \ +#define ftrace_regs_set_instruction_pointer(fregs, _ip) \ do { (fregs)->regs.ip = (_ip); } while (0) struct ftrace_ops; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index f201fcbfffb0..ec3657a60f85 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -111,12 +111,11 @@ struct ftrace_regs { #define arch_ftrace_get_regs(fregs) (&(fregs)->regs) /* - * ftrace_instruction_pointer_set() is to be defined by the architecture - * if to allow setting of the instruction pointer from the ftrace_regs - * when HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports - * live kernel patching. + * ftrace_regs_set_instruction_pointer() is to be defined by the architecture + * if to allow setting of the instruction pointer from the ftrace_regs when + * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching. 
*/ -#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0) +#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0) #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs) diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c index 4c4f5a776d80..4152c71507e2 100644 --- a/kernel/livepatch/patch.c +++ b/kernel/livepatch/patch.c @@ -118,7 +118,7 @@ static void notrace klp_ftrace_handler(unsigned long ip, if (func->nop) goto unlock; - ftrace_instruction_pointer_set(fregs, (unsigned long)func->new_func); + ftrace_regs_set_instruction_pointer(fregs, (unsigned long)func->new_func); unlock: ftrace_test_recursion_unlock(bit); -- cgit From 94d095ffa0e16bb7f161a2b73bbe5c2795d499a8 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 3 Nov 2022 17:05:19 +0000 Subject: ftrace: abstract DYNAMIC_FTRACE_WITH_ARGS accesses In subsequent patches we'll arrange for architectures to have an ftrace_regs which is entirely distinct from pt_regs. In preparation for this, we need to minimize the use of pt_regs to where strictly necessary in the core ftrace code. This patch adds new ftrace_regs_{get,set}_*() helpers which can be used to manipulate ftrace_regs. When CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y, these can always be used on any ftrace_regs, and when CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=n these can be used when regs are available. A new ftrace_regs_has_args(fregs) helper is added which code can use to check when these are usable. Co-developed-by: Florent Revest Signed-off-by: Florent Revest Signed-off-by: Mark Rutland Cc: Masami Hiramatsu Cc: Steven Rostedt Reviewed-by: Masami Hiramatsu (Google) Reviewed-by: Steven Rostedt (Google) Link: https://lore.kernel.org/r/20221103170520.931305-4-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/powerpc/include/asm/ftrace.h | 19 +++++++++++++++++++ arch/s390/include/asm/ftrace.h | 19 +++++++++++++++++++ arch/x86/include/asm/ftrace.h | 16 ++++++++++++++++ include/linux/ftrace.h | 29 +++++++++++++++++++++++++++++ kernel/trace/Kconfig | 6 +++--- 5 files changed, 86 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index c3eb48f67566..259b9dd5fe1c 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -44,6 +44,25 @@ ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, regs_set_return_ip(&fregs->regs, ip); } +static __always_inline unsigned long +ftrace_regs_get_instruction_pointer(struct ftrace_regs *fregs) +{ + return instruction_pointer(&fregs->regs); +} + +#define ftrace_regs_get_argument(fregs, n) \ + regs_get_kernel_argument(&(fregs)->regs, n) +#define ftrace_regs_get_stack_pointer(fregs) \ + kernel_stack_pointer(&(fregs)->regs) +#define ftrace_regs_return_value(fregs) \ + regs_return_value(&(fregs)->regs) +#define ftrace_regs_set_return_value(fregs, ret) \ + regs_set_return_value(&(fregs)->regs, ret) +#define ftrace_override_function_with_return(fregs) \ + override_function_with_return(&(fregs)->regs) +#define ftrace_regs_query_register_offset(name) \ + regs_query_register_offset(name) + struct ftrace_ops; #define ftrace_graph_func ftrace_graph_func diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 113d194acea0..e5c5cb1207e2 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -54,6 +54,12 @@ static __always_inline struct pt_regs 
*arch_ftrace_get_regs(struct ftrace_regs * return NULL; } +static __always_inline unsigned long +ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs) +{ + return fregs->regs.psw.addr; +} + static __always_inline void ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, unsigned long ip) @@ -61,6 +67,19 @@ ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, fregs->regs.psw.addr = ip; } +#define ftrace_regs_get_argument(fregs, n) \ + regs_get_kernel_argument(&(fregs)->regs, n) +#define ftrace_regs_get_stack_pointer(fregs) \ + kernel_stack_pointer(&(fregs)->regs) +#define ftrace_regs_return_value(fregs) \ + regs_return_value(&(fregs)->regs) +#define ftrace_regs_set_return_value(fregs, ret) \ + regs_set_return_value(&(fregs)->regs, ret) +#define ftrace_override_function_with_return(fregs) \ + override_function_with_return(&(fregs)->regs) +#define ftrace_regs_query_register_offset(name) \ + regs_query_register_offset(name) + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS /* * When an ftrace registered caller is tracing a function that is diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index b2925d342c65..5061ac98ffa1 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -51,6 +51,22 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs) #define ftrace_regs_set_instruction_pointer(fregs, _ip) \ do { (fregs)->regs.ip = (_ip); } while (0) +#define ftrace_regs_get_instruction_pointer(fregs) \ + ((fregs)->regs.ip) + +#define ftrace_regs_get_argument(fregs, n) \ + regs_get_kernel_argument(&(fregs)->regs, n) +#define ftrace_regs_get_stack_pointer(fregs) \ + kernel_stack_pointer(&(fregs)->regs) +#define ftrace_regs_return_value(fregs) \ + regs_return_value(&(fregs)->regs) +#define ftrace_regs_set_return_value(fregs, ret) \ + regs_set_return_value(&(fregs)->regs, ret) +#define ftrace_override_function_with_return(fregs) \ + override_function_with_return(&(fregs)->regs) +#define ftrace_regs_query_register_offset(name) \ + regs_query_register_offset(name) + struct ftrace_ops; #define ftrace_graph_func ftrace_graph_func void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index ec3657a60f85..99f1146614c0 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -126,6 +126,35 @@ static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs return arch_ftrace_get_regs(fregs); } +/* + * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs. + * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs. 
+ */ +static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs) +{ + if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)) + return true; + + return ftrace_get_regs(fregs) != NULL; +} + +#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS +#define ftrace_regs_get_instruction_pointer(fregs) \ + instruction_pointer(ftrace_get_regs(fregs)) +#define ftrace_regs_get_argument(fregs, n) \ + regs_get_kernel_argument(ftrace_get_regs(fregs), n) +#define ftrace_regs_get_stack_pointer(fregs) \ + kernel_stack_pointer(ftrace_get_regs(fregs)) +#define ftrace_regs_return_value(fregs) \ + regs_return_value(ftrace_get_regs(fregs)) +#define ftrace_regs_set_return_value(fregs, ret) \ + regs_set_return_value(ftrace_get_regs(fregs), ret) +#define ftrace_override_function_with_return(fregs) \ + override_function_with_return(ftrace_get_regs(fregs)) +#define ftrace_regs_query_register_offset(name) \ + regs_query_register_offset(name) +#endif + typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs); diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index e9e95c790b8e..2c6611c13f99 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -46,10 +46,10 @@ config HAVE_DYNAMIC_FTRACE_WITH_ARGS bool help If this is set, then arguments and stack can be found from - the pt_regs passed into the function callback regs parameter + the ftrace_regs passed into the function callback regs parameter by default, even without setting the REGS flag in the ftrace_ops. - This allows for use of regs_get_kernel_argument() and - kernel_stack_pointer(). + This allows for use of ftrace_regs_get_argument() and + ftrace_regs_get_stack_pointer(). config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE bool -- cgit From 229d58e31678dece365060c50a39a99a2b1dc729 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 16 Nov 2022 17:03:24 +0000 Subject: firmware: arm_ffa: Move constants to header file FF-A function IDs and error codes will be needed in the hypervisor too, so move them to the header file where they can be shared. Rename the version constants with an "FFA_" prefix so that they are less likely to clash with other code in the tree.
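For reference, the version constants being moved encode the major number in bits [30:16] and the minor number in bits [15:0]. The stand-alone snippet below mirrors that layout with plain shifts and masks instead of the kernel's FIELD_PREP()/FIELD_GET() helpers; it is an illustration, not the header itself:

#include <stdio.h>
#include <stdint.h>

/* Same layout as FFA_{MAJOR,MINOR}_VERSION_MASK: major in [30:16], minor in [15:0]. */
#define FFA_PACK_VERSION_INFO(major, minor) \
	((((uint32_t)(major) & 0x7fffu) << 16) | ((uint32_t)(minor) & 0xffffu))
#define FFA_MAJOR_VERSION(x)	(((x) >> 16) & 0x7fffu)
#define FFA_MINOR_VERSION(x)	((x) & 0xffffu)
#define FFA_VERSION_1_0		FFA_PACK_VERSION_INFO(1, 0)

int main(void)
{
	uint32_t ver = FFA_PACK_VERSION_INFO(1, 1);

	printf("firmware v%u.%u (raw 0x%08x), driver baseline v%u.%u\n",
	       (unsigned)FFA_MAJOR_VERSION(ver), (unsigned)FFA_MINOR_VERSION(ver),
	       (unsigned)ver,
	       (unsigned)FFA_MAJOR_VERSION(FFA_VERSION_1_0),
	       (unsigned)FFA_MINOR_VERSION(FFA_VERSION_1_0));
	return 0;
}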
Co-developed-by: Andrew Walbran Signed-off-by: Andrew Walbran Signed-off-by: Quentin Perret Reviewed-by: Sudeep Holla Link: https://lore.kernel.org/r/20221116170335.2341003-2-qperret@google.com Signed-off-by: Will Deacon --- drivers/firmware/arm_ffa/driver.c | 101 ++++---------------------------------- include/linux/arm_ffa.h | 83 +++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+), 91 deletions(-) (limited to 'include/linux') diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index d5e86ef40b89..fa85c64d3ded 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -36,81 +36,6 @@ #include "common.h" #define FFA_DRIVER_VERSION FFA_VERSION_1_0 - -#define FFA_SMC(calling_convention, func_num) \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, (calling_convention), \ - ARM_SMCCC_OWNER_STANDARD, (func_num)) - -#define FFA_SMC_32(func_num) FFA_SMC(ARM_SMCCC_SMC_32, (func_num)) -#define FFA_SMC_64(func_num) FFA_SMC(ARM_SMCCC_SMC_64, (func_num)) - -#define FFA_ERROR FFA_SMC_32(0x60) -#define FFA_SUCCESS FFA_SMC_32(0x61) -#define FFA_INTERRUPT FFA_SMC_32(0x62) -#define FFA_VERSION FFA_SMC_32(0x63) -#define FFA_FEATURES FFA_SMC_32(0x64) -#define FFA_RX_RELEASE FFA_SMC_32(0x65) -#define FFA_RXTX_MAP FFA_SMC_32(0x66) -#define FFA_FN64_RXTX_MAP FFA_SMC_64(0x66) -#define FFA_RXTX_UNMAP FFA_SMC_32(0x67) -#define FFA_PARTITION_INFO_GET FFA_SMC_32(0x68) -#define FFA_ID_GET FFA_SMC_32(0x69) -#define FFA_MSG_POLL FFA_SMC_32(0x6A) -#define FFA_MSG_WAIT FFA_SMC_32(0x6B) -#define FFA_YIELD FFA_SMC_32(0x6C) -#define FFA_RUN FFA_SMC_32(0x6D) -#define FFA_MSG_SEND FFA_SMC_32(0x6E) -#define FFA_MSG_SEND_DIRECT_REQ FFA_SMC_32(0x6F) -#define FFA_FN64_MSG_SEND_DIRECT_REQ FFA_SMC_64(0x6F) -#define FFA_MSG_SEND_DIRECT_RESP FFA_SMC_32(0x70) -#define FFA_FN64_MSG_SEND_DIRECT_RESP FFA_SMC_64(0x70) -#define FFA_MEM_DONATE FFA_SMC_32(0x71) -#define FFA_FN64_MEM_DONATE FFA_SMC_64(0x71) -#define FFA_MEM_LEND FFA_SMC_32(0x72) -#define FFA_FN64_MEM_LEND FFA_SMC_64(0x72) -#define FFA_MEM_SHARE FFA_SMC_32(0x73) -#define FFA_FN64_MEM_SHARE FFA_SMC_64(0x73) -#define FFA_MEM_RETRIEVE_REQ FFA_SMC_32(0x74) -#define FFA_FN64_MEM_RETRIEVE_REQ FFA_SMC_64(0x74) -#define FFA_MEM_RETRIEVE_RESP FFA_SMC_32(0x75) -#define FFA_MEM_RELINQUISH FFA_SMC_32(0x76) -#define FFA_MEM_RECLAIM FFA_SMC_32(0x77) -#define FFA_MEM_OP_PAUSE FFA_SMC_32(0x78) -#define FFA_MEM_OP_RESUME FFA_SMC_32(0x79) -#define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A) -#define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B) -#define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C) - -/* - * For some calls it is necessary to use SMC64 to pass or return 64-bit values. - * For such calls FFA_FN_NATIVE(name) will choose the appropriate - * (native-width) function ID. - */ -#ifdef CONFIG_64BIT -#define FFA_FN_NATIVE(name) FFA_FN64_##name -#else -#define FFA_FN_NATIVE(name) FFA_##name -#endif - -/* FFA error codes. 
*/ -#define FFA_RET_SUCCESS (0) -#define FFA_RET_NOT_SUPPORTED (-1) -#define FFA_RET_INVALID_PARAMETERS (-2) -#define FFA_RET_NO_MEMORY (-3) -#define FFA_RET_BUSY (-4) -#define FFA_RET_INTERRUPTED (-5) -#define FFA_RET_DENIED (-6) -#define FFA_RET_RETRY (-7) -#define FFA_RET_ABORTED (-8) - -#define MAJOR_VERSION_MASK GENMASK(30, 16) -#define MINOR_VERSION_MASK GENMASK(15, 0) -#define MAJOR_VERSION(x) ((u16)(FIELD_GET(MAJOR_VERSION_MASK, (x)))) -#define MINOR_VERSION(x) ((u16)(FIELD_GET(MINOR_VERSION_MASK, (x)))) -#define PACK_VERSION_INFO(major, minor) \ - (FIELD_PREP(MAJOR_VERSION_MASK, (major)) | \ - FIELD_PREP(MINOR_VERSION_MASK, (minor))) -#define FFA_VERSION_1_0 PACK_VERSION_INFO(1, 0) #define FFA_MIN_VERSION FFA_VERSION_1_0 #define SENDER_ID_MASK GENMASK(31, 16) @@ -120,12 +45,6 @@ #define PACK_TARGET_INFO(s, r) \ (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r))) -/* - * FF-A specification mentions explicitly about '4K pages'. This should - * not be confused with the kernel PAGE_SIZE, which is the translation - * granule kernel is configured and may be one among 4K, 16K and 64K. - */ -#define FFA_PAGE_SIZE SZ_4K /* * Keeping RX TX buffer size as 4K for now * 64K may be preferred to keep it min a page in 64K PAGE_SIZE config @@ -178,9 +97,9 @@ static struct ffa_drv_info *drv_info; */ static u32 ffa_compatible_version_find(u32 version) { - u16 major = MAJOR_VERSION(version), minor = MINOR_VERSION(version); - u16 drv_major = MAJOR_VERSION(FFA_DRIVER_VERSION); - u16 drv_minor = MINOR_VERSION(FFA_DRIVER_VERSION); + u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version); + u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION); + u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION); if ((major < drv_major) || (major == drv_major && minor <= drv_minor)) return version; @@ -204,16 +123,16 @@ static int ffa_version_check(u32 *version) if (ver.a0 < FFA_MIN_VERSION) { pr_err("Incompatible v%d.%d! 
Earliest supported v%d.%d\n", - MAJOR_VERSION(ver.a0), MINOR_VERSION(ver.a0), - MAJOR_VERSION(FFA_MIN_VERSION), - MINOR_VERSION(FFA_MIN_VERSION)); + FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0), + FFA_MAJOR_VERSION(FFA_MIN_VERSION), + FFA_MINOR_VERSION(FFA_MIN_VERSION)); return -EINVAL; } - pr_info("Driver version %d.%d\n", MAJOR_VERSION(FFA_DRIVER_VERSION), - MINOR_VERSION(FFA_DRIVER_VERSION)); - pr_info("Firmware version %d.%d found\n", MAJOR_VERSION(ver.a0), - MINOR_VERSION(ver.a0)); + pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION), + FFA_MINOR_VERSION(FFA_DRIVER_VERSION)); + pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0), + FFA_MINOR_VERSION(ver.a0)); *version = ffa_compatible_version_find(ver.a0); return 0; diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index 5f02d2e6b9d9..daff44d777fa 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -11,6 +11,89 @@ #include #include +#define FFA_SMC(calling_convention, func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, (calling_convention), \ + ARM_SMCCC_OWNER_STANDARD, (func_num)) + +#define FFA_SMC_32(func_num) FFA_SMC(ARM_SMCCC_SMC_32, (func_num)) +#define FFA_SMC_64(func_num) FFA_SMC(ARM_SMCCC_SMC_64, (func_num)) + +#define FFA_ERROR FFA_SMC_32(0x60) +#define FFA_SUCCESS FFA_SMC_32(0x61) +#define FFA_INTERRUPT FFA_SMC_32(0x62) +#define FFA_VERSION FFA_SMC_32(0x63) +#define FFA_FEATURES FFA_SMC_32(0x64) +#define FFA_RX_RELEASE FFA_SMC_32(0x65) +#define FFA_RXTX_MAP FFA_SMC_32(0x66) +#define FFA_FN64_RXTX_MAP FFA_SMC_64(0x66) +#define FFA_RXTX_UNMAP FFA_SMC_32(0x67) +#define FFA_PARTITION_INFO_GET FFA_SMC_32(0x68) +#define FFA_ID_GET FFA_SMC_32(0x69) +#define FFA_MSG_POLL FFA_SMC_32(0x6A) +#define FFA_MSG_WAIT FFA_SMC_32(0x6B) +#define FFA_YIELD FFA_SMC_32(0x6C) +#define FFA_RUN FFA_SMC_32(0x6D) +#define FFA_MSG_SEND FFA_SMC_32(0x6E) +#define FFA_MSG_SEND_DIRECT_REQ FFA_SMC_32(0x6F) +#define FFA_FN64_MSG_SEND_DIRECT_REQ FFA_SMC_64(0x6F) +#define FFA_MSG_SEND_DIRECT_RESP FFA_SMC_32(0x70) +#define FFA_FN64_MSG_SEND_DIRECT_RESP FFA_SMC_64(0x70) +#define FFA_MEM_DONATE FFA_SMC_32(0x71) +#define FFA_FN64_MEM_DONATE FFA_SMC_64(0x71) +#define FFA_MEM_LEND FFA_SMC_32(0x72) +#define FFA_FN64_MEM_LEND FFA_SMC_64(0x72) +#define FFA_MEM_SHARE FFA_SMC_32(0x73) +#define FFA_FN64_MEM_SHARE FFA_SMC_64(0x73) +#define FFA_MEM_RETRIEVE_REQ FFA_SMC_32(0x74) +#define FFA_FN64_MEM_RETRIEVE_REQ FFA_SMC_64(0x74) +#define FFA_MEM_RETRIEVE_RESP FFA_SMC_32(0x75) +#define FFA_MEM_RELINQUISH FFA_SMC_32(0x76) +#define FFA_MEM_RECLAIM FFA_SMC_32(0x77) +#define FFA_MEM_OP_PAUSE FFA_SMC_32(0x78) +#define FFA_MEM_OP_RESUME FFA_SMC_32(0x79) +#define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A) +#define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B) +#define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C) + +/* + * For some calls it is necessary to use SMC64 to pass or return 64-bit values. + * For such calls FFA_FN_NATIVE(name) will choose the appropriate + * (native-width) function ID. + */ +#ifdef CONFIG_64BIT +#define FFA_FN_NATIVE(name) FFA_FN64_##name +#else +#define FFA_FN_NATIVE(name) FFA_##name +#endif + +/* FFA error codes. 
*/ +#define FFA_RET_SUCCESS (0) +#define FFA_RET_NOT_SUPPORTED (-1) +#define FFA_RET_INVALID_PARAMETERS (-2) +#define FFA_RET_NO_MEMORY (-3) +#define FFA_RET_BUSY (-4) +#define FFA_RET_INTERRUPTED (-5) +#define FFA_RET_DENIED (-6) +#define FFA_RET_RETRY (-7) +#define FFA_RET_ABORTED (-8) + +/* FFA version encoding */ +#define FFA_MAJOR_VERSION_MASK GENMASK(30, 16) +#define FFA_MINOR_VERSION_MASK GENMASK(15, 0) +#define FFA_MAJOR_VERSION(x) ((u16)(FIELD_GET(FFA_MAJOR_VERSION_MASK, (x)))) +#define FFA_MINOR_VERSION(x) ((u16)(FIELD_GET(FFA_MINOR_VERSION_MASK, (x)))) +#define FFA_PACK_VERSION_INFO(major, minor) \ + (FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) | \ + FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor))) +#define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0) + +/** + * FF-A specification mentions explicitly about '4K pages'. This should + * not be confused with the kernel PAGE_SIZE, which is the translation + * granule kernel is configured and may be one among 4K, 16K and 64K. + */ +#define FFA_PAGE_SIZE SZ_4K + /* FFA Bus/Device/Driver related */ struct ffa_device { int vm_id; -- cgit From c8e320b00a2a720862b9c028153c681b1a50aa61 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 16 Nov 2022 17:03:25 +0000 Subject: firmware: arm_ffa: Move comment before the field it is documenting This is consistent with the other comments in the struct. Co-developed-by: Andrew Walbran Signed-off-by: Andrew Walbran Signed-off-by: Quentin Perret Reviewed-by: Sudeep Holla Link: https://lore.kernel.org/r/20221116170335.2341003-3-qperret@google.com Signed-off-by: Will Deacon --- include/linux/arm_ffa.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index daff44d777fa..c87aeecaa9b2 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -244,11 +244,11 @@ struct ffa_mem_region_attributes { */ #define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0) u8 flag; - u32 composite_off; /* * Offset in bytes from the start of the outer `ffa_memory_region` to * an `struct ffa_mem_region_addr_range`. */ + u32 composite_off; u64 reserved; }; -- cgit