Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/debug.S            |  11
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c    | 110
-rw-r--r--  arch/arm/kernel/irq.c              |   1
-rw-r--r--  arch/arm/kernel/setup.c            |  18
-rw-r--r--  arch/arm/kernel/signal.c           |   2
-rw-r--r--  arch/arm/kernel/smp.c              | 140
-rw-r--r--  arch/arm/kernel/topology.c         |  26
-rw-r--r--  arch/arm/kernel/vmlinux-xip.lds.S  |   8
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S      |   8
-rw-r--r--  arch/arm/kernel/vmlinux.lds.h      | 127
10 files changed, 203 insertions(+), 248 deletions(-)
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index e112072b579d..d92f44bdf438 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -89,11 +89,18 @@ ENTRY(printascii)
 2:		teq	r1, #'\n'
 		bne	3f
 		mov	r1, #'\r'
-		waituart r2, r3
+#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
+		waituartcts r2, r3
+#endif
+		waituarttxrdy r2, r3
 		senduart r1, r3
 		busyuart r2, r3
 		mov	r1, #'\n'
-3:		waituart r2, r3
+3:
+#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
+		waituartcts r2, r3
+#endif
+		waituarttxrdy r2, r3
 		senduart r1, r3
 		busyuart r2, r3
 		b	1b
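The printascii change above splits the old waituart macro into two steps: an optional waituartcts, compiled in only when CONFIG_DEBUG_UART_FLOW_CONTROL is set, which spins until the remote end asserts CTS, followed by waituarttxrdy, which waits until the transmitter can accept a character. As a rough sketch (not taken from this patch), a platform's debug-macro.S could implement the pair like this for a PL011-style UART, whose flag register sits at offset 0x18 with CTS in bit 0 and TX-FIFO-full in bit 5:

		.macro	waituartcts,rd,rx
	1001:	ldr	\rd, [\rx, #0x18]	@ read flag register
		tst	\rd, #0x01		@ loop until CTS is asserted
		beq	1001b
		.endm

		.macro	waituarttxrdy,rd,rx
	1001:	ldr	\rd, [\rx, #0x18]	@ read flag register
		tst	\rd, #0x20		@ loop while TX FIFO is full
		bne	1001b
		.endm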
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 7fff88e61252..08660ae9dcbc 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -547,7 +547,7 @@ static int arch_build_bp_info(struct perf_event *bp,
 		if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
 			&& max_watchpoint_len >= 8)
 			break;
-		/* Else, fall through */
+		fallthrough;
 	default:
 		return -EINVAL;
 	}
@@ -612,12 +612,12 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
 		/* Allow halfword watchpoints and breakpoints. */
 		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
 			break;
-		/* Else, fall through */
+		fallthrough;
 	case 3:
 		/* Allow single byte watchpoint. */
 		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
 			break;
-		/* Else, fall through */
+		fallthrough;
 	default:
 		ret = -EINVAL;
 		goto out;
@@ -683,6 +683,40 @@ static void disable_single_step(struct perf_event *bp)
 	arch_install_hw_breakpoint(bp);
 }
 
+/*
+ * Arm32 hardware does not always report a watchpoint hit address that matches
+ * one of the watchpoints set. It can also report an address "near" the
+ * watchpoint if a single instruction accesses both watched and unwatched
+ * addresses. There is no straightforward way, short of disassembling the
+ * offending instruction, to map that address back to the watchpoint. This
+ * function computes the distance of the memory access from the watchpoint as a
+ * heuristic for the likelihood that a given access triggered the watchpoint.
+ *
+ * See this same function in the arm64 platform code, which has the same
+ * problem.
+ *
+ * The function returns the distance of the address from the bytes watched by
+ * the watchpoint. In case of an exact match, it returns 0.
+ */
+static u32 get_distance_from_watchpoint(unsigned long addr, u32 val,
+					struct arch_hw_breakpoint_ctrl *ctrl)
+{
+	u32 wp_low, wp_high;
+	u32 lens, lene;
+
+	lens = __ffs(ctrl->len);
+	lene = __fls(ctrl->len);
+
+	wp_low = val + lens;
+	wp_high = val + lene;
+	if (addr < wp_low)
+		return wp_low - addr;
+	else if (addr > wp_high)
+		return addr - wp_high;
+	else
+		return 0;
+}
+
 static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
 				       struct arch_hw_breakpoint *info)
 {
@@ -692,23 +726,25 @@ static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
 static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 			       struct pt_regs *regs)
 {
-	int i, access;
-	u32 val, ctrl_reg, alignment_mask;
+	int i, access, closest_match = 0;
+	u32 min_dist = -1, dist;
+	u32 val, ctrl_reg;
 	struct perf_event *wp, **slots;
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;
 
 	slots = this_cpu_ptr(wp_on_reg);
 
+	/*
+	 * Find all watchpoints that match the reported address. If no exact
+	 * match is found, attribute the hit to the closest watchpoint.
+	 */
+	rcu_read_lock();
 	for (i = 0; i < core_num_wrps; ++i) {
-		rcu_read_lock();
-
 		wp = slots[i];
-
 		if (wp == NULL)
-			goto unlock;
+			continue;
 
-		info = counter_arch_bp(wp);
 		/*
 		 * The DFAR is an unknown value on debug architectures prior
 		 * to 7.1. Since we only allow a single watchpoint on these
@@ -717,33 +753,31 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 		 */
 		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
 			BUG_ON(i > 0);
+			info = counter_arch_bp(wp);
 			info->trigger = wp->attr.bp_addr;
 		} else {
-			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
-				alignment_mask = 0x7;
-			else
-				alignment_mask = 0x3;
-
-			/* Check if the watchpoint value matches. */
-			val = read_wb_reg(ARM_BASE_WVR + i);
-			if (val != (addr & ~alignment_mask))
-				goto unlock;
-
-			/* Possible match, check the byte address select. */
-			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
-			decode_ctrl_reg(ctrl_reg, &ctrl);
-			if (!((1 << (addr & alignment_mask)) & ctrl.len))
-				goto unlock;
-
 			/* Check that the access type matches. */
 			if (debug_exception_updates_fsr()) {
 				access = (fsr & ARM_FSR_ACCESS_MASK) ?
 					  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
 				if (!(access & hw_breakpoint_type(wp)))
-					goto unlock;
+					continue;
 			}
 
+			val = read_wb_reg(ARM_BASE_WVR + i);
+			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
+			decode_ctrl_reg(ctrl_reg, &ctrl);
+			dist = get_distance_from_watchpoint(addr, val, &ctrl);
+			if (dist < min_dist) {
+				min_dist = dist;
+				closest_match = i;
+			}
+			/* Is this an exact match? */
+			if (dist != 0)
+				continue;
+
+			/* We have a winner. */
+			info = counter_arch_bp(wp);
 			info->trigger = addr;
 		}
 
@@ -765,13 +799,23 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 		 * we can single-step over the watchpoint trigger.
 		 */
 		if (!is_default_overflow_handler(wp))
-			goto unlock;
-
+			continue;
 step:
 		enable_single_step(wp, instruction_pointer(regs));
-unlock:
-		rcu_read_unlock();
 	}
+
+	if (min_dist > 0 && min_dist != -1) {
+		/* No exact match found. */
+		wp = slots[closest_match];
+		info = counter_arch_bp(wp);
+		info->trigger = addr;
+		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+		perf_bp_event(wp, regs);
+		if (is_default_overflow_handler(wp))
+			enable_single_step(wp, instruction_pointer(regs));
+	}
+
+	rcu_read_unlock();
 }
 
 static void watchpoint_single_step_handler(unsigned long pc)
@@ -884,7 +928,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 		break;
 	case ARM_ENTRY_ASYNC_WATCHPOINT:
 		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
-		/* Fall through */
+		fallthrough;
 	case ARM_ENTRY_SYNC_WATCHPOINT:
 		watchpoint_handler(addr, fsr, regs);
 		break;
@@ -933,7 +977,7 @@ static bool core_has_os_save_restore(void)
 		ARM_DBG_READ(c1, c1, 4, oslsr);
 		if (oslsr & ARM_OSLSR_OSLM0)
 			return true;
-		/* Else, fall through */
+		fallthrough;
 	default:
 		return false;
 	}
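The new get_distance_from_watchpoint() reads the watchpoint base address from the value register and derives the watched byte range from the byte-address-select bits of the control register: __ffs()/__fls() give the offsets of the first and last watched byte. A minimal user-space sketch of the same arithmetic, with made-up addresses and GCC/Clang builtins standing in for the kernel's bit helpers:

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-ins for the kernel's __ffs()/__fls() bit helpers. */
	static unsigned int bit_ffs(uint32_t x) { return __builtin_ctz(x); }
	static unsigned int bit_fls(uint32_t x) { return 31 - __builtin_clz(x); }

	/*
	 * Mirror of get_distance_from_watchpoint(): val is the aligned base
	 * address from the WVR, len is the byte-address-select mask.
	 */
	static uint32_t distance(uint32_t addr, uint32_t val, uint32_t len)
	{
		uint32_t wp_low  = val + bit_ffs(len);
		uint32_t wp_high = val + bit_fls(len);

		if (addr < wp_low)
			return wp_low - addr;
		if (addr > wp_high)
			return addr - wp_high;
		return 0;
	}

	int main(void)
	{
		/* 4-byte watchpoint on 0x1004..0x1007: val = 0x1000, len = 0xf0 */
		printf("%u\n", distance(0x1000, 0x1000, 0xf0)); /* 4: near miss below */
		printf("%u\n", distance(0x1005, 0x1000, 0xf0)); /* 0: exact hit */
		printf("%u\n", distance(0x1009, 0x1000, 0xf0)); /* 2: near miss above */
		return 0;
	}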
 */
-#include <linux/kernel_stat.h>
 #include <linux/signal.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d8e18cdd96d3..3f65d0ac9f63 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -843,20 +843,26 @@ early_param("mem", early_mem);
 
 static void __init request_standard_resources(const struct machine_desc *mdesc)
 {
-	struct memblock_region *region;
+	phys_addr_t start, end, res_end;
 	struct resource *res;
+	u64 i;
 
 	kernel_code.start   = virt_to_phys(_text);
 	kernel_code.end     = virt_to_phys(__init_begin - 1);
 	kernel_data.start   = virt_to_phys(_sdata);
 	kernel_data.end     = virt_to_phys(_end - 1);
 
-	for_each_memblock(memory, region) {
-		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
-		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+	for_each_mem_range(i, &start, &end) {
 		unsigned long boot_alias_start;
 
 		/*
+		 * In memblock, end points to the first byte after the
+		 * range while in resources, end points to the last byte in
+		 * the range.
+		 */
+		res_end = end - 1;
+
+		/*
 		 * Some systems have a special memory alias which is only
 		 * used for booting. We need to advertise this region to
 		 * kexec-tools so they know where bootable RAM is located.
@@ -869,7 +875,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
 				      __func__, sizeof(*res));
 			res->name = "System RAM (boot alias)";
 			res->start = boot_alias_start;
-			res->end = phys_to_idmap(end);
+			res->end = phys_to_idmap(res_end);
 			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 			request_resource(&iomem_resource, res);
 		}
@@ -880,7 +886,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
 			      sizeof(*res));
 		res->name  = "System RAM";
 		res->start = start;
-		res->end = end;
+		res->end = res_end;
 		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 		request_resource(&iomem_resource, res);
 
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index c9dc912b83f0..c1892f733f20 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -596,7 +596,7 @@ static int do_signal(struct pt_regs *regs, int syscall)
 		switch (retval) {
 		case -ERESTART_RESTARTBLOCK:
 			restart -= 2;
-			/* Fall through */
+			fallthrough;
 		case -ERESTARTNOHAND:
 		case -ERESTARTSYS:
 		case -ERESTARTNOINTR:
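The setup.c hunk is part of the for_each_memblock() to for_each_mem_range() conversion. The off-by-one it has to bridge: memblock iterators return an exclusive end (the first byte after the range), while struct resource stores an inclusive one, hence res_end = end - 1. A small self-contained illustration, using an invented 1 MiB range:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t phys_addr_t;

	/* Illustrative resource with inclusive bounds, like struct resource. */
	struct res { phys_addr_t start, end; };

	int main(void)
	{
		/* memblock-style range: end points one byte past the range */
		phys_addr_t start = 0x80000000, end = 0x80100000;

		struct res r = { .start = start, .end = end - 1 };

		/* prints 0x80000000..0x800fffff: last byte, inclusive */
		printf("0x%llx..0x%llx\n", (unsigned long long)r.start,
		       (unsigned long long)r.end);
		return 0;
	}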
 */
+	MAX_IPI
 };
 
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+
 static DECLARE_COMPLETION(cpu_running);
 
 static struct smp_operations smp_ops __ro_after_init;
@@ -226,6 +235,17 @@ int platform_can_hotplug_cpu(unsigned int cpu)
 	return cpu != 0;
 }
 
+static void ipi_teardown(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		disable_percpu_irq(ipi_irq_base + i);
+}
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
@@ -247,6 +267,7 @@ int __cpu_disable(void)
 	 * and we must not schedule until we're ready to give up the cpu.
 	 */
 	set_cpu_online(cpu, false);
+	ipi_teardown(cpu);
 
 	/*
 	 * OK - migrate IRQs away from this CPU
@@ -422,6 +443,8 @@ asmlinkage void secondary_start_kernel(void)
 
 	notify_cpu_starting(cpu);
 
+	ipi_setup(cpu);
+
 	calibrate_delay();
 
 	smp_store_cpu_info(cpu);
@@ -500,14 +523,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	}
 }
 
-static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
-	if (!__smp_cross_call)
-		__smp_cross_call = fn;
-}
-
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s)	[x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -519,38 +534,28 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 	S(IPI_COMPLETION, "completion interrupts"),
 };
 
-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
-	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
-	__smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
 
 	for (i = 0; i < NR_IPI; i++) {
+		unsigned int irq;
+
+		if (!ipi_desc[i])
+			continue;
+
+		irq = irq_desc_get_irq(ipi_desc[i]);
 		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
 
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ",
-				   __get_irq_stat(cpu, ipi_irqs[i]));
+			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 
 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
 }
 
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = 0;
-	int i;
-
-	for (i = 0; i < NR_IPI; i++)
-		sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
-	return sum;
-}
-
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	smp_cross_call(mask, IPI_CALL_FUNC);
@@ -627,15 +632,12 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 	handle_IPI(ipinr, regs);
 }
 
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
 {
 	unsigned int cpu = smp_processor_id();
-	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	if ((unsigned)ipinr < NR_IPI) {
+	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
-		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
-	}
 
 	switch (ipinr) {
 	case IPI_WAKEUP:
@@ -643,9 +645,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	case IPI_TIMER:
-		irq_enter();
 		tick_receive_broadcast();
-		irq_exit();
 		break;
 #endif
 
@@ -654,36 +654,26 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		break;
 
 	case IPI_CALL_FUNC:
-		irq_enter();
 		generic_smp_call_function_interrupt();
-		irq_exit();
 		break;
 
 	case IPI_CPU_STOP:
-		irq_enter();
 		ipi_cpu_stop(cpu);
-		irq_exit();
 		break;
 
#ifdef CONFIG_IRQ_WORK
 	case IPI_IRQ_WORK:
-		irq_enter();
 		irq_work_run();
-		irq_exit();
 		break;
 #endif
 
 	case IPI_COMPLETION:
-		irq_enter();
 		ipi_complete(cpu);
-		irq_exit();
 		break;
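The smp.c changes convert ARM's IPIs into ordinary per-CPU interrupts: set_smp_ipi_range() replaces the old set_smp_cross_call() hook, requests one per-CPU IRQ per IPI (hidden from /proc/interrupts via IRQ_HIDDEN), and smp_cross_call() now sends through __ipi_send_mask(). The explicit irq_enter()/irq_exit() pairs disappear from the handler because the genirq path that invokes ipi_handler() already runs in interrupt context; only the legacy handle_IPI() wrapper still supplies them. The enum rewrite is the subtle part: NR_IPI becomes a counting member, and IPI_CPU_BACKTRACE aliases the first slot past it, so it gets an SGI without being counted or traced. A compilable sketch of just the numbering (printed values are for illustration only):

	#include <stdio.h>

	/*
	 * Mirror of the enum layout: NR_IPI counts only the traceable IPIs,
	 * while IPI_CPU_BACKTRACE aliases the first slot past them.
	 */
	enum ipi_msg_type {
		IPI_WAKEUP,
		IPI_TIMER,
		IPI_RESCHEDULE,
		IPI_CALL_FUNC,
		IPI_CPU_STOP,
		IPI_IRQ_WORK,
		IPI_COMPLETION,
		NR_IPI,
		IPI_CPU_BACKTRACE = NR_IPI,
		MAX_IPI
	};

	int main(void)
	{
		/* 7 traceable IPIs; backtrace rides in slot 7; 8 SGIs total */
		printf("NR_IPI=%d IPI_CPU_BACKTRACE=%d MAX_IPI=%d\n",
		       NR_IPI, IPI_CPU_BACKTRACE, MAX_IPI);
		return 0;
	}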
 	case IPI_CPU_BACKTRACE:
 		printk_nmi_enter();
-		irq_enter();
-		nmi_cpu_backtrace(regs);
-		irq_exit();
+		nmi_cpu_backtrace(get_irq_regs());
 		printk_nmi_exit();
 		break;
 
@@ -695,9 +685,67 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+}
+
+/* Legacy version, should go away once all irqchips have been converted */
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+	do_handle_IPI(ipinr);
+	irq_exit();
+
 	set_irq_regs(old_regs);
 }
 
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	do_handle_IPI(irq - ipi_irq_base);
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+	int i;
+
+	WARN_ON(n < MAX_IPI);
+	nr_ipi = min(n, MAX_IPI);
+
+	for (i = 0; i < nr_ipi; i++) {
+		int err;
+
+		err = request_percpu_irq(ipi_base + i, ipi_handler,
+					 "IPI", &irq_stat);
+		WARN_ON(err);
+
+		ipi_desc[i] = irq_to_desc(ipi_base + i);
+		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+	}
+
+	ipi_irq_base = ipi_base;
+
+	/* Setup the boot CPU immediately */
+	ipi_setup(smp_processor_id());
+}
+
 void smp_send_reschedule(int cpu)
 {
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
@@ -805,7 +853,7 @@ core_initcall(register_cpufreq_notifier);
 
 static void raise_nmi(cpumask_t *mask)
 {
-	__smp_cross_call(mask, IPI_CPU_BACKTRACE);
+	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
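With arm_topology[] and its GMC level removed, init_cpu_topology() no longer calls set_sched_topology(), so the scheduler falls back to its built-in default table. For reference, that default looks roughly like this (paraphrased from kernel/sched/topology.c of the same era; it is not part of this patch):

	static struct sched_domain_topology_level default_topology[] = {
	#ifdef CONFIG_SCHED_SMT
		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	#endif
	#ifdef CONFIG_SCHED_MC
		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	#endif
		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
		{ NULL, },
	};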
- */
-const struct cpumask *cpu_corepower_mask(int cpu)
-{
-	return &cpu_topology[cpu].thread_sibling;
-}
-
-/*
  * store_cpu_topology is called at boot when only one cpu is running
  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
  * which prevents simultaneous write access to cpu_topology array
@@ -241,20 +232,6 @@ topology_populated:
 	update_siblings_masks(cpuid);
 }
 
-static inline int cpu_corepower_flags(void)
-{
-	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
-}
-
-static struct sched_domain_topology_level arm_topology[] = {
-#ifdef CONFIG_SCHED_MC
-	{ cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
-	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
-#endif
-	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
-	{ NULL, },
-};
-
 /*
  * init_cpu_topology is called at boot when only one cpu is running
  * which prevent simultaneous write access to cpu_topology array
@@ -265,7 +242,4 @@ void __init init_cpu_topology(void)
 	smp_wmb();
 
 	parse_dt_topology();
-
-	/* Set scheduler topology descriptor */
-	set_sched_topology(arm_topology);
 }
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 6d2be994ae58..50136828f5b5 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -9,15 +9,13 @@
 
 #include <linux/sizes.h>
 
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/mpu.h>
 #include <asm/page.h>
 
-#include "vmlinux.lds.h"
-
 OUTPUT_ARCH(arm)
 ENTRY(stext)
 
@@ -152,6 +150,10 @@ SECTIONS
 	_end = .;
 
 	STABS_DEBUG
+	DWARF_DEBUG
+	ARM_DETAILS
+
+	ARM_ASSERTS
 }
 
 /*
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 7f24bc08403e..5f4922e858d0 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -9,15 +9,13 @@
 #else
 
 #include <linux/pgtable.h>
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/mpu.h>
 #include <asm/page.h>
 
-#include "vmlinux.lds.h"
-
 OUTPUT_ARCH(arm)
 ENTRY(stext)
 
@@ -151,6 +149,10 @@ SECTIONS
 	_end = .;
 
 	STABS_DEBUG
+	DWARF_DEBUG
+	ARM_DETAILS
+
+	ARM_ASSERTS
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
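Both linker scripts now pull the shared section macros from <asm/vmlinux.lds.h> instead of a private "vmlinux.lds.h" header kept next to them; the deletion that follows is the other half of that move, with the macros presumably relocated to arch/arm/include/asm/vmlinux.lds.h, which a diff limited to arch/arm/kernel cannot show. Both scripts also gain DWARF_DEBUG, ARM_DETAILS and ARM_ASSERTS alongside the existing STABS_DEBUG.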
diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h
deleted file mode 100644
index 381a8e105fa5..000000000000
--- a/arch/arm/kernel/vmlinux.lds.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifdef CONFIG_HOTPLUG_CPU
-#define ARM_CPU_DISCARD(x)
-#define ARM_CPU_KEEP(x)		x
-#else
-#define ARM_CPU_DISCARD(x)	x
-#define ARM_CPU_KEEP(x)
-#endif
-
-#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
-	defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
-#define ARM_EXIT_KEEP(x)	x
-#define ARM_EXIT_DISCARD(x)
-#else
-#define ARM_EXIT_KEEP(x)
-#define ARM_EXIT_DISCARD(x)	x
-#endif
-
-#ifdef CONFIG_MMU
-#define ARM_MMU_KEEP(x)		x
-#define ARM_MMU_DISCARD(x)
-#else
-#define ARM_MMU_KEEP(x)
-#define ARM_MMU_DISCARD(x)	x
-#endif
-
-#define PROC_INFO							\
-		. = ALIGN(4);						\
-		__proc_info_begin = .;					\
-		*(.proc.info.init)					\
-		__proc_info_end = .;
-
-#define IDMAP_TEXT							\
-	ALIGN_FUNCTION();						\
-	__idmap_text_start = .;						\
-	*(.idmap.text)							\
-	__idmap_text_end = .;						\
-
-#define ARM_DISCARD							\
-		*(.ARM.exidx.exit.text)					\
-		*(.ARM.extab.exit.text)					\
-		*(.ARM.exidx.text.exit)					\
-		*(.ARM.extab.text.exit)					\
-		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))		\
-		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))		\
-		ARM_EXIT_DISCARD(EXIT_TEXT)				\
-		ARM_EXIT_DISCARD(EXIT_DATA)				\
-		EXIT_CALL						\
-		ARM_MMU_DISCARD(*(.text.fixup))				\
-		ARM_MMU_DISCARD(*(__ex_table))				\
-		*(.discard)						\
-		*(.discard.*)
-
-#define ARM_TEXT							\
-		IDMAP_TEXT						\
-		__entry_text_start = .;					\
-		*(.entry.text)						\
-		__entry_text_end = .;					\
-		IRQENTRY_TEXT						\
-		SOFTIRQENTRY_TEXT					\
-		TEXT_TEXT						\
-		SCHED_TEXT						\
-		CPUIDLE_TEXT						\
-		LOCK_TEXT						\
-		KPROBES_TEXT						\
-		*(.gnu.warning)						\
-		*(.glue_7)						\
-		*(.glue_7t)						\
-		. = ALIGN(4);						\
-		*(.got)			/* Global offset table */	\
-		ARM_CPU_KEEP(PROC_INFO)
-
-/* Stack unwinding tables */
-#define ARM_UNWIND_SECTIONS						\
-	. = ALIGN(8);							\
-	.ARM.unwind_idx : {						\
-		__start_unwind_idx = .;					\
-		*(.ARM.exidx*)						\
-		__stop_unwind_idx = .;					\
-	}								\
-	.ARM.unwind_tab : {						\
-		__start_unwind_tab = .;					\
-		*(.ARM.extab*)						\
-		__stop_unwind_tab = .;					\
-	}
-
-/*
- * The vectors and stubs are relocatable code, and the
- * only thing that matters is their relative offsets
- */
-#define ARM_VECTORS							\
-	__vectors_start = .;						\
-	.vectors 0xffff0000 : AT(__vectors_start) {			\
-		*(.vectors)						\
-	}								\
-	. = __vectors_start + SIZEOF(.vectors);				\
-	__vectors_end = .;						\
-									\
-	__stubs_start = .;						\
-	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {		\
-		*(.stubs)						\
-	}								\
-	. = __stubs_start + SIZEOF(.stubs);				\
-	__stubs_end = .;						\
-									\
-	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
-
-#define ARM_TCM								\
-	__itcm_start = ALIGN(4);					\
-	.text_itcm ITCM_OFFSET : AT(__itcm_start - LOAD_OFFSET) {	\
-		__sitcm_text = .;					\
-		*(.tcm.text)						\
-		*(.tcm.rodata)						\
-		. = ALIGN(4);						\
-		__eitcm_text = .;					\
-	}								\
-	. = __itcm_start + SIZEOF(.text_itcm);				\
-									\
-	__dtcm_start = .;						\
-	.data_dtcm DTCM_OFFSET : AT(__dtcm_start - LOAD_OFFSET) {	\
-		__sdtcm_data = .;					\
-		*(.tcm.data)						\
-		. = ALIGN(4);						\
-		__edtcm_data = .;					\
-	}								\
-	. = __dtcm_start + SIZEOF(.data_dtcm);