There have been some recently reported ORC unwinder warnings like:
WARNING: can't access registers at entry_SYSCALL_64_after_hwframe+0x63/0xcd
WARNING: stack going in the wrong direction? at __sys_setsockopt+0x2c6/0x5b0 net/socket.c:2271
And a KASAN warning:
BUG: KASAN: stack-out-of-bounds in unwind_next_frame (arch/x86/include/asm/ptrace.h:136 arch/x86/kernel/unwind_orc.c:455)
It turns out the 'signal' bit isn't getting propagated from the unwind
hints to the ORC entries, making the unwinder confused at times.
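The fix amounts to carrying the bit the whole way from the hint to the
ORC entry. A sketch of the plumbing (field names as in the unwind hint
and ORC structures; exact placement per the patch):
    /* check.c, read_unwind_hints(): unwind hint -> CFI state */
    cfi.signal = hint->signal;

    /* orc_gen.c, when filling an ORC entry from the CFI state */
    orc->signal = cfi->signal;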
Fixes: ffb1b4a41016 ("x86/unwind/orc: Add 'signal' field to ORC metadata")
Reported-by: kernel test robot <oliver.sang@intel.com>
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/97eef9db60cd86d376a9a40d49d77bb67a8f6526.1676579666.git.jpoimboe@kernel.org
|
Replace instruction::list by allocating instructions in arrays of
256 entries and stringing them together with (amortized) find_insn().
This shrinks struct instruction by 16 bytes, bringing it down to 128.
struct instruction {
- struct list_head list; /* 0 16 */
- struct hlist_node hash; /* 16 16 */
- struct list_head call_node; /* 32 16 */
- struct section * sec; /* 48 8 */
- long unsigned int offset; /* 56 8 */
- /* --- cacheline 1 boundary (64 bytes) --- */
- long unsigned int immediate; /* 64 8 */
- unsigned int len; /* 72 4 */
- u8 type; /* 76 1 */
-
- /* Bitfield combined with previous fields */
+ struct hlist_node hash; /* 0 16 */
+ struct list_head call_node; /* 16 16 */
+ struct section * sec; /* 32 8 */
+ long unsigned int offset; /* 40 8 */
+ long unsigned int immediate; /* 48 8 */
+ u8 len; /* 56 1 */
+ u8 prev_len; /* 57 1 */
+ u8 type; /* 58 1 */
+ s8 instr; /* 59 1 */
+ u32 idx:8; /* 60: 0 4 */
+ u32 dead_end:1; /* 60: 8 4 */
+ u32 ignore:1; /* 60: 9 4 */
+ u32 ignore_alts:1; /* 60:10 4 */
+ u32 hint:1; /* 60:11 4 */
+ u32 save:1; /* 60:12 4 */
+ u32 restore:1; /* 60:13 4 */
+ u32 retpoline_safe:1; /* 60:14 4 */
+ u32 noendbr:1; /* 60:15 4 */
+ u32 entry:1; /* 60:16 4 */
+ u32 visited:4; /* 60:17 4 */
+ u32 no_reloc:1; /* 60:21 4 */
- u16 dead_end:1; /* 76: 8 2 */
- u16 ignore:1; /* 76: 9 2 */
- u16 ignore_alts:1; /* 76:10 2 */
- u16 hint:1; /* 76:11 2 */
- u16 save:1; /* 76:12 2 */
- u16 restore:1; /* 76:13 2 */
- u16 retpoline_safe:1; /* 76:14 2 */
- u16 noendbr:1; /* 76:15 2 */
- u16 entry:1; /* 78: 0 2 */
- u16 visited:4; /* 78: 1 2 */
- u16 no_reloc:1; /* 78: 5 2 */
+ /* XXX 10 bits hole, try to pack */
- /* XXX 2 bits hole, try to pack */
- /* Bitfield combined with next fields */
-
- s8 instr; /* 79 1 */
- struct alt_group * alt_group; /* 80 8 */
- struct instruction * jump_dest; /* 88 8 */
- struct instruction * first_jump_src; /* 96 8 */
+ /* --- cacheline 1 boundary (64 bytes) --- */
+ struct alt_group * alt_group; /* 64 8 */
+ struct instruction * jump_dest; /* 72 8 */
+ struct instruction * first_jump_src; /* 80 8 */
union {
- struct symbol * _call_dest; /* 104 8 */
- struct reloc * _jump_table; /* 104 8 */
- }; /* 104 8 */
- struct alternative * alts; /* 112 8 */
- struct symbol * sym; /* 120 8 */
- /* --- cacheline 2 boundary (128 bytes) --- */
- struct stack_op * stack_ops; /* 128 8 */
- struct cfi_state * cfi; /* 136 8 */
+ struct symbol * _call_dest; /* 88 8 */
+ struct reloc * _jump_table; /* 88 8 */
+ }; /* 88 8 */
+ struct alternative * alts; /* 96 8 */
+ struct symbol * sym; /* 104 8 */
+ struct stack_op * stack_ops; /* 112 8 */
+ struct cfi_state * cfi; /* 120 8 */
- /* size: 144, cachelines: 3, members: 28 */
- /* sum members: 142 */
- /* sum bitfield members: 14 bits, bit holes: 1, sum bit holes: 2 bits */
- /* last cacheline: 16 bytes */
+ /* size: 128, cachelines: 2, members: 29 */
+ /* sum members: 124 */
+ /* sum bitfield members: 22 bits, bit holes: 1, sum bit holes: 10 bits */
};
pre: 5:38.18 real, 213.25 user, 124.90 sys, 23449040 mem
post: 5:03.34 real, 210.75 user, 88.80 sys, 20241232 mem
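A sketch of the chunked scheme; INSN_CHUNK_* and the idx:8 field are
illustrative names for what the pahole diff above shows:
    #define INSN_CHUNK_BITS 8
    #define INSN_CHUNK_SIZE (1 << INSN_CHUNK_BITS)   /* 256 */
    #define INSN_CHUNK_MAX  (INSN_CHUNK_SIZE - 1)

    /* Within a chunk the successor is literally insn + 1; only at a
     * chunk boundary do we fall back to the (amortized O(1)) hash
     * lookup that replaces the old doubly-linked instruction::list. */
    static struct instruction *next_insn_same_sec(struct objtool_file *file,
                                                  struct instruction *insn)
    {
            if (insn->idx == INSN_CHUNK_MAX)
                    return find_insn(file, insn->sec, insn->offset + insn->len);

            insn++;
            if (!insn->len)         /* unused tail of the chunk */
                    return NULL;

            return insn;
    }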
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Tested-by: Nathan Chancellor <nathan@kernel.org> # build only
Tested-by: Thomas Weißschuh <linux@weissschuh.net> # compile and run
Link: https://lore.kernel.org/r/20230208172245.851307606@infradead.org
|
The instruction call_dest and jump_table members can never be used at
the same time; their usage depends on the instruction type.
struct instruction {
struct list_head list; /* 0 16 */
struct hlist_node hash; /* 16 16 */
struct list_head call_node; /* 32 16 */
struct section * sec; /* 48 8 */
long unsigned int offset; /* 56 8 */
/* --- cacheline 1 boundary (64 bytes) --- */
long unsigned int immediate; /* 64 8 */
unsigned int len; /* 72 4 */
u8 type; /* 76 1 */
/* Bitfield combined with previous fields */
u16 dead_end:1; /* 76: 8 2 */
u16 ignore:1; /* 76: 9 2 */
u16 ignore_alts:1; /* 76:10 2 */
u16 hint:1; /* 76:11 2 */
u16 save:1; /* 76:12 2 */
u16 restore:1; /* 76:13 2 */
u16 retpoline_safe:1; /* 76:14 2 */
u16 noendbr:1; /* 76:15 2 */
u16 entry:1; /* 78: 0 2 */
u16 visited:4; /* 78: 1 2 */
u16 no_reloc:1; /* 78: 5 2 */
/* XXX 2 bits hole, try to pack */
/* Bitfield combined with next fields */
s8 instr; /* 79 1 */
struct alt_group * alt_group; /* 80 8 */
- struct symbol * call_dest; /* 88 8 */
- struct instruction * jump_dest; /* 96 8 */
- struct instruction * first_jump_src; /* 104 8 */
- struct reloc * jump_table; /* 112 8 */
- struct alternative * alts; /* 120 8 */
+ struct instruction * jump_dest; /* 88 8 */
+ struct instruction * first_jump_src; /* 96 8 */
+ union {
+ struct symbol * _call_dest; /* 104 8 */
+ struct reloc * _jump_table; /* 104 8 */
+ }; /* 104 8 */
+ struct alternative * alts; /* 112 8 */
+ struct symbol * sym; /* 120 8 */
/* --- cacheline 2 boundary (128 bytes) --- */
- struct symbol * sym; /* 128 8 */
- struct stack_op * stack_ops; /* 136 8 */
- struct cfi_state * cfi; /* 144 8 */
+ struct stack_op * stack_ops; /* 128 8 */
+ struct cfi_state * cfi; /* 136 8 */
- /* size: 152, cachelines: 3, members: 29 */
- /* sum members: 150 */
+ /* size: 144, cachelines: 3, members: 28 */
+ /* sum members: 142 */
/* sum bitfield members: 14 bits, bit holes: 1, sum bit holes: 2 bits */
- /* last cacheline: 24 bytes */
+ /* last cacheline: 16 bytes */
};
pre: 5:39.35 real, 215.58 user, 123.69 sys, 23448736 mem
post: 5:38.18 real, 213.25 user, 124.90 sys, 23449040 mem
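The union is only safe when every access is gated on insn->type. A
sketch of accessor helpers that make the aliasing explicit (dynamic
calls/jumps have a jump table but never a direct call destination):
    static inline struct symbol *insn_call_dest(struct instruction *insn)
    {
            if (insn->type == INSN_JUMP_DYNAMIC ||
                insn->type == INSN_CALL_DYNAMIC)
                    return NULL;

            return insn->_call_dest;
    }

    static inline struct reloc *insn_jump_table(struct instruction *insn)
    {
            if (insn->type == INSN_JUMP_DYNAMIC ||
                insn->type == INSN_CALL_DYNAMIC)
                    return insn->_jump_table;

            return NULL;
    }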
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Tested-by: Nathan Chancellor <nathan@kernel.org> # build only
Tested-by: Thomas Weißschuh <linux@weissschuh.net> # compile and run
Link: https://lore.kernel.org/r/20230208172245.640914454@infradead.org
|
Instead of caching the reloc for each instruction, only keep a
negative cache of not having a reloc (by far the most common case).
struct instruction {
struct list_head list; /* 0 16 */
struct hlist_node hash; /* 16 16 */
struct list_head call_node; /* 32 16 */
struct section * sec; /* 48 8 */
long unsigned int offset; /* 56 8 */
/* --- cacheline 1 boundary (64 bytes) --- */
long unsigned int immediate; /* 64 8 */
unsigned int len; /* 72 4 */
u8 type; /* 76 1 */
/* Bitfield combined with previous fields */
u16 dead_end:1; /* 76: 8 2 */
u16 ignore:1; /* 76: 9 2 */
u16 ignore_alts:1; /* 76:10 2 */
u16 hint:1; /* 76:11 2 */
u16 save:1; /* 76:12 2 */
u16 restore:1; /* 76:13 2 */
u16 retpoline_safe:1; /* 76:14 2 */
u16 noendbr:1; /* 76:15 2 */
u16 entry:1; /* 78: 0 2 */
u16 visited:4; /* 78: 1 2 */
+ u16 no_reloc:1; /* 78: 5 2 */
- /* XXX 3 bits hole, try to pack */
+ /* XXX 2 bits hole, try to pack */
/* Bitfield combined with next fields */
s8 instr; /* 79 1 */
struct alt_group * alt_group; /* 80 8 */
struct symbol * call_dest; /* 88 8 */
struct instruction * jump_dest; /* 96 8 */
struct instruction * first_jump_src; /* 104 8 */
struct reloc * jump_table; /* 112 8 */
- struct reloc * reloc; /* 120 8 */
+ struct alternative * alts; /* 120 8 */
/* --- cacheline 2 boundary (128 bytes) --- */
- struct alternative * alts; /* 128 8 */
- struct symbol * sym; /* 136 8 */
- struct stack_op * stack_ops; /* 144 8 */
- struct cfi_state * cfi; /* 152 8 */
+ struct symbol * sym; /* 128 8 */
+ struct stack_op * stack_ops; /* 136 8 */
+ struct cfi_state * cfi; /* 144 8 */
- /* size: 160, cachelines: 3, members: 29 */
- /* sum members: 158 */
- /* sum bitfield members: 13 bits, bit holes: 1, sum bit holes: 3 bits */
- /* last cacheline: 32 bytes */
+ /* size: 152, cachelines: 3, members: 29 */
+ /* sum members: 150 */
+ /* sum bitfield members: 14 bits, bit holes: 1, sum bit holes: 2 bits */
+ /* last cacheline: 24 bytes */
};
pre: 5:48.89 real, 220.96 user, 127.55 sys, 24834672 mem
post: 5:39.35 real, 215.58 user, 123.69 sys, 23448736 mem
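A sketch of how the negative cache works (helper names assumed): the
lookup is done at most once per instruction, and only a miss is
remembered, since the common no-reloc case would otherwise be
re-searched over and over:
    static struct reloc *insn_reloc(struct objtool_file *file,
                                    struct instruction *insn)
    {
            struct reloc *reloc;

            if (insn->no_reloc)
                    return NULL;            /* cached miss */

            reloc = find_reloc_by_dest_range(file->elf, insn->sec,
                                             insn->offset, insn->len);
            if (!reloc) {
                    insn->no_reloc = 1;     /* remember the miss */
                    return NULL;
            }

            return reloc;
    }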
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Tested-by: Nathan Chancellor <nathan@kernel.org> # build only
Tested-by: Thomas Weißschuh <linux@weissschuh.net> # compile and run
Link: https://lore.kernel.org/r/20230208172245.572145269@infradead.org
|
Since we don't have that many types in enum insn_type, force it into a
u8 and re-arrange the members to get rid of the holes, saving another 8
bytes.
struct instruction {
struct list_head list; /* 0 16 */
struct hlist_node hash; /* 16 16 */
struct list_head call_node; /* 32 16 */
struct section * sec; /* 48 8 */
long unsigned int offset; /* 56 8 */
/* --- cacheline 1 boundary (64 bytes) --- */
- unsigned int len; /* 64 4 */
- enum insn_type type; /* 68 4 */
- long unsigned int immediate; /* 72 8 */
- u16 dead_end:1; /* 80: 0 2 */
- u16 ignore:1; /* 80: 1 2 */
- u16 ignore_alts:1; /* 80: 2 2 */
- u16 hint:1; /* 80: 3 2 */
- u16 save:1; /* 80: 4 2 */
- u16 restore:1; /* 80: 5 2 */
- u16 retpoline_safe:1; /* 80: 6 2 */
- u16 noendbr:1; /* 80: 7 2 */
- u16 entry:1; /* 80: 8 2 */
+ long unsigned int immediate; /* 64 8 */
+ unsigned int len; /* 72 4 */
+ u8 type; /* 76 1 */
- /* XXX 7 bits hole, try to pack */
+ /* Bitfield combined with previous fields */
- s8 instr; /* 82 1 */
- u8 visited; /* 83 1 */
+ u16 dead_end:1; /* 76: 8 2 */
+ u16 ignore:1; /* 76: 9 2 */
+ u16 ignore_alts:1; /* 76:10 2 */
+ u16 hint:1; /* 76:11 2 */
+ u16 save:1; /* 76:12 2 */
+ u16 restore:1; /* 76:13 2 */
+ u16 retpoline_safe:1; /* 76:14 2 */
+ u16 noendbr:1; /* 76:15 2 */
+ u16 entry:1; /* 78: 0 2 */
+ u16 visited:4; /* 78: 1 2 */
- /* XXX 4 bytes hole, try to pack */
+ /* XXX 3 bits hole, try to pack */
+ /* Bitfield combined with next fields */
- struct alt_group * alt_group; /* 88 8 */
- struct symbol * call_dest; /* 96 8 */
- struct instruction * jump_dest; /* 104 8 */
- struct instruction * first_jump_src; /* 112 8 */
- struct reloc * jump_table; /* 120 8 */
+ s8 instr; /* 79 1 */
+ struct alt_group * alt_group; /* 80 8 */
+ struct symbol * call_dest; /* 88 8 */
+ struct instruction * jump_dest; /* 96 8 */
+ struct instruction * first_jump_src; /* 104 8 */
+ struct reloc * jump_table; /* 112 8 */
+ struct reloc * reloc; /* 120 8 */
/* --- cacheline 2 boundary (128 bytes) --- */
- struct reloc * reloc; /* 128 8 */
- struct alternative * alts; /* 136 8 */
- struct symbol * sym; /* 144 8 */
- struct stack_op * stack_ops; /* 152 8 */
- struct cfi_state * cfi; /* 160 8 */
+ struct alternative * alts; /* 128 8 */
+ struct symbol * sym; /* 136 8 */
+ struct stack_op * stack_ops; /* 144 8 */
+ struct cfi_state * cfi; /* 152 8 */
- /* size: 168, cachelines: 3, members: 29 */
- /* sum members: 162, holes: 1, sum holes: 4 */
- /* sum bitfield members: 9 bits, bit holes: 1, sum bit holes: 7 bits */
- /* last cacheline: 40 bytes */
+ /* size: 160, cachelines: 3, members: 29 */
+ /* sum members: 158 */
+ /* sum bitfield members: 13 bits, bit holes: 1, sum bit holes: 3 bits */
+ /* last cacheline: 32 bytes */
};
pre: 5:48.86 real, 220.30 user, 128.34 sys, 24834672 mem
post: 5:48.89 real, 220.96 user, 127.55 sys, 24834672 mem
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Tested-by: Nathan Chancellor <nathan@kernel.org> # build only
Tested-by: Thomas Weißschuh <linux@weissschuh.net> # compile and run
Link: https://lore.kernel.org/r/20230208172245.501847188@infradead.org
|
struct instruction {
struct list_head list; /* 0 16 */
struct hlist_node hash; /* 16 16 */
struct list_head call_node; /* 32 16 */
struct section * sec; /* 48 8 */
long unsigned int offset; /* 56 8 */
/* --- cacheline 1 boundary (64 bytes) --- */
unsigned int len; /* 64 4 */
enum insn_type type; /* 68 4 */
long unsigned int immediate; /* 72 8 */
u16 dead_end:1; /* 80: 0 2 */
u16 ignore:1; /* 80: 1 2 */
u16 ignore_alts:1; /* 80: 2 2 */
u16 hint:1; /* 80: 3 2 */
u16 save:1; /* 80: 4 2 */
u16 restore:1; /* 80: 5 2 */
u16 retpoline_safe:1; /* 80: 6 2 */
u16 noendbr:1; /* 80: 7 2 */
u16 entry:1; /* 80: 8 2 */
/* XXX 7 bits hole, try to pack */
s8 instr; /* 82 1 */
u8 visited; /* 83 1 */
/* XXX 4 bytes hole, try to pack */
struct alt_group * alt_group; /* 88 8 */
struct symbol * call_dest; /* 96 8 */
struct instruction * jump_dest; /* 104 8 */
struct instruction * first_jump_src; /* 112 8 */
struct reloc * jump_table; /* 120 8 */
/* --- cacheline 2 boundary (128 bytes) --- */
struct reloc * reloc; /* 128 8 */
- struct list_head alts; /* 136 16 */
- struct symbol * sym; /* 152 8 */
- struct stack_op * stack_ops; /* 160 8 */
- struct cfi_state * cfi; /* 168 8 */
+ struct alternative * alts; /* 136 8 */
+ struct symbol * sym; /* 144 8 */
+ struct stack_op * stack_ops; /* 152 8 */
+ struct cfi_state * cfi; /* 160 8 */
- /* size: 176, cachelines: 3, members: 29 */
- /* sum members: 170, holes: 1, sum holes: 4 */
+ /* size: 168, cachelines: 3, members: 29 */
+ /* sum members: 162, holes: 1, sum holes: 4 */
/* sum bitfield members: 9 bits, bit holes: 1, sum bit holes: 7 bits */
- /* last cacheline: 48 bytes */
+ /* last cacheline: 40 bytes */
};
pre: 5:58.50 real, 229.64 user, 128.65 sys, 26221520 mem
post: 5:48.86 real, 220.30 user, 128.34 sys, 24834672 mem
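The conversion follows this sketch (the stack_ops patch below does the
same): a struct list_head is 16 bytes and doubly linked, but the
alternatives list is built once and only ever walked front-to-back, so
a single 'next' pointer suffices:
    #include <stdbool.h>

    struct alternative {
            struct alternative *next;       /* was: struct list_head list */
            struct instruction *insn;
            bool skip_orig;
    };

    /* prepend at build time, then walk with:
     *   for (alt = insn->alts; alt; alt = alt->next) ...
     */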
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Tested-by: Nathan Chancellor <nathan@kernel.org> # build only
Tested-by: Thomas Weißschuh <linux@weissschuh.net> # compile and run
Link: https://lore.kernel.org/r/20230208172245.430556498@infradead.org
|
struct instruction {
struct list_head list; /* 0 16 */
struct hlist_node hash; /* 16 16 */
struct list_head call_node; /* 32 16 */
struct section * sec; /* 48 8 */
long unsigned int offset; /* 56 8 */
/* --- cacheline 1 boundary (64 bytes) --- */
unsigned int len; /* 64 4 */
enum insn_type type; /* 68 4 */
long unsigned int immediate; /* 72 8 */
u16 dead_end:1; /* 80: 0 2 */
u16 ignore:1; /* 80: 1 2 */
u16 ignore_alts:1; /* 80: 2 2 */
u16 hint:1; /* 80: 3 2 */
u16 save:1; /* 80: 4 2 */
u16 restore:1; /* 80: 5 2 */
u16 retpoline_safe:1; /* 80: 6 2 */
u16 noendbr:1; /* 80: 7 2 */
u16 entry:1; /* 80: 8 2 */
/* XXX 7 bits hole, try to pack */
s8 instr; /* 82 1 */
u8 visited; /* 83 1 */
/* XXX 4 bytes hole, try to pack */
struct alt_group * alt_group; /* 88 8 */
struct symbol * call_dest; /* 96 8 */
struct instruction * jump_dest; /* 104 8 */
struct instruction * first_jump_src; /* 112 8 */
struct reloc * jump_table; /* 120 8 */
/* --- cacheline 2 boundary (128 bytes) --- */
struct reloc * reloc; /* 128 8 */
struct list_head alts; /* 136 16 */
struct symbol * sym; /* 152 8 */
- struct list_head stack_ops; /* 160 16 */
- struct cfi_state * cfi; /* 176 8 */
+ struct stack_op * stack_ops; /* 160 8 */
+ struct cfi_state * cfi; /* 168 8 */
- /* size: 184, cachelines: 3, members: 29 */
- /* sum members: 178, holes: 1, sum holes: 4 */
+ /* size: 176, cachelines: 3, members: 29 */
+ /* sum members: 170, holes: 1, sum holes: 4 */
/* sum bitfield members: 9 bits, bit holes: 1, sum bit holes: 7 bits */
- /* last cacheline: 56 bytes */
+ /* last cacheline: 48 bytes */
};
pre: 5:58.22 real, 226.69 user, 131.22 sys, 26221520 mem
post: 5:58.50 real, 229.64 user, 128.65 sys, 26221520 mem
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Tested-by: Nathan Chancellor <nathan@kernel.org> # build only
Tested-by: Thomas Weißschuh <linux@weissschuh.net> # compile and run
Link: https://lore.kernel.org/r/20230208172245.362196959@infradead.org
|
In preparation for changing struct instruction around a bit, avoid
passing its members by pointer and instead pass the whole thing.
A cleanup in its own right too.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Tested-by: Nathan Chancellor <nathan@kernel.org> # build only
Tested-by: Thomas Weißschuh <linux@weissschuh.net> # compile and run
Link: https://lore.kernel.org/r/20230208172245.291087549@infradead.org
|
Reduce the size of struct special_alt from 72 to 64 bytes.
Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
Link: https://lore.kernel.org/r/20221216-objtool-memory-v2-7-17968f85a464@weissschuh.net
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
|
Reduce the size of struct symbol on x86_64 from 208 to 200 bytes.
This structure is allocated a lot and never freed.
This reduces maximum memory usage while processing vmlinux.o from
2919716 KB to 2917988 KB (-0.06%) on my notebook's "localmodconfig".
Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
Link: https://lore.kernel.org/r/20221216-objtool-memory-v2-6-17968f85a464@weissschuh.net
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
|
By using calloc() instead of malloc() in a loop, libc does not have to
keep around bookkeeping information for each single structure.
This reduces maximum memory usage while processing vmlinux.o from
3153325 KB to 3035668 KB (-3.7%) on my notebook's "localmodconfig".
Note this introduces memory leaks, because some additional structs get
added to the lists later after reading the symbols and sections from the
original object. Luckily we don't really care about memory leaks in
objtool.
Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
Link: https://lore.kernel.org/r/20221216-objtool-memory-v2-3-17968f85a464@weissschuh.net
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
|
It is not used outside of builtin-check.c.
Also remove the unused declaration from builtin.h.
Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
Link: https://lore.kernel.org/r/20221216-objtool-memory-v2-2-17968f85a464@weissschuh.net
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
|
git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
- Add powerpc qspinlock implementation optimised for large system
scalability and paravirt. See the merge message for more details
- Enable objtool to be built on powerpc to generate mcount locations
- Use a temporary mm for code patching with the Radix MMU, so the
writable mapping is restricted to the patching CPU
- Add an option to build the 64-bit big-endian kernel with the ELFv2
ABI
- Sanitise user registers on interrupt entry on 64-bit Book3S
- Many other small features and fixes
Thanks to Aboorva Devarajan, Angel Iglesias, Benjamin Gray, Bjorn
Helgaas, Bo Liu, Chen Lifu, Christoph Hellwig, Christophe JAILLET,
Christophe Leroy, Christopher M. Riedl, Colin Ian King, Deming Wang,
Disha Goel, Dmitry Torokhov, Finn Thain, Geert Uytterhoeven, Gustavo A.
R. Silva, Haowen Bai, Joel Stanley, Jordan Niethe, Julia Lawall, Kajol
Jain, Laurent Dufour, Li zeming, Miaoqian Lin, Michael Jeanson, Nathan
Lynch, Naveen N. Rao, Nayna Jain, Nicholas Miehlbradt, Nicholas Piggin,
Pali Rohár, Randy Dunlap, Rohan McLure, Russell Currey, Sathvika
Vasireddy, Shaomin Deng, Stephen Kitt, Stephen Rothwell, Thomas
Weißschuh, Tiezhu Yang, Uwe Kleine-König, Xie Shaowen, Xiu Jianfeng,
XueBing Chen, Yang Yingliang, Zhang Jiaming, ruanjinjie, Jessica Yu,
and Wolfram Sang.
* tag 'powerpc-6.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (181 commits)
powerpc/code-patching: Fix oops with DEBUG_VM enabled
powerpc/qspinlock: Fix 32-bit build
powerpc/prom: Fix 32-bit build
powerpc/rtas: mandate RTAS syscall filtering
powerpc/rtas: define pr_fmt and convert printk call sites
powerpc/rtas: clean up includes
powerpc/rtas: clean up rtas_error_log_max initialization
powerpc/pseries/eeh: use correct API for error log size
powerpc/rtas: avoid scheduling in rtas_os_term()
powerpc/rtas: avoid device tree lookups in rtas_os_term()
powerpc/rtasd: use correct OF API for event scan rate
powerpc/rtas: document rtas_call()
powerpc/pseries: unregister VPA when hot unplugging a CPU
powerpc/pseries: reset the RCU watchdogs after a LPM
powerpc: Take in account addition CPU node when building kexec FDT
powerpc: export the CPU node count
powerpc/cpuidle: Set CPUIDLE_FLAG_POLLING for snooze state
powerpc/dts/fsl: Fix pca954x i2c-mux node names
cxl: Remove unnecessary cxl_pci_window_alignment()
selftests/powerpc: Fix resource leaks
...
|
Provide an implementation for arch_pc_relative_reloc(). It is needed to
pass the build once 61c6065ef7ec ("objtool: Allow !PC relative
relocations") is merged.
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
|
Add architecture specific function to look for relocation records
pointing to architecture specific symbols.
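The hook can be as small as this sketch; "__fentry__" is the x86
mcount target, while a powerpc version would compare against "_mcount":
    #include <stdbool.h>
    #include <string.h>

    bool arch_ftrace_match(char *name)
    {
            return !strcmp(name, "__fentry__");
    }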
Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Sathvika Vasireddy <sv@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221114175754.1131267-15-sv@linux.ibm.com
|
Some architectures (powerpc) may not support ftrace locations being nop'ed
out at build time. Introduce CONFIG_HAVE_OBJTOOL_NOP_MCOUNT for objtool, as
a means for architectures to enable nop'ing of ftrace locations. Add --mnop
as an option to objtool --mcount, to indicate support for the same.
Also, make sure that --mnop can be passed as an option to objtool only when
--mcount is passed.
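A sketch of the cmdline sanity check, with assumed option-struct field
names:
    #include <stdbool.h>
    #include <stdio.h>

    struct opts { bool mcount, mnop; };

    static int opts_sanity_check(const struct opts *o)
    {
            if (o->mnop && !o->mcount) {
                    fprintf(stderr, "--mnop requires --mcount\n");
                    return -1;
            }
            return 0;
    }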
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Sathvika Vasireddy <sv@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221114175754.1131267-12-sv@linux.ibm.com
|
In order to allow using objtool on cross-built kernels,
determine the size of 'long' from the ELF data instead of using
sizeof(long) at build time.
For the time being this covers only mcount.
[Sathvika Vasireddy: Rename variable "size" to "addrsize" and function
"elf_class_size()" to "elf_class_addrsize()", and modify
create_mcount_loc_sections() function to follow reverse christmas tree
format to order local variable declarations.]
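A minimal sketch of the idea, reading EI_CLASS from the header of the
object being processed rather than baking in the host's word size:
    #include <gelf.h>
    #include <stddef.h>

    static size_t elf_class_addrsize(const GElf_Ehdr *ehdr)
    {
            return ehdr->e_ident[EI_CLASS] == ELFCLASS32 ? 4 : 8;
    }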
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Sathvika Vasireddy <sv@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221114175754.1131267-11-sv@linux.ibm.com
|
Some architectures, like powerpc, support both endiannesses; it's
therefore not possible to fix the endianness via arch/endianness.h
because there is no easy way to get the target endianness at
build time.
Use the endianness recorded in the file objtool is working on.
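A sketch of the test: swap bytes only when the host and the object
file disagree, using the EI_DATA byte recorded in the file itself:
    #include <endian.h>
    #include <gelf.h>
    #include <stdbool.h>

    static bool need_bswap(const GElf_Ehdr *ehdr)
    {
            return (__BYTE_ORDER == __LITTLE_ENDIAN) ^
                   (ehdr->e_ident[EI_DATA] == ELFDATA2LSB);
    }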
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221114175754.1131267-10-sv@linux.ibm.com
|
When moving a symbol in the symtab its index changes and any reloc
referring to that symbol-table index will need to be rewritten too.
In order to facilitate this, objtool simply marks the whole reloc
section 'changed' which will cause the whole section to be
re-generated.
However, finding the relocs that use any given symbol is implemented
rather crudely -- a full iteration of all sections and their relocs.
Given that some builds have over 20k sections (kallsyms etc.)
iterating all that for *each* symbol moved takes a bit of time.
Instead have each symbol keep a list of relocs that reference it.
This *vastly* improves build times for certain configs.
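Structurally (member names assumed) this amounts to:
    struct symbol {
            /* ...existing fields... */
            struct list_head reloc_list;      /* relocs referencing us */
    };

    struct reloc {
            /* ...existing fields... */
            struct symbol *sym;
            struct list_head sym_reloc_entry; /* node on sym->reloc_list */
    };

    /* registered once when the reloc is created; marking a moved
     * symbol's relocs 'changed' is then a walk of exactly this list
     * instead of a scan of every section: */
    list_add_tail(&reloc->sym_reloc_entry, &reloc->sym->reloc_list);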
Reported-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/Y2LlRA7x+8UsE1xf@hirez.programming.kicks-ass.net
|
Add the location of all __cfi_##name symbols (as generated by kCFI) to
a section such that we might re-write things at kernel boot.
Notably: boot time re-hashing and FineIBT are the intended uses of
this.
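A condensed sketch of the emission loop (iteration helpers assumed);
the same emit-a-site-table pattern appears for the direct-call and
return-thunk sections further down this log:
    for_each_sym(file, sym) {
            if (sym->type != STT_FUNC)
                    continue;
            if (strncmp(sym->name, "__cfi_", 6))
                    continue;
            /* append a section-relative reloc for sym to .cfi_sites */
    }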
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20221027092842.568039454@infradead.org
|
When code is compiled with:
-fpatchable-function-entry=${PADDING_BYTES},${PADDING_BYTES}
functions will have PADDING_BYTES of NOP in front of them. Unwinders
and other things that symbolize code locations will typically
attribute these bytes to the preceding function.
Given that these bytes nominally belong to the following symbol this
mis-attribution is confusing.
Inspired by the fact that CFI_CLANG emits __cfi_##name symbols to
claim these bytes, allow objtool to emit __pfx_##name symbols to do
the same.
Therefore add the objtool --prefix=N argument, to conditionally place
a __pfx_##name symbol at N bytes ahead of symbol 'name' when: all
these preceding bytes are NOP and name-N is an instruction boundary.
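A sketch of the placement check, with assumed helper names: only emit
__pfx_<name> when decoding found an instruction boundary exactly at
name-N and the bytes from there up to 'name' are all NOP padding:
    static void maybe_emit_prefix_symbol(struct objtool_file *file,
                                         struct symbol *func)
    {
            struct instruction *insn;

            insn = find_insn(file, func->sec, func->offset - opts.prefix);
            if (!insn || insn->type != INSN_NOP)
                    return;         /* not a boundary, or not padding */

            /* ...verify the NOP run reaches func->offset, then create
             * the __pfx_<name> symbol of size opts.prefix... */
    }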
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Yujie Liu <yujie.liu@intel.com>
Link: https://lkml.kernel.org/r/20221028194453.526899822@infradead.org
|
Due to how gelf_update_sym*() requires an Elf_Data pointer, and how
libelf keeps Elf_Data in a linked list per section,
elf_update_symbol() ends up having to iterate this list on each
update to find the correct Elf_Data for the index'ed symbol.
By allocating one Elf_Data per new symbol, the list grows per new
symbol, giving an effective O(n^2) insertion time. This is obviously
bloody terrible.
Therefore over-allocate the Elf_Data when an extension is needed.
Except it turns out libelf disregards Elf_Scn::sh_size in favour of
the sum of Elf_Data::d_size. IOW it will happily write out all the
unused space and fill it with:
0000000000000000 0 NOTYPE LOCAL DEFAULT UND
entries (aka zeros). Which obviously violates the STB_LOCAL placement
rule, and is a general pain in the backside for not being the desired
behaviour.
Manually fix-up the Elf_Data size to avoid this problem before calling
elf_update().
This significantly improves performance when adding a large
number of symbols.
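The shape of the fix (variable names illustrative): keep the
over-allocated buffer for amortized O(1) symbol inserts, but clamp the
sizes before elf_update() so libelf -- which writes out sum(d_size)
rather than sh_size -- doesn't emit the unused tail:
    data->d_size = nr_used_syms * symtab_entsize;
    shdr.sh_size = data->d_size;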
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Yujie Liu <yujie.liu@intel.com>
Link: https://lkml.kernel.org/r/20221028194453.461658986@infradead.org
|
Currently insn->func contains an instruction -> symbol link for
STT_FUNC symbols. A NULL value is assumed to mean STT_NOTYPE.
However, there are also instructions not covered by any symbol at all.
This can happen due to __weak symbols for example.
Since the current scheme cannot differentiate between no symbol and
STT_NOTYPE symbol, change things around. Make insn->sym point to any
symbol type such that !insn->sym means no symbol and add a helper
insn_func() that checks the sym->type to retain the old functionality.
This then prepares the way to add code that depends on the distinction
between STT_NOTYPE and no symbol at all.
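The helper can be as small as this sketch of the described behaviour:
    static inline struct symbol *insn_func(struct instruction *insn)
    {
            struct symbol *sym = insn->sym;

            if (sym && sym->type != STT_FUNC)
                    sym = NULL;

            return sym;
    }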
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
The current find_{symbol,func}_containing() functions are broken in
the face of overlapping symbols, exactly the case that is needed for a
new ibt/endbr suppression.
Import interval_tree_generic.h into the tools tree and convert the
symbol tree to an interval tree to support proper range stabs.
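A sketch of the conversion, assuming struct symbol gains an rb_node
('node') plus the '__subtree_last' field, and struct section holds the
rb_root_cached 'symbol_tree':
    #include "interval_tree_generic.h"

    #define __sym_start(s)  ((s)->offset)
    #define __sym_last(s)   ((s)->offset + (s)->len - 1)

    INTERVAL_TREE_DEFINE(struct symbol, node, unsigned long, __subtree_last,
                         __sym_start, __sym_last, static, __sym)

    /* a range stab now copes naturally with overlapping symbols: */
    struct symbol *find_symbol_containing(struct section *sec,
                                          unsigned long offset)
    {
            struct symbol *iter;

            for (iter = __sym_iter_first(&sec->symbol_tree, offset, offset);
                 iter; iter = __sym_iter_next(iter, offset, offset)) {
                    if (iter->offset <= offset &&
                        iter->offset + iter->len > offset)
                            return iter;
            }
            return NULL;
    }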
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111146.330203761@infradead.org
|
Make the call/func sections selectable via the --hacks option.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111146.120821440@infradead.org
|
In preparation for call depth tracking provide a section which collects all
direct calls.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111146.016511961@infradead.org
|
For future usage of .init.text exclusion track the init section in the
instruction decoder and use the result in retpoline validation.
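Assumed shape of the tracking; set once while decoding, then tested
per instruction instead of string-comparing section names:
    /* at decode time: */
    sec->init = !strncmp(sec->name, ".init.text", 10);

    /* retpoline validation then consults insn->sec->init */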
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111145.910334431@infradead.org
|
Objtool doesn't currently much like per-cpu usage in alternatives:
arch/x86/entry/entry_64.o: warning: objtool: .altinstr_replacement+0xf: unsupported relocation in alternatives section
f: 65 c7 04 25 00 00 00 00 00 00 00 80 movl $0x80000000,%gs:0x0 13: R_X86_64_32S __x86_call_depth
Since the R_X86_64_32S relocation is location invariant (its
computation doesn't include P - the address of the location itself),
it can be trivially allowed.
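A sketch of the x86 hook: report the relocation types whose
computation includes P, the address of the place itself; everything
else is location invariant and safe to copy around in alternatives:
    #include <elf.h>
    #include <stdbool.h>

    bool arch_pc_relative_reloc(struct reloc *reloc)
    {
            switch (reloc->type) {
            case R_X86_64_PC8:
            case R_X86_64_PC32:
            case R_X86_64_PC64:
            case R_X86_64_PLT32:
            case R_X86_64_GOTPCREL:
                    return true;    /* computation includes P */
            default:
                    return false;   /* e.g. R_X86_64_32S: S + A only */
            }
    }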
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111145.806607235@infradead.org
|
Do fine-grained Kconfig for all the various retbleed parts.
NOTE: if your compiler doesn't support return thunks this will
silently 'upgrade' your mitigation to IBPB; you might not like this.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
|
Commit
c536ed2fffd5 ("objtool: Remove SAVE/RESTORE hints")
removed the save/restore unwind hints because they were no longer
needed. Now they're going to be needed again so re-add them.
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
|
Since entry asm is tricky, add a validation pass that ensures the
retbleed mitigation has been done before the first actual RET
instruction.
Entry points are those that either have UNWIND_HINT_ENTRY, which acts
as UNWIND_HINT_EMPTY but marks the instruction as an entry point, or
those that have UNWIND_HINT_IRET_REGS at +0.
This is basically an intra-function variant of validate_branch(): it
simply follows all branches from marked entry points and ensures that
all paths lead to ANNOTATE_UNRET_END.
If a path hits RET or an indirection the path is a fail and will be
reported.
There are 3 ANNOTATE_UNRET_END instances:
- UNTRAIN_RET itself
- exception from-kernel; this path doesn't need UNTRAIN_RET
- all early exceptions; these also don't need UNTRAIN_RET
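A heavily condensed sketch of the walk (names assumed; the real pass
also handles alternatives, visited tracking and dead ends):
    static int validate_unret(struct objtool_file *file,
                              struct instruction *insn)
    {
            while (insn) {
                    if (insn->unret)        /* ANNOTATE_UNRET_END */
                            return 0;

                    switch (insn->type) {
                    case INSN_RETURN:
                    case INSN_CALL_DYNAMIC:
                    case INSN_JUMP_DYNAMIC:
                            return 1;       /* RET/indirection first: fail */
                    case INSN_JUMP_CONDITIONAL:
                            if (validate_unret(file, insn->jump_dest))
                                    return 1;
                            break;          /* then the fallthrough path */
                    case INSN_JUMP_UNCONDITIONAL:
                            insn = insn->jump_dest;
                            continue;
                    default:
                            break;
                    }
                    insn = next_insn_to_validate(file, insn);
            }
            return 1;                       /* fell off the end */
    }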
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
|
Find all the return-thunk sites and record them in a .return_sites
section such that the kernel can undo this.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
|
Commit c087c6e7b551 ("objtool: Fix type of reloc::addend") failed to
appreciate cross building from ILP32 hosts, where 'int' == 'long' and
the issue persists.
As such, use s64/int64_t/Elf64_Sxword for this field and suffer the
pain that is ISO C99 printf formats for it.
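The "pain" in practice: no plain %ld/%lld is right on every host, so
printing goes through the C99 macros (illustrative snippet):
    #include <inttypes.h>
    #include <stdio.h>

    static void print_addend(int64_t addend)
    {
            printf("addend: %" PRId64 "\n", addend);
    }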
Fixes: c087c6e7b551 ("objtool: Fix type of reloc::addend")
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
[peterz: reword changelog, s/long long/s64/]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: <stable@vger.kernel.org>
Link: https://lkml.kernel.org/r/alpine.LRH.2.02.2205161041260.11556@file01.intranet.prod.int.rdu2.redhat.com
|
The '--lto' option is a confusing way of telling objtool to do stack
validation despite it being a linked object. It's no longer needed now
that an explicit '--stackval' option exists. The '--vmlinux' option is
also redundant.
Remove both options in favor of a straightforward '--link' option which
identifies a linked object.
Also, implicitly set '--link' with a warning if the user forgets to do
so and we can tell that it's a linked object. This makes it easier for
manual vmlinux runs.
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Link: https://lkml.kernel.org/r/dcd3ceffd15a54822c6183e5766d21ad06082b45.1650300597.git.jpoimboe@redhat.com
|
Objtool has some hacks in place to workaround toolchain limitations
which otherwise would break no-instrumentation rules. Make the hacks
explicit (and optional for other arches) by turning it into a cmdline
option and kernel config option.
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Link: https://lkml.kernel.org/r/b326eeb9c33231b9dfbb925f194ed7ee40edcd7c.1650300597.git.jpoimboe@redhat.com
|
Objtool secretly does a jump label hack to overcome the limitations of
the toolchain. Make the hack explicit (and optional for other arches)
by turning it into a cmdline option and kernel config option.
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Link: https://lkml.kernel.org/r/3bdcbfdd27ecb01ddec13c04bdf756a583b13d24.1650300597.git.jpoimboe@redhat.com
|
As part of making objtool more modular, put the existing static call
code behind a new '--static-call' option.
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Link: https://lkml.kernel.org/r/d59ac57ef3d6d8380cdce20322314c9e2e556750.1650300597.git.jpoimboe@redhat.com
|
Now that CONFIG_STACK_VALIDATION is frame-pointer specific, do the same
for the '--stackval' option. Now the '--no-fp' option is redundant and
can be removed.
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Link: https://lkml.kernel.org/r/f563fa064b3b63d528de250c72012d49e14742a3.1650300597.git.jpoimboe@redhat.com
|
Make stack validation an explicit cmdline option so that individual
objtool features can be enabled individually by other arches.
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Link: https://lkml.kernel.org/r/52da143699574d756e65ca4c9d4acaffe9b0fe5f.1650300597.git.jpoimboe@redhat.com
|
To help prevent objtool users from having to do math to convert function
addresses to section addresses, and to help out with finding data
addresses reported by IBT validation, add an option to print the section
address in addition to the function address.
Normal:
vmlinux.o: warning: objtool: fixup_exception()+0x2d1: unreachable instruction
With '--sec-address':
vmlinux.o: warning: objtool: fixup_exception()+0x2d1 (.text+0x76c51): unreachable instruction
Suggested-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Link: https://lkml.kernel.org/r/2cea4d5299d53d1a4c09212a6ad7820aa46fda7a.1650300597.git.jpoimboe@redhat.com
|
The parentheses in the "func()+off" address output are inconsistent with
how the kernel prints function addresses, breaking Peter's scripts.
Remove them.
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Link: https://lkml.kernel.org/r/f2bec70312f62ef4f1ea21c134d9def627182ad3.1650300597.git.jpoimboe@redhat.com
|
Objtool has a fairly singular focus. It runs on object files and does
validations and transformations which can be combined in various ways.
The subcommand model has never been a good fit, making it awkward to
combine and remove options.
Remove the "check" and "orc" subcommands in favor of a more traditional
cmdline option model. This makes it much more flexible to use, and
easier to port individual features to other arches.
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Link: https://lkml.kernel.org/r/5c61ebf805e90aefc5fa62bc63468ffae53b9df6.1650300597.git.jpoimboe@redhat.com
|
Split the existing options into two groups: actions, which actually do
something; and options, which modify the actions in some way.
Also there's no need to have short flags for all the non-action options.
Reserve short flags for the more important actions.
While at it:
- change a few of the short flags to be more intuitive
- make option descriptions more consistently descriptive
- sort options in the source like they are when printed
- move options to a global struct
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Link: https://lkml.kernel.org/r/9dcaa752f83aca24b1b21f0b0eeb28a0c181c0b0.1650300597.git.jpoimboe@redhat.com
|
Elf{32,64}_Rela::r_addend is of type Elf{32,64}_Sword; that means
that our reloc::addend needs to be long or we face truncation issues
when we do elf_rebuild_reloc_section():
- 107: 48 b8 00 00 00 00 00 00 00 00 movabs $0x0,%rax 109: R_X86_64_64 level4_kernel_pgt+0x80000067
+ 107: 48 b8 00 00 00 00 00 00 00 00 movabs $0x0,%rax 109: R_X86_64_64 level4_kernel_pgt-0x7fffff99
Fixes: 627fce14809b ("objtool: Add ORC unwind table generation")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lkml.kernel.org/r/20220419203807.596871927@infradead.org
|
Objtool's function fallthrough detection only works on C objects.
The distinction between C and assembly objects no longer makes sense
with objtool running on vmlinux.o.
Now that copy_user_64.S has been fixed up, and an objtool sibling call
detection bug has been fixed, the asm code is in "compliance" and this
hack is no longer needed. Remove it.
Fixes: ed53a0d97192 ("x86/alternative: Use .ibt_endbr_seal to seal indirect calls")
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/b434cff98eca3a60dcc64c620d7d5d405a0f441c.1649718562.git.jpoimboe@redhat.com
|
Find all ENDBR instructions which are never referenced and stick them
in a section such that the kernel can poison them, sealing the
functions from ever being an indirect call target.
This removes about 1-in-4 ENDBR instructions.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154319.763643193@infradead.org
|
Intel IBT requires that every indirect JMP/CALL targets an ENDBR
instruction; failing this, #CP happens and we die. Similarly, all
exception entries should be ENDBR.
Find all code relocations and ensure they're either an ENDBR
instruction or ANNOTATE_NOENDBR. For the exceptions look for
UNWIND_HINT_IRET_REGS at sym+0 not being ENDBR.
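A condensed sketch of the check (helper names assumed): resolve every
code relocation to its destination instruction and complain unless it
is an ENDBR or was explicitly annotated:
    for_each_reloc(sec, reloc) {
            struct instruction *dest = insn_from_reloc(file, reloc);

            if (!dest || dest->type == INSN_ENDBR || dest->noendbr)
                    continue;

            WARN_FUNC("relocation to !ENDBR", dest->sec, dest->offset);
    }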
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154319.705110141@infradead.org
|
Intel IBT requires the target of any indirect CALL or JMP instruction
to be the ENDBR instruction; optionally it allows those two
instructions to have a NOTRACK prefix in order to avoid this
requirement.
The kernel will not enable the use of NOTRACK, as such any occurrence
of it in compiler generated code should be flagged.
Teach objtool to decode ENDBR instructions and WARN about NOTRACK
prefixes.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154319.645963517@infradead.org
|
Read the new NOENDBR annotation. While there, attempt to not bloat
struct instruction.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154319.586815435@infradead.org
|
There's a fun implementation detail on linking STB_WEAK symbols. When
the linker combines two translation units, where one contains a weak
function and the other an override for it, it simply strips the
STB_WEAK symbol from the symbol table, but doesn't actually remove the
code.
The result is that when objtool is run in a whole-archive kind of way,
it will encounter *heaps* of unused (and unreferenced) code. All
rudiments of weak functions.
Additionally, when a weak implementation is split into a .cold
subfunction, that .cold symbol is left in place, even though completely
unused.
Teach objtool to ignore such rudiments by searching for symbol holes;
that is, code ranges that fall outside the given symbol bounds.
Specifically, ignore a sequence of unreachable instructions iff they
occupy a single hole; additionally, ignore any .cold subfunctions
referenced.
Both ld.bfd and ld.lld behave like this. LTO builds otoh can (and do)
properly DCE weak functions.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154319.232019347@infradead.org