path: root/tools/testing/selftests/bpf

2021-12-16  bpf: Right align verifier states in verifier logs.  (Christy Lee, 1 file changed, -77/+92)

Make the verifier logs more readable by printing the verifier states on the corresponding instruction line. If the previous line was not a bpf instruction, print the verifier states on a separate line.

Before:

Validating test_pkt_access_subprog3() func#3...
86: R1=invP(id=0) R2=ctx(id=0,off=0,imm=0) R10=fp0
; int test_pkt_access_subprog3(int val, struct __sk_buff *skb)
86: (bf) r6 = r2
87: R2=ctx(id=0,off=0,imm=0) R6_w=ctx(id=0,off=0,imm=0)
87: (bc) w7 = w1
88: R1=invP(id=0) R7_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff))
; return get_skb_len(skb) * get_skb_ifindex(val, skb, get_constant(123));
88: (bf) r1 = r6
89: R1_w=ctx(id=0,off=0,imm=0) R6_w=ctx(id=0,off=0,imm=0)
89: (85) call pc+9
Func#4 is global and valid. Skipping.
90: R0_w=invP(id=0)
90: (bc) w8 = w0
91: R0_w=invP(id=0) R8_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff))
; return get_skb_len(skb) * get_skb_ifindex(val, skb, get_constant(123));
91: (b7) r1 = 123
92: R1_w=invP123
92: (85) call pc+65
Func#5 is global and valid. Skipping.
93: R0=invP(id=0)

After:

86: R1=invP(id=0) R2=ctx(id=0,off=0,imm=0) R10=fp0
; int test_pkt_access_subprog3(int val, struct __sk_buff *skb)
86: (bf) r6 = r2                      ; R2=ctx(id=0,off=0,imm=0) R6_w=ctx(id=0,off=0,imm=0)
87: (bc) w7 = w1                      ; R1=invP(id=0) R7_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff))
; return get_skb_len(skb) * get_skb_ifindex(val, skb, get_constant(123));
88: (bf) r1 = r6                      ; R1_w=ctx(id=0,off=0,imm=0) R6_w=ctx(id=0,off=0,imm=0)
89: (85) call pc+9
Func#4 is global and valid. Skipping.
90: R0_w=invP(id=0)
90: (bc) w8 = w0                      ; R0_w=invP(id=0) R8_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff))
; return get_skb_len(skb) * get_skb_ifindex(val, skb, get_constant(123));
91: (b7) r1 = 123                     ; R1_w=invP123
92: (85) call pc+65
Func#5 is global and valid. Skipping.
93: R0=invP(id=0)

Signed-off-by: Christy Lee <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>

2021-12-16  bpf: Only print scratched registers and stack slots to verifier logs.  (Christy Lee, 1 file changed, -15/+15)

When printing verifier state for any log level, print full verifier state only on function calls or on errors. Otherwise, only print the registers and stack slots that were accessed.

Log size differences:

verif_scale_loop6 before: 234566564
verif_scale_loop6 after:   72143943
69% size reduction
kfree_skb before: 166406
kfree_skb after:   55386
69% size reduction

Before:

156: (61) r0 = *(u32 *)(r1 +0)
157: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R1=ctx(id=0,off=0,imm=0) R2_w=invP0 R10=fp0 fp-8_w=00000000 fp-16_w=00000000 fp-24_w=00000000 fp-32_w=00000000 fp-40_w=00000000 fp-48_w=00000000 fp-56_w=00000000 fp-64_w=00000000 fp-72_w=00000000 fp-80_w=00000000 fp-88_w=00000000 fp-96_w=00000000 fp-104_w=00000000 fp-112_w=00000000 fp-120_w=00000000 fp-128_w=00000000 fp-136_w=00000000 fp-144_w=00000000 fp-152_w=00000000 fp-160_w=00000000 fp-168_w=00000000 fp-176_w=00000000 fp-184_w=00000000 fp-192_w=00000000 fp-200_w=00000000 fp-208_w=00000000 fp-216_w=00000000 fp-224_w=00000000 fp-232_w=00000000 fp-240_w=00000000 fp-248_w=00000000 fp-256_w=00000000 fp-264_w=00000000 fp-272_w=00000000 fp-280_w=00000000 fp-288_w=00000000 fp-296_w=00000000 fp-304_w=00000000 fp-312_w=00000000 fp-320_w=00000000 fp-328_w=00000000 fp-336_w=00000000 fp-344_w=00000000 fp-352_w=00000000 fp-360_w=00000000 fp-368_w=00000000 fp-376_w=00000000 fp-384_w=00000000 fp-392_w=00000000 fp-400_w=00000000 fp-408_w=00000000 fp-416_w=00000000 fp-424_w=00000000 fp-432_w=00000000 fp-440_w=00000000 fp-448_w=00000000
; return skb->len;
157: (95) exit
Func#4 is safe for any args that match its prototype
Validating get_constant() func#5...
158: R1=invP(id=0) R10=fp0
; int get_constant(long val)
158: (bf) r0 = r1
159: R0_w=invP(id=1) R1=invP(id=1) R10=fp0
; return val - 122;
159: (04) w0 += -122
160: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R1=invP(id=1) R10=fp0
; return val - 122;
160: (95) exit
Func#5 is safe for any args that match its prototype
Validating get_skb_ifindex() func#6...
161: R1=invP(id=0) R2=ctx(id=0,off=0,imm=0) R3=invP(id=0) R10=fp0
; int get_skb_ifindex(int val, struct __sk_buff *skb, int var)
161: (bc) w0 = w3
162: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R1=invP(id=0) R2=ctx(id=0,off=0,imm=0) R3=invP(id=0) R10=fp0

After:

156: (61) r0 = *(u32 *)(r1 +0)
157: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R1=ctx(id=0,off=0,imm=0)
; return skb->len;
157: (95) exit
Func#4 is safe for any args that match its prototype
Validating get_constant() func#5...
158: R1=invP(id=0) R10=fp0
; int get_constant(long val)
158: (bf) r0 = r1
159: R0_w=invP(id=1) R1=invP(id=1)
; return val - 122;
159: (04) w0 += -122
160: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff))
; return val - 122;
160: (95) exit
Func#5 is safe for any args that match its prototype
Validating get_skb_ifindex() func#6...
161: R1=invP(id=0) R2=ctx(id=0,off=0,imm=0) R3=invP(id=0) R10=fp0
; int get_skb_ifindex(int val, struct __sk_buff *skb, int var)
161: (bc) w0 = w3
162: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R3=invP(id=0)

Signed-off-by: Christy Lee <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-16  Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net  (Jakub Kicinski, 9 files changed, -3/+353)

No conflicts.

Signed-off-by: Jakub Kicinski <[email protected]>

2021-12-16  bpf, selftests: Fix racing issue in btf_skc_cls_ingress test  (Martin KaFai Lau, 1 file changed, -2/+14)

The libbpf CI reported an occasional failure in btf_skc_cls_ingress:

test_syncookie:FAIL:Unexpected syncookie states gen_cookie:80326634 recv_cookie:0
bpf prog error at line 97

"error at line 97" means the bpf prog cannot find the listening socket when the final ack is received. It then skipped processing the syncookie in the final ack, which led to "recv_cookie:0".

The problem is that the userspace program did not do accept() and went ahead to close(listen_fd) before the kernel (and the bpf prog) had a chance to process the final ack. The fix is to add an accept() call so that userspace waits for the kernel to finish processing the final ack before close()-ing everything.

Fixes: 9a856cae2217 ("bpf: selftest: Add test_btf_skc_cls_ingress")
Reported-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Martin KaFai Lau <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

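A minimal sketch of the shape of that fix, not the actual selftest code (the fd name and error handling are illustrative): accept the established connection before tearing the listener down.

#include <sys/socket.h>
#include <unistd.h>
#include <stdio.h>

/* Illustrative: accept() the established connection so the kernel (and
 * the attached bpf prog) has finished processing the final ACK before
 * the listener and client sockets get close()-d.
 */
static int drain_established(int listen_fd)
{
	int srv_fd = accept(listen_fd, NULL, NULL);

	if (srv_fd < 0) {
		perror("accept");
		return -1;
	}
	close(srv_fd);
	return 0;
}
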
2021-12-16  selftest/bpf: Add a test that reads various addresses.  (Alexei Starovoitov, 2 files changed, -0/+32)

Add a function to bpf_testmod that returns invalid kernel and user addresses. Then attach an fexit program to that function that tries to read memory through these addresses. This checks that the bpf_probe_read_kernel() and BPF_PROBE_MEM logic is sane.

Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>

2021-12-16  selftests/bpf: Enable cross-building with clang  (Jean-Philippe Brucker, 1 file changed, -4/+4)

Cross-building with clang requires passing the "-target" flag rather than using the CROSS_COMPILE prefix. Makefile.include transforms CROSS_COMPILE into CLANG_CROSS_FLAGS. Clear CROSS_COMPILE for bpftool and the host libbpf, and use the clang flags for urandom_read and bench.

Signed-off-by: Jean-Philippe Brucker <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Acked-by: Quentin Monnet <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-16  bpf, selftests: Add test case trying to taint map value pointer  (Daniel Borkmann, 1 file changed, -0/+23)

Add a test case which tries to taint map value pointer arithmetic into an unknown scalar with subsequent export through the map.

Before fix:

# ./test_verifier 1186
#1186/u map access: trying to leak tained dst reg FAIL
Unexpected success to load!
verification time 24 usec
stack depth 8
processed 15 insns (limit 1000000) max_states_per_insn 0 total_states 1 peak_states 1 mark_read 1
#1186/p map access: trying to leak tained dst reg FAIL
Unexpected success to load!
verification time 8 usec
stack depth 8
processed 15 insns (limit 1000000) max_states_per_insn 0 total_states 1 peak_states 1 mark_read 1
Summary: 0 PASSED, 0 SKIPPED, 2 FAILED

After fix:

# ./test_verifier 1186
#1186/u map access: trying to leak tained dst reg OK
#1186/p map access: trying to leak tained dst reg OK
Summary: 2 PASSED, 0 SKIPPED, 0 FAILED

Signed-off-by: Daniel Borkmann <[email protected]>
Reviewed-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>

2021-12-14  bpf, selftests: Update test case for atomic cmpxchg on r0 with pointer  (Daniel Borkmann, 1 file changed, -2/+65)

Fix up unprivileged test case results for the 'Dest pointer in r0' verifier tests, given they now need to reject R0 containing a pointer value, and add a couple of new related ones with 32-bit cmpxchg as well.

root@foo:~/bpf/tools/testing/selftests/bpf# ./test_verifier
#0/u invalid and of negative number OK
#0/p invalid and of negative number OK
[...]
#1268/p XDP pkt read, pkt_meta' <= pkt_data, bad access 1 OK
#1269/p XDP pkt read, pkt_meta' <= pkt_data, bad access 2 OK
#1270/p XDP pkt read, pkt_data <= pkt_meta', good access OK
#1271/p XDP pkt read, pkt_data <= pkt_meta', bad access 1 OK
#1272/p XDP pkt read, pkt_data <= pkt_meta', bad access 2 OK
Summary: 1900 PASSED, 0 SKIPPED, 0 FAILED

Acked-by: Brendan Jackman <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>

2021-12-14  bpf, selftests: Add test case for atomic fetch on spilled pointer  (Daniel Borkmann, 2 files changed, -0/+117)

Test whether an unprivileged user would be able to leak the spilled pointer, either by exporting the returned value from the atomic{32,64} operation or by reading and exporting the value from the stack after the atomic operation took place.

Note that for unprivileged, the atomic cmpxchg test case below named "Dest pointer in r0 - succeed" is failing. The reason is that in the dst memory location (r10 -8) there is the spilled register r10:

0: R1=ctx(id=0,off=0,imm=0) R10=fp0
0: (bf) r0 = r10
1: R0_w=fp0 R1=ctx(id=0,off=0,imm=0) R10=fp0
1: (7b) *(u64 *)(r10 -8) = r0
2: R0_w=fp0 R1=ctx(id=0,off=0,imm=0) R10=fp0 fp-8_w=fp
2: (b7) r1 = 0
3: R0_w=fp0 R1_w=invP0 R10=fp0 fp-8_w=fp
3: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r1)
4: R0_w=fp0 R1_w=invP0 R10=fp0 fp-8_w=mmmmmmmm
4: (79) r1 = *(u64 *)(r0 -8)
5: R0_w=fp0 R1_w=invP(id=0) R10=fp0 fp-8_w=mmmmmmmm
5: (b7) r0 = 0
6: R0_w=invP0 R1_w=invP(id=0) R10=fp0 fp-8_w=mmmmmmmm
6: (95) exit

However, allowing this case for unprivileged is a bit useless given an update with a new pointer will fail anyway:

0: R1=ctx(id=0,off=0,imm=0) R10=fp0
0: (bf) r0 = r10
1: R0_w=fp0 R1=ctx(id=0,off=0,imm=0) R10=fp0
1: (7b) *(u64 *)(r10 -8) = r0
2: R0_w=fp0 R1=ctx(id=0,off=0,imm=0) R10=fp0 fp-8_w=fp
2: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r10)
R10 leaks addr into mem

Acked-by: Brendan Jackman <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>

2021-12-14  selftests/bpf: Stop using bpf_object__find_program_by_title API.  (Kui-Feng Lee, 10 files changed, -67/+104)

bpf_object__find_program_by_title() is going to be deprecated. Replace all use cases in tools/testing/selftests/bpf with bpf_object__find_program_by_name() or bpf_object__for_each_program().

Signed-off-by: Kui-Feng Lee <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

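A minimal sketch of the two replacement patterns, assuming a hypothetical program named "handle_tp" in a hypothetical tracepoint section; this is not code from the converted selftests.

#include <string.h>
#include <bpf/libbpf.h>

/* Look a program up by its C function name, or iterate over all
 * programs when the name is not known up front.
 */
static struct bpf_program *find_prog(struct bpf_object *obj)
{
	struct bpf_program *prog;

	prog = bpf_object__find_program_by_name(obj, "handle_tp");
	if (prog)
		return prog;

	bpf_object__for_each_program(prog, obj) {
		if (!strcmp(bpf_program__section_name(prog), "tp/syscalls/sys_enter_write"))
			return prog;
	}
	return NULL;
}
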
2021-12-14  selftests/bpf: Remove explicit setrlimit(RLIMIT_MEMLOCK) in main selftests  (Andrii Nakryiko, 8 files changed, -24/+3)

As libbpf is now able to automatically take care of the RLIMIT_MEMLOCK increase (or skip it altogether on recent enough kernels), remove the explicit setrlimit() invocations in bench, test_maps, test_verifier, and test_progs.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

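For reference, a sketch of the kind of boilerplate being removed; this is an assumption of the typical pattern, not a quote of the deleted code. Older kernels charged BPF objects against RLIMIT_MEMLOCK, so tests bumped it by hand.

#include <sys/resource.h>

/* The now-redundant pattern: raise RLIMIT_MEMLOCK so BPF maps and
 * programs can be created on pre-memcg-accounting kernels.
 */
static int bump_memlock_rlimit(void)
{
	struct rlimit rlim = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	return setrlimit(RLIMIT_MEMLOCK, &rlim);
}
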
2021-12-14  selftests/bpf: Fix OOB write in test_verifier  (Kumar Kartikeya Dwivedi, 1 file changed, -1/+1)

The commit referenced below added fixup_map_timer support (to create a BPF map containing timers), but failed to increase the size of the map_fds array, leading to an out-of-bounds write. Fix this by changing MAX_NR_MAPS to 22.

Fixes: e60e6962c503 ("selftests/bpf: Add tests for restricted helpers")
Signed-off-by: Kumar Kartikeya Dwivedi <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-13  selftests/bpf: Fix segfault in bpf_tcp_ca  (Jean-Philippe Brucker, 1 file changed, -4/+3)

Since commit ad9a7f96445b ("libbpf: Improve logging around BPF program loading"), libbpf_debug_print() gets an additional prog_name parameter but doesn't pass it to printf(). Since the format string now expects two arguments, printf() may read uninitialized data and segfault. Pass prog_name through.

Fixes: ad9a7f96445b ("libbpf: Improve logging around BPF program loading")
Signed-off-by: Jean-Philippe Brucker <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-13  selftests/bpf: Add tests for get_func_[arg|ret|arg_cnt] helpers  (Jiri Olsa, 2 files changed, -0/+167)

Add tests for the get_func_[arg|ret|arg_cnt] helpers, using these helpers in fentry/fexit/fmod_ret programs.

Signed-off-by: Jiri Olsa <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

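A rough sketch of how these helpers can be used from an fexit program, assuming a kernel and vmlinux.h build where bpf_get_func_arg(), bpf_get_func_ret(), and bpf_get_func_arg_cnt() are available; the attach target is illustrative and not one of the added tests.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("fexit/bpf_fentry_test1")
int check_args(void *ctx)
{
	__u64 cnt, a0 = 0, ret = 0;

	cnt = bpf_get_func_arg_cnt(ctx);
	bpf_get_func_arg(ctx, 0, &a0);	/* first argument of the traced function */
	bpf_get_func_ret(ctx, &ret);	/* return value, valid in fexit/fmod_ret */
	bpf_printk("cnt=%llu arg0=%llu ret=%llu", cnt, a0, ret);
	return 0;
}
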
2021-12-13  selftests/bpf: Add test to access int ptr argument in tracing program  (Jiri Olsa, 1 file changed, -0/+12)

Add a verifier test for accessing an int pointer argument in tracing programs. The test program loads the 2nd argument of the bpf_modify_return_test function, which is an int pointer, and checks that the verifier allows that.

Signed-off-by: Jiri Olsa <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-12  selftests/bpf: Remove last bpf_create_map_xattr from test_verifier  (Andrii Nakryiko, 1 file changed, -9/+5)

A bpf_create_map_xattr() call was reintroduced after merging the bpf tree into the bpf-next tree. Convert the last instance into a bpf_map_create() call.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

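A minimal sketch of what the replacement call looks like; the map name, sizes, and flags here are illustrative, not the test_verifier conversion itself.

#include <bpf/bpf.h>

/* Create a small hash map via the new low-level bpf_map_create() API. */
static int create_hash_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);

	return bpf_map_create(BPF_MAP_TYPE_HASH, "demo_hash",
			      sizeof(__u32), sizeof(__u64), 128, &opts);
}
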
2021-12-11  selftests/bpf: Add test cases for bpf_strncmp()  (Hou Tao, 2 files changed, -0/+221)

Four test cases are added:
(1) ensure the return value is as expected
(2) ensure a non-const string size is rejected
(3) ensure a writable target string is rejected
(4) ensure a target string that is not null-terminated is rejected

Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

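A minimal sketch of how the helper is meant to be called, assuming a kernel that provides bpf_strncmp(); the section, buffer, and strings are illustrative. The constraints the tests above exercise map directly onto the arguments: the size must be a constant, and the target string must be a read-only, null-terminated constant.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

const char target[] = "target_string";	/* read-only, null-terminated constant */
char got[32];				/* filled elsewhere, e.g. via bpf_probe_read_*_str() */

SEC("tp/syscalls/sys_enter_getpgid")
int cmp_example(void *ctx)
{
	/* bpf_strncmp(s1, s1_sz, s2_const): compare at most s1_sz bytes. */
	if (bpf_strncmp(got, sizeof(got), target) == 0)
		bpf_printk("matched");
	return 0;
}
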
2021-12-11  selftests/bpf: Add benchmark for bpf_strncmp() helper  (Hou Tao, 5 files changed, -1/+232)

Add a benchmark to compare the performance between a home-made strncmp() in a bpf program and the bpf_strncmp() helper. In summary, the performance win of bpf_strncmp() under x86-64 is greater than 18% when the compared string length is greater than 64, and is 179% when the length is 4095. Under arm64 the performance win is even bigger: 33% when the length is greater than 64 and 600% when the length is 4095.

The details follow (no-helper-X: use home-made strncmp() to compare an X-sized string; helper-Y: use bpf_strncmp() to compare a Y-sized string).

Under x86-64:

no-helper-1      3.504 ± 0.000M/s (drops 0.000 ± 0.000M/s)
helper-1         3.347 ± 0.001M/s (drops 0.000 ± 0.000M/s)
no-helper-8      3.357 ± 0.001M/s (drops 0.000 ± 0.000M/s)
helper-8         3.307 ± 0.001M/s (drops 0.000 ± 0.000M/s)
no-helper-32     3.064 ± 0.000M/s (drops 0.000 ± 0.000M/s)
helper-32        3.253 ± 0.001M/s (drops 0.000 ± 0.000M/s)
no-helper-64     2.563 ± 0.001M/s (drops 0.000 ± 0.000M/s)
helper-64        3.040 ± 0.001M/s (drops 0.000 ± 0.000M/s)
no-helper-128    1.975 ± 0.000M/s (drops 0.000 ± 0.000M/s)
helper-128       2.641 ± 0.000M/s (drops 0.000 ± 0.000M/s)
no-helper-512    0.759 ± 0.000M/s (drops 0.000 ± 0.000M/s)
helper-512       1.574 ± 0.000M/s (drops 0.000 ± 0.000M/s)
no-helper-2048   0.329 ± 0.000M/s (drops 0.000 ± 0.000M/s)
helper-2048      0.602 ± 0.000M/s (drops 0.000 ± 0.000M/s)
no-helper-4095   0.117 ± 0.000M/s (drops 0.000 ± 0.000M/s)
helper-4095      0.327 ± 0.000M/s (drops 0.000 ± 0.000M/s)

Under arm64:

no-helper-1      2.806 ± 0.004M/s (drops 0.000 ± 0.000M/s)
helper-1         2.819 ± 0.002M/s (drops 0.000 ± 0.000M/s)
no-helper-8      2.797 ± 0.109M/s (drops 0.000 ± 0.000M/s)
helper-8         2.786 ± 0.025M/s (drops 0.000 ± 0.000M/s)
no-helper-32     2.399 ± 0.011M/s (drops 0.000 ± 0.000M/s)
helper-32        2.703 ± 0.002M/s (drops 0.000 ± 0.000M/s)
no-helper-64     2.020 ± 0.015M/s (drops 0.000 ± 0.000M/s)
helper-64        2.702 ± 0.073M/s (drops 0.000 ± 0.000M/s)
no-helper-128    1.604 ± 0.001M/s (drops 0.000 ± 0.000M/s)
helper-128       2.516 ± 0.002M/s (drops 0.000 ± 0.000M/s)
no-helper-512    0.699 ± 0.000M/s (drops 0.000 ± 0.000M/s)
helper-512       2.106 ± 0.003M/s (drops 0.000 ± 0.000M/s)
no-helper-2048   0.215 ± 0.000M/s (drops 0.000 ± 0.000M/s)
helper-2048      1.223 ± 0.003M/s (drops 0.000 ± 0.000M/s)
no-helper-4095   0.112 ± 0.000M/s (drops 0.000 ± 0.000M/s)
helper-4095      0.796 ± 0.000M/s (drops 0.000 ± 0.000M/s)

Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-11  selftests/bpf: Fix checkpatch error on empty function parameter  (Hou Tao, 6 files changed, -33/+34)

Fix the checkpatch error: "ERROR: Bad function definition - void foo() should probably be void foo(void)". Most replacements are done by the following command:

sed -i 's#\([a-z]\)()$#\1(void)#g' testing/selftests/bpf/benchs/*.c

Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-10  Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next  (Jakub Kicinski, 73 files changed, -517/+2189)

Andrii Nakryiko says:

====================
bpf-next 2021-12-10 v2

We've added 115 non-merge commits during the last 26 day(s) which contain a total of 182 files changed, 5747 insertions(+), 2564 deletions(-).

The main changes are:

1) Various samples fixes, from Alexander Lobakin.
2) BPF CO-RE support in kernel and light skeleton, from Alexei Starovoitov.
3) A batch of new unified APIs for libbpf, logging improvements, version querying, etc. Also a batch of old deprecations for old APIs and various bug fixes, in preparation for libbpf 1.0, from Andrii Nakryiko.
4) BPF documentation reorganization and improvements, from Christoph Hellwig and Dave Tucker.
5) Support for declarative initialization of BPF_MAP_TYPE_PROG_ARRAY in libbpf, from Hengqi Chen.
6) Verifier log fixes, from Hou Tao.
7) Runtime-bounded loops support with bpf_loop() helper, from Joanne Koong.
8) Extend branch record capturing to all platforms that support it, from Kajol Jain.
9) Light skeleton codegen improvements, from Kumar Kartikeya Dwivedi.
10) bpftool doc-generating script improvements, from Quentin Monnet.
11) Two libbpf v0.6 bug fixes, from Shuyi Cheng and Vincent Minet.
12) Deprecation warning fix for perf/bpf_counter, from Song Liu.
13) MAX_TAIL_CALL_CNT unification and MIPS build fix for libbpf, from Tiezhu Yang.
14) BTF_KIND_TYPE_TAG follow-up fixes, from Yonghong Song.
15) Selftests fixes and improvements, from Ilya Leoshkevich, Jean-Philippe Brucker, Jiri Olsa, Maxim Mikityanskiy, Tirthendu Sarkar, Yucong Sun, and others.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (115 commits)
  libbpf: Add "bool skipped" to struct bpf_map
  libbpf: Fix typo in btf__dedup@LIBBPF_0.0.2 definition
  bpftool: Switch bpf_object__load_xattr() to bpf_object__load()
  selftests/bpf: Remove the only use of deprecated bpf_object__load_xattr()
  selftests/bpf: Add test for libbpf's custom log_buf behavior
  selftests/bpf: Replace all uses of bpf_load_btf() with bpf_btf_load()
  libbpf: Deprecate bpf_object__load_xattr()
  libbpf: Add per-program log buffer setter and getter
  libbpf: Preserve kernel error code and remove kprobe prog type guessing
  libbpf: Improve logging around BPF program loading
  libbpf: Allow passing user log setting through bpf_object_open_opts
  libbpf: Allow passing preallocated log_buf when loading BTF into kernel
  libbpf: Add OPTS-based bpf_btf_load() API
  libbpf: Fix bpf_prog_load() log_buf logic for log_level 0
  samples/bpf: Remove unneeded variable
  bpf: Remove redundant assignment to pointer t
  selftests/bpf: Fix a compilation warning
  perf/bpf_counter: Use bpf_map_create instead of bpf_create_map
  samples: bpf: Fix 'unknown warning group' build warning on Clang
  samples: bpf: Fix xdp_sample_user.o linking with Clang
  ...
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>

2021-12-10  selftests/bpf: Remove the only use of deprecated bpf_object__load_xattr()  (Andrii Nakryiko, 1 file changed, -5/+5)

Switch from bpf_object__load_xattr() to bpf_object__load() and kernel_log_level in bpf_object_open_opts.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

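A minimal sketch of what that switch looks like, assuming a libbpf version with the kernel_log_level open option; the wrapper function itself is illustrative.

#include <bpf/libbpf.h>

/* Pass the verifier log level at open time, then use the plain load API. */
static struct bpf_object *open_and_load(const char *obj_path)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_level = 1);
	struct bpf_object *obj;

	obj = bpf_object__open_file(obj_path, &opts);
	if (libbpf_get_error(obj))
		return NULL;

	if (bpf_object__load(obj)) {
		bpf_object__close(obj);
		return NULL;
	}
	return obj;
}
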
2021-12-10  selftests/bpf: Add test for libbpf's custom log_buf behavior  (Andrii Nakryiko, 2 files changed, -0/+300)

Add a selftest that validates that per-program and per-object log_buf overrides work as expected. Also test the same logic for the low-level bpf_prog_load() and bpf_btf_load() APIs.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-10  selftests/bpf: Replace all uses of bpf_load_btf() with bpf_btf_load()  (Andrii Nakryiko, 3 files changed, -22/+32)

Switch all selftests uses of the to-be-deprecated bpf_load_btf() to equivalent bpf_btf_load() calls.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-10  libbpf: Improve logging around BPF program loading  (Andrii Nakryiko, 1 file changed, -2/+4)

Add missing "prog '%s': " prefixes in a few places and use consistent markers for the beginning and end of program load logs. Here's an example of the log output:

libbpf: prog 'handler': BPF program load failed: Permission denied
libbpf: -- BEGIN PROG LOAD LOG ---
arg#0 reference type('UNKNOWN ') size cannot be determined: -22
; out1 = in1;
0: (18) r1 = 0xffffc9000cdcc000
2: (61) r1 = *(u32 *)(r1 +0)
...
81: (63) *(u32 *)(r4 +0) = r5
R1_w=map_value(id=0,off=16,ks=4,vs=20,imm=0) R4=map_value(id=0,off=400,ks=4,vs=16,imm=0)
invalid access to map value, value_size=16 off=400 size=4
R4 min value is outside of the allowed memory range
processed 63 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0
-- END PROG LOAD LOG --
libbpf: failed to load program 'handler'
libbpf: failed to load object 'test_skeleton'

The entire verifier log, including the BEGIN and END markers, is now always output during a single print callback call. This should make it much easier to post-process or parse it, if necessary. It's not an explicit API guarantee, but it can reasonably be expected to stay like that.

Also, __bpf_object__open is renamed to bpf_object_open(), as it's always an adventure to find the exact function that implements bpf_object's open phase, so drop the double underscores and use the internal libbpf naming convention.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-10  selftests/bpf: Tests for state pruning with u32 spill/fill  (Paul Chaignon, 2 files changed, -0/+103)

This patch adds tests for the verifier's tracking of spilled, <8B registers. The first two test cases ensure the verifier doesn't incorrectly prune states in case of <8B spill/fills. The last one simply checks that a filled u64 register is marked unknown if the register spilled into the same stack slot was less than 8B.

The map value access at the end of the first program is only incorrect for the path R6=32. If the precision bit for register R8 isn't backtracked through the u32 spill/fill, the R6=32 path is pruned at instruction 9 and the program is incorrectly accepted. The second program is a variation of the same with u32 spills and a u64 fill.

The additional instructions to introduce the first pruning point may be a bit fragile, as they depend on the heuristics for pruning points in the verifier (currently at least 8 instructions and 2 jumps). If the heuristics are changed, the pruning point may move (e.g., to the subsequent jump) or disappear, which would cause the test to always pass.

Signed-off-by: Paul Chaignon <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>

2021-12-09  Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net  (Jakub Kicinski, 1 file changed, -32/+600)

No conflicts.

Signed-off-by: Jakub Kicinski <[email protected]>

2021-12-08  selftests/bpf: Fix a compilation warning  (Yonghong Song, 1 file changed, -1/+1)

The following warning is triggered when building the selftest with the clang compiler:

/.../prog_tests/btf_dedup_split.c:368:6: warning: variable 'btf2' is used uninitialized whenever 'if' condition is true [-Wsometimes-uninitialized]
        if (!ASSERT_OK(err, "btf_dedup"))
            ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
/.../prog_tests/btf_dedup_split.c:424:12: note: uninitialized use occurs here
        btf__free(btf2);
                  ^~~~
/.../prog_tests/btf_dedup_split.c:368:2: note: remove the 'if' if its condition is always false
        if (!ASSERT_OK(err, "btf_dedup"))
        ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/.../prog_tests/btf_dedup_split.c:343:25: note: initialize the variable 'btf2' to silence this warning
        struct btf *btf1, *btf2;
                               ^
                                = NULL

Initialize the local variable btf2 = NULL and the warning is gone.

Fixes: 9a49afe6f5a5 ("selftests/bpf: Add btf_dedup case with duplicated structs within CU")
Signed-off-by: Yonghong Song <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-08  bpf: Add selftests to cover packet access corner cases  (Maxim Mikityanskiy, 1 file changed, -16/+584)

This commit adds BPF verifier selftests that cover all corner cases of packet boundary checks. Specifically, 8-byte packet reads are tested at the beginning of data and at the beginning of data_meta, using all kinds of boundary checks (all comparison operators: <, >, <=, >=; both permutations of operands: data + length compared to end, and end compared to data + length). For each case there are three tests (a sketch of the check shape follows this entry):

1. Length is just enough for an 8-byte read. Length is either 7 or 8, depending on the comparison.
2. Length is increased by 1 - should still pass the verifier. These cases are useful, because they failed before commit 2fa7d94afc1a ("bpf: Fix the off-by-two error in range markings").
3. Length is decreased by 1 - should be rejected by the verifier.

Some existing tests are just renamed to avoid duplication.

Signed-off-by: Maxim Mikityanskiy <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

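A minimal illustrative XDP program (not one of the added test cases) showing the boundary-check shape these tests exercise; either form of the comparison in the comment must let the 8-byte read through the verifier.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("xdp")
int read_first_u64(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	__u64 val;

	if (data + 8 > data_end)	/* equivalent form: data + 7 >= data_end */
		return XDP_DROP;
	val = *(__u64 *)data;		/* reads bytes [data, data+7] */
	return val ? XDP_PASS : XDP_DROP;
}
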
2021-12-03  bpf: Fix the test_task_vma selftest to support output shorter than 1 kB  (Maxim Mikityanskiy, 1 file changed, -2/+3)

The test for bpf_iter_task_vma assumes that the output will be longer than 1 kB, as the comment above the loop says. Due to this assumption, the loop becomes infinite if the output turns out to be shorter than 1 kB: the return value of read_fd_into_buffer is 0 when the end of file is reached, and len isn't being increased any more. This commit adds a break on EOF to handle short output correctly.

For reference, this is the content I get when running test_progs under vmtest.sh, and it's shorter than 1 kB:

00400000-00401000 r--p 00000000 fe:00 25867    /root/bpf/test_progs
00401000-00674000 r-xp 00001000 fe:00 25867    /root/bpf/test_progs
00674000-0095f000 r--p 00274000 fe:00 25867    /root/bpf/test_progs
0095f000-00983000 r--p 0055e000 fe:00 25867    /root/bpf/test_progs
00983000-00a8a000 rw-p 00582000 fe:00 25867    /root/bpf/test_progs
00a8a000-0484e000 rw-p 00000000 00:00 0
7f6c64000000-7f6c64021000 rw-p 00000000 00:00 0
7f6c64021000-7f6c68000000 ---p 00000000 00:00 0
7f6c6ac8f000-7f6c6ac90000 r--s 00000000 00:0d 8032    anon_inode:bpf-map
7f6c6ac90000-7f6c6ac91000 ---p 00000000 00:00 0
7f6c6ac91000-7f6c6b491000 rw-p 00000000 00:00 0
7f6c6b491000-7f6c6b492000 r--s 00000000 00:0d 8032    anon_inode:bpf-map
7f6c6b492000-7f6c6b493000 rw-s 00000000 00:0d 8032    anon_inode:bpf-map
7ffc1e23d000-7ffc1e25e000 rw-p 00000000 00:00 0
7ffc1e3b8000-7ffc1e3bc000 r--p 00000000 00:00 0
7ffc1e3bc000-7ffc1e3bd000 r-xp 00000000 00:00 0
7fffffffe000-7ffffffff000 --xp 00000000 00:00 0

Fixes: e8168840e16c ("selftests/bpf: Add test for bpf_iter_task_vma")
Signed-off-by: Maxim Mikityanskiy <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

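A sketch of the fixed loop shape, under the assumption of a simplified read helper (the real selftest uses read_fd_into_buffer(); this stand-in just shows where the break on EOF belongs).

#include <unistd.h>

/* Read until the buffer is full or EOF is hit; stop instead of spinning
 * forever when the output is shorter than the buffer.
 */
static ssize_t read_all(int fd, char *buf, size_t buf_sz)
{
	size_t len = 0;

	while (len < buf_sz) {
		ssize_t ret = read(fd, buf + len, buf_sz - len);

		if (ret < 0)
			return ret;
		if (ret == 0)		/* EOF: output was shorter than buf_sz */
			break;
		len += ret;
	}
	return len;
}
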
2021-12-03  bpf: Fix the off-by-two error in range markings  (Maxim Mikityanskiy, 1 file changed, -16/+16)

The first commit cited below attempts to fix the off-by-one error that appeared in some comparisons with an open range. Due to this error, arithmetically equivalent pieces of code could get different verdicts from the verifier, for example (pseudocode):

// 1. Passes the verifier:
if (data + 8 > data_end)
    return early
read *(u64 *)data, i.e. [data; data+7]

// 2. Rejected by the verifier (should still pass):
if (data + 7 >= data_end)
    return early
read *(u64 *)data, i.e. [data; data+7]

The attempted fix, however, shifts the range by one in the wrong direction, so the bug not only remains, but such a piece of code also starts failing in the verifier:

// 3. Rejected by the verifier, but the check is stricter than in #1.
if (data + 8 >= data_end)
    return early
read *(u64 *)data, i.e. [data; data+7]

The change performed by that fix converted an off-by-one bug into an off-by-two. The second commit cited below added BPF selftests written to ensure that code chunks like #3 are rejected; however, they should be accepted.

This commit fixes the off-by-two error by adjusting new_range in the right direction and fixes the tests by changing the range into the one that should actually fail.

Fixes: fb2a311a31d3 ("bpf: fix off by one for range markings with L{T, E} patterns")
Fixes: b37242c773b2 ("bpf: add test cases to bpf selftests to cover all access tests")
Signed-off-by: Maxim Mikityanskiy <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-02  selftests/bpf: Remove all the uses of deprecated bpf_prog_load_xattr()  (Andrii Nakryiko, 9 files changed, -107/+119)

Migrate all the selftests that were still using bpf_prog_load_xattr(). A few are converted to skeletons; the others will use the bpf_object__open_file() API.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-02  selftests/bpf: Mute xdpxceiver.c's deprecation warnings  (Andrii Nakryiko, 1 file changed, -0/+6)

xdpxceiver.c is using AF_XDP APIs that are deprecated starting from libbpf 0.7. Until we migrate the test to libxdp or solve this issue in some other way, mute deprecation warnings within xdpxceiver.c.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-02  selftests/bpf: Remove recently reintroduced legacy btf__dedup() use  (Andrii Nakryiko, 1 file changed, -2/+2)

One extra patch added back a use of the legacy btf__dedup() variant. Clean that up.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-02  selftests/bpf: Update test names for xchg and cmpxchg  (Paul E. McKenney, 1 file changed, -2/+2)

The test_cmpxchg() and test_xchg() functions say "test_run add". Therefore, make them say "test_run cmpxchg" and "test_run xchg", respectively.

Signed-off-by: Paul E. McKenney <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/20211201005030.GA3071525@paulmck-ThinkPad-P17-Gen-1

2021-12-02  selftests/bpf: Build testing_helpers.o out of tree  (Jean-Philippe Brucker, 1 file changed, -18/+22)

Add an $(OUTPUT) prefix to testing_helpers.o, so it can be built out of tree when necessary. At the moment, in addition to being built in-tree even when an out-of-tree build is required, testing_helpers.o is not built with the right recipe when cross-building.

For consistency, the other helpers, cgroup_helpers and trace_helpers, can also be passed as objects instead of source. Use a *_HELPERS variable to keep the Makefile readable.

Fixes: f87c1930ac29 ("selftests/bpf: Merge test_stub.c into testing_helpers.c")
Signed-off-by: Jean-Philippe Brucker <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-02  selftests/bpf: Add CO-RE relocations to verifier scale test.  (Alexei Starovoitov, 1 file changed, -2/+2)

Add 182 CO-RE relocations to the verifier scale test.

Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-02  selftests/bpf: Revert CO-RE removal in test_ksyms_weak.  (Alexei Starovoitov, 1 file changed, -1/+1)

Commit 087cba799ced ("selftests/bpf: Add weak/typeless ksym test for light skeleton") added test_ksyms_weak to light skeleton testing, but removed the CO-RE access. Revert that part of the commit, since the light skeleton can use CO-RE in the kernel.

Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-02  selftests/bpf: Additional test for CO-RE in the kernel.  (Alexei Starovoitov, 3 files changed, -1/+119)

Add a test where the randmap() function is appended to three different bpf programs. That checks the struct bpf_core_relo replication logic and the offset adjustment in the gen loader part of libbpf. A fourth bpf program has 360 CO-RE relocations from vmlinux, bpf_testmod, and a non-existing type. It tests the candidate cache logic.

Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-02  selftests/bpf: Convert map_ptr_kern test to use light skeleton.  (Alexei Starovoitov, 2 files changed, -10/+9)

To exercise CO-RE in the kernel further, convert the map_ptr_kern test to a light skeleton.

Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-02  selftests/bpf: Improve inner_map test coverage.  (Alexei Starovoitov, 1 file changed, -2/+14)

Check that hash and array inner maps are properly initialized.

Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-12-02  selftests/bpf: Add lskel version of kfunc test.  (Alexei Starovoitov, 2 files changed, -1/+25)

Add a light skeleton version of the kfunc_call_test_subprog test.

Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-11-30  selftest/bpf/benchs: Add bpf_loop benchmark  (Joanne Koong, 7 files changed, -1/+203)

Add a benchmark to measure the throughput and latency of the bpf_loop call.

Testing this on my dev machine on 1 thread, the data is as follows:

nr_loops: 10
bpf_loop - throughput: 198.519 ± 0.155 M ops/s, latency: 5.037 ns/op
nr_loops: 100
bpf_loop - throughput: 247.448 ± 0.305 M ops/s, latency: 4.041 ns/op
nr_loops: 500
bpf_loop - throughput: 260.839 ± 0.380 M ops/s, latency: 3.834 ns/op
nr_loops: 1000
bpf_loop - throughput: 262.806 ± 0.629 M ops/s, latency: 3.805 ns/op
nr_loops: 5000
bpf_loop - throughput: 264.211 ± 1.508 M ops/s, latency: 3.785 ns/op
nr_loops: 10000
bpf_loop - throughput: 265.366 ± 3.054 M ops/s, latency: 3.768 ns/op
nr_loops: 50000
bpf_loop - throughput: 235.986 ± 20.205 M ops/s, latency: 4.238 ns/op
nr_loops: 100000
bpf_loop - throughput: 264.482 ± 0.279 M ops/s, latency: 3.781 ns/op
nr_loops: 500000
bpf_loop - throughput: 309.773 ± 87.713 M ops/s, latency: 3.228 ns/op
nr_loops: 1000000
bpf_loop - throughput: 262.818 ± 4.143 M ops/s, latency: 3.805 ns/op

From this data, we can see that the latency per loop decreases as the number of loops increases. On this particular machine, each loop had an overhead of about ~4 ns, and we were able to run ~250 million loops per second.

Signed-off-by: Joanne Koong <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

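A minimal sketch of what a bpf_loop() call looks like from BPF C, assuming a kernel and vmlinux.h build that provide the helper; the attach point and loop body are illustrative and not the benchmark program itself.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

struct loop_ctx {
	__u64 sum;
};

/* Called once per iteration; returning 1 would stop the loop early. */
static int add_index(__u32 index, void *data)
{
	struct loop_ctx *lc = data;

	lc->sum += index;
	return 0;
}

SEC("tp/syscalls/sys_enter_getpgid")
int run_loop(void *ctx)
{
	struct loop_ctx lc = { .sum = 0 };

	/* Up to 1000 kernel-bounded iterations; flags must currently be 0. */
	bpf_loop(1000, add_index, &lc, 0);
	return 0;
}
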
2021-11-30  selftests/bpf: Measure bpf_loop verifier performance  (Joanne Koong, 5 files changed, -4/+169)

This patch tests bpf_loop in pyperf and strobemeta, and measures the verifier performance of replacing the traditional for loop with bpf_loop. The results are as follows:

~strobemeta~

Baseline
    verification time 6808200 usec
    stack depth 496
    processed 554252 insns (limit 1000000) max_states_per_insn 16 total_states 15878 peak_states 13489 mark_read 3110
    #192 verif_scale_strobemeta:OK (unrolled loop)

Using bpf_loop
    verification time 31589 usec
    stack depth 96+400
    processed 1513 insns (limit 1000000) max_states_per_insn 2 total_states 106 peak_states 106 mark_read 60
    #193 verif_scale_strobemeta_bpf_loop:OK

~pyperf600~

Baseline
    verification time 29702486 usec
    stack depth 368
    processed 626838 insns (limit 1000000) max_states_per_insn 7 total_states 30368 peak_states 30279 mark_read 748
    #182 verif_scale_pyperf600:OK (unrolled loop)

Using bpf_loop
    verification time 148488 usec
    stack depth 320+40
    processed 10518 insns (limit 1000000) max_states_per_insn 10 total_states 705 peak_states 517 mark_read 38
    #183 verif_scale_pyperf600_bpf_loop:OK

Using the bpf_loop helper led to approximately a 99% decrease in the verification time and in the number of instructions.

Signed-off-by: Joanne Koong <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-11-30  selftests/bpf: Add bpf_loop test  (Joanne Koong, 2 files changed, -0/+257)

Add a test for bpf_loop covering a variety of cases: various nr_loops, a null callback ctx, invalid flags, and nested callbacks.

Signed-off-by: Joanne Koong <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-11-28  selftests/bpf: Test BPF_MAP_TYPE_PROG_ARRAY static initialization  (Hengqi Chen, 2 files changed, -0/+71)

Add a testcase for BPF_MAP_TYPE_PROG_ARRAY static initialization.

Signed-off-by: Hengqi Chen <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

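A minimal sketch of the feature being tested, assuming libbpf's __array()-based declarative map initialization; the program names and sections are illustrative, not the actual testcase. libbpf fills the chosen slot with the program's fd at load time, so no runtime map update is needed before the tail call.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("socket")
int tailcall_1(void *ctx)
{
	return 0;
}

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 2);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__array(values, int (void *));
} prog_array SEC(".maps") = {
	.values = {
		[1] = (void *)&tailcall_1,	/* slot 1 statically points at tailcall_1 */
	},
};

SEC("socket")
int entry(void *ctx)
{
	bpf_tail_call(ctx, &prog_array, 1);
	return 0;
}
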
2021-11-26  af_unix: Remove UNIX_ABSTRACT() macro and test sun_path[0] instead.  (Kuniyuki Iwashima, 3 files changed, -4/+2)

In the BSD and abstract address cases, we store sockets in the hash table with keys between 0 and UNIX_HASH_SIZE - 1. However, the hash saved in a socket varies depending on its address type; sockets with BSD addresses always have UNIX_HASH_SIZE in their unix_sk(sk)->addr->hash.

This is just for the UNIX_ABSTRACT() macro used to check the address type. The difference in the saved hashes comes from the first byte of the address in the first place, so we can test it directly. Then we can keep a real hash in each socket and replace unix_table_lock with per-hash locks in a later patch.

Signed-off-by: Kuniyuki Iwashima <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]>

2021-11-26  selftests/bpf: Fix misaligned accesses in xdp and xdp_bpf2bpf tests  (Andrii Nakryiko, 2 files changed, -8/+9)

Similar to the previous patch, just copy the necessary struct into a local stack variable before checking its fields.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-11-26  selftests/bpf: Fix misaligned memory accesses in xdp_bonding test  (Andrii Nakryiko, 1 file changed, -16/+20)

Construct the packet buffer explicitly for each packet to avoid unaligned memory accesses.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-11-26  selftests/bpf: Prevent out-of-bounds stack access in test_bpffs  (Andrii Nakryiko, 1 file changed, -1/+3)

buf may not be zero-terminated, leading strstr() to access data beyond the intended buf[] array. Fix this by forcing zero termination.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

2021-11-26  selftests/bpf: Fix misaligned memory access in queue_stack_map test  (Andrii Nakryiko, 1 file changed, -5/+7)

Copy the iphdr into a local variable before accessing its fields.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]

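A sketch of the general fix pattern shared by these misaligned-access fixes (the helper is illustrative, not the selftest code): copy the header out of the possibly unaligned packet buffer into an aligned local before touching its fields.

#include <string.h>
#include <arpa/inet.h>
#include <netinet/ip.h>

/* Read a field from an IPv4 header at an arbitrary (possibly unaligned)
 * offset inside a packet buffer by copying it to an aligned local first.
 */
static unsigned short pkt_ip_id(const void *pkt, size_t off)
{
	struct iphdr iph;

	memcpy(&iph, (const char *)pkt + off, sizeof(iph));
	return ntohs(iph.id);
}
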