Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
94 files changed, 4695 insertions, 1330 deletions
| diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index 0ee29e11eaee..970f09156eb4 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -39,13 +39,13 @@ static struct bpf_align_test tests[] = {  		},  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,  		.matches = { -			{0, "R1=ctx(id=0,off=0,imm=0)"}, +			{0, "R1=ctx(off=0,imm=0)"},  			{0, "R10=fp0"}, -			{0, "R3_w=inv2"}, -			{1, "R3_w=inv4"}, -			{2, "R3_w=inv8"}, -			{3, "R3_w=inv16"}, -			{4, "R3_w=inv32"}, +			{0, "R3_w=2"}, +			{1, "R3_w=4"}, +			{2, "R3_w=8"}, +			{3, "R3_w=16"}, +			{4, "R3_w=32"},  		},  	},  	{ @@ -67,19 +67,19 @@ static struct bpf_align_test tests[] = {  		},  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,  		.matches = { -			{0, "R1=ctx(id=0,off=0,imm=0)"}, +			{0, "R1=ctx(off=0,imm=0)"},  			{0, "R10=fp0"}, -			{0, "R3_w=inv1"}, -			{1, "R3_w=inv2"}, -			{2, "R3_w=inv4"}, -			{3, "R3_w=inv8"}, -			{4, "R3_w=inv16"}, -			{5, "R3_w=inv1"}, -			{6, "R4_w=inv32"}, -			{7, "R4_w=inv16"}, -			{8, "R4_w=inv8"}, -			{9, "R4_w=inv4"}, -			{10, "R4_w=inv2"}, +			{0, "R3_w=1"}, +			{1, "R3_w=2"}, +			{2, "R3_w=4"}, +			{3, "R3_w=8"}, +			{4, "R3_w=16"}, +			{5, "R3_w=1"}, +			{6, "R4_w=32"}, +			{7, "R4_w=16"}, +			{8, "R4_w=8"}, +			{9, "R4_w=4"}, +			{10, "R4_w=2"},  		},  	},  	{ @@ -96,14 +96,14 @@ static struct bpf_align_test tests[] = {  		},  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,  		.matches = { -			{0, "R1=ctx(id=0,off=0,imm=0)"}, +			{0, "R1=ctx(off=0,imm=0)"},  			{0, "R10=fp0"}, -			{0, "R3_w=inv4"}, -			{1, "R3_w=inv8"}, -			{2, "R3_w=inv10"}, -			{3, "R4_w=inv8"}, -			{4, "R4_w=inv12"}, -			{5, "R4_w=inv14"}, +			{0, "R3_w=4"}, +			{1, "R3_w=8"}, +			{2, "R3_w=10"}, +			{3, "R4_w=8"}, +			{4, "R4_w=12"}, +			{5, "R4_w=14"},  		},  	},  	{ @@ -118,12 +118,12 @@ static struct bpf_align_test tests[] = {  		},  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,  		.matches = { -			{0, "R1=ctx(id=0,off=0,imm=0)"}, +			{0, "R1=ctx(off=0,imm=0)"},  			{0, "R10=fp0"}, -			{0, "R3_w=inv7"}, -			{1, "R3_w=inv7"}, -			{2, "R3_w=inv14"}, -			{3, "R3_w=inv56"}, +			{0, "R3_w=7"}, +			{1, "R3_w=7"}, +			{2, "R3_w=14"}, +			{3, "R3_w=56"},  		},  	}, @@ -161,19 +161,19 @@ static struct bpf_align_test tests[] = {  		},  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,  		.matches = { -			{6, "R0_w=pkt(id=0,off=8,r=8,imm=0)"}, -			{6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, -			{7, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, -			{8, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, -			{9, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, -			{10, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, -			{12, "R3_w=pkt_end(id=0,off=0,imm=0)"}, -			{17, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, -			{18, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"}, -			{19, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, -			{20, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, -			{21, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, -			{22, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, +			{6, "R0_w=pkt(off=8,r=8,imm=0)"}, +			{6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"}, +			{7, "R3_w=scalar(umax=510,var_off=(0x0; 0x1fe))"}, +			{8, "R3_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, +			{9, "R3_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"}, +			{10, "R3_w=scalar(umax=4080,var_off=(0x0; 0xff0))"}, +			{12, "R3_w=pkt_end(off=0,imm=0)"}, +			{17, 
"R4_w=scalar(umax=255,var_off=(0x0; 0xff))"}, +			{18, "R4_w=scalar(umax=8160,var_off=(0x0; 0x1fe0))"}, +			{19, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"}, +			{20, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"}, +			{21, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, +			{22, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},  		},  	},  	{ @@ -194,16 +194,16 @@ static struct bpf_align_test tests[] = {  		},  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,  		.matches = { -			{6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, -			{7, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, -			{8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, -			{9, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, -			{10, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, -			{11, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, -			{12, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, -			{13, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, -			{14, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, -			{15, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, +			{6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"}, +			{7, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, +			{8, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"}, +			{9, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, +			{10, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"}, +			{11, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, +			{12, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, +			{13, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, +			{14, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"}, +			{15, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},  		},  	},  	{ @@ -234,14 +234,14 @@ static struct bpf_align_test tests[] = {  		},  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,  		.matches = { -			{2, "R5_w=pkt(id=0,off=0,r=0,imm=0)"}, -			{4, "R5_w=pkt(id=0,off=14,r=0,imm=0)"}, -			{5, "R4_w=pkt(id=0,off=14,r=0,imm=0)"}, -			{9, "R2=pkt(id=0,off=0,r=18,imm=0)"}, -			{10, "R5=pkt(id=0,off=14,r=18,imm=0)"}, -			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, -			{13, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"}, -			{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"}, +			{2, "R5_w=pkt(off=0,r=0,imm=0)"}, +			{4, "R5_w=pkt(off=14,r=0,imm=0)"}, +			{5, "R4_w=pkt(off=14,r=0,imm=0)"}, +			{9, "R2=pkt(off=0,r=18,imm=0)"}, +			{10, "R5=pkt(off=14,r=18,imm=0)"}, +			{10, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"}, +			{13, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"}, +			{14, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},  		},  	},  	{ @@ -296,59 +296,59 @@ static struct bpf_align_test tests[] = {  			/* Calculated offset in R6 has unknown value, but known  			 * alignment of 4.  			 */ -			{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, -			{7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, +			{6, "R2_w=pkt(off=0,r=8,imm=0)"}, +			{7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},  			/* Offset is added to packet pointer R5, resulting in  			 * known fixed offset, and variable offset from R6.  			 */ -			{11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, +			{11, "R5_w=pkt(id=1,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},  			/* At the time the word size load is performed from R5,  			 * it's total offset is NET_IP_ALIGN + reg->off (0) +  			 * reg->aux_off (14) which is 16.  Then the variable  			 * offset is considered using reg->aux_off_align which  			 * is 4 and meets the load's requirements.  			 
*/ -			{15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, -			{15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, +			{15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, +			{15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},  			/* Variable offset is added to R5 packet pointer,  			 * resulting in auxiliary alignment of 4.  			 */ -			{17, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, +			{17, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},  			/* Constant offset is added to R5, resulting in  			 * reg->off of 14.  			 */ -			{18, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, +			{18, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},  			/* At the time the word size load is performed from R5,  			 * its total fixed offset is NET_IP_ALIGN + reg->off  			 * (14) which is 16.  Then the variable offset is 4-byte  			 * aligned, so the total offset is 4-byte aligned and  			 * meets the load's requirements.  			 */ -			{23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, -			{23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, +			{23, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, +			{23, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},  			/* Constant offset is added to R5 packet pointer,  			 * resulting in reg->off value of 14.  			 */ -			{25, "R5_w=pkt(id=0,off=14,r=8"}, +			{25, "R5_w=pkt(off=14,r=8"},  			/* Variable offset is added to R5, resulting in a  			 * variable offset of (4n).  			 */ -			{26, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, +			{26, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},  			/* Constant is added to R5 again, setting reg->off to 18. */ -			{27, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, +			{27, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},  			/* And once more we add a variable; resulting var_off  			 * is still (4n), fixed offset is not changed.  			 * Also, we create a new reg->id.  			 */ -			{28, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, +			{28, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},  			/* At the time the word size load is performed from R5,  			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)  			 * which is 20.  Then the variable offset is (4n), so  			 * the total offset is 4-byte aligned and meets the  			 * load's requirements.  			 */ -			{33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, -			{33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, +			{33, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"}, +			{33, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},  		},  	},  	{ @@ -386,36 +386,36 @@ static struct bpf_align_test tests[] = {  			/* Calculated offset in R6 has unknown value, but known  			 * alignment of 4.  			 
*/ -			{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, -			{7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, +			{6, "R2_w=pkt(off=0,r=8,imm=0)"}, +			{7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},  			/* Adding 14 makes R6 be (4n+2) */ -			{8, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, +			{8, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},  			/* Packet pointer has (4n+2) offset */ -			{11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"}, -			{12, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"}, +			{11, "R5_w=pkt(id=1,off=0,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"}, +			{12, "R4=pkt(id=1,off=4,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},  			/* At the time the word size load is performed from R5,  			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)  			 * which is 2.  Then the variable offset is (4n+2), so  			 * the total offset is 4-byte aligned and meets the  			 * load's requirements.  			 */ -			{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"}, +			{15, "R5=pkt(id=1,off=0,r=4,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},  			/* Newly read value in R6 was shifted left by 2, so has  			 * known alignment of 4.  			 */ -			{17, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, +			{17, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},  			/* Added (4n) to packet pointer's (4n+2) var_off, giving  			 * another (4n+2).  			 */ -			{19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"}, -			{20, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"}, +			{19, "R5_w=pkt(id=2,off=0,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"}, +			{20, "R4=pkt(id=2,off=4,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},  			/* At the time the word size load is performed from R5,  			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)  			 * which is 2.  Then the variable offset is (4n+2), so  			 * the total offset is 4-byte aligned and meets the  			 * load's requirements.  			 */ -			{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"}, +			{23, "R5=pkt(id=2,off=0,r=4,umin=14,umax=2054,var_off=(0x2; 0xffc)"},  		},  	},  	{ @@ -448,18 +448,18 @@ static struct bpf_align_test tests[] = {  		.prog_type = BPF_PROG_TYPE_SCHED_CLS,  		.result = REJECT,  		.matches = { -			{3, "R5_w=pkt_end(id=0,off=0,imm=0)"}, +			{3, "R5_w=pkt_end(off=0,imm=0)"},  			/* (ptr - ptr) << 2 == unknown, (4n) */ -			{5, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"}, +			{5, "R5_w=scalar(smax=9223372036854775804,umax=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},  			/* (4n) + 14 == (4n+2).  We blow our bounds, because  			 * the add could overflow.  			 
*/ -			{6, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, +			{6, "R5_w=scalar(smin=-9223372036854775806,smax=9223372036854775806,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},  			/* Checked s>=0 */ -			{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, +			{9, "R5=scalar(umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},  			/* packet pointer + nonnegative (4n+2) */ -			{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, -			{12, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, +			{11, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, +			{12, "R4_w=pkt(id=1,off=4,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},  			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.  			 * We checked the bounds, but it might have been able  			 * to overflow if the packet pointer started in the @@ -467,7 +467,7 @@ static struct bpf_align_test tests[] = {  			 * So we did not get a 'range' on R6, and the access  			 * attempt will fail.  			 */ -			{15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, +			{15, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},  		}  	},  	{ @@ -502,23 +502,23 @@ static struct bpf_align_test tests[] = {  			/* Calculated offset in R6 has unknown value, but known  			 * alignment of 4.  			 */ -			{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, -			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, +			{6, "R2_w=pkt(off=0,r=8,imm=0)"}, +			{8, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},  			/* Adding 14 makes R6 be (4n+2) */ -			{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, +			{9, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},  			/* New unknown value in R7 is (4n) */ -			{10, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, +			{10, "R7_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},  			/* Subtracting it from R6 blows our unsigned bounds */ -			{11, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, +			{11, "R6=scalar(smin=-1006,smax=1034,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},  			/* Checked s>= 0 */ -			{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"}, +			{14, "R6=scalar(umin=2,umax=1034,var_off=(0x2; 0x7fc))"},  			/* At the time the word size load is performed from R5,  			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)  			 * which is 2.  Then the variable offset is (4n+2), so  			 * the total offset is 4-byte aligned and meets the  			 * load's requirements.  			 */ -			{20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"}, +			{20, "R5=pkt(id=2,off=0,r=4,umin=2,umax=1034,var_off=(0x2; 0x7fc)"},  		},  	}, @@ -556,23 +556,23 @@ static struct bpf_align_test tests[] = {  			/* Calculated offset in R6 has unknown value, but known  			 * alignment of 4.  			 
*/ -			{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, -			{9, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"}, +			{6, "R2_w=pkt(off=0,r=8,imm=0)"}, +			{9, "R6_w=scalar(umax=60,var_off=(0x0; 0x3c))"},  			/* Adding 14 makes R6 be (4n+2) */ -			{10, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"}, +			{10, "R6_w=scalar(umin=14,umax=74,var_off=(0x2; 0x7c))"},  			/* Subtracting from packet pointer overflows ubounds */ -			{13, "R5_w=pkt(id=2,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"}, +			{13, "R5_w=pkt(id=2,off=0,r=8,umin=18446744073709551542,umax=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},  			/* New unknown value in R7 is (4n), >= 76 */ -			{14, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"}, +			{14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"},  			/* Adding it to packet pointer gives nice bounds again */ -			{16, "R5_w=pkt(id=3,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"}, +			{16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},  			/* At the time the word size load is performed from R5,  			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)  			 * which is 2.  Then the variable offset is (4n+2), so  			 * the total offset is 4-byte aligned and meets the  			 * load's requirements.  			 */ -			{20, "R5=pkt(id=3,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"}, +			{20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},  		},  	},  }; @@ -648,8 +648,8 @@ static int do_test_single(struct bpf_align_test *test)  			/* Check the next line as well in case the previous line  			 * did not have a corresponding bpf insn. Example:  			 * func#0 @0 -			 * 0: R1=ctx(id=0,off=0,imm=0) R10=fp0 -			 * 0: (b7) r3 = 2                 ; R3_w=inv2 +			 * 0: R1=ctx(off=0,imm=0) R10=fp0 +			 * 0: (b7) r3 = 2                 ; R3_w=2  			 */  			if (!strstr(line_ptr, m.match)) {  				cur_line = -1; diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c index 86b7d5d84eec..13e101f370a1 100644 --- a/tools/testing/selftests/bpf/prog_tests/atomics.c +++ b/tools/testing/selftests/bpf/prog_tests/atomics.c @@ -7,19 +7,15 @@  static void test_add(struct atomics_lskel *skel)  {  	int err, prog_fd; -	__u32 duration = 0, retval; -	int link_fd; - -	link_fd = atomics_lskel__add__attach(skel); -	if (!ASSERT_GT(link_fd, 0, "attach(add)")) -		return; +	LIBBPF_OPTS(bpf_test_run_opts, topts); +	/* No need to attach it, just run it directly */  	prog_fd = skel->progs.add.prog_fd; -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); -	if (CHECK(err || retval, "test_run add", -		  "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) -		goto cleanup; +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	if (!ASSERT_OK(err, "test_run_opts err")) +		return; +	if (!ASSERT_OK(topts.retval, "test_run_opts retval")) +		return;  	ASSERT_EQ(skel->data->add64_value, 3, "add64_value");  	ASSERT_EQ(skel->bss->add64_result, 1, "add64_result"); @@ -31,28 +27,20 @@ static void test_add(struct atomics_lskel *skel)  	ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result");  	ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value"); - -cleanup: -	close(link_fd);  }  static void test_sub(struct atomics_lskel *skel)  {  	int err, prog_fd; -	__u32 duration = 0, retval; -	int link_fd; - -	link_fd = 
atomics_lskel__sub__attach(skel); -	if (!ASSERT_GT(link_fd, 0, "attach(sub)")) -		return; +	LIBBPF_OPTS(bpf_test_run_opts, topts); +	/* No need to attach it, just run it directly */  	prog_fd = skel->progs.sub.prog_fd; -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); -	if (CHECK(err || retval, "test_run sub", -		  "err %d errno %d retval %d duration %d\n", -		  err, errno, retval, duration)) -		goto cleanup; +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	if (!ASSERT_OK(err, "test_run_opts err")) +		return; +	if (!ASSERT_OK(topts.retval, "test_run_opts retval")) +		return;  	ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value");  	ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result"); @@ -64,27 +52,20 @@ static void test_sub(struct atomics_lskel *skel)  	ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result");  	ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value"); - -cleanup: -	close(link_fd);  }  static void test_and(struct atomics_lskel *skel)  {  	int err, prog_fd; -	__u32 duration = 0, retval; -	int link_fd; - -	link_fd = atomics_lskel__and__attach(skel); -	if (!ASSERT_GT(link_fd, 0, "attach(and)")) -		return; +	LIBBPF_OPTS(bpf_test_run_opts, topts); +	/* No need to attach it, just run it directly */  	prog_fd = skel->progs.and.prog_fd; -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); -	if (CHECK(err || retval, "test_run and", -		  "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) -		goto cleanup; +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	if (!ASSERT_OK(err, "test_run_opts err")) +		return; +	if (!ASSERT_OK(topts.retval, "test_run_opts retval")) +		return;  	ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value");  	ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result"); @@ -93,27 +74,20 @@ static void test_and(struct atomics_lskel *skel)  	ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result");  	ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value"); -cleanup: -	close(link_fd);  }  static void test_or(struct atomics_lskel *skel)  {  	int err, prog_fd; -	__u32 duration = 0, retval; -	int link_fd; - -	link_fd = atomics_lskel__or__attach(skel); -	if (!ASSERT_GT(link_fd, 0, "attach(or)")) -		return; +	LIBBPF_OPTS(bpf_test_run_opts, topts); +	/* No need to attach it, just run it directly */  	prog_fd = skel->progs.or.prog_fd; -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); -	if (CHECK(err || retval, "test_run or", -		  "err %d errno %d retval %d duration %d\n", -		  err, errno, retval, duration)) -		goto cleanup; +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	if (!ASSERT_OK(err, "test_run_opts err")) +		return; +	if (!ASSERT_OK(topts.retval, "test_run_opts retval")) +		return;  	ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value");  	ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result"); @@ -122,26 +96,20 @@ static void test_or(struct atomics_lskel *skel)  	ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result");  	ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value"); -cleanup: -	close(link_fd);  }  static void test_xor(struct atomics_lskel *skel)  {  	int err, prog_fd; -	__u32 duration = 0, retval; -	int link_fd; - -	link_fd = atomics_lskel__xor__attach(skel); -	if (!ASSERT_GT(link_fd, 0, "attach(xor)")) -		return; +	LIBBPF_OPTS(bpf_test_run_opts, topts); +	/* No need to attach it, just run it directly */  	prog_fd = 
skel->progs.xor.prog_fd; -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); -	if (CHECK(err || retval, "test_run xor", -		  "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) -		goto cleanup; +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	if (!ASSERT_OK(err, "test_run_opts err")) +		return; +	if (!ASSERT_OK(topts.retval, "test_run_opts retval")) +		return;  	ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value");  	ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result"); @@ -150,26 +118,20 @@ static void test_xor(struct atomics_lskel *skel)  	ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result");  	ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_nxoreturn_value"); -cleanup: -	close(link_fd);  }  static void test_cmpxchg(struct atomics_lskel *skel)  {  	int err, prog_fd; -	__u32 duration = 0, retval; -	int link_fd; - -	link_fd = atomics_lskel__cmpxchg__attach(skel); -	if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)")) -		return; +	LIBBPF_OPTS(bpf_test_run_opts, topts); +	/* No need to attach it, just run it directly */  	prog_fd = skel->progs.cmpxchg.prog_fd; -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); -	if (CHECK(err || retval, "test_run cmpxchg", -		  "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) -		goto cleanup; +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	if (!ASSERT_OK(err, "test_run_opts err")) +		return; +	if (!ASSERT_OK(topts.retval, "test_run_opts retval")) +		return;  	ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value");  	ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg_result_fail"); @@ -178,45 +140,34 @@ static void test_cmpxchg(struct atomics_lskel *skel)  	ASSERT_EQ(skel->data->cmpxchg32_value, 2, "lcmpxchg32_value");  	ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg_result_fail");  	ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed"); - -cleanup: -	close(link_fd);  }  static void test_xchg(struct atomics_lskel *skel)  {  	int err, prog_fd; -	__u32 duration = 0, retval; -	int link_fd; - -	link_fd = atomics_lskel__xchg__attach(skel); -	if (!ASSERT_GT(link_fd, 0, "attach(xchg)")) -		return; +	LIBBPF_OPTS(bpf_test_run_opts, topts); +	/* No need to attach it, just run it directly */  	prog_fd = skel->progs.xchg.prog_fd; -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); -	if (CHECK(err || retval, "test_run xchg", -		  "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) -		goto cleanup; +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	if (!ASSERT_OK(err, "test_run_opts err")) +		return; +	if (!ASSERT_OK(topts.retval, "test_run_opts retval")) +		return;  	ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value");  	ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result");  	ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value");  	ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result"); - -cleanup: -	close(link_fd);  }  void test_atomics(void)  {  	struct atomics_lskel *skel; -	__u32 duration = 0;  	skel = atomics_lskel__open_and_load(); -	if (CHECK(!skel, "skel_load", "atomics skeleton failed\n")) +	if (!ASSERT_OK_PTR(skel, "atomics skeleton load"))  		return;  	if (skel->data->skip_tests) { diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index d0bd51eb23c8..d48f6e533e1e 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c 
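The atomics.c changes above migrate every subtest from the deprecated eight-argument bpf_prog_test_run() to bpf_prog_test_run_opts(), which bundles inputs and outputs into a single options struct. A minimal sketch of the new calling convention, assuming a loaded skeleton with some program prog (illustrative name, not from this diff):

	LIBBPF_OPTS(bpf_test_run_opts, topts);	/* zero-initializes opts and sets .sz */
	int prog_fd = bpf_program__fd(skel->progs.prog);
	int err = bpf_prog_test_run_opts(prog_fd, &topts);
	/* err is the syscall result; the program's return code and the run
	 * duration come back in topts.retval and topts.duration, replacing
	 * the old retval/duration out-parameters.
	 */

The diff also drops the per-subtest attach/detach pair: test_run executes the program directly, so no link fd is created or closed.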
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -5,9 +5,10 @@  /* this is how USDT semaphore is actually defined, except volatile modifier */  volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes"))); -/* attach point */ -static void method(void) { -	return ; +/* uprobe attach point */ +static void trigger_func(void) +{ +	asm volatile ("");  }  void test_attach_probe(void) @@ -17,8 +18,7 @@ void test_attach_probe(void)  	struct bpf_link *kprobe_link, *kretprobe_link;  	struct bpf_link *uprobe_link, *uretprobe_link;  	struct test_attach_probe* skel; -	size_t uprobe_offset; -	ssize_t base_addr, ref_ctr_offset; +	ssize_t uprobe_offset, ref_ctr_offset;  	bool legacy;  	/* Check if new-style kprobe/uprobe API is supported. @@ -34,11 +34,9 @@ void test_attach_probe(void)  	 */  	legacy = access("/sys/bus/event_source/devices/kprobe/type", F_OK) != 0; -	base_addr = get_base_addr(); -	if (CHECK(base_addr < 0, "get_base_addr", -		  "failed to find base addr: %zd", base_addr)) +	uprobe_offset = get_uprobe_offset(&trigger_func); +	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))  		return; -	uprobe_offset = get_uprobe_offset(&method, base_addr);  	ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);  	if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset")) @@ -103,7 +101,7 @@ void test_attach_probe(void)  		goto cleanup;  	/* trigger & validate uprobe & uretprobe */ -	method(); +	trigger_func();  	if (CHECK(skel->bss->uprobe_res != 3, "check_uprobe_res",  		  "wrong uprobe res: %d\n", skel->bss->uprobe_res)) diff --git a/tools/testing/selftests/bpf/prog_tests/bind_perm.c b/tools/testing/selftests/bpf/prog_tests/bind_perm.c index d0f06e40c16d..a1766a298bb7 100644 --- a/tools/testing/selftests/bpf/prog_tests/bind_perm.c +++ b/tools/testing/selftests/bpf/prog_tests/bind_perm.c @@ -1,13 +1,24 @@  // SPDX-License-Identifier: GPL-2.0 -#include <test_progs.h> -#include "bind_perm.skel.h" - +#define _GNU_SOURCE +#include <sched.h> +#include <stdlib.h>  #include <sys/types.h>  #include <sys/socket.h> -#include <sys/capability.h> + +#include "test_progs.h" +#include "cap_helpers.h" +#include "bind_perm.skel.h"  static int duration; +static int create_netns(void) +{ +	if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) +		return -1; + +	return 0; +} +  void try_bind(int family, int port, int expected_errno)  {  	struct sockaddr_storage addr = {}; @@ -38,43 +49,16 @@ close_socket:  		close(fd);  } -bool cap_net_bind_service(cap_flag_value_t flag) -{ -	const cap_value_t cap_net_bind_service = CAP_NET_BIND_SERVICE; -	cap_flag_value_t original_value; -	bool was_effective = false; -	cap_t caps; - -	caps = cap_get_proc(); -	if (CHECK(!caps, "cap_get_proc", "errno %d", errno)) -		goto free_caps; - -	if (CHECK(cap_get_flag(caps, CAP_NET_BIND_SERVICE, CAP_EFFECTIVE, -			       &original_value), -		  "cap_get_flag", "errno %d", errno)) -		goto free_caps; - -	was_effective = (original_value == CAP_SET); - -	if (CHECK(cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_bind_service, -			       flag), -		  "cap_set_flag", "errno %d", errno)) -		goto free_caps; - -	if (CHECK(cap_set_proc(caps), "cap_set_proc", "errno %d", errno)) -		goto free_caps; - -free_caps: -	CHECK(cap_free(caps), "cap_free", "errno %d", errno); -	return was_effective; -} -  void test_bind_perm(void)  { -	bool cap_was_effective; +	const __u64 net_bind_svc_cap = 1ULL << CAP_NET_BIND_SERVICE;  	struct bind_perm *skel; +	__u64 old_caps = 0;  	int cgroup_fd; +	if (create_netns()) +		return; +  	
cgroup_fd = test__join_cgroup("/bind_perm");  	if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno))  		return; @@ -91,7 +75,8 @@ void test_bind_perm(void)  	if (!ASSERT_OK_PTR(skel, "bind_v6_prog"))  		goto close_skeleton; -	cap_was_effective = cap_net_bind_service(CAP_CLEAR); +	ASSERT_OK(cap_disable_effective(net_bind_svc_cap, &old_caps), +		  "cap_disable_effective");  	try_bind(AF_INET, 110, EACCES);  	try_bind(AF_INET6, 110, EACCES); @@ -99,8 +84,9 @@ void test_bind_perm(void)  	try_bind(AF_INET, 111, 0);  	try_bind(AF_INET6, 111, 0); -	if (cap_was_effective) -		cap_net_bind_service(CAP_SET); +	if (old_caps & net_bind_svc_cap) +		ASSERT_OK(cap_enable_effective(net_bind_svc_cap, NULL), +			  "cap_enable_effective");  close_skeleton:  	bind_perm__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 5eea3c3a40fe..923a6139b2d8 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -7,6 +7,13 @@  #include <unistd.h>  #include <test_progs.h>  #include "test_bpf_cookie.skel.h" +#include "kprobe_multi.skel.h" + +/* uprobe attach point */ +static void trigger_func(void) +{ +	asm volatile (""); +}  static void kprobe_subtest(struct test_bpf_cookie *skel)  { @@ -57,16 +64,188 @@ cleanup:  	bpf_link__destroy(retlink2);  } +static void kprobe_multi_test_run(struct kprobe_multi *skel) +{ +	LIBBPF_OPTS(bpf_test_run_opts, topts); +	int err, prog_fd; + +	prog_fd = bpf_program__fd(skel->progs.trigger); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "test_run"); +	ASSERT_EQ(topts.retval, 0, "test_run"); + +	ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result"); +	ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result"); +	ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result"); +	ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result"); +	ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result"); +	ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result"); +	ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result"); +	ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result"); + +	ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result"); +	ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result"); +	ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result"); +	ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result"); +	ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result"); +	ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result"); +	ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result"); +	ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result"); +} + +static void kprobe_multi_link_api_subtest(void) +{ +	int prog_fd, link1_fd = -1, link2_fd = -1; +	struct kprobe_multi *skel = NULL; +	LIBBPF_OPTS(bpf_link_create_opts, opts); +	unsigned long long addrs[8]; +	__u64 cookies[8]; + +	if (!ASSERT_OK(load_kallsyms(), "load_kallsyms")) +		goto cleanup; + +	skel = kprobe_multi__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) +		goto cleanup; + +	skel->bss->pid = getpid(); +	skel->bss->test_cookie = true; + +#define GET_ADDR(__sym, __addr) ({				\ +	__addr = ksym_get_addr(__sym);				\ +	if (!ASSERT_NEQ(__addr, 0, "ksym_get_addr " #__sym))	\ +		goto cleanup;					\ +}) + +	
GET_ADDR("bpf_fentry_test1", addrs[0]); +	GET_ADDR("bpf_fentry_test2", addrs[1]); +	GET_ADDR("bpf_fentry_test3", addrs[2]); +	GET_ADDR("bpf_fentry_test4", addrs[3]); +	GET_ADDR("bpf_fentry_test5", addrs[4]); +	GET_ADDR("bpf_fentry_test6", addrs[5]); +	GET_ADDR("bpf_fentry_test7", addrs[6]); +	GET_ADDR("bpf_fentry_test8", addrs[7]); + +#undef GET_ADDR + +	cookies[0] = 1; +	cookies[1] = 2; +	cookies[2] = 3; +	cookies[3] = 4; +	cookies[4] = 5; +	cookies[5] = 6; +	cookies[6] = 7; +	cookies[7] = 8; + +	opts.kprobe_multi.addrs = (const unsigned long *) &addrs; +	opts.kprobe_multi.cnt = ARRAY_SIZE(addrs); +	opts.kprobe_multi.cookies = (const __u64 *) &cookies; +	prog_fd = bpf_program__fd(skel->progs.test_kprobe); + +	link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts); +	if (!ASSERT_GE(link1_fd, 0, "link1_fd")) +		goto cleanup; + +	cookies[0] = 8; +	cookies[1] = 7; +	cookies[2] = 6; +	cookies[3] = 5; +	cookies[4] = 4; +	cookies[5] = 3; +	cookies[6] = 2; +	cookies[7] = 1; + +	opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN; +	prog_fd = bpf_program__fd(skel->progs.test_kretprobe); + +	link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts); +	if (!ASSERT_GE(link2_fd, 0, "link2_fd")) +		goto cleanup; + +	kprobe_multi_test_run(skel); + +cleanup: +	close(link1_fd); +	close(link2_fd); +	kprobe_multi__destroy(skel); +} + +static void kprobe_multi_attach_api_subtest(void) +{ +	struct bpf_link *link1 = NULL, *link2 = NULL; +	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); +	LIBBPF_OPTS(bpf_test_run_opts, topts); +	struct kprobe_multi *skel = NULL; +	const char *syms[8] = { +		"bpf_fentry_test1", +		"bpf_fentry_test2", +		"bpf_fentry_test3", +		"bpf_fentry_test4", +		"bpf_fentry_test5", +		"bpf_fentry_test6", +		"bpf_fentry_test7", +		"bpf_fentry_test8", +	}; +	__u64 cookies[8]; + +	skel = kprobe_multi__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) +		goto cleanup; + +	skel->bss->pid = getpid(); +	skel->bss->test_cookie = true; + +	cookies[0] = 1; +	cookies[1] = 2; +	cookies[2] = 3; +	cookies[3] = 4; +	cookies[4] = 5; +	cookies[5] = 6; +	cookies[6] = 7; +	cookies[7] = 8; + +	opts.syms = syms; +	opts.cnt = ARRAY_SIZE(syms); +	opts.cookies = cookies; + +	link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, +						      NULL, &opts); +	if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts")) +		goto cleanup; + +	cookies[0] = 8; +	cookies[1] = 7; +	cookies[2] = 6; +	cookies[3] = 5; +	cookies[4] = 4; +	cookies[5] = 3; +	cookies[6] = 2; +	cookies[7] = 1; + +	opts.retprobe = true; + +	link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe, +						      NULL, &opts); +	if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts")) +		goto cleanup; + +	kprobe_multi_test_run(skel); + +cleanup: +	bpf_link__destroy(link2); +	bpf_link__destroy(link1); +	kprobe_multi__destroy(skel); +}  static void uprobe_subtest(struct test_bpf_cookie *skel)  {  	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);  	struct bpf_link *link1 = NULL, *link2 = NULL;  	struct bpf_link *retlink1 = NULL, *retlink2 = NULL; -	size_t uprobe_offset; -	ssize_t base_addr; +	ssize_t uprobe_offset; -	base_addr = get_base_addr(); -	uprobe_offset = get_uprobe_offset(&get_base_addr, base_addr); +	uprobe_offset = get_uprobe_offset(&trigger_func); +	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset")) +		goto cleanup;  	/* attach two uprobes */  	opts.bpf_cookie = 0x100; @@ -99,7 +278,7 @@ static void uprobe_subtest(struct test_bpf_cookie *skel)  		goto cleanup;  
	/* trigger uprobe && uretprobe */ -	get_base_addr(); +	trigger_func();  	ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res");  	ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res"); @@ -193,7 +372,7 @@ static void pe_subtest(struct test_bpf_cookie *skel)  	attr.type = PERF_TYPE_SOFTWARE;  	attr.config = PERF_COUNT_SW_CPU_CLOCK;  	attr.freq = 1; -	attr.sample_freq = 4000; +	attr.sample_freq = 1000;  	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);  	if (!ASSERT_GE(pfd, 0, "perf_fd"))  		goto cleanup; @@ -243,6 +422,10 @@ void test_bpf_cookie(void)  	if (test__start_subtest("kprobe"))  		kprobe_subtest(skel); +	if (test__start_subtest("multi_kprobe_link_api")) +		kprobe_multi_link_api_subtest(); +	if (test__start_subtest("multi_kprobe_attach_api")) +		kprobe_multi_attach_api_subtest();  	if (test__start_subtest("uprobe"))  		uprobe_subtest(skel);  	if (test__start_subtest("tracepoint")) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index b84f859b1267..5142a7d130b2 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -138,6 +138,24 @@ static void test_task(void)  	bpf_iter_task__destroy(skel);  } +static void test_task_sleepable(void) +{ +	struct bpf_iter_task *skel; + +	skel = bpf_iter_task__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load")) +		return; + +	do_dummy_read(skel->progs.dump_task_sleepable); + +	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0, +		  "num_expected_failure_copy_from_user_task"); +	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0, +		  "num_success_copy_from_user_task"); + +	bpf_iter_task__destroy(skel); +} +  static void test_task_stack(void)  {  	struct bpf_iter_task_stack *skel; @@ -1252,6 +1270,8 @@ void test_bpf_iter(void)  		test_bpf_map();  	if (test__start_subtest("task"))  		test_task(); +	if (test__start_subtest("task_sleepable")) +		test_task_sleepable();  	if (test__start_subtest("task_stack"))  		test_task_stack();  	if (test__start_subtest("task_file")) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c new file mode 100644 index 000000000000..ee725d4d98a5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Amazon.com Inc. or its affiliates. 
*/ +#include <sys/socket.h> +#include <sys/un.h> +#include <test_progs.h> +#include "bpf_iter_setsockopt_unix.skel.h" + +#define NR_CASES 5 + +static int create_unix_socket(struct bpf_iter_setsockopt_unix *skel) +{ +	struct sockaddr_un addr = { +		.sun_family = AF_UNIX, +		.sun_path = "", +	}; +	socklen_t len; +	int fd, err; + +	fd = socket(AF_UNIX, SOCK_STREAM, 0); +	if (!ASSERT_NEQ(fd, -1, "socket")) +		return -1; + +	len = offsetof(struct sockaddr_un, sun_path); +	err = bind(fd, (struct sockaddr *)&addr, len); +	if (!ASSERT_OK(err, "bind")) +		return -1; + +	len = sizeof(addr); +	err = getsockname(fd, (struct sockaddr *)&addr, &len); +	if (!ASSERT_OK(err, "getsockname")) +		return -1; + +	memcpy(&skel->bss->sun_path, &addr.sun_path, +	       len - offsetof(struct sockaddr_un, sun_path)); + +	return fd; +} + +static void test_sndbuf(struct bpf_iter_setsockopt_unix *skel, int fd) +{ +	socklen_t optlen; +	int i, err; + +	for (i = 0; i < NR_CASES; i++) { +		if (!ASSERT_NEQ(skel->data->sndbuf_getsockopt[i], -1, +				"bpf_(get|set)sockopt")) +			return; + +		err = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, +				 &(skel->data->sndbuf_setsockopt[i]), +				 sizeof(skel->data->sndbuf_setsockopt[i])); +		if (!ASSERT_OK(err, "setsockopt")) +			return; + +		optlen = sizeof(skel->bss->sndbuf_getsockopt_expected[i]); +		err = getsockopt(fd, SOL_SOCKET, SO_SNDBUF, +				 &(skel->bss->sndbuf_getsockopt_expected[i]), +				 &optlen); +		if (!ASSERT_OK(err, "getsockopt")) +			return; + +		if (!ASSERT_EQ(skel->data->sndbuf_getsockopt[i], +			       skel->bss->sndbuf_getsockopt_expected[i], +			       "bpf_(get|set)sockopt")) +			return; +	} +} + +void test_bpf_iter_setsockopt_unix(void) +{ +	struct bpf_iter_setsockopt_unix *skel; +	int err, unix_fd, iter_fd; +	char buf; + +	skel = bpf_iter_setsockopt_unix__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "open_and_load")) +		return; + +	unix_fd = create_unix_socket(skel); +	if (!ASSERT_NEQ(unix_fd, -1, "create_unix_server")) +		goto destroy; + +	skel->links.change_sndbuf = bpf_program__attach_iter(skel->progs.change_sndbuf, NULL); +	if (!ASSERT_OK_PTR(skel->links.change_sndbuf, "bpf_program__attach_iter")) +		goto destroy; + +	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.change_sndbuf)); +	if (!ASSERT_GE(iter_fd, 0, "bpf_iter_create")) +		goto destroy; + +	while ((err = read(iter_fd, &buf, sizeof(buf))) == -1 && +	       errno == EAGAIN) +		; +	if (!ASSERT_OK(err, "read iter error")) +		goto destroy; + +	test_sndbuf(skel, unix_fd); +destroy: +	bpf_iter_setsockopt_unix__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c new file mode 100644 index 000000000000..d43f548c572c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <unistd.h> +#include <pthread.h> +#include <sys/mman.h> +#include <stdatomic.h> +#include <test_progs.h> +#include <sys/syscall.h> +#include <linux/module.h> +#include <linux/userfaultfd.h> + +#include "ksym_race.skel.h" +#include "bpf_mod_race.skel.h" +#include "kfunc_call_race.skel.h" + +/* This test crafts a race between btf_try_get_module and do_init_module, and + * checks whether btf_try_get_module handles the invocation for a well-formed + * but uninitialized module correctly. Unless the module has completed its + * initcalls, the verifier should fail the program load and return ENXIO. 
+ * + * userfaultfd is used to trigger a fault in an fmod_ret program, and make it + * sleep, then the BPF program is loaded and the return value from verifier is + * inspected. After this, the userfaultfd is closed so that the module loading + * thread makes forward progress, and fmod_ret injects an error so that the + * module load fails and it is freed. + * + * If the verifier succeeded in loading the supplied program, it will end up + * taking reference to freed module, and trigger a crash when the program fd + * is closed later. This is true for both kfuncs and ksyms. In both cases, + * the crash is triggered inside bpf_prog_free_deferred, when module reference + * is finally released. + */ + +struct test_config { +	const char *str_open; +	void *(*bpf_open_and_load)(); +	void (*bpf_destroy)(void *); +}; + +enum test_state { +	_TS_INVALID, +	TS_MODULE_LOAD, +	TS_MODULE_LOAD_FAIL, +}; + +static _Atomic enum test_state state = _TS_INVALID; + +static int sys_finit_module(int fd, const char *param_values, int flags) +{ +	return syscall(__NR_finit_module, fd, param_values, flags); +} + +static int sys_delete_module(const char *name, unsigned int flags) +{ +	return syscall(__NR_delete_module, name, flags); +} + +static int load_module(const char *mod) +{ +	int ret, fd; + +	fd = open("bpf_testmod.ko", O_RDONLY); +	if (fd < 0) +		return fd; + +	ret = sys_finit_module(fd, "", 0); +	close(fd); +	if (ret < 0) +		return ret; +	return 0; +} + +static void *load_module_thread(void *p) +{ + +	if (!ASSERT_NEQ(load_module("bpf_testmod.ko"), 0, "load_module_thread must fail")) +		atomic_store(&state, TS_MODULE_LOAD); +	else +		atomic_store(&state, TS_MODULE_LOAD_FAIL); +	return p; +} + +static int sys_userfaultfd(int flags) +{ +	return syscall(__NR_userfaultfd, flags); +} + +static int test_setup_uffd(void *fault_addr) +{ +	struct uffdio_register uffd_register = {}; +	struct uffdio_api uffd_api = {}; +	int uffd; + +	uffd = sys_userfaultfd(O_CLOEXEC); +	if (uffd < 0) +		return -errno; + +	uffd_api.api = UFFD_API; +	uffd_api.features = 0; +	if (ioctl(uffd, UFFDIO_API, &uffd_api)) { +		close(uffd); +		return -1; +	} + +	uffd_register.range.start = (unsigned long)fault_addr; +	uffd_register.range.len = 4096; +	uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING; +	if (ioctl(uffd, UFFDIO_REGISTER, &uffd_register)) { +		close(uffd); +		return -1; +	} +	return uffd; +} + +static void test_bpf_mod_race_config(const struct test_config *config) +{ +	void *fault_addr, *skel_fail; +	struct bpf_mod_race *skel; +	struct uffd_msg uffd_msg; +	pthread_t load_mod_thrd; +	_Atomic int *blockingp; +	int uffd, ret; + +	fault_addr = mmap(0, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); +	if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration")) +		return; + +	if (!ASSERT_OK(sys_delete_module("bpf_testmod", 0), "unload bpf_testmod")) +		goto end_mmap; + +	skel = bpf_mod_race__open(); +	if (!ASSERT_OK_PTR(skel, "bpf_mod_kfunc_race__open")) +		goto end_module; + +	skel->rodata->bpf_mod_race_config.tgid = getpid(); +	skel->rodata->bpf_mod_race_config.inject_error = -4242; +	skel->rodata->bpf_mod_race_config.fault_addr = fault_addr; +	if (!ASSERT_OK(bpf_mod_race__load(skel), "bpf_mod___load")) +		goto end_destroy; +	blockingp = (_Atomic int *)&skel->bss->bpf_blocking; + +	if (!ASSERT_OK(bpf_mod_race__attach(skel), "bpf_mod_kfunc_race__attach")) +		goto end_destroy; + +	uffd = test_setup_uffd(fault_addr); +	if (!ASSERT_GE(uffd, 0, "userfaultfd open + register address")) +		goto end_destroy; + +	if 
(!ASSERT_OK(pthread_create(&load_mod_thrd, NULL, load_module_thread, NULL), +		       "load module thread")) +		goto end_uffd; + +	/* Now, we either fail loading module, or block in bpf prog, spin to find out */ +	while (!atomic_load(&state) && !atomic_load(blockingp)) +		; +	if (!ASSERT_EQ(state, _TS_INVALID, "module load should block")) +		goto end_join; +	if (!ASSERT_EQ(*blockingp, 1, "module load blocked")) { +		pthread_kill(load_mod_thrd, SIGKILL); +		goto end_uffd; +	} + +	/* We might have set bpf_blocking to 1, but may have not blocked in +	 * bpf_copy_from_user. Read userfaultfd descriptor to verify that. +	 */ +	if (!ASSERT_EQ(read(uffd, &uffd_msg, sizeof(uffd_msg)), sizeof(uffd_msg), +		       "read uffd block event")) +		goto end_join; +	if (!ASSERT_EQ(uffd_msg.event, UFFD_EVENT_PAGEFAULT, "read uffd event is pagefault")) +		goto end_join; + +	/* We know that load_mod_thrd is blocked in the fmod_ret program, the +	 * module state is still MODULE_STATE_COMING because mod->init hasn't +	 * returned. This is the time we try to load a program calling kfunc and +	 * check if we get ENXIO from verifier. +	 */ +	skel_fail = config->bpf_open_and_load(); +	ret = errno; +	if (!ASSERT_EQ(skel_fail, NULL, config->str_open)) { +		/* Close uffd to unblock load_mod_thrd */ +		close(uffd); +		uffd = -1; +		while (atomic_load(blockingp) != 2) +			; +		ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu"); +		config->bpf_destroy(skel_fail); +		goto end_join; + +	} +	ASSERT_EQ(ret, ENXIO, "verifier returns ENXIO"); +	ASSERT_EQ(skel->data->res_try_get_module, false, "btf_try_get_module == false"); + +	close(uffd); +	uffd = -1; +end_join: +	pthread_join(load_mod_thrd, NULL); +	if (uffd < 0) +		ASSERT_EQ(atomic_load(&state), TS_MODULE_LOAD_FAIL, "load_mod_thrd success"); +end_uffd: +	if (uffd >= 0) +		close(uffd); +end_destroy: +	bpf_mod_race__destroy(skel); +	ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu"); +end_module: +	sys_delete_module("bpf_testmod", 0); +	ASSERT_OK(load_module("bpf_testmod.ko"), "restore bpf_testmod"); +end_mmap: +	munmap(fault_addr, 4096); +	atomic_store(&state, _TS_INVALID); +} + +static const struct test_config ksym_config = { +	.str_open = "ksym_race__open_and_load", +	.bpf_open_and_load = (void *)ksym_race__open_and_load, +	.bpf_destroy = (void *)ksym_race__destroy, +}; + +static const struct test_config kfunc_config = { +	.str_open = "kfunc_call_race__open_and_load", +	.bpf_open_and_load = (void *)kfunc_call_race__open_and_load, +	.bpf_destroy = (void *)kfunc_call_race__destroy, +}; + +void serial_test_bpf_mod_race(void) +{ +	if (test__start_subtest("ksym (used_btfs UAF)")) +		test_bpf_mod_race_config(&ksym_config); +	if (test__start_subtest("kfunc (kfunc_btf_tab UAF)")) +		test_bpf_mod_race_config(&kfunc_config); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c new file mode 100644 index 000000000000..dd30b1e3a67c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include "test_bpf_nf.skel.h" + +enum { +	TEST_XDP, +	TEST_TC_BPF, +}; + +void test_bpf_nf_ct(int mode) +{ +	struct test_bpf_nf *skel; +	int prog_fd, err; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	); + +	skel = test_bpf_nf__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load")) +		return; + +	if (mode == TEST_XDP) +		prog_fd = 
bpf_program__fd(skel->progs.nf_xdp_ct_test); +	else +		prog_fd = bpf_program__fd(skel->progs.nf_skb_ct_test); + +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	if (!ASSERT_OK(err, "bpf_prog_test_run")) +		goto end; + +	ASSERT_EQ(skel->bss->test_einval_bpf_tuple, -EINVAL, "Test EINVAL for NULL bpf_tuple"); +	ASSERT_EQ(skel->bss->test_einval_reserved, -EINVAL, "Test EINVAL for reserved not set to 0"); +	ASSERT_EQ(skel->bss->test_einval_netns_id, -EINVAL, "Test EINVAL for netns_id < -1"); +	ASSERT_EQ(skel->bss->test_einval_len_opts, -EINVAL, "Test EINVAL for len__opts != NF_BPF_CT_OPTS_SZ"); +	ASSERT_EQ(skel->bss->test_eproto_l4proto, -EPROTO, "Test EPROTO for l4proto != TCP or UDP"); +	ASSERT_EQ(skel->bss->test_enonet_netns_id, -ENONET, "Test ENONET for bad but valid netns_id"); +	ASSERT_EQ(skel->bss->test_enoent_lookup, -ENOENT, "Test ENOENT for failed lookup"); +	ASSERT_EQ(skel->bss->test_eafnosupport, -EAFNOSUPPORT, "Test EAFNOSUPPORT for invalid len__tuple"); +end: +	test_bpf_nf__destroy(skel); +} + +void test_bpf_nf(void) +{ +	if (test__start_subtest("xdp-ct")) +		test_bpf_nf_ct(TEST_XDP); +	if (test__start_subtest("tc-bpf-ct")) +		test_bpf_nf_ct(TEST_TC_BPF); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 8ba53acf9eb4..ec823561b912 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -3939,6 +3939,25 @@ static struct btf_raw_test raw_tests[] = {  	.err_str = "Invalid component_idx",  },  { +	.descr = "decl_tag test #15, func, invalid func proto", +	.raw_types = { +		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */ +		BTF_DECL_TAG_ENC(NAME_TBD, 3, 0),		/* [2] */ +		BTF_FUNC_ENC(NAME_TBD, 8),			/* [3] */ +		BTF_END_RAW, +	}, +	BTF_STR_SEC("\0tag\0func"), +	.map_type = BPF_MAP_TYPE_ARRAY, +	.map_name = "tag_type_check_btf", +	.key_size = sizeof(int), +	.value_size = 4, +	.key_type_id = 1, +	.value_type_id = 1, +	.max_entries = 1, +	.btf_load_err = true, +	.err_str = "Invalid type_id", +}, +{  	.descr = "type_tag test #1",  	.raw_types = {  		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */ @@ -4560,6 +4579,8 @@ static void do_test_file(unsigned int test_num)  	has_btf_ext = btf_ext != NULL;  	btf_ext__free(btf_ext); +	/* temporary disable LIBBPF_STRICT_MAP_DEFINITIONS to test legacy maps */ +	libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);  	obj = bpf_object__open(test->file);  	err = libbpf_get_error(obj);  	if (CHECK(err, "obj: %d", err)) @@ -4684,6 +4705,8 @@ skip:  	fprintf(stderr, "OK");  done: +	libbpf_set_strict_mode(LIBBPF_STRICT_ALL); +  	btf__free(btf);  	free(func_info);  	bpf_object__close(obj); @@ -6533,7 +6556,7 @@ done:  static void do_test_info_raw(unsigned int test_num)  {  	const struct prog_info_raw_test *test = &info_raw_tests[test_num - 1]; -	unsigned int raw_btf_size, linfo_str_off, linfo_size; +	unsigned int raw_btf_size, linfo_str_off, linfo_size = 0;  	int btf_fd = -1, prog_fd = -1, err = 0;  	void *raw_btf, *patched_linfo = NULL;  	const char *ret_next_str; diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index 9e26903f9170..5fce7008d1ff 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -148,22 +148,38 @@ static void test_btf_dump_incremental(void)  	/* First, generate BTF corresponding to the following C code:  	 * -	 * enum { VAL = 1 }; +	 * enum x; +	 * +	
 * enum x { X = 1 }; +	 * +	 * enum { Y = 1 }; +	 * +	 * struct s;  	 *  	 * struct s { int x; };  	 *  	 */ +	id = btf__add_enum(btf, "x", 4); +	ASSERT_EQ(id, 1, "enum_declaration_id"); +	id = btf__add_enum(btf, "x", 4); +	ASSERT_EQ(id, 2, "named_enum_id"); +	err = btf__add_enum_value(btf, "X", 1); +	ASSERT_OK(err, "named_enum_val_ok"); +  	id = btf__add_enum(btf, NULL, 4); -	ASSERT_EQ(id, 1, "enum_id"); -	err = btf__add_enum_value(btf, "VAL", 1); -	ASSERT_OK(err, "enum_val_ok"); +	ASSERT_EQ(id, 3, "anon_enum_id"); +	err = btf__add_enum_value(btf, "Y", 1); +	ASSERT_OK(err, "anon_enum_val_ok");  	id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED); -	ASSERT_EQ(id, 2, "int_id"); +	ASSERT_EQ(id, 4, "int_id"); + +	id = btf__add_fwd(btf, "s", BTF_FWD_STRUCT); +	ASSERT_EQ(id, 5, "fwd_id");  	id = btf__add_struct(btf, "s", 4); -	ASSERT_EQ(id, 3, "struct_id"); -	err = btf__add_field(btf, "x", 2, 0, 0); +	ASSERT_EQ(id, 6, "struct_id"); +	err = btf__add_field(btf, "x", 4, 0, 0);  	ASSERT_OK(err, "field_ok");  	for (i = 1; i < btf__type_cnt(btf); i++) { @@ -173,11 +189,20 @@ static void test_btf_dump_incremental(void)  	fflush(dump_buf_file);  	dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */ +  	ASSERT_STREQ(dump_buf, +"enum x;\n" +"\n" +"enum x {\n" +"	X = 1,\n" +"};\n" +"\n"  "enum {\n" -"	VAL = 1,\n" +"	Y = 1,\n"  "};\n"  "\n" +"struct s;\n" +"\n"  "struct s {\n"  "	int x;\n"  "};\n\n", "c_dump1"); @@ -199,10 +224,12 @@ static void test_btf_dump_incremental(void)  	fseek(dump_buf_file, 0, SEEK_SET);  	id = btf__add_struct(btf, "s", 4); -	ASSERT_EQ(id, 4, "struct_id"); -	err = btf__add_field(btf, "x", 1, 0, 0); +	ASSERT_EQ(id, 7, "struct_id"); +	err = btf__add_field(btf, "x", 2, 0, 0); +	ASSERT_OK(err, "field_ok"); +	err = btf__add_field(btf, "y", 3, 32, 0);  	ASSERT_OK(err, "field_ok"); -	err = btf__add_field(btf, "s", 3, 32, 0); +	err = btf__add_field(btf, "s", 6, 64, 0);  	ASSERT_OK(err, "field_ok");  	for (i = 1; i < btf__type_cnt(btf); i++) { @@ -214,9 +241,10 @@ static void test_btf_dump_incremental(void)  	dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */  	ASSERT_STREQ(dump_buf,  "struct s___2 {\n" +"	enum x x;\n"  "	enum {\n" -"		VAL___2 = 1,\n" -"	} x;\n" +"		Y___2 = 1,\n" +"	} y;\n"  "	struct s s;\n"  "};\n\n" , "c_dump1"); diff --git a/tools/testing/selftests/bpf/prog_tests/btf_tag.c b/tools/testing/selftests/bpf/prog_tests/btf_tag.c index 88d63e23e35f..071430cd54de 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_tag.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_tag.c @@ -1,19 +1,22 @@  // SPDX-License-Identifier: GPL-2.0  /* Copyright (c) 2021 Facebook */  #include <test_progs.h> -#include "btf_decl_tag.skel.h" +#include <bpf/btf.h> +#include "test_btf_decl_tag.skel.h"  /* struct btf_type_tag_test is referenced in btf_type_tag.skel.h */  struct btf_type_tag_test {          int **p;  };  #include "btf_type_tag.skel.h" +#include "btf_type_tag_user.skel.h" +#include "btf_type_tag_percpu.skel.h"  static void test_btf_decl_tag(void)  { -	struct btf_decl_tag *skel; +	struct test_btf_decl_tag *skel; -	skel = btf_decl_tag__open_and_load(); +	skel = test_btf_decl_tag__open_and_load();  	if (!ASSERT_OK_PTR(skel, "btf_decl_tag"))  		return; @@ -22,7 +25,7 @@ static void test_btf_decl_tag(void)  		test__skip();  	} -	btf_decl_tag__destroy(skel); +	test_btf_decl_tag__destroy(skel);  }  static void test_btf_type_tag(void) @@ -41,10 +44,206 @@ static void test_btf_type_tag(void)  	btf_type_tag__destroy(skel);  } +/* loads vmlinux_btf as well 
as module_btf. If the caller passes NULL as + * module_btf, it will not load module btf. + * + * Returns 0 on success. + * Returns -1 on error. In case of error, the loaded btf will be freed and the + * input parameters will be set to NULL. + */ +static int load_btfs(struct btf **vmlinux_btf, struct btf **module_btf, +		     bool needs_vmlinux_tag) +{ +	const char *module_name = "bpf_testmod"; +	__s32 type_id; + +	if (!env.has_testmod) { +		test__skip(); +		return -1; +	} + +	*vmlinux_btf = btf__load_vmlinux_btf(); +	if (!ASSERT_OK_PTR(*vmlinux_btf, "could not load vmlinux BTF")) +		return -1; + +	if (!needs_vmlinux_tag) +		goto load_module_btf; + +	/* skip the test if the vmlinux does not have __user tags */ +	type_id = btf__find_by_name_kind(*vmlinux_btf, "user", BTF_KIND_TYPE_TAG); +	if (type_id <= 0) { +		printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__); +		test__skip(); +		goto free_vmlinux_btf; +	} + +load_module_btf: +	/* skip loading module_btf, if not requested by caller */ +	if (!module_btf) +		return 0; + +	*module_btf = btf__load_module_btf(module_name, *vmlinux_btf); +	if (!ASSERT_OK_PTR(*module_btf, "could not load module BTF")) +		goto free_vmlinux_btf; + +	/* skip the test if the module does not have __user tags */ +	type_id = btf__find_by_name_kind(*module_btf, "user", BTF_KIND_TYPE_TAG); +	if (type_id <= 0) { +		printf("%s:SKIP: btf_type_tag attribute not in %s", __func__, module_name); +		test__skip(); +		goto free_module_btf; +	} + +	return 0; + +free_module_btf: +	btf__free(*module_btf); +free_vmlinux_btf: +	btf__free(*vmlinux_btf); + +	*vmlinux_btf = NULL; +	if (module_btf) +		*module_btf = NULL; +	return -1; +} + +static void test_btf_type_tag_mod_user(bool load_test_user1) +{ +	struct btf *vmlinux_btf = NULL, *module_btf = NULL; +	struct btf_type_tag_user *skel; +	int err; + +	if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false)) +		return; + +	skel = btf_type_tag_user__open(); +	if (!ASSERT_OK_PTR(skel, "btf_type_tag_user")) +		goto cleanup; + +	bpf_program__set_autoload(skel->progs.test_sys_getsockname, false); +	if (load_test_user1) +		bpf_program__set_autoload(skel->progs.test_user2, false); +	else +		bpf_program__set_autoload(skel->progs.test_user1, false); + +	err = btf_type_tag_user__load(skel); +	ASSERT_ERR(err, "btf_type_tag_user"); + +	btf_type_tag_user__destroy(skel); + +cleanup: +	btf__free(module_btf); +	btf__free(vmlinux_btf); +} + +static void test_btf_type_tag_vmlinux_user(void) +{ +	struct btf_type_tag_user *skel; +	struct btf *vmlinux_btf = NULL; +	int err; + +	if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true)) +		return; + +	skel = btf_type_tag_user__open(); +	if (!ASSERT_OK_PTR(skel, "btf_type_tag_user")) +		goto cleanup; + +	bpf_program__set_autoload(skel->progs.test_user2, false); +	bpf_program__set_autoload(skel->progs.test_user1, false); + +	err = btf_type_tag_user__load(skel); +	ASSERT_ERR(err, "btf_type_tag_user"); + +	btf_type_tag_user__destroy(skel); + +cleanup: +	btf__free(vmlinux_btf); +} + +static void test_btf_type_tag_mod_percpu(bool load_test_percpu1) +{ +	struct btf *vmlinux_btf, *module_btf; +	struct btf_type_tag_percpu *skel; +	int err; + +	if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false)) +		return; + +	skel = btf_type_tag_percpu__open(); +	if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu")) +		goto cleanup; + +	bpf_program__set_autoload(skel->progs.test_percpu_load, false); +	bpf_program__set_autoload(skel->progs.test_percpu_helper, false); +	if 
(load_test_percpu1) +		bpf_program__set_autoload(skel->progs.test_percpu2, false); +	else +		bpf_program__set_autoload(skel->progs.test_percpu1, false); + +	err = btf_type_tag_percpu__load(skel); +	ASSERT_ERR(err, "btf_type_tag_percpu"); + +	btf_type_tag_percpu__destroy(skel); + +cleanup: +	btf__free(module_btf); +	btf__free(vmlinux_btf); +} + +static void test_btf_type_tag_vmlinux_percpu(bool load_test) +{ +	struct btf_type_tag_percpu *skel; +	struct btf *vmlinux_btf = NULL; +	int err; + +	if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true)) +		return; + +	skel = btf_type_tag_percpu__open(); +	if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu")) +		goto cleanup; + +	bpf_program__set_autoload(skel->progs.test_percpu2, false); +	bpf_program__set_autoload(skel->progs.test_percpu1, false); +	if (load_test) { +		bpf_program__set_autoload(skel->progs.test_percpu_helper, false); + +		err = btf_type_tag_percpu__load(skel); +		ASSERT_ERR(err, "btf_type_tag_percpu_load"); +	} else { +		bpf_program__set_autoload(skel->progs.test_percpu_load, false); + +		err = btf_type_tag_percpu__load(skel); +		ASSERT_OK(err, "btf_type_tag_percpu_helper"); +	} + +	btf_type_tag_percpu__destroy(skel); + +cleanup: +	btf__free(vmlinux_btf); +} +  void test_btf_tag(void)  {  	if (test__start_subtest("btf_decl_tag"))  		test_btf_decl_tag();  	if (test__start_subtest("btf_type_tag"))  		test_btf_type_tag(); + +	if (test__start_subtest("btf_type_tag_user_mod1")) +		test_btf_type_tag_mod_user(true); +	if (test__start_subtest("btf_type_tag_user_mod2")) +		test_btf_type_tag_mod_user(false); +	if (test__start_subtest("btf_type_tag_sys_user_vmlinux")) +		test_btf_type_tag_vmlinux_user(); + +	if (test__start_subtest("btf_type_tag_percpu_mod1")) +		test_btf_type_tag_mod_percpu(true); +	if (test__start_subtest("btf_type_tag_percpu_mod2")) +		test_btf_type_tag_mod_percpu(false); +	if (test__start_subtest("btf_type_tag_percpu_vmlinux_load")) +		test_btf_type_tag_vmlinux_percpu(true); +	if (test__start_subtest("btf_type_tag_percpu_vmlinux_helper")) +		test_btf_type_tag_vmlinux_percpu(false);  } diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c index 858916d11e2e..9367bd2f0ae1 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c @@ -14,7 +14,7 @@ static int prog_load(void)  		BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = 1 */  		BPF_EXIT_INSN(),  	}; -	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); +	size_t insns_cnt = ARRAY_SIZE(prog);  	return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,  			       prog, insns_cnt, "GPL", 0, diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c index d3e8f729c623..db0b7bac78d1 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c @@ -63,7 +63,7 @@ static int prog_load_cnt(int verdict, int val)  		BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */  		BPF_EXIT_INSN(),  	}; -	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); +	size_t insns_cnt = ARRAY_SIZE(prog);  	int ret;  	ret = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, @@ -194,14 +194,14 @@ void serial_test_cgroup_attach_multi(void)  	attach_opts.flags = BPF_F_ALLOW_OVERRIDE | BPF_F_REPLACE;  	attach_opts.replace_prog_fd = allow_prog[0]; -	if 
(CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, +	if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1,  					 BPF_CGROUP_INET_EGRESS, &attach_opts),  		  "fail_prog_replace_override", "unexpected success\n"))  		goto err;  	CHECK_FAIL(errno != EINVAL);  	attach_opts.flags = BPF_F_REPLACE; -	if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, +	if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1,  					 BPF_CGROUP_INET_EGRESS, &attach_opts),  		  "fail_prog_replace_no_multi", "unexpected success\n"))  		goto err; @@ -209,7 +209,7 @@ void serial_test_cgroup_attach_multi(void)  	attach_opts.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;  	attach_opts.replace_prog_fd = -1; -	if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, +	if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1,  					 BPF_CGROUP_INET_EGRESS, &attach_opts),  		  "fail_prog_replace_bad_fd", "unexpected success\n"))  		goto err; @@ -217,7 +217,7 @@ void serial_test_cgroup_attach_multi(void)  	/* replacing a program that is not attached to cgroup should fail  */  	attach_opts.replace_prog_fd = allow_prog[3]; -	if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, +	if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1,  					 BPF_CGROUP_INET_EGRESS, &attach_opts),  		  "fail_prog_replace_no_ent", "unexpected success\n"))  		goto err; @@ -225,14 +225,14 @@ void serial_test_cgroup_attach_multi(void)  	/* replace 1st from the top program */  	attach_opts.replace_prog_fd = allow_prog[0]; -	if (CHECK(bpf_prog_attach_xattr(allow_prog[6], cg1, +	if (CHECK(bpf_prog_attach_opts(allow_prog[6], cg1,  					BPF_CGROUP_INET_EGRESS, &attach_opts),  		  "prog_replace", "errno=%d\n", errno))  		goto err;  	/* replace program with itself */  	attach_opts.replace_prog_fd = allow_prog[6]; -	if (CHECK(bpf_prog_attach_xattr(allow_prog[6], cg1, +	if (CHECK(bpf_prog_attach_opts(allow_prog[6], cg1,  					BPF_CGROUP_INET_EGRESS, &attach_opts),  		  "prog_replace", "errno=%d\n", errno))  		goto err; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c index 356547e849e2..9421a5b7f4e1 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c @@ -16,7 +16,7 @@ static int prog_load(int verdict)  		BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */  		BPF_EXIT_INSN(),  	}; -	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); +	size_t insns_cnt = ARRAY_SIZE(prog);  	return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,  			       prog, insns_cnt, "GPL", 0, diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c new file mode 100644 index 000000000000..0b47c3c000c7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c @@ -0,0 +1,481 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright 2021 Google LLC. 
+ */ + +#include <test_progs.h> +#include <cgroup_helpers.h> +#include <network_helpers.h> + +#include "cgroup_getset_retval_setsockopt.skel.h" +#include "cgroup_getset_retval_getsockopt.skel.h" + +#define SOL_CUSTOM	0xdeadbeef + +static int zero; + +static void test_setsockopt_set(int cgroup_fd, int sock_fd) +{ +	struct cgroup_getset_retval_setsockopt *obj; +	struct bpf_link *link_set_eunatch = NULL; + +	obj = cgroup_getset_retval_setsockopt__open_and_load(); +	if (!ASSERT_OK_PTR(obj, "skel-load")) +		return; + +	/* Attach setsockopt that sets EUNATCH, assert that +	 * we actually get that error when we run setsockopt() +	 */ +	link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch, +						      cgroup_fd); +	if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch")) +		goto close_bpf_object; + +	if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, +				   &zero, sizeof(int)), "setsockopt")) +		goto close_bpf_object; +	if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno")) +		goto close_bpf_object; + +	if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations")) +		goto close_bpf_object; +	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) +		goto close_bpf_object; + +close_bpf_object: +	bpf_link__destroy(link_set_eunatch); + +	cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_set_and_get(int cgroup_fd, int sock_fd) +{ +	struct cgroup_getset_retval_setsockopt *obj; +	struct bpf_link *link_set_eunatch = NULL, *link_get_retval = NULL; + +	obj = cgroup_getset_retval_setsockopt__open_and_load(); +	if (!ASSERT_OK_PTR(obj, "skel-load")) +		return; + +	/* Attach setsockopt that sets EUNATCH, and one that gets the +	 * previously set errno. Assert that we get the same errno back. +	 */ +	link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch, +						      cgroup_fd); +	if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch")) +		goto close_bpf_object; +	link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, +						     cgroup_fd); +	if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) +		goto close_bpf_object; + +	if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, +				   &zero, sizeof(int)), "setsockopt")) +		goto close_bpf_object; +	if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno")) +		goto close_bpf_object; + +	if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations")) +		goto close_bpf_object; +	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) +		goto close_bpf_object; +	if (!ASSERT_EQ(obj->bss->retval_value, -EUNATCH, "retval_value")) +		goto close_bpf_object; + +close_bpf_object: +	bpf_link__destroy(link_set_eunatch); +	bpf_link__destroy(link_get_retval); + +	cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_default_zero(int cgroup_fd, int sock_fd) +{ +	struct cgroup_getset_retval_setsockopt *obj; +	struct bpf_link *link_get_retval = NULL; + +	obj = cgroup_getset_retval_setsockopt__open_and_load(); +	if (!ASSERT_OK_PTR(obj, "skel-load")) +		return; + +	/* Attach setsockopt that gets the previously set errno. +	 * Assert that, without anything setting one, we get 0. 
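For reference, the retval-reading side of these tests can be sketched as below. This is an editor's illustration assuming the bpf_get_retval() helper; the real program lives in progs/cgroup_getset_retval_setsockopt.c, and only the invocations/retval_value names are taken from the asserts in this file:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	int invocations;
	int retval_value;

	SEC("cgroup/setsockopt")
	int get_retval(struct bpf_sockopt *ctx)
	{
		/* export whatever retval earlier progs (or the kernel) set */
		retval_value = bpf_get_retval();
		__sync_fetch_and_add(&invocations, 1);
		return 1;	/* 1 == allow the syscall to proceed */
	}

	char _license[] SEC("license") = "GPL";
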
+	 */ +	link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, +						     cgroup_fd); +	if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) +		goto close_bpf_object; + +	if (!ASSERT_OK(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, +				  &zero, sizeof(int)), "setsockopt")) +		goto close_bpf_object; + +	if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations")) +		goto close_bpf_object; +	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) +		goto close_bpf_object; +	if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value")) +		goto close_bpf_object; + +close_bpf_object: +	bpf_link__destroy(link_get_retval); + +	cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_default_zero_and_set(int cgroup_fd, int sock_fd) +{ +	struct cgroup_getset_retval_setsockopt *obj; +	struct bpf_link *link_get_retval = NULL, *link_set_eunatch = NULL; + +	obj = cgroup_getset_retval_setsockopt__open_and_load(); +	if (!ASSERT_OK_PTR(obj, "skel-load")) +		return; + +	/* Attach setsockopt that gets the previously set errno, and then +	 * one that sets the errno to EUNATCH. Assert that the get does not +	 * see EUNATCH set later, and does not prevent EUNATCH from being set. +	 */ +	link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, +						     cgroup_fd); +	if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) +		goto close_bpf_object; +	link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch, +						      cgroup_fd); +	if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch")) +		goto close_bpf_object; + +	if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, +				   &zero, sizeof(int)), "setsockopt")) +		goto close_bpf_object; +	if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno")) +		goto close_bpf_object; + +	if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations")) +		goto close_bpf_object; +	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) +		goto close_bpf_object; +	if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value")) +		goto close_bpf_object; + +close_bpf_object: +	bpf_link__destroy(link_get_retval); +	bpf_link__destroy(link_set_eunatch); + +	cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_override(int cgroup_fd, int sock_fd) +{ +	struct cgroup_getset_retval_setsockopt *obj; +	struct bpf_link *link_set_eunatch = NULL, *link_set_eisconn = NULL; +	struct bpf_link *link_get_retval = NULL; + +	obj = cgroup_getset_retval_setsockopt__open_and_load(); +	if (!ASSERT_OK_PTR(obj, "skel-load")) +		return; + +	/* Attach setsockopt that sets EUNATCH, then one that sets EISCONN, +	 * and then one that gets the exported errno. Assert that both the +	 * syscall and the helper see the last set errno. 
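The "last writer wins" behavior asserted here presumably comes down to each set_* program making one bpf_set_retval() call; a hedged sketch extending the one above (same includes, plus <errno.h>, and again not the actual skeleton source):

	int assertion_error;

	SEC("cgroup/setsockopt")
	int set_eisconn(struct bpf_sockopt *ctx)
	{
		__sync_fetch_and_add(&invocations, 1);
		/* overwrite any retval exported by a previously-run prog */
		if (bpf_set_retval(-EISCONN))
			assertion_error = 1;	/* helper unexpectedly failed */
		return 1;
	}
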
+	 */ +	link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch, +						      cgroup_fd); +	if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch")) +		goto close_bpf_object; +	link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn, +						      cgroup_fd); +	if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn")) +		goto close_bpf_object; +	link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, +						     cgroup_fd); +	if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) +		goto close_bpf_object; + +	if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, +				   &zero, sizeof(int)), "setsockopt")) +		goto close_bpf_object; +	if (!ASSERT_EQ(errno, EISCONN, "setsockopt-errno")) +		goto close_bpf_object; + +	if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations")) +		goto close_bpf_object; +	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) +		goto close_bpf_object; +	if (!ASSERT_EQ(obj->bss->retval_value, -EISCONN, "retval_value")) +		goto close_bpf_object; + +close_bpf_object: +	bpf_link__destroy(link_set_eunatch); +	bpf_link__destroy(link_set_eisconn); +	bpf_link__destroy(link_get_retval); + +	cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_legacy_eperm(int cgroup_fd, int sock_fd) +{ +	struct cgroup_getset_retval_setsockopt *obj; +	struct bpf_link *link_legacy_eperm = NULL, *link_get_retval = NULL; + +	obj = cgroup_getset_retval_setsockopt__open_and_load(); +	if (!ASSERT_OK_PTR(obj, "skel-load")) +		return; + +	/* Attach setsockopt that returns a reject without setting errno +	 * (legacy reject), and one that gets the errno. Assert that for +	 * backward compatibility the syscall results in EPERM, and this +	 * is also visible to the helper. +	 */ +	link_legacy_eperm = bpf_program__attach_cgroup(obj->progs.legacy_eperm, +						       cgroup_fd); +	if (!ASSERT_OK_PTR(link_legacy_eperm, "cg-attach-legacy_eperm")) +		goto close_bpf_object; +	link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, +						     cgroup_fd); +	if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) +		goto close_bpf_object; + +	if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, +				   &zero, sizeof(int)), "setsockopt")) +		goto close_bpf_object; +	if (!ASSERT_EQ(errno, EPERM, "setsockopt-errno")) +		goto close_bpf_object; + +	if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations")) +		goto close_bpf_object; +	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) +		goto close_bpf_object; +	if (!ASSERT_EQ(obj->bss->retval_value, -EPERM, "retval_value")) +		goto close_bpf_object; + +close_bpf_object: +	bpf_link__destroy(link_legacy_eperm); +	bpf_link__destroy(link_get_retval); + +	cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_legacy_no_override(int cgroup_fd, int sock_fd) +{ +	struct cgroup_getset_retval_setsockopt *obj; +	struct bpf_link *link_set_eunatch = NULL, *link_legacy_eperm = NULL; +	struct bpf_link *link_get_retval = NULL; + +	obj = cgroup_getset_retval_setsockopt__open_and_load(); +	if (!ASSERT_OK_PTR(obj, "skel-load")) +		return; + +	/* Attach setsockopt that sets EUNATCH, then one that returns a reject +	 * without setting errno, and then one that gets the exported errno. +	 * Assert that both the syscall and the helper's errno are unaffected +	 * by the second prog (i.e. a legacy reject does not override the +	 * errno to EPERM). 
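A legacy reject, in this scheme, is simply a program that returns the 0 verdict without touching the retval; sketched under the same assumptions as the snippets above:

	SEC("cgroup/setsockopt")
	int legacy_eperm(struct bpf_sockopt *ctx)
	{
		__sync_fetch_and_add(&invocations, 1);
		/* no bpf_set_retval() call: the 0 verdict maps to -EPERM
		 * only when no other prog has exported an explicit errno
		 */
		return 0;
	}
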
+	 */ +	link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch, +						      cgroup_fd); +	if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch")) +		goto close_bpf_object; +	link_legacy_eperm = bpf_program__attach_cgroup(obj->progs.legacy_eperm, +						       cgroup_fd); +	if (!ASSERT_OK_PTR(link_legacy_eperm, "cg-attach-legacy_eperm")) +		goto close_bpf_object; +	link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, +						     cgroup_fd); +	if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) +		goto close_bpf_object; + +	if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, +				   &zero, sizeof(int)), "setsockopt")) +		goto close_bpf_object; +	if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno")) +		goto close_bpf_object; + +	if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations")) +		goto close_bpf_object; +	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) +		goto close_bpf_object; +	if (!ASSERT_EQ(obj->bss->retval_value, -EUNATCH, "retval_value")) +		goto close_bpf_object; + +close_bpf_object: +	bpf_link__destroy(link_set_eunatch); +	bpf_link__destroy(link_legacy_eperm); +	bpf_link__destroy(link_get_retval); + +	cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_getsockopt_get(int cgroup_fd, int sock_fd) +{ +	struct cgroup_getset_retval_getsockopt *obj; +	struct bpf_link *link_get_retval = NULL; +	int buf; +	socklen_t optlen = sizeof(buf); + +	obj = cgroup_getset_retval_getsockopt__open_and_load(); +	if (!ASSERT_OK_PTR(obj, "skel-load")) +		return; + +	/* Attach getsockopt that gets the previously set errno. Assert that +	 * the error from the kernel is in both ctx_retval_value and +	 * retval_value. +	 */ +	link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, +						     cgroup_fd); +	if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) +		goto close_bpf_object; + +	if (!ASSERT_ERR(getsockopt(sock_fd, SOL_CUSTOM, 0, +				   &buf, &optlen), "getsockopt")) +		goto close_bpf_object; +	if (!ASSERT_EQ(errno, EOPNOTSUPP, "getsockopt-errno")) +		goto close_bpf_object; + +	if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations")) +		goto close_bpf_object; +	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) +		goto close_bpf_object; +	if (!ASSERT_EQ(obj->bss->retval_value, -EOPNOTSUPP, "retval_value")) +		goto close_bpf_object; +	if (!ASSERT_EQ(obj->bss->ctx_retval_value, -EOPNOTSUPP, "ctx_retval_value")) +		goto close_bpf_object; + +close_bpf_object: +	bpf_link__destroy(link_get_retval); + +	cgroup_getset_retval_getsockopt__destroy(obj); +} + +static void test_getsockopt_override(int cgroup_fd, int sock_fd) +{ +	struct cgroup_getset_retval_getsockopt *obj; +	struct bpf_link *link_set_eisconn = NULL; +	int buf; +	socklen_t optlen = sizeof(buf); + +	obj = cgroup_getset_retval_getsockopt__open_and_load(); +	if (!ASSERT_OK_PTR(obj, "skel-load")) +		return; + +	/* Attach getsockopt that sets retval to -EISCONN. Assert that this +	 * overrides the value from the kernel. 
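On the getsockopt side the kernel's result is exposed to the program as ctx->retval, so the overriding program can plausibly be sketched as follows (illustrative, not the skeleton source; EISCONN from <errno.h>):

	SEC("cgroup/getsockopt")
	int set_eisconn(struct bpf_sockopt *ctx)
	{
		__sync_fetch_and_add(&invocations, 1);
		ctx->retval = -EISCONN;	/* replace the kernel's result */
		return 1;
	}
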
+	 */ +	link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn, +						      cgroup_fd); +	if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn")) +		goto close_bpf_object; + +	if (!ASSERT_ERR(getsockopt(sock_fd, SOL_CUSTOM, 0, +				   &buf, &optlen), "getsockopt")) +		goto close_bpf_object; +	if (!ASSERT_EQ(errno, EISCONN, "getsockopt-errno")) +		goto close_bpf_object; + +	if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations")) +		goto close_bpf_object; +	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) +		goto close_bpf_object; + +close_bpf_object: +	bpf_link__destroy(link_set_eisconn); + +	cgroup_getset_retval_getsockopt__destroy(obj); +} + +static void test_getsockopt_retval_sync(int cgroup_fd, int sock_fd) +{ +	struct cgroup_getset_retval_getsockopt *obj; +	struct bpf_link *link_set_eisconn = NULL, *link_clear_retval = NULL; +	struct bpf_link *link_get_retval = NULL; +	int buf; +	socklen_t optlen = sizeof(buf); + +	obj = cgroup_getset_retval_getsockopt__open_and_load(); +	if (!ASSERT_OK_PTR(obj, "skel-load")) +		return; + +	/* Attach getsockopt that sets retval to -EISCONN, and one that clears +	 * ctx retval. Assert that clearing the ctx retval is synced to the +	 * helper and clears any errors both from the kernel and BPF. +	 */ +	link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn, +						      cgroup_fd); +	if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn")) +		goto close_bpf_object; +	link_clear_retval = bpf_program__attach_cgroup(obj->progs.clear_retval, +						       cgroup_fd); +	if (!ASSERT_OK_PTR(link_clear_retval, "cg-attach-clear_retval")) +		goto close_bpf_object; +	link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, +						     cgroup_fd); +	if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) +		goto close_bpf_object; + +	if (!ASSERT_OK(getsockopt(sock_fd, SOL_CUSTOM, 0, +				  &buf, &optlen), "getsockopt")) +		goto close_bpf_object; + +	if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations")) +		goto close_bpf_object; +	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) +		goto close_bpf_object; +	if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value")) +		goto close_bpf_object; +	if (!ASSERT_EQ(obj->bss->ctx_retval_value, 0, "ctx_retval_value")) +		goto close_bpf_object; + +close_bpf_object: +	bpf_link__destroy(link_set_eisconn); +	bpf_link__destroy(link_clear_retval); +	bpf_link__destroy(link_get_retval); + +	cgroup_getset_retval_getsockopt__destroy(obj); +} + +void test_cgroup_getset_retval(void) +{ +	int cgroup_fd = -1; +	int sock_fd = -1; + +	cgroup_fd = test__join_cgroup("/cgroup_getset_retval"); +	if (!ASSERT_GE(cgroup_fd, 0, "cg-create")) +		goto close_fd; + +	sock_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 0, 0); +	if (!ASSERT_GE(sock_fd, 0, "start-server")) +		goto close_fd; + +	if (test__start_subtest("setsockopt-set")) +		test_setsockopt_set(cgroup_fd, sock_fd); + +	if (test__start_subtest("setsockopt-set_and_get")) +		test_setsockopt_set_and_get(cgroup_fd, sock_fd); + +	if (test__start_subtest("setsockopt-default_zero")) +		test_setsockopt_default_zero(cgroup_fd, sock_fd); + +	if (test__start_subtest("setsockopt-default_zero_and_set")) +		test_setsockopt_default_zero_and_set(cgroup_fd, sock_fd); + +	if (test__start_subtest("setsockopt-override")) +		test_setsockopt_override(cgroup_fd, sock_fd); + +	if (test__start_subtest("setsockopt-legacy_eperm")) +		test_setsockopt_legacy_eperm(cgroup_fd, sock_fd); + +	if 
(test__start_subtest("setsockopt-legacy_no_override")) +		test_setsockopt_legacy_no_override(cgroup_fd, sock_fd); + +	if (test__start_subtest("getsockopt-get")) +		test_getsockopt_get(cgroup_fd, sock_fd); + +	if (test__start_subtest("getsockopt-override")) +		test_getsockopt_override(cgroup_fd, sock_fd); + +	if (test__start_subtest("getsockopt-retval_sync")) +		test_getsockopt_retval_sync(cgroup_fd, sock_fd); + +close_fd: +	close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c index f73e6e36b74d..12f4395f18b3 100644 --- a/tools/testing/selftests/bpf/prog_tests/check_mtu.c +++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c @@ -79,28 +79,21 @@ static void test_check_mtu_run_xdp(struct test_check_mtu *skel,  				   struct bpf_program *prog,  				   __u32 mtu_expect)  { -	const char *prog_name = bpf_program__name(prog);  	int retval_expect = XDP_PASS;  	__u32 mtu_result = 0;  	char buf[256] = {}; -	int err; -	struct bpf_prog_test_run_attr tattr = { +	int err, prog_fd = bpf_program__fd(prog); +	LIBBPF_OPTS(bpf_test_run_opts, topts,  		.repeat = 1,  		.data_in = &pkt_v4,  		.data_size_in = sizeof(pkt_v4),  		.data_out = buf,  		.data_size_out = sizeof(buf), -		.prog_fd = bpf_program__fd(prog), -	}; - -	err = bpf_prog_test_run_xattr(&tattr); -	CHECK_ATTR(err != 0, "bpf_prog_test_run", -		   "prog_name:%s (err %d errno %d retval %d)\n", -		   prog_name, err, errno, tattr.retval); +	); -	CHECK(tattr.retval != retval_expect, "retval", -	      "progname:%s unexpected retval=%d expected=%d\n", -	      prog_name, tattr.retval, retval_expect); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "test_run"); +	ASSERT_EQ(topts.retval, retval_expect, "retval");  	/* Extract MTU that BPF-prog got */  	mtu_result = skel->bss->global_bpf_mtu_xdp; @@ -139,28 +132,21 @@ static void test_check_mtu_run_tc(struct test_check_mtu *skel,  				  struct bpf_program *prog,  				  __u32 mtu_expect)  { -	const char *prog_name = bpf_program__name(prog);  	int retval_expect = BPF_OK;  	__u32 mtu_result = 0;  	char buf[256] = {}; -	int err; -	struct bpf_prog_test_run_attr tattr = { -		.repeat = 1, +	int err, prog_fd = bpf_program__fd(prog); +	LIBBPF_OPTS(bpf_test_run_opts, topts,  		.data_in = &pkt_v4,  		.data_size_in = sizeof(pkt_v4),  		.data_out = buf,  		.data_size_out = sizeof(buf), -		.prog_fd = bpf_program__fd(prog), -	}; - -	err = bpf_prog_test_run_xattr(&tattr); -	CHECK_ATTR(err != 0, "bpf_prog_test_run", -		   "prog_name:%s (err %d errno %d retval %d)\n", -		   prog_name, err, errno, tattr.retval); +		.repeat = 1, +	); -	CHECK(tattr.retval != retval_expect, "retval", -	      "progname:%s unexpected retval=%d expected=%d\n", -	      prog_name, tattr.retval, retval_expect); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "test_run"); +	ASSERT_EQ(topts.retval, retval_expect, "retval");  	/* Extract MTU that BPF-prog got */  	mtu_result = skel->bss->global_bpf_mtu_tc; diff --git a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c index e075d03ab630..224f016b0a53 100644 --- a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c @@ -161,7 +161,7 @@ static socklen_t prepare_addr(struct sockaddr_storage *addr, int family)  	}  } -static bool was_decapsulated(struct bpf_prog_test_run_attr *tattr) +static bool was_decapsulated(struct bpf_test_run_opts *tattr)  {  	return 
tattr->data_size_out < tattr->data_size_in;  } @@ -367,12 +367,12 @@ static void close_fds(int *fds, int n)  static void test_cls_redirect_common(struct bpf_program *prog)  { -	struct bpf_prog_test_run_attr tattr = {}; +	LIBBPF_OPTS(bpf_test_run_opts, tattr);  	int families[] = { AF_INET, AF_INET6 };  	struct sockaddr_storage ss;  	struct sockaddr *addr;  	socklen_t slen; -	int i, j, err; +	int i, j, err, prog_fd;  	int servers[__NR_KIND][ARRAY_SIZE(families)] = {};  	int conns[__NR_KIND][ARRAY_SIZE(families)] = {};  	struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)]; @@ -394,7 +394,7 @@ static void test_cls_redirect_common(struct bpf_program *prog)  			goto cleanup;  	} -	tattr.prog_fd = bpf_program__fd(prog); +	prog_fd = bpf_program__fd(prog);  	for (i = 0; i < ARRAY_SIZE(tests); i++) {  		struct test_cfg *test = &tests[i]; @@ -415,7 +415,7 @@ static void test_cls_redirect_common(struct bpf_program *prog)  			if (CHECK_FAIL(!tattr.data_size_in))  				continue; -			err = bpf_prog_test_run_xattr(&tattr); +			err = bpf_prog_test_run_opts(prog_fd, &tattr);  			if (CHECK_FAIL(err))  				continue; diff --git a/tools/testing/selftests/bpf/prog_tests/core_kern.c b/tools/testing/selftests/bpf/prog_tests/core_kern.c index 561c5185d886..6a5a1c019a5d 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_kern.c +++ b/tools/testing/selftests/bpf/prog_tests/core_kern.c @@ -7,8 +7,22 @@  void test_core_kern_lskel(void)  {  	struct core_kern_lskel *skel; +	int link_fd;  	skel = core_kern_lskel__open_and_load(); -	ASSERT_OK_PTR(skel, "open_and_load"); +	if (!ASSERT_OK_PTR(skel, "open_and_load")) +		return; + +	link_fd = core_kern_lskel__core_relo_proto__attach(skel); +	if (!ASSERT_GT(link_fd, 0, "attach(core_relo_proto)")) +		goto cleanup; + +	/* trigger tracepoints */ +	usleep(1); +	ASSERT_TRUE(skel->bss->proto_out[0], "bpf_core_type_exists"); +	ASSERT_FALSE(skel->bss->proto_out[1], "!bpf_core_type_exists"); +	ASSERT_TRUE(skel->bss->proto_out[2], "bpf_core_type_exists. 
nested"); + +cleanup:  	core_kern_lskel__destroy(skel);  } diff --git a/tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c b/tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c new file mode 100644 index 000000000000..04cc145bc26a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "test_progs.h" +#include "core_kern_overflow.lskel.h" + +void test_core_kern_overflow_lskel(void) +{ +	struct core_kern_overflow_lskel *skel; + +	skel = core_kern_overflow_lskel__open_and_load(); +	if (!ASSERT_NULL(skel, "open_and_load")) +		core_kern_overflow_lskel__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index b8bdd1c3efca..f28f75aa9154 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -2,6 +2,7 @@  #include <test_progs.h>  #include "progs/core_reloc_types.h"  #include "bpf_testmod/bpf_testmod.h" +#include <linux/limits.h>  #include <sys/mman.h>  #include <sys/syscall.h>  #include <bpf/btf.h> @@ -511,7 +512,7 @@ static int __trigger_module_test_read(const struct core_reloc_test_case *test)  } -static struct core_reloc_test_case test_cases[] = { +static const struct core_reloc_test_case test_cases[] = {  	/* validate we can find kernel image and use its BTF for relocs */  	{  		.case_name = "kernel", @@ -836,13 +837,27 @@ static size_t roundup_page(size_t sz)  	return (sz + page_size - 1) / page_size * page_size;  } -void test_core_reloc(void) +static int run_btfgen(const char *src_btf, const char *dst_btf, const char *objpath) +{ +	char command[4096]; +	int n; + +	n = snprintf(command, sizeof(command), +		     "./bpftool gen min_core_btf %s %s %s", +		     src_btf, dst_btf, objpath); +	if (n < 0 || n >= sizeof(command)) +		return -1; + +	return system(command); +} + +static void run_core_reloc_tests(bool use_btfgen)  {  	const size_t mmap_sz = roundup_page(sizeof(struct data));  	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts); -	struct core_reloc_test_case *test_case; +	struct core_reloc_test_case *test_case, test_case_copy;  	const char *tp_name, *probe_name; -	int err, i, equal; +	int err, i, equal, fd;  	struct bpf_link *link = NULL;  	struct bpf_map *data_map;  	struct bpf_program *prog; @@ -854,7 +869,11 @@ void test_core_reloc(void)  	my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32);  	for (i = 0; i < ARRAY_SIZE(test_cases); i++) { -		test_case = &test_cases[i]; +		char btf_file[] = "/tmp/core_reloc.btf.XXXXXX"; + +		test_case_copy = test_cases[i]; +		test_case = &test_case_copy; +  		if (!test__start_subtest(test_case->case_name))  			continue; @@ -863,6 +882,26 @@ void test_core_reloc(void)  			continue;  		} +		/* generate a "minimal" BTF file and use it as source */ +		if (use_btfgen) { + +			if (!test_case->btf_src_file || test_case->fails) { +				test__skip(); +				continue; +			} + +			fd = mkstemp(btf_file); +			if (!ASSERT_GE(fd, 0, "btf_tmp")) +				continue; +			close(fd); /* we only need the path */ +			err = run_btfgen(test_case->btf_src_file, btf_file, +					 test_case->bpf_obj_file); +			if (!ASSERT_OK(err, "run_btfgen")) +				continue; + +			test_case->btf_src_file = btf_file; +		} +  		if (test_case->setup) {  			err = test_case->setup(test_case);  			if (CHECK(err, "test_setup", "test #%d setup failed: %d\n", i, err)) @@ -872,7 +911,7 @@ void test_core_reloc(void)  		if 
(test_case->btf_src_file) {  			err = access(test_case->btf_src_file, R_OK);  			if (!ASSERT_OK(err, "btf_src_file")) -				goto cleanup; +				continue;  		}  		open_opts.btf_custom_path = test_case->btf_src_file; @@ -954,8 +993,20 @@ cleanup:  			CHECK_FAIL(munmap(mmap_data, mmap_sz));  			mmap_data = NULL;  		} +		if (use_btfgen) +			remove(test_case->btf_src_file);  		bpf_link__destroy(link);  		link = NULL;  		bpf_object__close(obj);  	}  } + +void test_core_reloc(void) +{ +	run_core_reloc_tests(false); +} + +void test_core_reloc_btfgen(void) +{ +	run_core_reloc_tests(true); +} diff --git a/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c b/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c new file mode 100644 index 000000000000..b2dfc5954aea --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Facebook */ + +#include <test_progs.h> +#include "test_custom_sec_handlers.skel.h" + +#define COOKIE_ABC1 1 +#define COOKIE_ABC2 2 +#define COOKIE_CUSTOM 3 +#define COOKIE_FALLBACK 4 +#define COOKIE_KPROBE 5 + +static int custom_setup_prog(struct bpf_program *prog, long cookie) +{ +	if (cookie == COOKIE_ABC1) +		bpf_program__set_autoload(prog, false); + +	return 0; +} + +static int custom_prepare_load_prog(struct bpf_program *prog, +				    struct bpf_prog_load_opts *opts, long cookie) +{ +	if (cookie == COOKIE_FALLBACK) +		opts->prog_flags |= BPF_F_SLEEPABLE; +	else if (cookie == COOKIE_ABC1) +		ASSERT_FALSE(true, "unexpected preload for abc"); + +	return 0; +} + +static int custom_attach_prog(const struct bpf_program *prog, long cookie, +			      struct bpf_link **link) +{ +	switch (cookie) { +	case COOKIE_ABC2: +		*link = bpf_program__attach_raw_tracepoint(prog, "sys_enter"); +		return libbpf_get_error(*link); +	case COOKIE_CUSTOM: +		*link = bpf_program__attach_tracepoint(prog, "syscalls", "sys_enter_nanosleep"); +		return libbpf_get_error(*link); +	case COOKIE_KPROBE: +	case COOKIE_FALLBACK: +		/* no auto-attach for SEC("xyz") and SEC("kprobe") */ +		*link = NULL; +		return 0; +	default: +		ASSERT_FALSE(true, "unexpected cookie"); +		return -EINVAL; +	} +} + +static int abc1_id; +static int abc2_id; +static int custom_id; +static int fallback_id; +static int kprobe_id; + +__attribute__((constructor)) +static void register_sec_handlers(void) +{ +	LIBBPF_OPTS(libbpf_prog_handler_opts, abc1_opts, +		.cookie = COOKIE_ABC1, +		.prog_setup_fn = custom_setup_prog, +		.prog_prepare_load_fn = custom_prepare_load_prog, +		.prog_attach_fn = NULL, +	); +	LIBBPF_OPTS(libbpf_prog_handler_opts, abc2_opts, +		.cookie = COOKIE_ABC2, +		.prog_setup_fn = custom_setup_prog, +		.prog_prepare_load_fn = custom_prepare_load_prog, +		.prog_attach_fn = custom_attach_prog, +	); +	LIBBPF_OPTS(libbpf_prog_handler_opts, custom_opts, +		.cookie = COOKIE_CUSTOM, +		.prog_setup_fn = NULL, +		.prog_prepare_load_fn = NULL, +		.prog_attach_fn = custom_attach_prog, +	); + +	abc1_id = libbpf_register_prog_handler("abc", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc1_opts); +	abc2_id = libbpf_register_prog_handler("abc/", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc2_opts); +	custom_id = libbpf_register_prog_handler("custom+", BPF_PROG_TYPE_TRACEPOINT, 0, &custom_opts); +} + +__attribute__((destructor)) +static void unregister_sec_handlers(void) +{ +	libbpf_unregister_prog_handler(abc1_id); +	libbpf_unregister_prog_handler(abc2_id); +	libbpf_unregister_prog_handler(custom_id); +} + +void test_custom_sec_handlers(void) 
+{ +	LIBBPF_OPTS(libbpf_prog_handler_opts, opts, +		.prog_setup_fn = custom_setup_prog, +		.prog_prepare_load_fn = custom_prepare_load_prog, +		.prog_attach_fn = custom_attach_prog, +	); +	struct test_custom_sec_handlers* skel; +	int err; + +	ASSERT_GT(abc1_id, 0, "abc1_id"); +	ASSERT_GT(abc2_id, 0, "abc2_id"); +	ASSERT_GT(custom_id, 0, "custom_id"); + +	/* override libbpf's handle of SEC("kprobe/...") but also allow pure +	 * SEC("kprobe") due to "kprobe+" specifier. Register it as +	 * TRACEPOINT, just for fun. +	 */ +	opts.cookie = COOKIE_KPROBE; +	kprobe_id = libbpf_register_prog_handler("kprobe+", BPF_PROG_TYPE_TRACEPOINT, 0, &opts); +	/* fallback treats everything as BPF_PROG_TYPE_SYSCALL program to test +	 * setting custom BPF_F_SLEEPABLE bit in preload handler +	 */ +	opts.cookie = COOKIE_FALLBACK; +	fallback_id = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_SYSCALL, 0, &opts); + +	if (!ASSERT_GT(fallback_id, 0, "fallback_id") /* || !ASSERT_GT(kprobe_id, 0, "kprobe_id")*/) { +		if (fallback_id > 0) +			libbpf_unregister_prog_handler(fallback_id); +		if (kprobe_id > 0) +			libbpf_unregister_prog_handler(kprobe_id); +		return; +	} + +	/* open skeleton and validate assumptions */ +	skel = test_custom_sec_handlers__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__type(skel->progs.abc1), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc1_type"); +	ASSERT_FALSE(bpf_program__autoload(skel->progs.abc1), "abc1_autoload"); + +	ASSERT_EQ(bpf_program__type(skel->progs.abc2), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc2_type"); +	ASSERT_EQ(bpf_program__type(skel->progs.custom1), BPF_PROG_TYPE_TRACEPOINT, "custom1_type"); +	ASSERT_EQ(bpf_program__type(skel->progs.custom2), BPF_PROG_TYPE_TRACEPOINT, "custom2_type"); +	ASSERT_EQ(bpf_program__type(skel->progs.kprobe1), BPF_PROG_TYPE_TRACEPOINT, "kprobe1_type"); +	ASSERT_EQ(bpf_program__type(skel->progs.xyz), BPF_PROG_TYPE_SYSCALL, "xyz_type"); + +	skel->rodata->my_pid = getpid(); + +	/* now attempt to load everything */ +	err = test_custom_sec_handlers__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	/* now try to auto-attach everything */ +	err = test_custom_sec_handlers__attach(skel); +	if (!ASSERT_OK(err, "skel_attach")) +		goto cleanup; + +	skel->links.xyz = bpf_program__attach(skel->progs.kprobe1); +	ASSERT_EQ(errno, EOPNOTSUPP, "xyz_attach_err"); +	ASSERT_ERR_PTR(skel->links.xyz, "xyz_attach"); + +	/* trigger programs */ +	usleep(1); + +	/* SEC("abc") is set to not auto-loaded */ +	ASSERT_FALSE(skel->bss->abc1_called, "abc1_called"); +	ASSERT_TRUE(skel->bss->abc2_called, "abc2_called"); +	ASSERT_TRUE(skel->bss->custom1_called, "custom1_called"); +	ASSERT_TRUE(skel->bss->custom2_called, "custom2_called"); +	/* SEC("kprobe") shouldn't be auto-attached */ +	ASSERT_FALSE(skel->bss->kprobe1_called, "kprobe1_called"); +	/* SEC("xyz") shouldn't be auto-attached */ +	ASSERT_FALSE(skel->bss->xyz_called, "xyz_called"); + +cleanup: +	test_custom_sec_handlers__destroy(skel); + +	ASSERT_OK(libbpf_unregister_prog_handler(fallback_id), "unregister_fallback"); +	ASSERT_OK(libbpf_unregister_prog_handler(kprobe_id), "unregister_kprobe"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c index cbaa44ffb8c6..c11832657d2b 100644 --- a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c +++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c @@ -2,6 +2,7 @@  /* Copyright (C) 2021. 
Huawei Technologies Co., Ltd */  #include <test_progs.h>  #include "dummy_st_ops.skel.h" +#include "trace_dummy_st_ops.skel.h"  /* Need to keep consistent with definition in include/linux/bpf.h */  struct bpf_dummy_ops_state { @@ -26,10 +27,10 @@ static void test_dummy_st_ops_attach(void)  static void test_dummy_init_ret_value(void)  {  	__u64 args[1] = {0}; -	struct bpf_prog_test_run_attr attr = { -		.ctx_size_in = sizeof(args), +	LIBBPF_OPTS(bpf_test_run_opts, attr,  		.ctx_in = args, -	}; +		.ctx_size_in = sizeof(args), +	);  	struct dummy_st_ops *skel;  	int fd, err; @@ -38,8 +39,7 @@ static void test_dummy_init_ret_value(void)  		return;  	fd = bpf_program__fd(skel->progs.test_1); -	attr.prog_fd = fd; -	err = bpf_prog_test_run_xattr(&attr); +	err = bpf_prog_test_run_opts(fd, &attr);  	ASSERT_OK(err, "test_run");  	ASSERT_EQ(attr.retval, 0xf2f3f4f5, "test_ret"); @@ -53,10 +53,11 @@ static void test_dummy_init_ptr_arg(void)  		.val = exp_retval,  	};  	__u64 args[1] = {(unsigned long)&in_state}; -	struct bpf_prog_test_run_attr attr = { -		.ctx_size_in = sizeof(args), +	LIBBPF_OPTS(bpf_test_run_opts, attr,  		.ctx_in = args, -	}; +		.ctx_size_in = sizeof(args), +	); +	struct trace_dummy_st_ops *trace_skel;  	struct dummy_st_ops *skel;  	int fd, err; @@ -65,22 +66,42 @@ static void test_dummy_init_ptr_arg(void)  		return;  	fd = bpf_program__fd(skel->progs.test_1); -	attr.prog_fd = fd; -	err = bpf_prog_test_run_xattr(&attr); + +	trace_skel = trace_dummy_st_ops__open(); +	if (!ASSERT_OK_PTR(trace_skel, "trace_dummy_st_ops__open")) +		goto done; + +	err = bpf_program__set_attach_target(trace_skel->progs.fentry_test_1, +					     fd, "test_1"); +	if (!ASSERT_OK(err, "set_attach_target(fentry_test_1)")) +		goto done; + +	err = trace_dummy_st_ops__load(trace_skel); +	if (!ASSERT_OK(err, "load(trace_skel)")) +		goto done; + +	err = trace_dummy_st_ops__attach(trace_skel); +	if (!ASSERT_OK(err, "attach(trace_skel)")) +		goto done; + +	err = bpf_prog_test_run_opts(fd, &attr);  	ASSERT_OK(err, "test_run");  	ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret");  	ASSERT_EQ(attr.retval, exp_retval, "test_ret"); +	ASSERT_EQ(trace_skel->bss->val, exp_retval, "fentry_val"); +done:  	dummy_st_ops__destroy(skel); +	trace_dummy_st_ops__destroy(trace_skel);  }  static void test_dummy_multiple_args(void)  {  	__u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL}; -	struct bpf_prog_test_run_attr attr = { -		.ctx_size_in = sizeof(args), +	LIBBPF_OPTS(bpf_test_run_opts, attr,  		.ctx_in = args, -	}; +		.ctx_size_in = sizeof(args), +	);  	struct dummy_st_ops *skel;  	int fd, err;  	size_t i; @@ -91,8 +112,7 @@ static void test_dummy_multiple_args(void)  		return;  	fd = bpf_program__fd(skel->progs.test_2); -	attr.prog_fd = fd; -	err = bpf_prog_test_run_xattr(&attr); +	err = bpf_prog_test_run_opts(fd, &attr);  	ASSERT_OK(err, "test_run");  	for (i = 0; i < ARRAY_SIZE(args); i++) {  		snprintf(name, sizeof(name), "arg %zu", i); diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c index 4374ac8a8a91..130f5b82d2e6 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c @@ -9,38 +9,34 @@ void test_fentry_fexit(void)  	struct fentry_test_lskel *fentry_skel = NULL;  	struct fexit_test_lskel *fexit_skel = NULL;  	__u64 *fentry_res, *fexit_res; -	__u32 duration = 0, retval;  	int err, prog_fd, i; +	LIBBPF_OPTS(bpf_test_run_opts, topts);  	fentry_skel = 
fentry_test_lskel__open_and_load(); -	if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n")) +	if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load"))  		goto close_prog;  	fexit_skel = fexit_test_lskel__open_and_load(); -	if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n")) +	if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load"))  		goto close_prog;  	err = fentry_test_lskel__attach(fentry_skel); -	if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err)) +	if (!ASSERT_OK(err, "fentry_attach"))  		goto close_prog;  	err = fexit_test_lskel__attach(fexit_skel); -	if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err)) +	if (!ASSERT_OK(err, "fexit_attach"))  		goto close_prog;  	prog_fd = fexit_skel->progs.test1.prog_fd; -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); -	CHECK(err || retval, "ipv6", -	      "err %d errno %d retval %d duration %d\n", -	      err, errno, retval, duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "ipv6 test_run"); +	ASSERT_OK(topts.retval, "ipv6 test retval");  	fentry_res = (__u64 *)fentry_skel->bss;  	fexit_res = (__u64 *)fexit_skel->bss;  	printf("%lld\n", fentry_skel->bss->test1_result);  	for (i = 0; i < 8; i++) { -		CHECK(fentry_res[i] != 1, "result", -		      "fentry_test%d failed err %lld\n", i + 1, fentry_res[i]); -		CHECK(fexit_res[i] != 1, "result", -		      "fexit_test%d failed err %lld\n", i + 1, fexit_res[i]); +		ASSERT_EQ(fentry_res[i], 1, "fentry result"); +		ASSERT_EQ(fexit_res[i], 1, "fexit result");  	}  close_prog: diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c index 12921b3850d2..c0d1d61d5f66 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c @@ -6,9 +6,9 @@  static int fentry_test(struct fentry_test_lskel *fentry_skel)  {  	int err, prog_fd, i; -	__u32 duration = 0, retval;  	int link_fd;  	__u64 *result; +	LIBBPF_OPTS(bpf_test_run_opts, topts);  	err = fentry_test_lskel__attach(fentry_skel);  	if (!ASSERT_OK(err, "fentry_attach")) @@ -20,10 +20,9 @@ static int fentry_test(struct fentry_test_lskel *fentry_skel)  		return -1;  	prog_fd = fentry_skel->progs.test1.prog_fd; -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "test_run"); -	ASSERT_EQ(retval, 0, "test_run"); +	ASSERT_EQ(topts.retval, 0, "test_run");  	result = (__u64 *)fentry_skel->bss;  	for (i = 0; i < sizeof(*fentry_skel->bss) / sizeof(__u64); i++) { diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c index c52f99f6a909..d9aad15e0d24 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -58,12 +58,17 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,  				      test_cb cb)  {  	struct bpf_object *obj = NULL, *tgt_obj; -	__u32 retval, tgt_prog_id, info_len; +	__u32 tgt_prog_id, info_len;  	struct bpf_prog_info prog_info = {};  	struct bpf_program **prog = NULL, *p;  	struct bpf_link **link = NULL;  	int err, tgt_fd, i;  	struct btf *btf; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v6, +		.data_size_in = sizeof(pkt_v6), +		.repeat = 1, +	);  	err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,  			    &tgt_obj, &tgt_fd); @@ -132,7 +137,7 @@ 
static void test_fexit_bpf2bpf_common(const char *obj_file,  					     &link_info, &info_len);  		ASSERT_OK(err, "link_fd_get_info");  		ASSERT_EQ(link_info.tracing.attach_type, -			  bpf_program__get_expected_attach_type(prog[i]), +			  bpf_program__expected_attach_type(prog[i]),  			  "link_attach_type");  		ASSERT_EQ(link_info.tracing.target_obj_id, tgt_prog_id, "link_tgt_obj_id");  		ASSERT_EQ(link_info.tracing.target_btf_id, btf_id, "link_tgt_btf_id"); @@ -147,10 +152,9 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,  	if (!run_prog)  		goto close_prog; -	err = bpf_prog_test_run(tgt_fd, 1, &pkt_v6, sizeof(pkt_v6), -				NULL, NULL, &retval, NULL); +	err = bpf_prog_test_run_opts(tgt_fd, &topts);  	ASSERT_OK(err, "prog_run"); -	ASSERT_EQ(retval, 0, "prog_run_ret"); +	ASSERT_EQ(topts.retval, 0, "prog_run_ret");  	if (check_data_map(obj, prog_cnt, false))  		goto close_prog; @@ -225,29 +229,31 @@ static int test_second_attach(struct bpf_object *obj)  	const char *tgt_obj_file = "./test_pkt_access.o";  	struct bpf_program *prog = NULL;  	struct bpf_object *tgt_obj; -	__u32 duration = 0, retval;  	struct bpf_link *link;  	int err = 0, tgt_fd; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v6, +		.data_size_in = sizeof(pkt_v6), +		.repeat = 1, +	);  	prog = bpf_object__find_program_by_name(obj, prog_name); -	if (CHECK(!prog, "find_prog", "prog %s not found\n", prog_name)) +	if (!ASSERT_OK_PTR(prog, "find_prog"))  		return -ENOENT;  	err = bpf_prog_test_load(tgt_obj_file, BPF_PROG_TYPE_UNSPEC,  			    &tgt_obj, &tgt_fd); -	if (CHECK(err, "second_prog_load", "file %s err %d errno %d\n", -		  tgt_obj_file, err, errno)) +	if (!ASSERT_OK(err, "second_prog_load"))  		return err;  	link = bpf_program__attach_freplace(prog, tgt_fd, tgt_name);  	if (!ASSERT_OK_PTR(link, "second_link"))  		goto out; -	err = bpf_prog_test_run(tgt_fd, 1, &pkt_v6, sizeof(pkt_v6), -				NULL, NULL, &retval, &duration); -	if (CHECK(err || retval, "ipv6", -		  "err %d errno %d retval %d duration %d\n", -		  err, errno, retval, duration)) +	err = bpf_prog_test_run_opts(tgt_fd, &topts); +	if (!ASSERT_OK(err, "ipv6 test_run")) +		goto out; +	if (!ASSERT_OK(topts.retval, "ipv6 retval"))  		goto out;  	err = check_data_map(obj, 1, true); diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c index e4cede6b4b2d..3ee2107bbf7a 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c @@ -10,9 +10,7 @@ void test_fexit_stress(void)  	char test_skb[128] = {};  	int fexit_fd[CNT] = {};  	int link_fd[CNT] = {}; -	__u32 duration = 0;  	char error[4096]; -	__u32 prog_ret;  	int err, i, filter_fd;  	const struct bpf_insn trace_program[] = { @@ -36,9 +34,15 @@ void test_fexit_stress(void)  		.log_size = sizeof(error),  	); +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = test_skb, +		.data_size_in = sizeof(test_skb), +		.repeat = 1, +	); +  	err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1",  					 trace_opts.expected_attach_type); -	if (CHECK(err <= 0, "find_vmlinux_btf_id", "failed: %d\n", err)) +	if (!ASSERT_GT(err, 0, "find_vmlinux_btf_id"))  		goto out;  	trace_opts.attach_btf_id = err; @@ -47,24 +51,20 @@ void test_fexit_stress(void)  					    trace_program,  					    sizeof(trace_program) / sizeof(struct bpf_insn),  					    &trace_opts); -		if (CHECK(fexit_fd[i] < 0, "fexit loaded", -			  "failed: %d errno %d\n", fexit_fd[i], errno)) +		if (!ASSERT_GE(fexit_fd[i], 0, 
"fexit load"))  			goto out;  		link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]); -		if (CHECK(link_fd[i] < 0, "fexit attach failed", -			  "prog %d failed: %d err %d\n", i, link_fd[i], errno)) +		if (!ASSERT_GE(link_fd[i], 0, "fexit attach"))  			goto out;  	}  	filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",  				  skb_program, sizeof(skb_program) / sizeof(struct bpf_insn),  				  &skb_opts); -	if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n", -		  filter_fd, errno)) +	if (!ASSERT_GE(filter_fd, 0, "test_program_loaded"))  		goto out; -	err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, -				0, &prog_ret, 0); +	err = bpf_prog_test_run_opts(filter_fd, &topts);  	close(filter_fd);  	CHECK_FAIL(err);  out: diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c index d4887d8bb396..101b7343036b 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -6,9 +6,9 @@  static int fexit_test(struct fexit_test_lskel *fexit_skel)  {  	int err, prog_fd, i; -	__u32 duration = 0, retval;  	int link_fd;  	__u64 *result; +	LIBBPF_OPTS(bpf_test_run_opts, topts);  	err = fexit_test_lskel__attach(fexit_skel);  	if (!ASSERT_OK(err, "fexit_attach")) @@ -20,10 +20,9 @@ static int fexit_test(struct fexit_test_lskel *fexit_skel)  		return -1;  	prog_fd = fexit_skel->progs.test1.prog_fd; -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "test_run"); -	ASSERT_EQ(retval, 0, "test_run"); +	ASSERT_EQ(topts.retval, 0, "test_run");  	result = (__u64 *)fexit_skel->bss;  	for (i = 0; i < sizeof(*fexit_skel->bss) / sizeof(__u64); i++) { diff --git a/tools/testing/selftests/bpf/prog_tests/find_vma.c b/tools/testing/selftests/bpf/prog_tests/find_vma.c index b74b3c0c555a..5165b38f0e59 100644 --- a/tools/testing/selftests/bpf/prog_tests/find_vma.c +++ b/tools/testing/selftests/bpf/prog_tests/find_vma.c @@ -7,12 +7,14 @@  #include "find_vma_fail1.skel.h"  #include "find_vma_fail2.skel.h" -static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret) +static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret, bool need_test)  { -	ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec"); -	ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret"); -	ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret"); -	ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs"); +	if (need_test) { +		ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec"); +		ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret"); +		ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret"); +		ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs"); +	}  	skel->bss->found_vm_exec = 0;  	skel->data->find_addr_ret = -1; @@ -30,17 +32,26 @@ static int open_pe(void)  	attr.type = PERF_TYPE_HARDWARE;  	attr.config = PERF_COUNT_HW_CPU_CYCLES;  	attr.freq = 1; -	attr.sample_freq = 4000; +	attr.sample_freq = 1000;  	pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);  	return pfd >= 0 ? 
pfd : -errno;  } +static bool find_vma_pe_condition(struct find_vma *skel) +{ +	return skel->bss->found_vm_exec == 0 || +		skel->data->find_addr_ret != 0 || +		skel->data->find_zero_ret == -1 || +		strcmp(skel->bss->d_iname, "test_progs") != 0; +} +  static void test_find_vma_pe(struct find_vma *skel)  {  	struct bpf_link *link = NULL;  	volatile int j = 0;  	int pfd, i; +	const int one_bn = 1000000000;  	pfd = open_pe();  	if (pfd < 0) { @@ -57,10 +68,10 @@ static void test_find_vma_pe(struct find_vma *skel)  	if (!ASSERT_OK_PTR(link, "attach_perf_event"))  		goto cleanup; -	for (i = 0; i < 1000000; ++i) +	for (i = 0; i < one_bn && find_vma_pe_condition(skel); ++i)  		++j; -	test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */); +	test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */, i == one_bn);  cleanup:  	bpf_link__destroy(link);  	close(pfd); @@ -75,7 +86,7 @@ static void test_find_vma_kprobe(struct find_vma *skel)  		return;  	getpgid(skel->bss->target_pid); -	test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */); +	test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */, true);  }  static void test_illegal_write_vma(void) @@ -108,7 +119,6 @@ void serial_test_find_vma(void)  	skel->bss->addr = (__u64)(uintptr_t)test_find_vma_pe;  	test_find_vma_pe(skel); -	usleep(100000); /* allow the irq_work to finish */  	test_find_vma_kprobe(skel);  	find_vma__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index ac54e3f91d42..0c1661ea996e 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -13,8 +13,9 @@  #endif  #define CHECK_FLOW_KEYS(desc, got, expected)				\ -	CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0,		\ +	_CHECK(memcmp(&got, &expected, sizeof(got)) != 0,		\  	      desc,							\ +	      topts.duration,						\  	      "nhoff=%u/%u "						\  	      "thoff=%u/%u "						\  	      "addr_proto=0x%x/0x%x "					\ @@ -457,7 +458,7 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)  	if (map_fd < 0)  		return -1; -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);  		prog = bpf_object__find_program_by_name(obj, prog_name); @@ -487,7 +488,7 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)  		/* Keep in sync with 'flags' from eth_get_headlen. 
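The flag being kept in sync here is consumed by the dissector program through its bpf_flow_keys context. A hedged sketch of that pattern (the actual dissector logic lives in progs/bpf_flow.c; this is an editor's illustration, not a copy of it, using BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG and BPF_OK from linux/bpf.h):

	SEC("flow_dissector")
	int _dissect(struct __sk_buff *skb)
	{
		struct bpf_flow_keys *keys = skb->flow_keys;

		/* without PARSE_1ST_FRAG, stop at the network layer for
		 * fragments instead of parsing the transport header
		 */
		if (keys->is_frag &&
		    !(keys->flags & BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
			return BPF_OK;

		/* ... otherwise continue dissecting the transport header ... */
		return BPF_OK;
	}
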
*/  		__u32 eth_get_headlen_flags =  			BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG; -		struct bpf_prog_test_run_attr tattr = {}; +		LIBBPF_OPTS(bpf_test_run_opts, topts);  		struct bpf_flow_keys flow_keys = {};  		__u32 key = (__u32)(tests[i].keys.sport) << 16 |  			    tests[i].keys.dport; @@ -503,13 +504,12 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)  		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);  		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys); -		CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err); +		ASSERT_OK(err, "bpf_map_lookup_elem"); -		CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);  		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);  		err = bpf_map_delete_elem(keys_fd, &key); -		CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err); +		ASSERT_OK(err, "bpf_map_delete_elem");  	}  } @@ -573,27 +573,24 @@ void test_flow_dissector(void)  	for (i = 0; i < ARRAY_SIZE(tests); i++) {  		struct bpf_flow_keys flow_keys; -		struct bpf_prog_test_run_attr tattr = { -			.prog_fd = prog_fd, +		LIBBPF_OPTS(bpf_test_run_opts, topts,  			.data_in = &tests[i].pkt,  			.data_size_in = sizeof(tests[i].pkt),  			.data_out = &flow_keys, -		}; +		);  		static struct bpf_flow_keys ctx = {};  		if (tests[i].flags) { -			tattr.ctx_in = &ctx; -			tattr.ctx_size_in = sizeof(ctx); +			topts.ctx_in = &ctx; +			topts.ctx_size_in = sizeof(ctx);  			ctx.flags = tests[i].flags;  		} -		err = bpf_prog_test_run_xattr(&tattr); -		CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) || -			   err || tattr.retval != 1, -			   tests[i].name, -			   "err %d errno %d retval %d duration %d size %u/%zu\n", -			   err, errno, tattr.retval, tattr.duration, -			   tattr.data_size_out, sizeof(flow_keys)); +		err = bpf_prog_test_run_opts(prog_fd, &topts); +		ASSERT_OK(err, "test_run"); +		ASSERT_EQ(topts.retval, 1, "test_run retval"); +		ASSERT_EQ(topts.data_size_out, sizeof(flow_keys), +			  "test_run data_size_out");  		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);  	} diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c index 93ac3f28226c..36afb409c25f 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c @@ -5,7 +5,6 @@  void serial_test_flow_dissector_load_bytes(void)  {  	struct bpf_flow_keys flow_keys; -	__u32 duration = 0, retval, size;  	struct bpf_insn prog[] = {  		// BPF_REG_1 - 1st argument: context  		// BPF_REG_2 - 2nd argument: offset, start at first byte @@ -27,22 +26,25 @@ void serial_test_flow_dissector_load_bytes(void)  		BPF_EXIT_INSN(),  	};  	int fd, err; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.data_out = &flow_keys, +		.data_size_out = sizeof(flow_keys), +		.repeat = 1, +	);  	/* make sure bpf_skb_load_bytes is not allowed from skb-less context  	 */  	fd = bpf_test_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog,  			      ARRAY_SIZE(prog), "GPL", 0, NULL, 0); -	CHECK(fd < 0, -	      "flow_dissector-bpf_skb_load_bytes-load", -	      "fd %d errno %d\n", -	      fd, errno); +	ASSERT_GE(fd, 0, "bpf_test_load_program good fd"); -	err = bpf_prog_test_run(fd, 1, &pkt_v4, sizeof(pkt_v4), -				&flow_keys, &size, &retval, &duration); -	CHECK(size != sizeof(flow_keys) || err || retval != 1, -	      "flow_dissector-bpf_skb_load_bytes", -	      "err %d errno %d retval %d 
duration %d size %u/%zu\n", -	      err, errno, retval, duration, size, sizeof(flow_keys)); +	err = bpf_prog_test_run_opts(fd, &topts); +	ASSERT_OK(err, "test_run"); +	ASSERT_EQ(topts.data_size_out, sizeof(flow_keys), +		  "test_run data_size_out"); +	ASSERT_EQ(topts.retval, 1, "test_run retval");  	if (fd >= -1)  		close(fd); diff --git a/tools/testing/selftests/bpf/prog_tests/for_each.c b/tools/testing/selftests/bpf/prog_tests/for_each.c index 68eb12a287d4..044df13ee069 100644 --- a/tools/testing/selftests/bpf/prog_tests/for_each.c +++ b/tools/testing/selftests/bpf/prog_tests/for_each.c @@ -12,8 +12,13 @@ static void test_hash_map(void)  	int i, err, hashmap_fd, max_entries, percpu_map_fd;  	struct for_each_hash_map_elem *skel;  	__u64 *percpu_valbuf = NULL; -	__u32 key, num_cpus, retval; +	__u32 key, num_cpus;  	__u64 val; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	);  	skel = for_each_hash_map_elem__open_and_load();  	if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load")) @@ -42,11 +47,10 @@ static void test_hash_map(void)  	if (!ASSERT_OK(err, "percpu_map_update"))  		goto out; -	err = bpf_prog_test_run(bpf_program__fd(skel->progs.test_pkt_access), -				1, &pkt_v4, sizeof(pkt_v4), NULL, NULL, -				&retval, &duration); -	if (CHECK(err || retval, "ipv4", "err %d errno %d retval %d\n", -		  err, errno, retval)) +	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts); +	duration = topts.duration; +	if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n", +		  err, errno, topts.retval))  		goto out;  	ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output"); @@ -69,11 +73,16 @@ out:  static void test_array_map(void)  { -	__u32 key, num_cpus, max_entries, retval; +	__u32 key, num_cpus, max_entries;  	int i, arraymap_fd, percpu_map_fd, err;  	struct for_each_array_map_elem *skel;  	__u64 *percpu_valbuf = NULL;  	__u64 val, expected_total; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	);  	skel = for_each_array_map_elem__open_and_load();  	if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load")) @@ -106,11 +115,10 @@ static void test_array_map(void)  	if (!ASSERT_OK(err, "percpu_map_update"))  		goto out; -	err = bpf_prog_test_run(bpf_program__fd(skel->progs.test_pkt_access), -				1, &pkt_v4, sizeof(pkt_v4), NULL, NULL, -				&retval, &duration); -	if (CHECK(err || retval, "ipv4", "err %d errno %d retval %d\n", -		  err, errno, retval)) +	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts); +	duration = topts.duration; +	if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n", +		  err, errno, topts.retval))  		goto out;  	ASSERT_EQ(skel->bss->arraymap_output, expected_total, "array_output"); diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c index 85c427119fe9..28cf63963cb7 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c +++ b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c @@ -5,8 +5,8 @@  void test_get_func_args_test(void)  {  	struct get_func_args_test *skel = NULL; -	__u32 duration = 0, retval;  	int err, prog_fd; +	LIBBPF_OPTS(bpf_test_run_opts, topts);  	skel = get_func_args_test__open_and_load();  	if (!ASSERT_OK_PTR(skel, "get_func_args_test__open_and_load")) @@ -20,19 +20,17 @@ void test_get_func_args_test(void)  	 * 
fentry/fexit programs.  	 */  	prog_fd = bpf_program__fd(skel->progs.test1); -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "test_run"); -	ASSERT_EQ(retval, 0, "test_run"); +	ASSERT_EQ(topts.retval, 0, "test_run");  	/* This runs bpf_modify_return_test function and triggers  	 * fmod_ret_test and fexit_test programs.  	 */  	prog_fd = bpf_program__fd(skel->progs.fmod_ret_test); -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "test_run"); -	ASSERT_EQ(retval, 1234, "test_run"); +	ASSERT_EQ(topts.retval, 1234, "test_run");  	ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");  	ASSERT_EQ(skel->bss->test2_result, 1, "test2_result"); diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c index 02a465f36d59..938dbd4d7c2f 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c +++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c @@ -5,8 +5,8 @@  void test_get_func_ip_test(void)  {  	struct get_func_ip_test *skel = NULL; -	__u32 duration = 0, retval;  	int err, prog_fd; +	LIBBPF_OPTS(bpf_test_run_opts, topts);  	skel = get_func_ip_test__open();  	if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open")) @@ -29,14 +29,12 @@ void test_get_func_ip_test(void)  		goto cleanup;  	prog_fd = bpf_program__fd(skel->progs.test1); -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "test_run"); -	ASSERT_EQ(retval, 0, "test_run"); +	ASSERT_EQ(topts.retval, 0, "test_run");  	prog_fd = bpf_program__fd(skel->progs.test5); -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "test_run"); diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c index e834a01de16a..16048978a1ef 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c @@ -29,11 +29,8 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)  	 */  	struct get_stack_trace_t e;  	int i, num_stack; -	static __u64 cnt;  	struct ksym *ks; -	cnt++; -  	memset(&e, 0, sizeof(e));  	memcpy(&e, data, size <= sizeof(e) ? 
size : sizeof(e)); diff --git a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c index 8d5a6023a1bb..5308de1ed478 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c @@ -27,7 +27,7 @@ void test_get_stackid_cannot_attach(void)  		return;  	/* override program type */ -	bpf_program__set_perf_event(skel->progs.oncpu); +	bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);  	err = test_stacktrace_build_id__load(skel);  	if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err)) diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c index 9da131b32e13..027685858925 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data.c @@ -29,7 +29,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)  		{ "relocate .rodata reference", 10, ~0 },  	}; -	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { +	for (i = 0; i < ARRAY_SIZE(tests); i++) {  		err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);  		CHECK(err || num != tests[i].num, tests[i].name,  		      "err %d result %llx expected %llx\n", @@ -58,7 +58,7 @@ static void test_global_data_string(struct bpf_object *obj, __u32 duration)  		{ "relocate .bss reference",    4, "\0\0hello" },  	}; -	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { +	for (i = 0; i < ARRAY_SIZE(tests); i++) {  		err = bpf_map_lookup_elem(map_fd, &tests[i].key, str);  		CHECK(err || memcmp(str, tests[i].str, sizeof(str)),  		      tests[i].name, "err %d result \'%s\' expected \'%s\'\n", @@ -92,7 +92,7 @@ static void test_global_data_struct(struct bpf_object *obj, __u32 duration)  		{ "relocate .data reference",   3, { 41, 0xeeeeefef, 0x2111111111111111ULL, } },  	}; -	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { +	for (i = 0; i < ARRAY_SIZE(tests); i++) {  		err = bpf_map_lookup_elem(map_fd, &tests[i].key, &val);  		CHECK(err || memcmp(&val, &tests[i].val, sizeof(val)),  		      tests[i].name, "err %d result { %u, %u, %llu } expected { %u, %u, %llu }\n", @@ -121,7 +121,7 @@ static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration)  	if (CHECK_FAIL(map_fd < 0))  		return; -	buff = malloc(bpf_map__def(map)->value_size); +	buff = malloc(bpf_map__value_size(map));  	if (buff)  		err = bpf_map_update_elem(map_fd, &zero, buff, 0);  	free(buff); @@ -132,24 +132,26 @@ static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration)  void test_global_data(void)  {  	const char *file = "./test_global_data.o"; -	__u32 duration = 0, retval;  	struct bpf_object *obj;  	int err, prog_fd; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	);  	err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); -	if (CHECK(err, "load program", "error %d loading %s\n", err, file)) +	if (!ASSERT_OK(err, "load program"))  		return; -	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), -				NULL, NULL, &retval, &duration); -	CHECK(err || retval, "pass global data run", -	      "err %d errno %d retval %d duration %d\n", -	      err, errno, retval, duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "pass global data run err"); +	ASSERT_OK(topts.retval, "pass global data run 
retval"); -	test_global_data_number(obj, duration); -	test_global_data_string(obj, duration); -	test_global_data_struct(obj, duration); -	test_global_data_rdonly(obj, duration); +	test_global_data_number(obj, topts.duration); +	test_global_data_string(obj, topts.duration); +	test_global_data_struct(obj, topts.duration); +	test_global_data_rdonly(obj, topts.duration);  	bpf_object__close(obj);  } diff --git a/tools/testing/selftests/bpf/prog_tests/global_data_init.c b/tools/testing/selftests/bpf/prog_tests/global_data_init.c index 1db86eab101b..57331c606964 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data_init.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data_init.c @@ -20,7 +20,7 @@ void test_global_data_init(void)  	if (CHECK_FAIL(!map || !bpf_map__is_internal(map)))  		goto out; -	sz = bpf_map__def(map)->value_size; +	sz = bpf_map__value_size(map);  	newval = malloc(sz);  	if (CHECK_FAIL(!newval))  		goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/global_func_args.c b/tools/testing/selftests/bpf/prog_tests/global_func_args.c index 93a2439237b0..29039a36cce5 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_func_args.c +++ b/tools/testing/selftests/bpf/prog_tests/global_func_args.c @@ -40,19 +40,21 @@ static void test_global_func_args0(struct bpf_object *obj)  void test_global_func_args(void)  {  	const char *file = "./test_global_func_args.o"; -	__u32 retval;  	struct bpf_object *obj;  	int err, prog_fd; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	);  	err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);  	if (CHECK(err, "load program", "error %d loading %s\n", err, file))  		return; -	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), -				NULL, NULL, &retval, &duration); -	CHECK(err || retval, "pass global func args run", -	      "err %d errno %d retval %d duration %d\n", -	      err, errno, retval, duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "test_run"); +	ASSERT_OK(topts.retval, "test_run retval");  	test_global_func_args0(obj); diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index ce10d2fc3a6c..1cee6957285e 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -53,24 +53,24 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)  void serial_test_kfree_skb(void)  {  	struct __sk_buff skb = {}; -	struct bpf_prog_test_run_attr tattr = { +	LIBBPF_OPTS(bpf_test_run_opts, topts,  		.data_in = &pkt_v6,  		.data_size_in = sizeof(pkt_v6),  		.ctx_in = &skb,  		.ctx_size_in = sizeof(skb), -	}; +	);  	struct kfree_skb *skel = NULL;  	struct bpf_link *link;  	struct bpf_object *obj;  	struct perf_buffer *pb = NULL; -	int err; +	int err, prog_fd;  	bool passed = false;  	__u32 duration = 0;  	const int zero = 0;  	bool test_ok[2];  	err = bpf_prog_test_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, -			    &obj, &tattr.prog_fd); +				 &obj, &prog_fd);  	if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))  		return; @@ -100,11 +100,9 @@ void serial_test_kfree_skb(void)  		goto close_prog;  	memcpy(skb.cb, &cb, sizeof(cb)); -	err = bpf_prog_test_run_xattr(&tattr); -	duration = tattr.duration; -	CHECK(err || tattr.retval, "ipv6", -	      "err %d errno %d retval %d duration %d\n", -	      err, errno, tattr.retval, duration); +	err = 
bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "ipv6 test_run"); +	ASSERT_OK(topts.retval, "ipv6 test_run retval");  	/* read perf buffer */  	err = perf_buffer__poll(pb, 100); diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 7d7445ccc141..c00eb974eb85 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -9,23 +9,31 @@  static void test_main(void)  {  	struct kfunc_call_test_lskel *skel; -	int prog_fd, retval, err; +	int prog_fd, err; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	);  	skel = kfunc_call_test_lskel__open_and_load();  	if (!ASSERT_OK_PTR(skel, "skel"))  		return;  	prog_fd = skel->progs.kfunc_call_test1.prog_fd; -	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), -				NULL, NULL, (__u32 *)&retval, NULL); +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "bpf_prog_test_run(test1)"); -	ASSERT_EQ(retval, 12, "test1-retval"); +	ASSERT_EQ(topts.retval, 12, "test1-retval");  	prog_fd = skel->progs.kfunc_call_test2.prog_fd; -	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), -				NULL, NULL, (__u32 *)&retval, NULL); +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "bpf_prog_test_run(test2)"); -	ASSERT_EQ(retval, 3, "test2-retval"); +	ASSERT_EQ(topts.retval, 3, "test2-retval"); + +	prog_fd = skel->progs.kfunc_call_test_ref_btf_id.prog_fd; +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "bpf_prog_test_run(test_ref_btf_id)"); +	ASSERT_EQ(topts.retval, 0, "test_ref_btf_id-retval");  	kfunc_call_test_lskel__destroy(skel);  } @@ -33,17 +41,21 @@ static void test_main(void)  static void test_subprog(void)  {  	struct kfunc_call_test_subprog *skel; -	int prog_fd, retval, err; +	int prog_fd, err; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	);  	skel = kfunc_call_test_subprog__open_and_load();  	if (!ASSERT_OK_PTR(skel, "skel"))  		return;  	prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1); -	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), -				NULL, NULL, (__u32 *)&retval, NULL); +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "bpf_prog_test_run(test1)"); -	ASSERT_EQ(retval, 10, "test1-retval"); +	ASSERT_EQ(topts.retval, 10, "test1-retval");  	ASSERT_NEQ(skel->data->active_res, -1, "active_res");  	ASSERT_EQ(skel->data->sk_state_res, BPF_TCP_CLOSE, "sk_state_res"); @@ -53,17 +65,21 @@ static void test_subprog(void)  static void test_subprog_lskel(void)  {  	struct kfunc_call_test_subprog_lskel *skel; -	int prog_fd, retval, err; +	int prog_fd, err; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	);  	skel = kfunc_call_test_subprog_lskel__open_and_load();  	if (!ASSERT_OK_PTR(skel, "skel"))  		return;  	prog_fd = skel->progs.kfunc_call_test1.prog_fd; -	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), -				NULL, NULL, (__u32 *)&retval, NULL); +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "bpf_prog_test_run(test1)"); -	ASSERT_EQ(retval, 10, "test1-retval"); +	ASSERT_EQ(topts.retval, 10, "test1-retval");  	ASSERT_NEQ(skel->data->active_res, -1, "active_res");  	ASSERT_EQ(skel->data->sk_state_res, BPF_TCP_CLOSE, "sk_state_res"); diff --git 
a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c new file mode 100644 index 000000000000..b9876b55fc0c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "kprobe_multi.skel.h" +#include "trace_helpers.h" + +static void kprobe_multi_test_run(struct kprobe_multi *skel, bool test_return) +{ +	LIBBPF_OPTS(bpf_test_run_opts, topts); +	int err, prog_fd; + +	prog_fd = bpf_program__fd(skel->progs.trigger); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "test_run"); +	ASSERT_EQ(topts.retval, 0, "test_run"); + +	ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result"); +	ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result"); +	ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result"); +	ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result"); +	ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result"); +	ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result"); +	ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result"); +	ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result"); + +	if (test_return) { +		ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result"); +		ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result"); +		ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result"); +		ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result"); +		ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result"); +		ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result"); +		ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result"); +		ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result"); +	} +} + +static void test_skel_api(void) +{ +	struct kprobe_multi *skel = NULL; +	int err; + +	skel = kprobe_multi__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load")) +		goto cleanup; + +	skel->bss->pid = getpid(); +	err = kprobe_multi__attach(skel); +	if (!ASSERT_OK(err, "kprobe_multi__attach")) +		goto cleanup; + +	kprobe_multi_test_run(skel, true); + +cleanup: +	kprobe_multi__destroy(skel); +} + +static void test_link_api(struct bpf_link_create_opts *opts) +{ +	int prog_fd, link1_fd = -1, link2_fd = -1; +	struct kprobe_multi *skel = NULL; + +	skel = kprobe_multi__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) +		goto cleanup; + +	skel->bss->pid = getpid(); +	prog_fd = bpf_program__fd(skel->progs.test_kprobe); +	link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts); +	if (!ASSERT_GE(link1_fd, 0, "link_fd")) +		goto cleanup; + +	opts->kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN; +	prog_fd = bpf_program__fd(skel->progs.test_kretprobe); +	link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts); +	if (!ASSERT_GE(link2_fd, 0, "link_fd")) +		goto cleanup; + +	kprobe_multi_test_run(skel, true); + +cleanup: +	if (link1_fd != -1) +		close(link1_fd); +	if (link2_fd != -1) +		close(link2_fd); +	kprobe_multi__destroy(skel); +} + +#define GET_ADDR(__sym, __addr) ({					\ +	__addr = ksym_get_addr(__sym);					\ +	if (!ASSERT_NEQ(__addr, 0, "kallsyms load failed for " #__sym))	\ +		return;							\ +}) + +static void test_link_api_addrs(void) +{ +	LIBBPF_OPTS(bpf_link_create_opts, opts); +	unsigned long long addrs[8]; + +	
GET_ADDR("bpf_fentry_test1", addrs[0]); +	GET_ADDR("bpf_fentry_test2", addrs[1]); +	GET_ADDR("bpf_fentry_test3", addrs[2]); +	GET_ADDR("bpf_fentry_test4", addrs[3]); +	GET_ADDR("bpf_fentry_test5", addrs[4]); +	GET_ADDR("bpf_fentry_test6", addrs[5]); +	GET_ADDR("bpf_fentry_test7", addrs[6]); +	GET_ADDR("bpf_fentry_test8", addrs[7]); + +	opts.kprobe_multi.addrs = (const unsigned long*) addrs; +	opts.kprobe_multi.cnt = ARRAY_SIZE(addrs); +	test_link_api(&opts); +} + +static void test_link_api_syms(void) +{ +	LIBBPF_OPTS(bpf_link_create_opts, opts); +	const char *syms[8] = { +		"bpf_fentry_test1", +		"bpf_fentry_test2", +		"bpf_fentry_test3", +		"bpf_fentry_test4", +		"bpf_fentry_test5", +		"bpf_fentry_test6", +		"bpf_fentry_test7", +		"bpf_fentry_test8", +	}; + +	opts.kprobe_multi.syms = syms; +	opts.kprobe_multi.cnt = ARRAY_SIZE(syms); +	test_link_api(&opts); +} + +static void +test_attach_api(const char *pattern, struct bpf_kprobe_multi_opts *opts) +{ +	struct bpf_link *link1 = NULL, *link2 = NULL; +	struct kprobe_multi *skel = NULL; + +	skel = kprobe_multi__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) +		goto cleanup; + +	skel->bss->pid = getpid(); +	link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, +						      pattern, opts); +	if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts")) +		goto cleanup; + +	if (opts) { +		opts->retprobe = true; +		link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe, +							      pattern, opts); +		if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts")) +			goto cleanup; +	} + +	kprobe_multi_test_run(skel, !!opts); + +cleanup: +	bpf_link__destroy(link2); +	bpf_link__destroy(link1); +	kprobe_multi__destroy(skel); +} + +static void test_attach_api_pattern(void) +{ +	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + +	test_attach_api("bpf_fentry_test*", &opts); +	test_attach_api("bpf_fentry_test?", NULL); +} + +static void test_attach_api_addrs(void) +{ +	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); +	unsigned long long addrs[8]; + +	GET_ADDR("bpf_fentry_test1", addrs[0]); +	GET_ADDR("bpf_fentry_test2", addrs[1]); +	GET_ADDR("bpf_fentry_test3", addrs[2]); +	GET_ADDR("bpf_fentry_test4", addrs[3]); +	GET_ADDR("bpf_fentry_test5", addrs[4]); +	GET_ADDR("bpf_fentry_test6", addrs[5]); +	GET_ADDR("bpf_fentry_test7", addrs[6]); +	GET_ADDR("bpf_fentry_test8", addrs[7]); + +	opts.addrs = (const unsigned long *) addrs; +	opts.cnt = ARRAY_SIZE(addrs); +	test_attach_api(NULL, &opts); +} + +static void test_attach_api_syms(void) +{ +	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); +	const char *syms[8] = { +		"bpf_fentry_test1", +		"bpf_fentry_test2", +		"bpf_fentry_test3", +		"bpf_fentry_test4", +		"bpf_fentry_test5", +		"bpf_fentry_test6", +		"bpf_fentry_test7", +		"bpf_fentry_test8", +	}; + +	opts.syms = syms; +	opts.cnt = ARRAY_SIZE(syms); +	test_attach_api(NULL, &opts); +} + +static void test_attach_api_fails(void) +{ +	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); +	struct kprobe_multi *skel = NULL; +	struct bpf_link *link = NULL; +	unsigned long long addrs[2]; +	const char *syms[2] = { +		"bpf_fentry_test1", +		"bpf_fentry_test2", +	}; +	__u64 cookies[2]; + +	addrs[0] = ksym_get_addr("bpf_fentry_test1"); +	addrs[1] = ksym_get_addr("bpf_fentry_test2"); + +	if (!ASSERT_FALSE(!addrs[0] || !addrs[1], "ksym_get_addr")) +		goto cleanup; + +	skel = kprobe_multi__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) +		goto cleanup; + +	skel->bss->pid = getpid(); + +	/* fail_1 - pattern and 
opts NULL */
+	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+						     NULL, NULL);
+	if (!ASSERT_ERR_PTR(link, "fail_1"))
+		goto cleanup;
+
+	if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_1_error"))
+		goto cleanup;
+
+	/* fail_2 - both addrs and syms set */
+	opts.addrs = (const unsigned long *) addrs;
+	opts.syms = syms;
+	opts.cnt = ARRAY_SIZE(syms);
+	opts.cookies = NULL;
+
+	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+						     NULL, &opts);
+	if (!ASSERT_ERR_PTR(link, "fail_2"))
+		goto cleanup;
+
+	if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_2_error"))
+		goto cleanup;
+
+	/* fail_3 - pattern and addrs set */
+	opts.addrs = (const unsigned long *) addrs;
+	opts.syms = NULL;
+	opts.cnt = ARRAY_SIZE(syms);
+	opts.cookies = NULL;
+
+	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+						     "ksys_*", &opts);
+	if (!ASSERT_ERR_PTR(link, "fail_3"))
+		goto cleanup;
+
+	if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_3_error"))
+		goto cleanup;
+
+	/* fail_4 - pattern and cnt set */
+	opts.addrs = NULL;
+	opts.syms = NULL;
+	opts.cnt = ARRAY_SIZE(syms);
+	opts.cookies = NULL;
+
+	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+						     "ksys_*", &opts);
+	if (!ASSERT_ERR_PTR(link, "fail_4"))
+		goto cleanup;
+
+	if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_4_error"))
+		goto cleanup;
+
+	/* fail_5 - pattern and cookies */
+	opts.addrs = NULL;
+	opts.syms = NULL;
+	opts.cnt = 0;
+	opts.cookies = cookies;
+
+	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+						     "ksys_*", &opts);
+	if (!ASSERT_ERR_PTR(link, "fail_5"))
+		goto cleanup;
+
+	if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_5_error"))
+		goto cleanup;
+
+cleanup:
+	bpf_link__destroy(link);
+	kprobe_multi__destroy(skel);
+}
+
+void test_kprobe_multi_test(void)
+{
+	if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
+		return;
+
+	if (test__start_subtest("skel_api"))
+		test_skel_api();
+	if (test__start_subtest("link_api_addrs"))
+		test_link_api_addrs();
+	if (test__start_subtest("link_api_syms"))
+		test_link_api_syms();
+	if (test__start_subtest("attach_api_pattern"))
+		test_attach_api_pattern();
+	if (test__start_subtest("attach_api_addrs"))
+		test_attach_api_addrs();
+	if (test__start_subtest("attach_api_syms"))
+		test_attach_api_syms();
+	if (test__start_subtest("attach_api_fails"))
+		test_attach_api_fails();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c
index d490ad80eccb..a1ebac70ec29 100644
--- a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c
@@ -6,11 +6,15 @@
 #include "test_ksyms_module.lskel.h"
 #include "test_ksyms_module.skel.h"
-void test_ksyms_module_lskel(void)
+static void test_ksyms_module_lskel(void)
 {
 	struct test_ksyms_module_lskel *skel;
-	int retval;
 	int err;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 1,
+	);
 
 	if (!env.has_testmod) {
 		test__skip();
@@ -20,20 +24,24 @@ void test_ksyms_module_lskel(void)
 	skel = test_ksyms_module_lskel__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "test_ksyms_module_lskel__open_and_load"))
 		return;
-	err = bpf_prog_test_run(skel->progs.load.prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
-				NULL, NULL, (__u32 *)&retval, NULL);
+	err = bpf_prog_test_run_opts(skel->progs.load.prog_fd, &topts);
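The call above is the shape every conversion in this series takes: the positional bpf_prog_test_run() arguments become named bpf_test_run_opts fields declared with LIBBPF_OPTS(), and retval, duration and data_size_out are read back from the struct after the run. A minimal self-contained sketch of the pattern — run_prog_once() is a hypothetical helper, and the packet buffer is assumed to come from the test harness:

	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	/* Run a loaded program once over a packet buffer and hand back its
	 * return code; results travel through the opts struct rather than
	 * through the output pointers of the old bpf_prog_test_run().
	 */
	static int run_prog_once(int prog_fd, const void *pkt, __u32 pkt_len)
	{
		LIBBPF_OPTS(bpf_test_run_opts, topts,
			.data_in = pkt,
			.data_size_in = pkt_len,
			.repeat = 1,
		);
		int err;

		err = bpf_prog_test_run_opts(prog_fd, &topts);
		if (err)
			return err;		/* -errno from libbpf */
		return topts.retval;		/* program's return value */
	}

One consequence visible in the hunks above: when a single opts struct is reused across runs, data_size_out must be reset between calls, as the l4lb_all and pkt_access conversions do explicitly.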
	if (!ASSERT_OK(err, "bpf_prog_test_run"))  		goto cleanup; -	ASSERT_EQ(retval, 0, "retval"); +	ASSERT_EQ(topts.retval, 0, "retval");  	ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym");  cleanup:  	test_ksyms_module_lskel__destroy(skel);  } -void test_ksyms_module_libbpf(void) +static void test_ksyms_module_libbpf(void)  {  	struct test_ksyms_module *skel; -	int retval, err; +	int err; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	);  	if (!env.has_testmod) {  		test__skip(); @@ -43,11 +51,10 @@ void test_ksyms_module_libbpf(void)  	skel = test_ksyms_module__open_and_load();  	if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open"))  		return; -	err = bpf_prog_test_run(bpf_program__fd(skel->progs.load), 1, &pkt_v4, -				sizeof(pkt_v4), NULL, NULL, (__u32 *)&retval, NULL); +	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.load), &topts);  	if (!ASSERT_OK(err, "bpf_prog_test_run"))  		goto cleanup; -	ASSERT_EQ(retval, 0, "retval"); +	ASSERT_EQ(topts.retval, 0, "retval");  	ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym");  cleanup:  	test_ksyms_module__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c index 540ef28fabff..55f733ff4109 100644 --- a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c +++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c @@ -23,12 +23,16 @@ static void test_l4lb(const char *file)  		__u8 flags;  	} real_def = {.dst = MAGIC_VAL};  	__u32 ch_key = 11, real_num = 3; -	__u32 duration, retval, size;  	int err, i, prog_fd, map_fd;  	__u64 bytes = 0, pkts = 0;  	struct bpf_object *obj;  	char buf[128];  	u32 *magic = (u32 *)buf; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_out = buf, +		.data_size_out = sizeof(buf), +		.repeat = NUM_ITER, +	);  	err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);  	if (CHECK_FAIL(err)) @@ -49,19 +53,24 @@ static void test_l4lb(const char *file)  		goto out;  	bpf_map_update_elem(map_fd, &real_num, &real_def, 0); -	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), -				buf, &size, &retval, &duration); -	CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 || -	      *magic != MAGIC_VAL, "ipv4", -	      "err %d errno %d retval %d size %d magic %x\n", -	      err, errno, retval, size, *magic); +	topts.data_in = &pkt_v4; +	topts.data_size_in = sizeof(pkt_v4); -	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), -				buf, &size, &retval, &duration); -	CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 || -	      *magic != MAGIC_VAL, "ipv6", -	      "err %d errno %d retval %d size %d magic %x\n", -	      err, errno, retval, size, *magic); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "test_run"); +	ASSERT_EQ(topts.retval, 7 /*TC_ACT_REDIRECT*/, "ipv4 test_run retval"); +	ASSERT_EQ(topts.data_size_out, 54, "ipv4 test_run data_size_out"); +	ASSERT_EQ(*magic, MAGIC_VAL, "ipv4 magic"); + +	topts.data_in = &pkt_v6; +	topts.data_size_in = sizeof(pkt_v6); +	topts.data_size_out = sizeof(buf); /* reset out size */ + +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "test_run"); +	ASSERT_EQ(topts.retval, 7 /*TC_ACT_REDIRECT*/, "ipv6 test_run retval"); +	ASSERT_EQ(topts.data_size_out, 74, "ipv6 test_run data_size_out"); +	ASSERT_EQ(*magic, MAGIC_VAL, "ipv6 magic");  	map_fd = bpf_find_map(__func__, obj, "stats");  	if (map_fd < 0) diff --git 
a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c index e469b023962b..fe9a23e65ef4 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_buf.c +++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c @@ -78,7 +78,7 @@ static void obj_load_log_buf(void)  	ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"),  		      "libbpf_log_not_empty");  	ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty"); -	ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), +	ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"),  		      "good_log_verbose");  	ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"),  		      "bad_log_not_empty"); @@ -175,7 +175,7 @@ static void bpf_prog_load_log_buf(void)  	opts.log_level = 2;  	fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",  			   good_prog_insns, good_prog_insn_cnt, &opts); -	ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), "good_log_2"); +	ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_2");  	ASSERT_GE(fd, 0, "good_fd2");  	if (fd >= 0)  		close(fd); @@ -202,7 +202,7 @@ static void bpf_btf_load_log_buf(void)  	const void *raw_btf_data;  	__u32 raw_btf_size;  	struct btf *btf; -	char *log_buf; +	char *log_buf = NULL;  	int fd = -1;  	btf = btf__new_empty(); diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c index 23d19e9cf26a..e4e99b37df64 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c @@ -4,14 +4,17 @@  static void *spin_lock_thread(void *arg)  { -	__u32 duration, retval;  	int err, prog_fd = *(u32 *) arg; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 10000, +	); + +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "test_run_opts err"); +	ASSERT_OK(topts.retval, "test_run_opts retval"); -	err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4), -				NULL, NULL, &retval, &duration); -	CHECK(err || retval, "", -	      "err %d errno %d retval %d duration %d\n", -	      err, errno, retval, duration);  	pthread_exit(arg);  } diff --git a/tools/testing/selftests/bpf/prog_tests/map_ptr.c b/tools/testing/selftests/bpf/prog_tests/map_ptr.c index 273725504f11..43e502acf050 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_ptr.c +++ b/tools/testing/selftests/bpf/prog_tests/map_ptr.c @@ -9,10 +9,16 @@  void test_map_ptr(void)  {  	struct map_ptr_kern_lskel *skel; -	__u32 duration = 0, retval;  	char buf[128];  	int err;  	int page_size = getpagesize(); +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.data_out = buf, +		.data_size_out = sizeof(buf), +		.repeat = 1, +	);  	skel = map_ptr_kern_lskel__open();  	if (!ASSERT_OK_PTR(skel, "skel_open")) @@ -26,14 +32,12 @@ void test_map_ptr(void)  	skel->bss->page_size = page_size; -	err = bpf_prog_test_run(skel->progs.cg_skb.prog_fd, 1, &pkt_v4, -				sizeof(pkt_v4), buf, NULL, &retval, NULL); +	err = bpf_prog_test_run_opts(skel->progs.cg_skb.prog_fd, &topts); -	if (CHECK(err, "test_run", "err=%d errno=%d\n", err, errno)) +	if (!ASSERT_OK(err, "test_run"))  		goto cleanup; -	if (CHECK(!retval, "retval", "retval=%d map_type=%u line=%u\n", retval, -		  skel->bss->g_map_type, skel->bss->g_line)) +	if 
(!ASSERT_NEQ(topts.retval, 0, "test_run retval"))  		goto cleanup;  cleanup: diff --git a/tools/testing/selftests/bpf/prog_tests/modify_return.c b/tools/testing/selftests/bpf/prog_tests/modify_return.c index b772fe30ce9b..5d9955af6247 100644 --- a/tools/testing/selftests/bpf/prog_tests/modify_return.c +++ b/tools/testing/selftests/bpf/prog_tests/modify_return.c @@ -15,39 +15,31 @@ static void run_test(__u32 input_retval, __u16 want_side_effect, __s16 want_ret)  {  	struct modify_return *skel = NULL;  	int err, prog_fd; -	__u32 duration = 0, retval;  	__u16 side_effect;  	__s16 ret; +	LIBBPF_OPTS(bpf_test_run_opts, topts);  	skel = modify_return__open_and_load(); -	if (CHECK(!skel, "skel_load", "modify_return skeleton failed\n")) +	if (!ASSERT_OK_PTR(skel, "skel_load"))  		goto cleanup;  	err = modify_return__attach(skel); -	if (CHECK(err, "modify_return", "attach failed: %d\n", err)) +	if (!ASSERT_OK(err, "modify_return__attach failed"))  		goto cleanup;  	skel->bss->input_retval = input_retval;  	prog_fd = bpf_program__fd(skel->progs.fmod_ret_test); -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, NULL, 0, -				&retval, &duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "test_run"); -	CHECK(err, "test_run", "err %d errno %d\n", err, errno); +	side_effect = UPPER(topts.retval); +	ret = LOWER(topts.retval); -	side_effect = UPPER(retval); -	ret  = LOWER(retval); - -	CHECK(ret != want_ret, "test_run", -	      "unexpected ret: %d, expected: %d\n", ret, want_ret); -	CHECK(side_effect != want_side_effect, "modify_return", -	      "unexpected side_effect: %d\n", side_effect); - -	CHECK(skel->bss->fentry_result != 1, "modify_return", -	      "fentry failed\n"); -	CHECK(skel->bss->fexit_result != 1, "modify_return", -	      "fexit failed\n"); -	CHECK(skel->bss->fmod_ret_result != 1, "modify_return", -	      "fmod_ret failed\n"); +	ASSERT_EQ(ret, want_ret, "test_run ret"); +	ASSERT_EQ(side_effect, want_side_effect, "modify_return side_effect"); +	ASSERT_EQ(skel->bss->fentry_result, 1, "modify_return fentry_result"); +	ASSERT_EQ(skel->bss->fexit_result, 1, "modify_return fexit_result"); +	ASSERT_EQ(skel->bss->fmod_ret_result, 1, "modify_return fmod_ret_result");  cleanup:  	modify_return__destroy(skel); @@ -63,4 +55,3 @@ void serial_test_modify_return(void)  		 0 /* want_side_effect */,  		 -EINVAL /* want_ret */);  } - diff --git a/tools/testing/selftests/bpf/prog_tests/obj_name.c b/tools/testing/selftests/bpf/prog_tests/obj_name.c index 6194b776a28b..7093edca6e08 100644 --- a/tools/testing/selftests/bpf/prog_tests/obj_name.c +++ b/tools/testing/selftests/bpf/prog_tests/obj_name.c @@ -20,7 +20,7 @@ void test_obj_name(void)  	__u32 duration = 0;  	int i; -	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { +	for (i = 0; i < ARRAY_SIZE(tests); i++) {  		size_t name_len = strlen(tests[i].name) + 1;  		union bpf_attr attr;  		size_t ncopy; diff --git a/tools/testing/selftests/bpf/prog_tests/perf_branches.c b/tools/testing/selftests/bpf/prog_tests/perf_branches.c index 12c4f45cee1a..bc24f83339d6 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_branches.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_branches.c @@ -110,7 +110,7 @@ static void test_perf_branches_hw(void)  	attr.type = PERF_TYPE_HARDWARE;  	attr.config = PERF_COUNT_HW_CPU_CYCLES;  	attr.freq = 1; -	attr.sample_freq = 4000; +	attr.sample_freq = 1000;  	attr.sample_type = PERF_SAMPLE_BRANCH_STACK;  	attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;  	pfd = 
syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); @@ -151,7 +151,7 @@ static void test_perf_branches_no_hw(void)  	attr.type = PERF_TYPE_SOFTWARE;  	attr.config = PERF_COUNT_SW_CPU_CLOCK;  	attr.freq = 1; -	attr.sample_freq = 4000; +	attr.sample_freq = 1000;  	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);  	if (CHECK(pfd < 0, "perf_event_open", "err %d\n", pfd))  		return; diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c index ede07344f264..224eba6fef2e 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_link.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c @@ -39,7 +39,7 @@ void serial_test_perf_link(void)  	attr.type = PERF_TYPE_SOFTWARE;  	attr.config = PERF_COUNT_SW_CPU_CLOCK;  	attr.freq = 1; -	attr.sample_freq = 4000; +	attr.sample_freq = 1000;  	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);  	if (!ASSERT_GE(pfd, 0, "perf_fd"))  		goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_access.c index 6628710ec3c6..0bcccdc34fbc 100644 --- a/tools/testing/selftests/bpf/prog_tests/pkt_access.c +++ b/tools/testing/selftests/bpf/prog_tests/pkt_access.c @@ -6,23 +6,27 @@ void test_pkt_access(void)  {  	const char *file = "./test_pkt_access.o";  	struct bpf_object *obj; -	__u32 duration, retval;  	int err, prog_fd; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 100000, +	);  	err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);  	if (CHECK_FAIL(err))  		return; -	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4), -				NULL, NULL, &retval, &duration); -	CHECK(err || retval, "ipv4", -	      "err %d errno %d retval %d duration %d\n", -	      err, errno, retval, duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "ipv4 test_run_opts err"); +	ASSERT_OK(topts.retval, "ipv4 test_run_opts retval"); + +	topts.data_in = &pkt_v6; +	topts.data_size_in = sizeof(pkt_v6); +	topts.data_size_out = 0; /* reset from last call */ +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "ipv6 test_run_opts err"); +	ASSERT_OK(topts.retval, "ipv6 test_run_opts retval"); -	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6), -				NULL, NULL, &retval, &duration); -	CHECK(err || retval, "ipv6", -	      "err %d errno %d retval %d duration %d\n", -	      err, errno, retval, duration);  	bpf_object__close(obj);  } diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c index c9d2d6a1bfcc..00ee1dd792aa 100644 --- a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c +++ b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c @@ -6,18 +6,20 @@ void test_pkt_md_access(void)  {  	const char *file = "./test_pkt_md_access.o";  	struct bpf_object *obj; -	__u32 duration, retval;  	int err, prog_fd; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 10, +	);  	err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);  	if (CHECK_FAIL(err))  		return; -	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4), -				NULL, NULL, &retval, &duration); -	CHECK(err || retval, "", -	      "err %d errno %d retval %d duration %d\n", -	      err, errno, retval, duration); +	err = bpf_prog_test_run_opts(prog_fd, 
&topts); +	ASSERT_OK(err, "test_run_opts err"); +	ASSERT_OK(topts.retval, "test_run_opts retval");  	bpf_object__close(obj);  } diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c b/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c new file mode 100644 index 000000000000..1ccd2bdf8fa8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +#include "test_pkt_access.skel.h" + +static const __u32 duration; + +static void check_run_cnt(int prog_fd, __u64 run_cnt) +{ +	struct bpf_prog_info info = {}; +	__u32 info_len = sizeof(info); +	int err; + +	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); +	if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd)) +		return; + +	CHECK(run_cnt != info.run_cnt, "run_cnt", +	      "incorrect number of repetitions, want %llu have %llu\n", run_cnt, info.run_cnt); +} + +void test_prog_run_opts(void) +{ +	struct test_pkt_access *skel; +	int err, stats_fd = -1, prog_fd; +	char buf[10] = {}; +	__u64 run_cnt = 0; + +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.repeat = 1, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.data_out = buf, +		.data_size_out = 5, +	); + +	stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME); +	if (!ASSERT_GE(stats_fd, 0, "enable_stats good fd")) +		return; + +	skel = test_pkt_access__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "open_and_load")) +		goto cleanup; + +	prog_fd = bpf_program__fd(skel->progs.test_pkt_access); + +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_EQ(errno, ENOSPC, "test_run errno"); +	ASSERT_ERR(err, "test_run"); +	ASSERT_OK(topts.retval, "test_run retval"); + +	ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4), "test_run data_size_out"); +	ASSERT_EQ(buf[5], 0, "overflow, BPF_PROG_TEST_RUN ignored size hint"); + +	run_cnt += topts.repeat; +	check_run_cnt(prog_fd, run_cnt); + +	topts.data_out = NULL; +	topts.data_size_out = 0; +	topts.repeat = 2; +	errno = 0; + +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(errno, "run_no_output errno"); +	ASSERT_OK(err, "run_no_output err"); +	ASSERT_OK(topts.retval, "run_no_output retval"); + +	run_cnt += topts.repeat; +	check_run_cnt(prog_fd, run_cnt); + +cleanup: +	if (skel) +		test_pkt_access__destroy(skel); +	if (stats_fd >= 0) +		close(stats_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c deleted file mode 100644 index 89fc98faf19e..000000000000 --- a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c +++ /dev/null @@ -1,83 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <test_progs.h> -#include <network_helpers.h> - -#include "test_pkt_access.skel.h" - -static const __u32 duration; - -static void check_run_cnt(int prog_fd, __u64 run_cnt) -{ -	struct bpf_prog_info info = {}; -	__u32 info_len = sizeof(info); -	int err; - -	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); -	if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd)) -		return; - -	CHECK(run_cnt != info.run_cnt, "run_cnt", -	      "incorrect number of repetitions, want %llu have %llu\n", run_cnt, info.run_cnt); -} - -void test_prog_run_xattr(void) -{ -	struct test_pkt_access *skel; -	int err, stats_fd = -1; -	char buf[10] = {}; -	__u64 run_cnt = 0; - -	struct bpf_prog_test_run_attr tattr = { -		.repeat = 1, -		.data_in = &pkt_v4, -		.data_size_in = sizeof(pkt_v4), -		
.data_out = buf, -		.data_size_out = 5, -	}; - -	stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME); -	if (CHECK_ATTR(stats_fd < 0, "enable_stats", "failed %d\n", errno)) -		return; - -	skel = test_pkt_access__open_and_load(); -	if (CHECK_ATTR(!skel, "open_and_load", "failed\n")) -		goto cleanup; - -	tattr.prog_fd = bpf_program__fd(skel->progs.test_pkt_access); - -	err = bpf_prog_test_run_xattr(&tattr); -	CHECK_ATTR(err >= 0 || errno != ENOSPC || tattr.retval, "run", -	      "err %d errno %d retval %d\n", err, errno, tattr.retval); - -	CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out", -	      "incorrect output size, want %zu have %u\n", -	      sizeof(pkt_v4), tattr.data_size_out); - -	CHECK_ATTR(buf[5] != 0, "overflow", -	      "BPF_PROG_TEST_RUN ignored size hint\n"); - -	run_cnt += tattr.repeat; -	check_run_cnt(tattr.prog_fd, run_cnt); - -	tattr.data_out = NULL; -	tattr.data_size_out = 0; -	tattr.repeat = 2; -	errno = 0; - -	err = bpf_prog_test_run_xattr(&tattr); -	CHECK_ATTR(err || errno || tattr.retval, "run_no_output", -	      "err %d errno %d retval %d\n", err, errno, tattr.retval); - -	tattr.data_size_out = 1; -	err = bpf_prog_test_run_xattr(&tattr); -	CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err); - -	run_cnt += tattr.repeat; -	check_run_cnt(tattr.prog_fd, run_cnt); - -cleanup: -	if (skel) -		test_pkt_access__destroy(skel); -	if (stats_fd >= 0) -		close(stats_fd); -} diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c index b9822f914eeb..d2743fc10032 100644 --- a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c +++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c @@ -10,11 +10,18 @@ enum {  static void test_queue_stack_map_by_type(int type)  {  	const int MAP_SIZE = 32; -	__u32 vals[MAP_SIZE], duration, retval, size, val; +	__u32 vals[MAP_SIZE], val;  	int i, err, prog_fd, map_in_fd, map_out_fd;  	char file[32], buf[128];  	struct bpf_object *obj;  	struct iphdr iph; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.data_out = buf, +		.data_size_out = sizeof(buf), +		.repeat = 1, +	);  	/* Fill test values to be used */  	for (i = 0; i < MAP_SIZE; i++) @@ -58,38 +65,37 @@ static void test_queue_stack_map_by_type(int type)  			pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5;  		} -		err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), -					buf, &size, &retval, &duration); -		if (err || retval || size != sizeof(pkt_v4)) +		topts.data_size_out = sizeof(buf); +		err = bpf_prog_test_run_opts(prog_fd, &topts); +		if (err || topts.retval || +		    topts.data_size_out != sizeof(pkt_v4))  			break;  		memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph));  		if (iph.daddr != val)  			break;  	} -	CHECK(err || retval || size != sizeof(pkt_v4) || iph.daddr != val, -	      "bpf_map_pop_elem", -	      "err %d errno %d retval %d size %d iph->daddr %u\n", -	      err, errno, retval, size, iph.daddr); +	ASSERT_OK(err, "bpf_map_pop_elem"); +	ASSERT_OK(topts.retval, "bpf_map_pop_elem test retval"); +	ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4), +		  "bpf_map_pop_elem data_size_out"); +	ASSERT_EQ(iph.daddr, val, "bpf_map_pop_elem iph.daddr");  	/* Queue is empty, program should return TC_ACT_SHOT */ -	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), -				buf, &size, &retval, &duration); -	CHECK(err || retval != 2 /* TC_ACT_SHOT */|| size != sizeof(pkt_v4), -	      "check-queue-stack-map-empty", -	    
  "err %d errno %d retval %d size %d\n", -	      err, errno, retval, size); +	topts.data_size_out = sizeof(buf); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "check-queue-stack-map-empty"); +	ASSERT_EQ(topts.retval, 2  /* TC_ACT_SHOT */, +		  "check-queue-stack-map-empty test retval"); +	ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4), +		  "check-queue-stack-map-empty data_size_out");  	/* Check that the program pushed elements correctly */  	for (i = 0; i < MAP_SIZE; i++) {  		err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val); -		if (err || val != vals[i] * 5) -			break; +		ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"); +		ASSERT_EQ(val, vals[i] * 5, "bpf_map_push_elem val");  	} - -	CHECK(i != MAP_SIZE && (err || val != vals[i] * 5), -	      "bpf_map_push_elem", "err %d value %u\n", err, val); -  out:  	pkt_v4.iph.saddr = 0;  	bpf_object__close(obj); diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c index 41720a62c4fa..fe5b8fae2c36 100644 --- a/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c @@ -5,59 +5,54 @@  #include "bpf/libbpf_internal.h"  #include "test_raw_tp_test_run.skel.h" -static int duration; -  void test_raw_tp_test_run(void)  { -	struct bpf_prog_test_run_attr test_attr = {};  	int comm_fd = -1, err, nr_online, i, prog_fd;  	__u64 args[2] = {0x1234ULL, 0x5678ULL};  	int expected_retval = 0x1234 + 0x5678;  	struct test_raw_tp_test_run *skel;  	char buf[] = "new_name";  	bool *online = NULL; -	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, -			    .ctx_in = args, -			    .ctx_size_in = sizeof(args), -			    .flags = BPF_F_TEST_RUN_ON_CPU, -		); +	LIBBPF_OPTS(bpf_test_run_opts, opts, +		.ctx_in = args, +		.ctx_size_in = sizeof(args), +		.flags = BPF_F_TEST_RUN_ON_CPU, +	);  	err = parse_cpu_mask_file("/sys/devices/system/cpu/online", &online,  				  &nr_online); -	if (CHECK(err, "parse_cpu_mask_file", "err %d\n", err)) +	if (!ASSERT_OK(err, "parse_cpu_mask_file"))  		return;  	skel = test_raw_tp_test_run__open_and_load(); -	if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) +	if (!ASSERT_OK_PTR(skel, "skel_open"))  		goto cleanup;  	err = test_raw_tp_test_run__attach(skel); -	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) +	if (!ASSERT_OK(err, "skel_attach"))  		goto cleanup;  	comm_fd = open("/proc/self/comm", O_WRONLY|O_TRUNC); -	if (CHECK(comm_fd < 0, "open /proc/self/comm", "err %d\n", errno)) +	if (!ASSERT_GE(comm_fd, 0, "open /proc/self/comm"))  		goto cleanup;  	err = write(comm_fd, buf, sizeof(buf)); -	CHECK(err < 0, "task rename", "err %d", errno); +	ASSERT_GE(err, 0, "task rename"); -	CHECK(skel->bss->count == 0, "check_count", "didn't increase\n"); -	CHECK(skel->data->on_cpu != 0xffffffff, "check_on_cpu", "got wrong value\n"); +	ASSERT_NEQ(skel->bss->count, 0, "check_count"); +	ASSERT_EQ(skel->data->on_cpu, 0xffffffff, "check_on_cpu");  	prog_fd = bpf_program__fd(skel->progs.rename); -	test_attr.prog_fd = prog_fd; -	test_attr.ctx_in = args; -	test_attr.ctx_size_in = sizeof(__u64); +	opts.ctx_in = args; +	opts.ctx_size_in = sizeof(__u64); -	err = bpf_prog_test_run_xattr(&test_attr); -	CHECK(err == 0, "test_run", "should fail for too small ctx\n"); +	err = bpf_prog_test_run_opts(prog_fd, &opts); +	ASSERT_NEQ(err, 0, "test_run should fail for too small ctx"); -	test_attr.ctx_size_in = sizeof(args); -	err = bpf_prog_test_run_xattr(&test_attr); -	CHECK(err < 0, 
"test_run", "err %d\n", errno); -	CHECK(test_attr.retval != expected_retval, "check_retval", -	      "expect 0x%x, got 0x%x\n", expected_retval, test_attr.retval); +	opts.ctx_size_in = sizeof(args); +	err = bpf_prog_test_run_opts(prog_fd, &opts); +	ASSERT_OK(err, "test_run"); +	ASSERT_EQ(opts.retval, expected_retval, "check_retval");  	for (i = 0; i < nr_online; i++) {  		if (!online[i]) @@ -66,28 +61,23 @@ void test_raw_tp_test_run(void)  		opts.cpu = i;  		opts.retval = 0;  		err = bpf_prog_test_run_opts(prog_fd, &opts); -		CHECK(err < 0, "test_run_opts", "err %d\n", errno); -		CHECK(skel->data->on_cpu != i, "check_on_cpu", -		      "expect %d got %d\n", i, skel->data->on_cpu); -		CHECK(opts.retval != expected_retval, -		      "check_retval", "expect 0x%x, got 0x%x\n", -		      expected_retval, opts.retval); +		ASSERT_OK(err, "test_run_opts"); +		ASSERT_EQ(skel->data->on_cpu, i, "check_on_cpu"); +		ASSERT_EQ(opts.retval, expected_retval, "check_retval");  	}  	/* invalid cpu ID should fail with ENXIO */  	opts.cpu = 0xffffffff;  	err = bpf_prog_test_run_opts(prog_fd, &opts); -	CHECK(err >= 0 || errno != ENXIO, -	      "test_run_opts_fail", -	      "should failed with ENXIO\n"); +	ASSERT_EQ(errno, ENXIO, "test_run_opts should fail with ENXIO"); +	ASSERT_ERR(err, "test_run_opts_fail");  	/* non-zero cpu w/o BPF_F_TEST_RUN_ON_CPU should fail with EINVAL */  	opts.cpu = 1;  	opts.flags = 0;  	err = bpf_prog_test_run_opts(prog_fd, &opts); -	CHECK(err >= 0 || errno != EINVAL, -	      "test_run_opts_fail", -	      "should failed with EINVAL\n"); +	ASSERT_EQ(errno, EINVAL, "test_run_opts should fail with EINVAL"); +	ASSERT_ERR(err, "test_run_opts_fail");  cleanup:  	close(comm_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c index 239baccabccb..f4aa7dab4766 100644 --- a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c @@ -56,21 +56,23 @@ void serial_test_raw_tp_writable_test_run(void)  		0,  	}; -	__u32 prog_ret; -	int err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, -				    0, &prog_ret, 0); +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = test_skb, +		.data_size_in = sizeof(test_skb), +		.repeat = 1, +	); +	int err = bpf_prog_test_run_opts(filter_fd, &topts);  	CHECK(err != 42, "test_run",  	      "tracepoint did not modify return value\n"); -	CHECK(prog_ret != 0, "test_run_ret", +	CHECK(topts.retval != 0, "test_run_ret",  	      "socket_filter did not return 0\n");  	close(tp_fd); -	err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, 0, -				&prog_ret, 0); +	err = bpf_prog_test_run_opts(filter_fd, &topts);  	CHECK(err != 0, "test_run_notrace",  	      "test_run failed with %d errno %d\n", err, errno); -	CHECK(prog_ret != 0, "test_run_ret_notrace", +	CHECK(topts.retval != 0, "test_run_ret_notrace",  	      "socket_filter did not return 0\n");  out_filterfd: diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c index 776916b61c40..d71226e34c34 100644 --- a/tools/testing/selftests/bpf/prog_tests/send_signal.c +++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c @@ -4,11 +4,11 @@  #include <sys/resource.h>  #include "test_send_signal_kern.skel.h" -int sigusr1_received = 0; +static int sigusr1_received;  static void sigusr1_handler(int signum)  { -	sigusr1_received++; +	sigusr1_received = 1;  }  
static void test_send_signal_common(struct perf_event_attr *attr, @@ -40,9 +40,10 @@ static void test_send_signal_common(struct perf_event_attr *attr,  	if (pid == 0) {  		int old_prio; +		volatile int j = 0;  		/* install signal handler and notify parent */ -		signal(SIGUSR1, sigusr1_handler); +		ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal");  		close(pipe_c2p[0]); /* close read */  		close(pipe_p2c[1]); /* close write */ @@ -63,9 +64,11 @@ static void test_send_signal_common(struct perf_event_attr *attr,  		ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");  		/* wait a little for signal handler */ -		sleep(1); +		for (int i = 0; i < 100000000 && !sigusr1_received; i++) +			j /= i + j + 1;  		buf[0] = sigusr1_received ? '2' : '0'; +		ASSERT_EQ(sigusr1_received, 1, "sigusr1_received");  		ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");  		/* wait for parent notification and exit */ @@ -93,7 +96,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,  			goto destroy_skel;  		}  	} else { -		pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1, +		pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */,  				 -1 /* group id */, 0 /* flags */);  		if (!ASSERT_GE(pmu_fd, 0, "perf_event_open")) {  			err = -1; @@ -110,9 +113,9 @@ static void test_send_signal_common(struct perf_event_attr *attr,  	ASSERT_EQ(read(pipe_c2p[0], buf, 1), 1, "pipe_read");  	/* trigger the bpf send_signal */ -	skel->bss->pid = pid; -	skel->bss->sig = SIGUSR1;  	skel->bss->signal_thread = signal_thread; +	skel->bss->sig = SIGUSR1; +	skel->bss->pid = pid;  	/* notify child that bpf program can send_signal now */  	ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write"); diff --git a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c index aecfe662c070..70b49da5ca0a 100644 --- a/tools/testing/selftests/bpf/prog_tests/signal_pending.c +++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c @@ -13,10 +13,14 @@ static void test_signal_pending_by_type(enum bpf_prog_type prog_type)  	struct itimerval timeo = {  		.it_value.tv_usec = 100000, /* 100ms */  	}; -	__u32 duration = 0, retval;  	int prog_fd;  	int err;  	int i; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 0xffffffff, +	);  	for (i = 0; i < ARRAY_SIZE(prog); i++)  		prog[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0); @@ -24,20 +28,17 @@ static void test_signal_pending_by_type(enum bpf_prog_type prog_type)  	prog_fd = bpf_test_load_program(prog_type, prog, ARRAY_SIZE(prog),  				   "GPL", 0, NULL, 0); -	CHECK(prog_fd < 0, "test-run", "errno %d\n", errno); +	ASSERT_GE(prog_fd, 0, "test-run load");  	err = sigaction(SIGALRM, &sigalrm_action, NULL); -	CHECK(err, "test-run-signal-sigaction", "errno %d\n", errno); +	ASSERT_OK(err, "test-run-signal-sigaction");  	err = setitimer(ITIMER_REAL, &timeo, NULL); -	CHECK(err, "test-run-signal-timer", "errno %d\n", errno); - -	err = bpf_prog_test_run(prog_fd, 0xffffffff, &pkt_v4, sizeof(pkt_v4), -				NULL, NULL, &retval, &duration); -	CHECK(duration > 500000000, /* 500ms */ -	      "test-run-signal-duration", -	      "duration %dns > 500ms\n", -	      duration); +	ASSERT_OK(err, "test-run-signal-timer"); + +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_LE(topts.duration, 500000000 /* 500ms */, +		  "test-run-signal-duration");  	signal(SIGALRM, SIG_DFL);  } diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c 
b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c index b5319ba2ee27..ce0e555b5e38 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -20,97 +20,72 @@ void test_skb_ctx(void)  		.gso_size = 10,  		.hwtstamp = 11,  	}; -	struct bpf_prog_test_run_attr tattr = { +	LIBBPF_OPTS(bpf_test_run_opts, tattr,  		.data_in = &pkt_v4,  		.data_size_in = sizeof(pkt_v4),  		.ctx_in = &skb,  		.ctx_size_in = sizeof(skb),  		.ctx_out = &skb,  		.ctx_size_out = sizeof(skb), -	}; +	);  	struct bpf_object *obj; -	int err; -	int i; +	int err, prog_fd, i; -	err = bpf_prog_test_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, &obj, -			    &tattr.prog_fd); -	if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) +	err = bpf_prog_test_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, +				 &obj, &prog_fd); +	if (!ASSERT_OK(err, "load"))  		return;  	/* ctx_in != NULL, ctx_size_in == 0 */  	tattr.ctx_size_in = 0; -	err = bpf_prog_test_run_xattr(&tattr); -	CHECK_ATTR(err == 0, "ctx_size_in", "err %d errno %d\n", err, errno); +	err = bpf_prog_test_run_opts(prog_fd, &tattr); +	ASSERT_NEQ(err, 0, "ctx_size_in");  	tattr.ctx_size_in = sizeof(skb);  	/* ctx_out != NULL, ctx_size_out == 0 */  	tattr.ctx_size_out = 0; -	err = bpf_prog_test_run_xattr(&tattr); -	CHECK_ATTR(err == 0, "ctx_size_out", "err %d errno %d\n", err, errno); +	err = bpf_prog_test_run_opts(prog_fd, &tattr); +	ASSERT_NEQ(err, 0, "ctx_size_out");  	tattr.ctx_size_out = sizeof(skb);  	/* non-zero [len, tc_index] fields should be rejected*/  	skb.len = 1; -	err = bpf_prog_test_run_xattr(&tattr); -	CHECK_ATTR(err == 0, "len", "err %d errno %d\n", err, errno); +	err = bpf_prog_test_run_opts(prog_fd, &tattr); +	ASSERT_NEQ(err, 0, "len");  	skb.len = 0;  	skb.tc_index = 1; -	err = bpf_prog_test_run_xattr(&tattr); -	CHECK_ATTR(err == 0, "tc_index", "err %d errno %d\n", err, errno); +	err = bpf_prog_test_run_opts(prog_fd, &tattr); +	ASSERT_NEQ(err, 0, "tc_index");  	skb.tc_index = 0;  	/* non-zero [hash, sk] fields should be rejected */  	skb.hash = 1; -	err = bpf_prog_test_run_xattr(&tattr); -	CHECK_ATTR(err == 0, "hash", "err %d errno %d\n", err, errno); +	err = bpf_prog_test_run_opts(prog_fd, &tattr); +	ASSERT_NEQ(err, 0, "hash");  	skb.hash = 0;  	skb.sk = (struct bpf_sock *)1; -	err = bpf_prog_test_run_xattr(&tattr); -	CHECK_ATTR(err == 0, "sk", "err %d errno %d\n", err, errno); +	err = bpf_prog_test_run_opts(prog_fd, &tattr); +	ASSERT_NEQ(err, 0, "sk");  	skb.sk = 0; -	err = bpf_prog_test_run_xattr(&tattr); -	CHECK_ATTR(err != 0 || tattr.retval, -		   "run", -		   "err %d errno %d retval %d\n", -		   err, errno, tattr.retval); - -	CHECK_ATTR(tattr.ctx_size_out != sizeof(skb), -		   "ctx_size_out", -		   "incorrect output size, want %zu have %u\n", -		   sizeof(skb), tattr.ctx_size_out); +	err = bpf_prog_test_run_opts(prog_fd, &tattr); +	ASSERT_OK(err, "test_run"); +	ASSERT_OK(tattr.retval, "test_run retval"); +	ASSERT_EQ(tattr.ctx_size_out, sizeof(skb), "ctx_size_out");  	for (i = 0; i < 5; i++) -		CHECK_ATTR(skb.cb[i] != i + 2, -			   "ctx_out_cb", -			   "skb->cb[i] == %d, expected %d\n", -			   skb.cb[i], i + 2); -	CHECK_ATTR(skb.priority != 7, -		   "ctx_out_priority", -		   "skb->priority == %d, expected %d\n", -		   skb.priority, 7); -	CHECK_ATTR(skb.ifindex != 1, -		   "ctx_out_ifindex", -		   "skb->ifindex == %d, expected %d\n", -		   skb.ifindex, 1); -	CHECK_ATTR(skb.ingress_ifindex != 11, -		   "ctx_out_ingress_ifindex", -		   "skb->ingress_ifindex == %d, expected 
%d\n", -		   skb.ingress_ifindex, 11); -	CHECK_ATTR(skb.tstamp != 8, -		   "ctx_out_tstamp", -		   "skb->tstamp == %lld, expected %d\n", -		   skb.tstamp, 8); -	CHECK_ATTR(skb.mark != 10, -		   "ctx_out_mark", -		   "skb->mark == %u, expected %d\n", -		   skb.mark, 10); +		ASSERT_EQ(skb.cb[i], i + 2, "ctx_out_cb"); +	ASSERT_EQ(skb.priority, 7, "ctx_out_priority"); +	ASSERT_EQ(skb.ifindex, 1, "ctx_out_ifindex"); +	ASSERT_EQ(skb.ingress_ifindex, 11, "ctx_out_ingress_ifindex"); +	ASSERT_EQ(skb.tstamp, 8, "ctx_out_tstamp"); +	ASSERT_EQ(skb.mark, 10, "ctx_out_mark");  	bpf_object__close(obj);  } diff --git a/tools/testing/selftests/bpf/prog_tests/skb_helpers.c b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c index 6f802a1c0800..97dc8b14be48 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_helpers.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c @@ -9,22 +9,22 @@ void test_skb_helpers(void)  		.gso_segs = 8,  		.gso_size = 10,  	}; -	struct bpf_prog_test_run_attr tattr = { +	LIBBPF_OPTS(bpf_test_run_opts, topts,  		.data_in = &pkt_v4,  		.data_size_in = sizeof(pkt_v4),  		.ctx_in = &skb,  		.ctx_size_in = sizeof(skb),  		.ctx_out = &skb,  		.ctx_size_out = sizeof(skb), -	}; +	);  	struct bpf_object *obj; -	int err; +	int err, prog_fd; -	err = bpf_prog_test_load("./test_skb_helpers.o", BPF_PROG_TYPE_SCHED_CLS, &obj, -			    &tattr.prog_fd); -	if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) +	err = bpf_prog_test_load("./test_skb_helpers.o", +				 BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); +	if (!ASSERT_OK(err, "load"))  		return; -	err = bpf_prog_test_run_xattr(&tattr); -	CHECK_ATTR(err, "len", "err %d errno %d\n", err, errno); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "test_run");  	bpf_object__close(obj);  } diff --git a/tools/testing/selftests/bpf/prog_tests/sock_fields.c b/tools/testing/selftests/bpf/prog_tests/sock_fields.c index 9fc040eaa482..9d211b5c22c4 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_fields.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_fields.c @@ -1,9 +1,11 @@  // SPDX-License-Identifier: GPL-2.0  /* Copyright (c) 2019 Facebook */ +#define _GNU_SOURCE  #include <netinet/in.h>  #include <arpa/inet.h>  #include <unistd.h> +#include <sched.h>  #include <stdlib.h>  #include <string.h>  #include <errno.h> @@ -20,6 +22,7 @@  enum bpf_linum_array_idx {  	EGRESS_LINUM_IDX,  	INGRESS_LINUM_IDX, +	READ_SK_DST_PORT_LINUM_IDX,  	__NR_BPF_LINUM_ARRAY_IDX,  }; @@ -42,8 +45,16 @@ static __u64 child_cg_id;  static int linum_map_fd;  static __u32 duration; -static __u32 egress_linum_idx = EGRESS_LINUM_IDX; -static __u32 ingress_linum_idx = INGRESS_LINUM_IDX; +static bool create_netns(void) +{ +	if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) +		return false; + +	if (!ASSERT_OK(system("ip link set dev lo up"), "bring up lo")) +		return false; + +	return true; +}  static void print_sk(const struct bpf_sock *sk, const char *prefix)  { @@ -91,19 +102,24 @@ static void check_result(void)  {  	struct bpf_tcp_sock srv_tp, cli_tp, listen_tp;  	struct bpf_sock srv_sk, cli_sk, listen_sk; -	__u32 ingress_linum, egress_linum; +	__u32 idx, ingress_linum, egress_linum, linum;  	int err; -	err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx, -				  &egress_linum); +	idx = EGRESS_LINUM_IDX; +	err = bpf_map_lookup_elem(linum_map_fd, &idx, &egress_linum);  	CHECK(err < 0, "bpf_map_lookup_elem(linum_map_fd)",  	      "err:%d errno:%d\n", err, errno); -	err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx, -				  
&ingress_linum); +	idx = INGRESS_LINUM_IDX; +	err = bpf_map_lookup_elem(linum_map_fd, &idx, &ingress_linum);  	CHECK(err < 0, "bpf_map_lookup_elem(linum_map_fd)",  	      "err:%d errno:%d\n", err, errno); +	idx = READ_SK_DST_PORT_LINUM_IDX; +	err = bpf_map_lookup_elem(linum_map_fd, &idx, &linum); +	ASSERT_OK(err, "bpf_map_lookup_elem(linum_map_fd, READ_SK_DST_PORT_IDX)"); +	ASSERT_EQ(linum, 0, "failure in read_sk_dst_port on line"); +  	memcpy(&srv_sk, &skel->bss->srv_sk, sizeof(srv_sk));  	memcpy(&srv_tp, &skel->bss->srv_tp, sizeof(srv_tp));  	memcpy(&cli_sk, &skel->bss->cli_sk, sizeof(cli_sk)); @@ -262,7 +278,7 @@ static void test(void)  	char buf[DATA_LEN];  	/* Prepare listen_fd */ -	listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); +	listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0xcafe, 0);  	/* start_server() has logged the error details */  	if (CHECK_FAIL(listen_fd == -1))  		goto done; @@ -330,8 +346,12 @@ done:  void serial_test_sock_fields(void)  { -	struct bpf_link *egress_link = NULL, *ingress_link = NULL;  	int parent_cg_fd = -1, child_cg_fd = -1; +	struct bpf_link *link; + +	/* Use a dedicated netns to have a fixed listen port */ +	if (!create_netns()) +		return;  	/* Create a cgroup, get fd, and join it */  	parent_cg_fd = test__join_cgroup(PARENT_CGROUP); @@ -352,15 +372,20 @@ void serial_test_sock_fields(void)  	if (CHECK(!skel, "test_sock_fields__open_and_load", "failed\n"))  		goto done; -	egress_link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields, -						 child_cg_fd); -	if (!ASSERT_OK_PTR(egress_link, "attach_cgroup(egress)")) +	link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields, child_cg_fd); +	if (!ASSERT_OK_PTR(link, "attach_cgroup(egress_read_sock_fields)")) +		goto done; +	skel->links.egress_read_sock_fields = link; + +	link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields, child_cg_fd); +	if (!ASSERT_OK_PTR(link, "attach_cgroup(ingress_read_sock_fields)"))  		goto done; +	skel->links.ingress_read_sock_fields = link; -	ingress_link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields, -						  child_cg_fd); -	if (!ASSERT_OK_PTR(ingress_link, "attach_cgroup(ingress)")) +	link = bpf_program__attach_cgroup(skel->progs.read_sk_dst_port, child_cg_fd); +	if (!ASSERT_OK_PTR(link, "attach_cgroup(read_sk_dst_port"))  		goto done; +	skel->links.read_sk_dst_port = link;  	linum_map_fd = bpf_map__fd(skel->maps.linum_map);  	sk_pkt_out_cnt_fd = bpf_map__fd(skel->maps.sk_pkt_out_cnt); @@ -369,8 +394,7 @@ void serial_test_sock_fields(void)  	test();  done: -	bpf_link__destroy(egress_link); -	bpf_link__destroy(ingress_link); +	test_sock_fields__detach(skel);  	test_sock_fields__destroy(skel);  	if (child_cg_fd >= 0)  		close(child_cg_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index 85db0f4cdd95..cec5c0882372 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -8,6 +8,7 @@  #include "test_sockmap_update.skel.h"  #include "test_sockmap_invalid_update.skel.h"  #include "test_sockmap_skb_verdict_attach.skel.h" +#include "test_sockmap_progs_query.skel.h"  #include "bpf_iter_sockmap.skel.h"  #define TCP_REPAIR		19	/* TCP sock is under repair right now */ @@ -139,12 +140,16 @@ out:  static void test_sockmap_update(enum bpf_map_type map_type)  { -	struct bpf_prog_test_run_attr tattr;  	int err, prog, src, duration = 0;  	struct 
test_sockmap_update *skel;  	struct bpf_map *dst_map;  	const __u32 zero = 0;  	char dummy[14] = {0}; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = dummy, +		.data_size_in = sizeof(dummy), +		.repeat = 1, +	);  	__s64 sk;  	sk = connected_socket_v4(); @@ -166,16 +171,10 @@ static void test_sockmap_update(enum bpf_map_type map_type)  	if (CHECK(err, "update_elem(src)", "errno=%u\n", errno))  		goto out; -	tattr = (struct bpf_prog_test_run_attr){ -		.prog_fd = prog, -		.repeat = 1, -		.data_in = dummy, -		.data_size_in = sizeof(dummy), -	}; - -	err = bpf_prog_test_run_xattr(&tattr); -	if (CHECK_ATTR(err || !tattr.retval, "bpf_prog_test_run", -		       "errno=%u retval=%u\n", errno, tattr.retval)) +	err = bpf_prog_test_run_opts(prog, &topts); +	if (!ASSERT_OK(err, "test_run")) +		goto out; +	if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))  		goto out;  	compare_cookies(skel->maps.src, dst_map); @@ -315,6 +314,63 @@ out:  	test_sockmap_skb_verdict_attach__destroy(skel);  } +static __u32 query_prog_id(int prog_fd) +{ +	struct bpf_prog_info info = {}; +	__u32 info_len = sizeof(info); +	int err; + +	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); +	if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd") || +	    !ASSERT_EQ(info_len, sizeof(info), "bpf_obj_get_info_by_fd")) +		return 0; + +	return info.id; +} + +static void test_sockmap_progs_query(enum bpf_attach_type attach_type) +{ +	struct test_sockmap_progs_query *skel; +	int err, map_fd, verdict_fd; +	__u32 attach_flags = 0; +	__u32 prog_ids[3] = {}; +	__u32 prog_cnt = 3; + +	skel = test_sockmap_progs_query__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "test_sockmap_progs_query__open_and_load")) +		return; + +	map_fd = bpf_map__fd(skel->maps.sock_map); + +	if (attach_type == BPF_SK_MSG_VERDICT) +		verdict_fd = bpf_program__fd(skel->progs.prog_skmsg_verdict); +	else +		verdict_fd = bpf_program__fd(skel->progs.prog_skb_verdict); + +	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */, +			     &attach_flags, prog_ids, &prog_cnt); +	ASSERT_OK(err, "bpf_prog_query failed"); +	ASSERT_EQ(attach_flags,  0, "wrong attach_flags on query"); +	ASSERT_EQ(prog_cnt, 0, "wrong program count on query"); + +	err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0); +	if (!ASSERT_OK(err, "bpf_prog_attach failed")) +		goto out; + +	prog_cnt = 1; +	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */, +			     &attach_flags, prog_ids, &prog_cnt); +	ASSERT_OK(err, "bpf_prog_query failed"); +	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query"); +	ASSERT_EQ(prog_cnt, 1, "wrong program count on query"); +	ASSERT_EQ(prog_ids[0], query_prog_id(verdict_fd), +		  "wrong prog_ids on query"); + +	bpf_prog_detach2(verdict_fd, map_fd, attach_type); +out: +	test_sockmap_progs_query__destroy(skel); +} +  void test_sockmap_basic(void)  {  	if (test__start_subtest("sockmap create_update_free")) @@ -341,4 +397,12 @@ void test_sockmap_basic(void)  		test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,  						BPF_SK_SKB_VERDICT);  	} +	if (test__start_subtest("sockmap msg_verdict progs query")) +		test_sockmap_progs_query(BPF_SK_MSG_VERDICT); +	if (test__start_subtest("sockmap stream_parser progs query")) +		test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER); +	if (test__start_subtest("sockmap stream_verdict progs query")) +		test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT); +	if (test__start_subtest("sockmap skb_verdict progs query")) +		test_sockmap_progs_query(BPF_SK_SKB_VERDICT);  } diff --git 
a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 7e21bfab6358..2cf0c7a3fe23 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -1413,14 +1413,12 @@ close_srv1:  static void test_ops_cleanup(const struct bpf_map *map)  { -	const struct bpf_map_def *def;  	int err, mapfd;  	u32 key; -	def = bpf_map__def(map);  	mapfd = bpf_map__fd(map); -	for (key = 0; key < def->max_entries; key++) { +	for (key = 0; key < bpf_map__max_entries(map); key++) {  		err = bpf_map_delete_elem(mapfd, &key);  		if (err && errno != EINVAL && errno != ENOENT)  			FAIL_ERRNO("map_delete: expected EINVAL/ENOENT"); @@ -1443,13 +1441,13 @@ static const char *family_str(sa_family_t family)  static const char *map_type_str(const struct bpf_map *map)  { -	const struct bpf_map_def *def; +	int type; -	def = bpf_map__def(map); -	if (IS_ERR(def)) +	if (!map)  		return "invalid"; +	type = bpf_map__type(map); -	switch (def->type) { +	switch (type) {  	case BPF_MAP_TYPE_SOCKMAP:  		return "sockmap";  	case BPF_MAP_TYPE_SOCKHASH: diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c index 4b937e5dbaca..30a99d2ed5c6 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c @@ -173,11 +173,11 @@ static int getsetsockopt(void)  	}  	memset(&buf, 0, sizeof(buf)); -	buf.zc.address = 12345; /* rejected by BPF */ +	buf.zc.address = 12345; /* Not page aligned. Rejected by tcp_zerocopy_receive() */  	optlen = sizeof(buf.zc);  	errno = 0;  	err = getsockopt(fd, SOL_TCP, TCP_ZEROCOPY_RECEIVE, &buf, &optlen); -	if (errno != EPERM) { +	if (errno != EINVAL) {  		log_err("Unexpected getsockopt(TCP_ZEROCOPY_RECEIVE) err=%d errno=%d",  			err, errno);  		goto err; diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c index 6307f5d2b417..8e329eaee6d7 100644 --- a/tools/testing/selftests/bpf/prog_tests/spinlock.c +++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c @@ -4,14 +4,16 @@  static void *spin_lock_thread(void *arg)  { -	__u32 duration, retval;  	int err, prog_fd = *(u32 *) arg; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 10000, +	); -	err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4), -				NULL, NULL, &retval, &duration); -	CHECK(err || retval, "", -	      "err %d errno %d retval %d duration %d\n", -	      err, errno, retval, duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "test_run"); +	ASSERT_OK(topts.retval, "test_run retval");  	pthread_exit(arg);  } diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c index 0a91d8d9954b..f45a1d7b0a28 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c @@ -42,7 +42,7 @@ retry:  		return;  	/* override program type */ -	bpf_program__set_perf_event(skel->progs.oncpu); +	bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);  	err = test_stacktrace_build_id__load(skel);  	if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err)) diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c 
b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c new file mode 100644 index 000000000000..1932b1e0685c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "stacktrace_map_skip.skel.h" + +#define TEST_STACK_DEPTH  2 + +void test_stacktrace_map_skip(void) +{ +	struct stacktrace_map_skip *skel; +	int stackid_hmap_fd, stackmap_fd, stack_amap_fd; +	int err, stack_trace_len; + +	skel = stacktrace_map_skip__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) +		return; + +	/* find map fds */ +	stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap); +	if (!ASSERT_GE(stackid_hmap_fd, 0, "stackid_hmap fd")) +		goto out; + +	stackmap_fd = bpf_map__fd(skel->maps.stackmap); +	if (!ASSERT_GE(stackmap_fd, 0, "stackmap fd")) +		goto out; + +	stack_amap_fd = bpf_map__fd(skel->maps.stack_amap); +	if (!ASSERT_GE(stack_amap_fd, 0, "stack_amap fd")) +		goto out; + +	skel->bss->pid = getpid(); + +	err = stacktrace_map_skip__attach(skel); +	if (!ASSERT_OK(err, "skel_attach")) +		goto out; + +	/* give some time for bpf program run */ +	sleep(1); + +	/* disable stack trace collection */ +	skel->bss->control = 1; + +	/* for every element in stackid_hmap, we can find a corresponding one +	 * in stackmap, and vise versa. +	 */ +	err = compare_map_keys(stackid_hmap_fd, stackmap_fd); +	if (!ASSERT_OK(err, "compare_map_keys stackid_hmap vs. stackmap")) +		goto out; + +	err = compare_map_keys(stackmap_fd, stackid_hmap_fd); +	if (!ASSERT_OK(err, "compare_map_keys stackmap vs. stackid_hmap")) +		goto out; + +	stack_trace_len = TEST_STACK_DEPTH * sizeof(__u64); +	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len); +	if (!ASSERT_OK(err, "compare_stack_ips stackmap vs. 
stack_amap")) +		goto out; + +	if (!ASSERT_EQ(skel->bss->failed, 0, "skip_failed")) +		goto out; + +out: +	stacktrace_map_skip__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/subprogs.c b/tools/testing/selftests/bpf/prog_tests/subprogs.c index 3f3d2ac4dd57..903f35a9e62e 100644 --- a/tools/testing/selftests/bpf/prog_tests/subprogs.c +++ b/tools/testing/selftests/bpf/prog_tests/subprogs.c @@ -1,32 +1,83 @@  // SPDX-License-Identifier: GPL-2.0  /* Copyright (c) 2020 Facebook */  #include <test_progs.h> -#include <time.h>  #include "test_subprogs.skel.h"  #include "test_subprogs_unused.skel.h" -static int duration; +struct toggler_ctx { +	int fd; +	bool stop; +}; -void test_subprogs(void) +static void *toggle_jit_harden(void *arg) +{ +	struct toggler_ctx *ctx = arg; +	char two = '2'; +	char zero = '0'; + +	while (!ctx->stop) { +		lseek(ctx->fd, SEEK_SET, 0); +		write(ctx->fd, &two, sizeof(two)); +		lseek(ctx->fd, SEEK_SET, 0); +		write(ctx->fd, &zero, sizeof(zero)); +	} + +	return NULL; +} + +static void test_subprogs_with_jit_harden_toggling(void) +{ +	struct toggler_ctx ctx; +	pthread_t toggler; +	int err; +	unsigned int i, loop = 10; + +	ctx.fd = open("/proc/sys/net/core/bpf_jit_harden", O_RDWR); +	if (!ASSERT_GE(ctx.fd, 0, "open bpf_jit_harden")) +		return; + +	ctx.stop = false; +	err = pthread_create(&toggler, NULL, toggle_jit_harden, &ctx); +	if (!ASSERT_OK(err, "new toggler")) +		goto out; + +	/* Make toggler thread to run */ +	usleep(1); + +	for (i = 0; i < loop; i++) { +		struct test_subprogs *skel = test_subprogs__open_and_load(); + +		if (!ASSERT_OK_PTR(skel, "skel open")) +			break; +		test_subprogs__destroy(skel); +	} + +	ctx.stop = true; +	pthread_join(toggler, NULL); +out: +	close(ctx.fd); +} + +static void test_subprogs_alone(void)  {  	struct test_subprogs *skel;  	struct test_subprogs_unused *skel2;  	int err;  	skel = test_subprogs__open_and_load(); -	if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) +	if (!ASSERT_OK_PTR(skel, "skel_open"))  		return;  	err = test_subprogs__attach(skel); -	if (CHECK(err, "skel_attach", "failed to attach skeleton: %d\n", err)) +	if (!ASSERT_OK(err, "skel attach"))  		goto cleanup;  	usleep(1); -	CHECK(skel->bss->res1 != 12, "res1", "got %d, exp %d\n", skel->bss->res1, 12); -	CHECK(skel->bss->res2 != 17, "res2", "got %d, exp %d\n", skel->bss->res2, 17); -	CHECK(skel->bss->res3 != 19, "res3", "got %d, exp %d\n", skel->bss->res3, 19); -	CHECK(skel->bss->res4 != 36, "res4", "got %d, exp %d\n", skel->bss->res4, 36); +	ASSERT_EQ(skel->bss->res1, 12, "res1"); +	ASSERT_EQ(skel->bss->res2, 17, "res2"); +	ASSERT_EQ(skel->bss->res3, 19, "res3"); +	ASSERT_EQ(skel->bss->res4, 36, "res4");  	skel2 = test_subprogs_unused__open_and_load();  	ASSERT_OK_PTR(skel2, "unused_progs_skel"); @@ -35,3 +86,11 @@ void test_subprogs(void)  cleanup:  	test_subprogs__destroy(skel);  } + +void test_subprogs(void) +{ +	if (test__start_subtest("subprogs_alone")) +		test_subprogs_alone(); +	if (test__start_subtest("subprogs_and_jit_harden")) +		test_subprogs_with_jit_harden_toggling(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/subskeleton.c b/tools/testing/selftests/bpf/prog_tests/subskeleton.c new file mode 100644 index 000000000000..9c31b7004f9c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/subskeleton.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#include <test_progs.h> +#include "test_subskeleton.skel.h" +#include "test_subskeleton_lib.subskel.h" + +static void subskeleton_lib_setup(struct bpf_object *obj) +{ +	struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj); + +	if (!ASSERT_OK_PTR(lib, "open subskeleton")) +		return; + +	*lib->rodata.var1 = 1; +	*lib->data.var2 = 2; +	lib->bss.var3->var3_1 = 3; +	lib->bss.var3->var3_2 = 4; + +	test_subskeleton_lib__destroy(lib); +} + +static int subskeleton_lib_subresult(struct bpf_object *obj) +{ +	struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj); +	int result; + +	if (!ASSERT_OK_PTR(lib, "open subskeleton")) +		return -EINVAL; + +	result = *lib->bss.libout1; +	ASSERT_EQ(result, 1 + 2 + 3 + 4 + 5 + 6, "lib subresult"); + +	ASSERT_OK_PTR(lib->progs.lib_perf_handler, "lib_perf_handler"); +	ASSERT_STREQ(bpf_program__name(lib->progs.lib_perf_handler), +		     "lib_perf_handler", "program name"); + +	ASSERT_OK_PTR(lib->maps.map1, "map1"); +	ASSERT_STREQ(bpf_map__name(lib->maps.map1), "map1", "map name"); + +	ASSERT_EQ(*lib->data.var5, 5, "__weak var5"); +	ASSERT_EQ(*lib->data.var6, 6, "extern var6"); +	ASSERT_TRUE(*lib->kconfig.CONFIG_BPF_SYSCALL, "CONFIG_BPF_SYSCALL"); + +	test_subskeleton_lib__destroy(lib); +	return result; +} + +void test_subskeleton(void) +{ +	int err, result; +	struct test_subskeleton *skel; + +	skel = test_subskeleton__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		return; + +	skel->rodata->rovar1 = 10; +	skel->rodata->var1 = 1; +	subskeleton_lib_setup(skel->obj); + +	err = test_subskeleton__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	err = test_subskeleton__attach(skel); +	if (!ASSERT_OK(err, "skel_attach")) +		goto cleanup; + +	/* trigger tracepoint */ +	usleep(1); + +	result = subskeleton_lib_subresult(skel->obj) * 10; +	ASSERT_EQ(skel->bss->out1, result, "unexpected calculation"); + +cleanup: +	test_subskeleton__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/syscall.c b/tools/testing/selftests/bpf/prog_tests/syscall.c index 81e997a69f7a..f4d40001155a 100644 --- a/tools/testing/selftests/bpf/prog_tests/syscall.c +++ b/tools/testing/selftests/bpf/prog_tests/syscall.c @@ -20,20 +20,20 @@ void test_syscall(void)  		.log_buf = (uintptr_t) verifier_log,  		.log_size = sizeof(verifier_log),  	}; -	struct bpf_prog_test_run_attr tattr = { +	LIBBPF_OPTS(bpf_test_run_opts, tattr,  		.ctx_in = &ctx,  		.ctx_size_in = sizeof(ctx), -	}; +	);  	struct syscall *skel = NULL;  	__u64 key = 12, value = 0; -	int err; +	int err, prog_fd;  	skel = syscall__open_and_load();  	if (!ASSERT_OK_PTR(skel, "skel_load"))  		goto cleanup; -	tattr.prog_fd = bpf_program__fd(skel->progs.bpf_prog); -	err = bpf_prog_test_run_xattr(&tattr); +	prog_fd = bpf_program__fd(skel->progs.bpf_prog); +	err = bpf_prog_test_run_opts(prog_fd, &tattr);  	ASSERT_EQ(err, 0, "err");  	ASSERT_EQ(tattr.retval, 1, "retval");  	ASSERT_GT(ctx.map_fd, 0, "ctx.map_fd"); diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c index 5dc0f425bd11..c4da87ec3ba4 100644 --- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -12,9 +12,13 @@ static void test_tailcall_1(void)  	struct bpf_map *prog_array;  	struct bpf_program *prog;  	struct bpf_object *obj; -	__u32 retval, duration;  	char prog_name[32];  	char buff[128] = {}; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = buff, +		.data_size_in = sizeof(buff), +		
.repeat = 1, +	);  	err = bpf_prog_test_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj,  			    &prog_fd); @@ -37,7 +41,7 @@ static void test_tailcall_1(void)  	if (CHECK_FAIL(map_fd < 0))  		goto out; -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);  		prog = bpf_object__find_program_by_name(obj, prog_name); @@ -53,23 +57,21 @@ static void test_tailcall_1(void)  			goto out;  	} -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { -		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -					&duration, &retval, NULL); -		CHECK(err || retval != i, "tailcall", -		      "err %d errno %d retval %d\n", err, errno, retval); +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) { +		err = bpf_prog_test_run_opts(main_fd, &topts); +		ASSERT_OK(err, "tailcall"); +		ASSERT_EQ(topts.retval, i, "tailcall retval");  		err = bpf_map_delete_elem(map_fd, &i);  		if (CHECK_FAIL(err))  			goto out;  	} -	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_EQ(topts.retval, 3, "tailcall retval"); -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);  		prog = bpf_object__find_program_by_name(obj, prog_name); @@ -85,13 +87,12 @@ static void test_tailcall_1(void)  			goto out;  	} -	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_OK(topts.retval, "tailcall retval"); -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { -		j = bpf_map__def(prog_array)->max_entries - 1 - i; +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) { +		j = bpf_map__max_entries(prog_array) - 1 - i;  		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);  		prog = bpf_object__find_program_by_name(obj, prog_name); @@ -107,33 +108,30 @@ static void test_tailcall_1(void)  			goto out;  	} -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { -		j = bpf_map__def(prog_array)->max_entries - 1 - i; +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) { +		j = bpf_map__max_entries(prog_array) - 1 - i; -		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -					&duration, &retval, NULL); -		CHECK(err || retval != j, "tailcall", -		      "err %d errno %d retval %d\n", err, errno, retval); +		err = bpf_prog_test_run_opts(main_fd, &topts); +		ASSERT_OK(err, "tailcall"); +		ASSERT_EQ(topts.retval, j, "tailcall retval");  		err = bpf_map_delete_elem(map_fd, &i);  		if (CHECK_FAIL(err))  			goto out;  	} -	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_EQ(topts.retval, 3, "tailcall retval"); -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		err = 
bpf_map_delete_elem(map_fd, &i);  		if (CHECK_FAIL(err >= 0 || errno != ENOENT))  			goto out; -		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -					&duration, &retval, NULL); -		CHECK(err || retval != 3, "tailcall", -		      "err %d errno %d retval %d\n", err, errno, retval); +		err = bpf_prog_test_run_opts(main_fd, &topts); +		ASSERT_OK(err, "tailcall"); +		ASSERT_EQ(topts.retval, 3, "tailcall retval");  	}  out: @@ -150,9 +148,13 @@ static void test_tailcall_2(void)  	struct bpf_map *prog_array;  	struct bpf_program *prog;  	struct bpf_object *obj; -	__u32 retval, duration;  	char prog_name[32];  	char buff[128] = {}; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = buff, +		.data_size_in = sizeof(buff), +		.repeat = 1, +	);  	err = bpf_prog_test_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj,  			    &prog_fd); @@ -175,7 +177,7 @@ static void test_tailcall_2(void)  	if (CHECK_FAIL(map_fd < 0))  		goto out; -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);  		prog = bpf_object__find_program_by_name(obj, prog_name); @@ -191,30 +193,27 @@ static void test_tailcall_2(void)  			goto out;  	} -	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != 2, "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_EQ(topts.retval, 2, "tailcall retval");  	i = 2;  	err = bpf_map_delete_elem(map_fd, &i);  	if (CHECK_FAIL(err))  		goto out; -	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_EQ(topts.retval, 1, "tailcall retval");  	i = 0;  	err = bpf_map_delete_elem(map_fd, &i);  	if (CHECK_FAIL(err))  		goto out; -	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_EQ(topts.retval, 3, "tailcall retval");  out:  	bpf_object__close(obj);  } @@ -225,8 +224,12 @@ static void test_tailcall_count(const char *which)  	struct bpf_map *prog_array, *data_map;  	struct bpf_program *prog;  	struct bpf_object *obj; -	__u32 retval, duration;  	char buff[128] = {}; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = buff, +		.data_size_in = sizeof(buff), +		.repeat = 1, +	);  	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,  			    &prog_fd); @@ -262,10 +265,9 @@ static void test_tailcall_count(const char *which)  	if (CHECK_FAIL(err))  		goto out; -	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_EQ(topts.retval, 1, "tailcall retval");  	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");  	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) @@ -277,18 +279,17 @@ static void test_tailcall_count(const char *which)  	i = 0;  	err = bpf_map_lookup_elem(data_fd, &i, &val); -	CHECK(err || 
val != 33, "tailcall count", "err %d errno %d count %d\n", -	      err, errno, val); +	ASSERT_OK(err, "tailcall count"); +	ASSERT_EQ(val, 33, "tailcall count");  	i = 0;  	err = bpf_map_delete_elem(map_fd, &i);  	if (CHECK_FAIL(err))  		goto out; -	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_OK(topts.retval, "tailcall retval");  out:  	bpf_object__close(obj);  } @@ -319,10 +320,14 @@ static void test_tailcall_4(void)  	struct bpf_map *prog_array, *data_map;  	struct bpf_program *prog;  	struct bpf_object *obj; -	__u32 retval, duration;  	static const int zero = 0;  	char buff[128] = {};  	char prog_name[32]; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = buff, +		.data_size_in = sizeof(buff), +		.repeat = 1, +	);  	err = bpf_prog_test_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj,  			    &prog_fd); @@ -353,7 +358,7 @@ static void test_tailcall_4(void)  	if (CHECK_FAIL(map_fd < 0))  		return; -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);  		prog = bpf_object__find_program_by_name(obj, prog_name); @@ -369,18 +374,17 @@ static void test_tailcall_4(void)  			goto out;  	} -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);  		if (CHECK_FAIL(err))  			goto out; -		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -					&duration, &retval, NULL); -		CHECK(err || retval != i, "tailcall", -		      "err %d errno %d retval %d\n", err, errno, retval); +		err = bpf_prog_test_run_opts(main_fd, &topts); +		ASSERT_OK(err, "tailcall"); +		ASSERT_EQ(topts.retval, i, "tailcall retval");  	} -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);  		if (CHECK_FAIL(err))  			goto out; @@ -389,10 +393,9 @@ static void test_tailcall_4(void)  		if (CHECK_FAIL(err))  			goto out; -		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -					&duration, &retval, NULL); -		CHECK(err || retval != 3, "tailcall", -		      "err %d errno %d retval %d\n", err, errno, retval); +		err = bpf_prog_test_run_opts(main_fd, &topts); +		ASSERT_OK(err, "tailcall"); +		ASSERT_EQ(topts.retval, 3, "tailcall retval");  	}  out:  	bpf_object__close(obj); @@ -407,10 +410,14 @@ static void test_tailcall_5(void)  	struct bpf_map *prog_array, *data_map;  	struct bpf_program *prog;  	struct bpf_object *obj; -	__u32 retval, duration;  	static const int zero = 0;  	char buff[128] = {};  	char prog_name[32]; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = buff, +		.data_size_in = sizeof(buff), +		.repeat = 1, +	);  	err = bpf_prog_test_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj,  			    &prog_fd); @@ -441,7 +448,7 @@ static void test_tailcall_5(void)  	if (CHECK_FAIL(map_fd < 0))  		return; -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);  		prog = bpf_object__find_program_by_name(obj, prog_name); @@ -457,18 +464,17 @@ static void test_tailcall_5(void) 
 			goto out;  	} -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);  		if (CHECK_FAIL(err))  			goto out; -		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -					&duration, &retval, NULL); -		CHECK(err || retval != i, "tailcall", -		      "err %d errno %d retval %d\n", err, errno, retval); +		err = bpf_prog_test_run_opts(main_fd, &topts); +		ASSERT_OK(err, "tailcall"); +		ASSERT_EQ(topts.retval, i, "tailcall retval");  	} -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);  		if (CHECK_FAIL(err))  			goto out; @@ -477,10 +483,9 @@ static void test_tailcall_5(void)  		if (CHECK_FAIL(err))  			goto out; -		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -					&duration, &retval, NULL); -		CHECK(err || retval != 3, "tailcall", -		      "err %d errno %d retval %d\n", err, errno, retval); +		err = bpf_prog_test_run_opts(main_fd, &topts); +		ASSERT_OK(err, "tailcall"); +		ASSERT_EQ(topts.retval, 3, "tailcall retval");  	}  out:  	bpf_object__close(obj); @@ -495,8 +500,12 @@ static void test_tailcall_bpf2bpf_1(void)  	struct bpf_map *prog_array;  	struct bpf_program *prog;  	struct bpf_object *obj; -	__u32 retval, duration;  	char prog_name[32]; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	);  	err = bpf_prog_test_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS,  			    &obj, &prog_fd); @@ -520,7 +529,7 @@ static void test_tailcall_bpf2bpf_1(void)  		goto out;  	/* nop -> jmp */ -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);  		prog = bpf_object__find_program_by_name(obj, prog_name); @@ -536,10 +545,9 @@ static void test_tailcall_bpf2bpf_1(void)  			goto out;  	} -	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, -				0, &retval, &duration); -	CHECK(err || retval != 1, "tailcall", -	      "err %d errno %d retval %d\n", err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_EQ(topts.retval, 1, "tailcall retval");  	/* jmp -> nop, call subprog that will do tailcall */  	i = 1; @@ -547,10 +555,9 @@ static void test_tailcall_bpf2bpf_1(void)  	if (CHECK_FAIL(err))  		goto out; -	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, -				0, &retval, &duration); -	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_OK(topts.retval, "tailcall retval");  	/* make sure that subprog can access ctx and entry prog that  	 * called this subprog can properly return @@ -560,11 +567,9 @@ static void test_tailcall_bpf2bpf_1(void)  	if (CHECK_FAIL(err))  		goto out; -	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, -				0, &retval, &duration); -	CHECK(err || retval != sizeof(pkt_v4) * 2, -	      "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");  out:  	bpf_object__close(obj);  } @@ -579,8 +584,12 @@ static void 
test_tailcall_bpf2bpf_2(void)  	struct bpf_map *prog_array, *data_map;  	struct bpf_program *prog;  	struct bpf_object *obj; -	__u32 retval, duration;  	char buff[128] = {}; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = buff, +		.data_size_in = sizeof(buff), +		.repeat = 1, +	);  	err = bpf_prog_test_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS,  			    &obj, &prog_fd); @@ -616,10 +625,9 @@ static void test_tailcall_bpf2bpf_2(void)  	if (CHECK_FAIL(err))  		goto out; -	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_EQ(topts.retval, 1, "tailcall retval");  	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");  	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) @@ -631,18 +639,17 @@ static void test_tailcall_bpf2bpf_2(void)  	i = 0;  	err = bpf_map_lookup_elem(data_fd, &i, &val); -	CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n", -	      err, errno, val); +	ASSERT_OK(err, "tailcall count"); +	ASSERT_EQ(val, 33, "tailcall count");  	i = 0;  	err = bpf_map_delete_elem(map_fd, &i);  	if (CHECK_FAIL(err))  		goto out; -	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_OK(topts.retval, "tailcall retval");  out:  	bpf_object__close(obj);  } @@ -657,8 +664,12 @@ static void test_tailcall_bpf2bpf_3(void)  	struct bpf_map *prog_array;  	struct bpf_program *prog;  	struct bpf_object *obj; -	__u32 retval, duration;  	char prog_name[32]; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	);  	err = bpf_prog_test_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS,  			    &obj, &prog_fd); @@ -681,7 +692,7 @@ static void test_tailcall_bpf2bpf_3(void)  	if (CHECK_FAIL(map_fd < 0))  		goto out; -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);  		prog = bpf_object__find_program_by_name(obj, prog_name); @@ -697,33 +708,27 @@ static void test_tailcall_bpf2bpf_3(void)  			goto out;  	} -	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != sizeof(pkt_v4) * 3, -	      "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");  	i = 1;  	err = bpf_map_delete_elem(map_fd, &i);  	if (CHECK_FAIL(err))  		goto out; -	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != sizeof(pkt_v4), -	      "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");  	i = 0;  	err = bpf_map_delete_elem(map_fd, &i);  	if (CHECK_FAIL(err))  		goto out; -	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != 
sizeof(pkt_v4) * 2, -	      "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");  out:  	bpf_object__close(obj);  } @@ -754,8 +759,12 @@ static void test_tailcall_bpf2bpf_4(bool noise)  	struct bpf_map *prog_array, *data_map;  	struct bpf_program *prog;  	struct bpf_object *obj; -	__u32 retval, duration;  	char prog_name[32]; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	);  	err = bpf_prog_test_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS,  			    &obj, &prog_fd); @@ -778,7 +787,7 @@ static void test_tailcall_bpf2bpf_4(bool noise)  	if (CHECK_FAIL(map_fd < 0))  		goto out; -	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { +	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {  		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);  		prog = bpf_object__find_program_by_name(obj, prog_name); @@ -809,15 +818,14 @@ static void test_tailcall_bpf2bpf_4(bool noise)  	if (CHECK_FAIL(err))  		goto out; -	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, -				&duration, &retval, NULL); -	CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n", -	      err, errno, retval); +	err = bpf_prog_test_run_opts(main_fd, &topts); +	ASSERT_OK(err, "tailcall"); +	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");  	i = 0;  	err = bpf_map_lookup_elem(data_fd, &i, &val); -	CHECK(err || val.count != 31, "tailcall count", "err %d errno %d count %d\n", -	      err, errno, val.count); +	ASSERT_OK(err, "tailcall count"); +	ASSERT_EQ(val.count, 31, "tailcall count");  out:  	bpf_object__close(obj); diff --git a/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c index 37c20b5ffa70..61935e7e056a 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c +++ b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c @@ -3,18 +3,22 @@  #include <test_progs.h>  #include "test_task_pt_regs.skel.h" +/* uprobe attach point */ +static void trigger_func(void) +{ +	asm volatile (""); +} +  void test_task_pt_regs(void)  {  	struct test_task_pt_regs *skel;  	struct bpf_link *uprobe_link; -	size_t uprobe_offset; -	ssize_t base_addr; +	ssize_t uprobe_offset;  	bool match; -	base_addr = get_base_addr(); -	if (!ASSERT_GT(base_addr, 0, "get_base_addr")) +	uprobe_offset = get_uprobe_offset(&trigger_func); +	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))  		return; -	uprobe_offset = get_uprobe_offset(&get_base_addr, base_addr);  	skel = test_task_pt_regs__open_and_load();  	if (!ASSERT_OK_PTR(skel, "skel_open")) @@ -32,7 +36,7 @@ void test_task_pt_regs(void)  	skel->links.handle_uprobe = uprobe_link;  	/* trigger & validate uprobe */ -	get_base_addr(); +	trigger_func();  	if (!ASSERT_EQ(skel->bss->uprobe_res, 1, "check_uprobe_res"))  		goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index c2426df58e17..7ad66a247c02 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -10,17 +10,15 @@   * to drop unexpected traffic.   
*/ -#define _GNU_SOURCE -  #include <arpa/inet.h>  #include <linux/if.h>  #include <linux/if_tun.h>  #include <linux/limits.h>  #include <linux/sysctl.h> -#include <sched.h> +#include <linux/time_types.h> +#include <linux/net_tstamp.h>  #include <stdbool.h>  #include <stdio.h> -#include <sys/mount.h>  #include <sys/stat.h>  #include <unistd.h> @@ -29,6 +27,11 @@  #include "test_tc_neigh_fib.skel.h"  #include "test_tc_neigh.skel.h"  #include "test_tc_peer.skel.h" +#include "test_tc_dtime.skel.h" + +#ifndef TCP_TX_DELAY +#define TCP_TX_DELAY 37 +#endif  #define NS_SRC "ns_src"  #define NS_FWD "ns_fwd" @@ -61,6 +64,7 @@  #define CHK_PROG_PIN_FILE "/sys/fs/bpf/test_tc_chk"  #define TIMEOUT_MILLIS 10000 +#define NSEC_PER_SEC 1000000000ULL  #define log_err(MSG, ...) \  	fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ @@ -84,91 +88,6 @@ static int write_file(const char *path, const char *newval)  	return 0;  } -struct nstoken { -	int orig_netns_fd; -}; - -static int setns_by_fd(int nsfd) -{ -	int err; - -	err = setns(nsfd, CLONE_NEWNET); -	close(nsfd); - -	if (!ASSERT_OK(err, "setns")) -		return err; - -	/* Switch /sys to the new namespace so that e.g. /sys/class/net -	 * reflects the devices in the new namespace. -	 */ -	err = unshare(CLONE_NEWNS); -	if (!ASSERT_OK(err, "unshare")) -		return err; - -	/* Make our /sys mount private, so the following umount won't -	 * trigger the global umount in case it's shared. -	 */ -	err = mount("none", "/sys", NULL, MS_PRIVATE, NULL); -	if (!ASSERT_OK(err, "remount private /sys")) -		return err; - -	err = umount2("/sys", MNT_DETACH); -	if (!ASSERT_OK(err, "umount2 /sys")) -		return err; - -	err = mount("sysfs", "/sys", "sysfs", 0, NULL); -	if (!ASSERT_OK(err, "mount /sys")) -		return err; - -	err = mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL); -	if (!ASSERT_OK(err, "mount /sys/fs/bpf")) -		return err; - -	return 0; -} - -/** - * open_netns() - Switch to specified network namespace by name. - * - * Returns token with which to restore the original namespace - * using close_netns(). 
- */ -static struct nstoken *open_netns(const char *name) -{ -	int nsfd; -	char nspath[PATH_MAX]; -	int err; -	struct nstoken *token; - -	token = malloc(sizeof(struct nstoken)); -	if (!ASSERT_OK_PTR(token, "malloc token")) -		return NULL; - -	token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY); -	if (!ASSERT_GE(token->orig_netns_fd, 0, "open /proc/self/ns/net")) -		goto fail; - -	snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name); -	nsfd = open(nspath, O_RDONLY | O_CLOEXEC); -	if (!ASSERT_GE(nsfd, 0, "open netns fd")) -		goto fail; - -	err = setns_by_fd(nsfd); -	if (!ASSERT_OK(err, "setns_by_fd")) -		goto fail; - -	return token; -fail: -	free(token); -	return NULL; -} - -static void close_netns(struct nstoken *token) -{ -	ASSERT_OK(setns_by_fd(token->orig_netns_fd), "setns_by_fd"); -	free(token); -} -  static int netns_setup_namespaces(const char *verb)  {  	const char * const *ns = namespaces; @@ -440,6 +359,431 @@ static int set_forwarding(bool enable)  	return 0;  } +static void rcv_tstamp(int fd, const char *expected, size_t s) +{ +	struct __kernel_timespec pkt_ts = {}; +	char ctl[CMSG_SPACE(sizeof(pkt_ts))]; +	struct timespec now_ts; +	struct msghdr msg = {}; +	__u64 now_ns, pkt_ns; +	struct cmsghdr *cmsg; +	struct iovec iov; +	char data[32]; +	int ret; + +	iov.iov_base = data; +	iov.iov_len = sizeof(data); +	msg.msg_iov = &iov; +	msg.msg_iovlen = 1; +	msg.msg_control = &ctl; +	msg.msg_controllen = sizeof(ctl); + +	ret = recvmsg(fd, &msg, 0); +	if (!ASSERT_EQ(ret, s, "recvmsg")) +		return; +	ASSERT_STRNEQ(data, expected, s, "expected rcv data"); + +	cmsg = CMSG_FIRSTHDR(&msg); +	if (cmsg && cmsg->cmsg_level == SOL_SOCKET && +	    cmsg->cmsg_type == SO_TIMESTAMPNS_NEW) +		memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts)); + +	pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec; +	ASSERT_NEQ(pkt_ns, 0, "pkt rcv tstamp"); + +	ret = clock_gettime(CLOCK_REALTIME, &now_ts); +	ASSERT_OK(ret, "clock_gettime"); +	now_ns = now_ts.tv_sec * NSEC_PER_SEC + now_ts.tv_nsec; + +	if (ASSERT_GE(now_ns, pkt_ns, "check rcv tstamp")) +		ASSERT_LT(now_ns - pkt_ns, 5 * NSEC_PER_SEC, +			  "check rcv tstamp"); +} + +static void snd_tstamp(int fd, char *b, size_t s) +{ +	struct sock_txtime opt = { .clockid = CLOCK_TAI }; +	char ctl[CMSG_SPACE(sizeof(__u64))]; +	struct timespec now_ts; +	struct msghdr msg = {}; +	struct cmsghdr *cmsg; +	struct iovec iov; +	__u64 now_ns; +	int ret; + +	ret = clock_gettime(CLOCK_TAI, &now_ts); +	ASSERT_OK(ret, "clock_get_time(CLOCK_TAI)"); +	now_ns = now_ts.tv_sec * NSEC_PER_SEC + now_ts.tv_nsec; + +	iov.iov_base = b; +	iov.iov_len = s; +	msg.msg_iov = &iov; +	msg.msg_iovlen = 1; +	msg.msg_control = &ctl; +	msg.msg_controllen = sizeof(ctl); + +	cmsg = CMSG_FIRSTHDR(&msg); +	cmsg->cmsg_level = SOL_SOCKET; +	cmsg->cmsg_type = SCM_TXTIME; +	cmsg->cmsg_len = CMSG_LEN(sizeof(now_ns)); +	*(__u64 *)CMSG_DATA(cmsg) = now_ns; + +	ret = setsockopt(fd, SOL_SOCKET, SO_TXTIME, &opt, sizeof(opt)); +	ASSERT_OK(ret, "setsockopt(SO_TXTIME)"); + +	ret = sendmsg(fd, &msg, 0); +	ASSERT_EQ(ret, s, "sendmsg"); +} + +static void test_inet_dtime(int family, int type, const char *addr, __u16 port) +{ +	int opt = 1, accept_fd = -1, client_fd = -1, listen_fd, err; +	char buf[] = "testing testing"; +	struct nstoken *nstoken; + +	nstoken = open_netns(NS_DST); +	if (!ASSERT_OK_PTR(nstoken, "setns dst")) +		return; +	listen_fd = start_server(family, type, addr, port, 0); +	close_netns(nstoken); + +	if (!ASSERT_GE(listen_fd, 0, "listen")) +		return; + +	/* Ensure the kernel puts the 
(rcv) timestamp for all skb */ +	err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, +			 &opt, sizeof(opt)); +	if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)")) +		goto done; + +	if (type == SOCK_STREAM) { +		/* Ensure the kernel set EDT when sending out rst/ack +		 * from the kernel's ctl_sk. +		 */ +		err = setsockopt(listen_fd, SOL_TCP, TCP_TX_DELAY, &opt, +				 sizeof(opt)); +		if (!ASSERT_OK(err, "setsockopt(TCP_TX_DELAY)")) +			goto done; +	} + +	nstoken = open_netns(NS_SRC); +	if (!ASSERT_OK_PTR(nstoken, "setns src")) +		goto done; +	client_fd = connect_to_fd(listen_fd, TIMEOUT_MILLIS); +	close_netns(nstoken); + +	if (!ASSERT_GE(client_fd, 0, "connect_to_fd")) +		goto done; + +	if (type == SOCK_STREAM) { +		int n; + +		accept_fd = accept(listen_fd, NULL, NULL); +		if (!ASSERT_GE(accept_fd, 0, "accept")) +			goto done; + +		n = write(client_fd, buf, sizeof(buf)); +		if (!ASSERT_EQ(n, sizeof(buf), "send to server")) +			goto done; +		rcv_tstamp(accept_fd, buf, sizeof(buf)); +	} else { +		snd_tstamp(client_fd, buf, sizeof(buf)); +		rcv_tstamp(listen_fd, buf, sizeof(buf)); +	} + +done: +	close(listen_fd); +	if (accept_fd != -1) +		close(accept_fd); +	if (client_fd != -1) +		close(client_fd); +} + +static int netns_load_dtime_bpf(struct test_tc_dtime *skel) +{ +	struct nstoken *nstoken; + +#define PIN_FNAME(__file) "/sys/fs/bpf/" #__file +#define PIN(__prog) ({							\ +		int err = bpf_program__pin(skel->progs.__prog, PIN_FNAME(__prog)); \ +		if (!ASSERT_OK(err, "pin " #__prog))		\ +			goto fail;					\ +		}) + +	/* setup ns_src tc progs */ +	nstoken = open_netns(NS_SRC); +	if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC)) +		return -1; +	PIN(egress_host); +	PIN(ingress_host); +	SYS("tc qdisc add dev veth_src clsact"); +	SYS("tc filter add dev veth_src ingress bpf da object-pinned " +	    PIN_FNAME(ingress_host)); +	SYS("tc filter add dev veth_src egress bpf da object-pinned " +	    PIN_FNAME(egress_host)); +	close_netns(nstoken); + +	/* setup ns_dst tc progs */ +	nstoken = open_netns(NS_DST); +	if (!ASSERT_OK_PTR(nstoken, "setns " NS_DST)) +		return -1; +	PIN(egress_host); +	PIN(ingress_host); +	SYS("tc qdisc add dev veth_dst clsact"); +	SYS("tc filter add dev veth_dst ingress bpf da object-pinned " +	    PIN_FNAME(ingress_host)); +	SYS("tc filter add dev veth_dst egress bpf da object-pinned " +	    PIN_FNAME(egress_host)); +	close_netns(nstoken); + +	/* setup ns_fwd tc progs */ +	nstoken = open_netns(NS_FWD); +	if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD)) +		return -1; +	PIN(ingress_fwdns_prio100); +	PIN(egress_fwdns_prio100); +	PIN(ingress_fwdns_prio101); +	PIN(egress_fwdns_prio101); +	SYS("tc qdisc add dev veth_dst_fwd clsact"); +	SYS("tc filter add dev veth_dst_fwd ingress prio 100 bpf da object-pinned " +	    PIN_FNAME(ingress_fwdns_prio100)); +	SYS("tc filter add dev veth_dst_fwd ingress prio 101 bpf da object-pinned " +	    PIN_FNAME(ingress_fwdns_prio101)); +	SYS("tc filter add dev veth_dst_fwd egress prio 100 bpf da object-pinned " +	    PIN_FNAME(egress_fwdns_prio100)); +	SYS("tc filter add dev veth_dst_fwd egress prio 101 bpf da object-pinned " +	    PIN_FNAME(egress_fwdns_prio101)); +	SYS("tc qdisc add dev veth_src_fwd clsact"); +	SYS("tc filter add dev veth_src_fwd ingress prio 100 bpf da object-pinned " +	    PIN_FNAME(ingress_fwdns_prio100)); +	SYS("tc filter add dev veth_src_fwd ingress prio 101 bpf da object-pinned " +	    PIN_FNAME(ingress_fwdns_prio101)); +	SYS("tc filter add dev veth_src_fwd egress prio 100 bpf da object-pinned " +	    
PIN_FNAME(egress_fwdns_prio100));
+	SYS("tc filter add dev veth_src_fwd egress prio 101 bpf da object-pinned "
+	    PIN_FNAME(egress_fwdns_prio101));
+	close_netns(nstoken);
+
+#undef PIN
+
+	return 0;
+
+fail:
+	close_netns(nstoken);
+	return -1;
+}
+
+enum {
+	INGRESS_FWDNS_P100,
+	INGRESS_FWDNS_P101,
+	EGRESS_FWDNS_P100,
+	EGRESS_FWDNS_P101,
+	INGRESS_ENDHOST,
+	EGRESS_ENDHOST,
+	SET_DTIME,
+	__MAX_CNT,
+};
+
+const char *cnt_names[] = {
+	"ingress_fwdns_p100",
+	"ingress_fwdns_p101",
+	"egress_fwdns_p100",
+	"egress_fwdns_p101",
+	"ingress_endhost",
+	"egress_endhost",
+	"set_dtime",
+};
+
+enum {
+	TCP_IP6_CLEAR_DTIME,
+	TCP_IP4,
+	TCP_IP6,
+	UDP_IP4,
+	UDP_IP6,
+	TCP_IP4_RT_FWD,
+	TCP_IP6_RT_FWD,
+	UDP_IP4_RT_FWD,
+	UDP_IP6_RT_FWD,
+	UKN_TEST,
+	__NR_TESTS,
+};
+
+const char *test_names[] = {
+	"tcp ip6 clear dtime",
+	"tcp ip4",
+	"tcp ip6",
+	"udp ip4",
+	"udp ip6",
+	"tcp ip4 rt fwd",
+	"tcp ip6 rt fwd",
+	"udp ip4 rt fwd",
+	"udp ip6 rt fwd",
+};
+
+static const char *dtime_cnt_str(int test, int cnt)
+{
+	static char name[64];
+
+	snprintf(name, sizeof(name), "%s %s", test_names[test], cnt_names[cnt]);
+
+	return name;
+}
+
+static const char *dtime_err_str(int test, int cnt)
+{
+	static char name[64];
+
+	snprintf(name, sizeof(name), "%s %s errs", test_names[test],
+		 cnt_names[cnt]);
+
+	return name;
+}
+
+static void test_tcp_clear_dtime(struct test_tc_dtime *skel)
+{
+	int i, t = TCP_IP6_CLEAR_DTIME;
+	__u32 *dtimes = skel->bss->dtimes[t];
+	__u32 *errs = skel->bss->errs[t];
+
+	skel->bss->test = t;
+	test_inet_dtime(AF_INET6, SOCK_STREAM, IP6_DST, 0);
+
+	ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
+		  dtime_cnt_str(t, INGRESS_FWDNS_P100));
+	ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0,
+		  dtime_cnt_str(t, INGRESS_FWDNS_P101));
+	ASSERT_GT(dtimes[EGRESS_FWDNS_P100], 0,
+		  dtime_cnt_str(t, EGRESS_FWDNS_P100));
+	ASSERT_EQ(dtimes[EGRESS_FWDNS_P101], 0,
+		  dtime_cnt_str(t, EGRESS_FWDNS_P101));
+	ASSERT_GT(dtimes[EGRESS_ENDHOST], 0,
+		  dtime_cnt_str(t, EGRESS_ENDHOST));
+	ASSERT_GT(dtimes[INGRESS_ENDHOST], 0,
+		  dtime_cnt_str(t, INGRESS_ENDHOST));
+
+	for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++)
+		ASSERT_EQ(errs[i], 0, dtime_err_str(t, i));
+}
+
+static void test_tcp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd)
+{
+	__u32 *dtimes, *errs;
+	const char *addr;
+	int i, t;
+
+	if (family == AF_INET) {
+		t = bpf_fwd ? TCP_IP4 : TCP_IP4_RT_FWD;
+		addr = IP4_DST;
+	} else {
+		t = bpf_fwd ? TCP_IP6 : TCP_IP6_RT_FWD;
+		addr = IP6_DST;
+	}
+
+	dtimes = skel->bss->dtimes[t];
+	errs = skel->bss->errs[t];
+
+	skel->bss->test = t;
+	test_inet_dtime(family, SOCK_STREAM, addr, 0);
+
+	/* fwdns_prio100 prog does not read delivery_time_type, so
+	 * kernel puts the (rcv) timestamp in __sk_buff->tstamp
+	 */
+	ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
+		  dtime_cnt_str(t, INGRESS_FWDNS_P100));
+	for (i = INGRESS_FWDNS_P101; i < SET_DTIME; i++)
+		ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i));
+
+	for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++)
+		ASSERT_EQ(errs[i], 0, dtime_err_str(t, i));
+}
+
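The dtimes[][] and errs[][] arrays checked above live in the BSS of test_tc_dtime.bpf.c, which this hunk does not show. A rough sketch of the counting pattern those asserts depend on follows; the section name, the bare skb->tstamp test, and the map-less global counters are illustrative assumptions, not the actual program:

/* Hypothetical sketch only -- not the real test_tc_dtime.bpf.c. */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

#define NR_TESTS	10	/* mirrors __NR_TESTS in the prog_test */
#define MAX_CNT		7	/* mirrors __MAX_CNT */
#define EGRESS_ENDHOST	5

volatile int test;			/* selected via skel->bss->test */
__u32 dtimes[NR_TESTS][MAX_CNT];	/* read back by the ASSERT_* calls */

SEC("tc")
int egress_host(struct __sk_buff *skb)
{
	/* Count skbs that reach host egress carrying a delivery time. */
	if (skb->tstamp)
		__sync_fetch_and_add(&dtimes[test][EGRESS_ENDHOST], 1);
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";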
+static void test_udp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd)
+{
+	__u32 *dtimes, *errs;
+	const char *addr;
+	int i, t;
+
+	if (family == AF_INET) {
+		t = bpf_fwd ? UDP_IP4 : UDP_IP4_RT_FWD;
+		addr = IP4_DST;
+	} else {
+		t = bpf_fwd ? UDP_IP6 : UDP_IP6_RT_FWD;
+		addr = IP6_DST;
+	}
+
+	dtimes = skel->bss->dtimes[t];
+	errs = skel->bss->errs[t];
+
+	skel->bss->test = t;
+	test_inet_dtime(family, SOCK_DGRAM, addr, 0);
+
+	ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
+		  dtime_cnt_str(t, INGRESS_FWDNS_P100));
+	/* non-mono delivery time is not forwarded */
+	ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0,
+		  dtime_cnt_str(t, INGRESS_FWDNS_P101));
+	for (i = EGRESS_FWDNS_P100; i < SET_DTIME; i++)
+		ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i));
+
+	for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++)
+		ASSERT_EQ(errs[i], 0, dtime_err_str(t, i));
+}
+
+static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)
+{
+	struct test_tc_dtime *skel;
+	struct nstoken *nstoken;
+	int err;
+
+	skel = test_tc_dtime__open();
+	if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open"))
+		return;
+
+	skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
+	skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+
+	err = test_tc_dtime__load(skel);
+	if (!ASSERT_OK(err, "test_tc_dtime__load"))
+		goto done;
+
+	if (netns_load_dtime_bpf(skel))
+		goto done;
+
+	nstoken = open_netns(NS_FWD);
+	if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+		goto done;
+	err = set_forwarding(false);
+	close_netns(nstoken);
+	if (!ASSERT_OK(err, "disable forwarding"))
+		goto done;
+
+	test_tcp_clear_dtime(skel);
+
+	test_tcp_dtime(skel, AF_INET, true);
+	test_tcp_dtime(skel, AF_INET6, true);
+	test_udp_dtime(skel, AF_INET, true);
+	test_udp_dtime(skel, AF_INET6, true);
+
+	/* Test the kernel ip[6]_forward path instead
+	 * of bpf_redirect_neigh().
+	 */
+	nstoken = open_netns(NS_FWD);
+	if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+		goto done;
+	err = set_forwarding(true);
+	close_netns(nstoken);
+	if (!ASSERT_OK(err, "enable forwarding"))
+		goto done;
+
+	test_tcp_dtime(skel, AF_INET, false);
+	test_tcp_dtime(skel, AF_INET6, false);
+	test_udp_dtime(skel, AF_INET, false);
+	test_udp_dtime(skel, AF_INET6, false);
+
+done:
+	test_tc_dtime__destroy(skel);
+}
+
 static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result)
 {
 	struct nstoken *nstoken = NULL;
@@ -787,6 +1131,7 @@ static void *test_tc_redirect_run_tests(void *arg)
 	RUN_TEST(tc_redirect_peer_l3);
 	RUN_TEST(tc_redirect_neigh);
 	RUN_TEST(tc_redirect_neigh_fib);
+	RUN_TEST(tc_redirect_dtime);
 	return NULL;
 }
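test_tc_redirect_dtime() runs the same TCP/UDP matrix twice: once with forwarding disabled in NS_FWD, where the pinned tc programs must move packets between the veth pairs themselves, and once over the kernel ip[6]_forward path. A minimal sketch of the BPF-forwarding flavor, reusing the IFINDEX_DST rodata knob the test already sets (the program and section names are assumptions):

/* Hypothetical sketch of forwarding with bpf_redirect_neigh(). */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

volatile const __u32 IFINDEX_DST;	/* filled in via skel->rodata before load */

SEC("tc")
int fwd_dst(struct __sk_buff *skb)
{
	/* Let the neighbour subsystem resolve and fill in the L2 header,
	 * bypassing the kernel's ip[6]_forward path entirely.
	 */
	return bpf_redirect_neigh(IFINDEX_DST, NULL, 0, 0);
}

char _license[] SEC("license") = "GPL";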
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c
new file mode 100644
index 000000000000..c381faaae741
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2022 Sony Group Corporation */
+#include <sys/prctl.h>
+#include <test_progs.h>
+#include "bpf_syscall_macro.skel.h"
+
+void test_bpf_syscall_macro(void)
+{
+	struct bpf_syscall_macro *skel = NULL;
+	int err;
+	int exp_arg1 = 1001;
+	unsigned long exp_arg2 = 12;
+	unsigned long exp_arg3 = 13;
+	unsigned long exp_arg4 = 14;
+	unsigned long exp_arg5 = 15;
+
+	/* check whether it can open program */
+	skel = bpf_syscall_macro__open();
+	if (!ASSERT_OK_PTR(skel, "bpf_syscall_macro__open"))
+		return;
+
+	skel->rodata->filter_pid = getpid();
+
+	/* check whether it can load program */
+	err = bpf_syscall_macro__load(skel);
+	if (!ASSERT_OK(err, "bpf_syscall_macro__load"))
+		goto cleanup;
+
+	/* check whether it can attach kprobe */
+	err = bpf_syscall_macro__attach(skel);
+	if (!ASSERT_OK(err, "bpf_syscall_macro__attach"))
+		goto cleanup;
+
+	/* check whether args of syscall are copied correctly */
+	prctl(exp_arg1, exp_arg2, exp_arg3, exp_arg4, exp_arg5);
+#if defined(__aarch64__) || defined(__s390__)
+	ASSERT_NEQ(skel->bss->arg1, exp_arg1, "syscall_arg1");
+#else
+	ASSERT_EQ(skel->bss->arg1, exp_arg1, "syscall_arg1");
+#endif
+	ASSERT_EQ(skel->bss->arg2, exp_arg2, "syscall_arg2");
+	ASSERT_EQ(skel->bss->arg3, exp_arg3, "syscall_arg3");
+	/* it cannot copy arg4 when using PT_REGS_PARM4 on x86_64 */
+#ifdef __x86_64__
+	ASSERT_NEQ(skel->bss->arg4_cx, exp_arg4, "syscall_arg4_from_cx");
+#else
+	ASSERT_EQ(skel->bss->arg4_cx, exp_arg4, "syscall_arg4_from_cx");
+#endif
+	ASSERT_EQ(skel->bss->arg4, exp_arg4, "syscall_arg4");
+	ASSERT_EQ(skel->bss->arg5, exp_arg5, "syscall_arg5");
+
+	/* check whether args of syscall are copied correctly for CORE variants */
+	ASSERT_EQ(skel->bss->arg1_core, exp_arg1, "syscall_arg1_core_variant");
+	ASSERT_EQ(skel->bss->arg2_core, exp_arg2, "syscall_arg2_core_variant");
+	ASSERT_EQ(skel->bss->arg3_core, exp_arg3, "syscall_arg3_core_variant");
+	/* it cannot copy arg4 when using PT_REGS_PARM4_CORE on x86_64 */
+#ifdef __x86_64__
+	ASSERT_NEQ(skel->bss->arg4_core_cx, exp_arg4, "syscall_arg4_from_cx_core_variant");
+#else
+	ASSERT_EQ(skel->bss->arg4_core_cx, exp_arg4, "syscall_arg4_from_cx_core_variant");
+#endif
+	ASSERT_EQ(skel->bss->arg4_core, exp_arg4, "syscall_arg4_core_variant");
+	ASSERT_EQ(skel->bss->arg5_core, exp_arg5, "syscall_arg5_core_variant");
+
+	ASSERT_EQ(skel->bss->option_syscall, exp_arg1, "BPF_KPROBE_SYSCALL_option");
+	ASSERT_EQ(skel->bss->arg2_syscall, exp_arg2, "BPF_KPROBE_SYSCALL_arg2");
+	ASSERT_EQ(skel->bss->arg3_syscall, exp_arg3, "BPF_KPROBE_SYSCALL_arg3");
+	ASSERT_EQ(skel->bss->arg4_syscall, exp_arg4, "BPF_KPROBE_SYSCALL_arg4");
+	ASSERT_EQ(skel->bss->arg5_syscall, exp_arg5, "BPF_KPROBE_SYSCALL_arg5");
+
+cleanup:
+	bpf_syscall_macro__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c
index 97d8a6f84f4a..b13feceb38f1 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_ima.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c
@@ -13,14 +13,17 @@
 
 #include "ima.skel.h"
 
-static int run_measured_process(const char *measured_dir, u32 *monitored_pid)
+#define MAX_SAMPLES 4
+
+static int _run_measured_process(const char *measured_dir, u32 *monitored_pid,
+				 const char *cmd)
 {
 	int child_pid, child_status;
 
 	child_pid = fork();
 	if (child_pid == 0) {
 		*monitored_pid = getpid();
-		execlp("./ima_setup.sh", "./ima_setup.sh", "run", measured_dir,
+		execlp("./ima_setup.sh", "./ima_setup.sh", cmd, measured_dir,
 		       NULL);
 		exit(errno);
@@ -32,19 +35,39 @@ static int run_measured_process(const char *measured_dir, u32 *monitored_pid)
 	return -EINVAL;
 }
 
-static u64 ima_hash_from_bpf;
+static int run_measured_process(const char *measured_dir, u32 *monitored_pid)
+{
+	return _run_measured_process(measured_dir, monitored_pid, "run");
+}
+
+static u64 ima_hash_from_bpf[MAX_SAMPLES];
+static int ima_hash_from_bpf_idx;
 
 static int process_sample(void *ctx, void *data, size_t len)
 {
-	ima_hash_from_bpf = *((u64 *)data);
+	if (ima_hash_from_bpf_idx >= MAX_SAMPLES)
+		return -ENOSPC;
+
+	ima_hash_from_bpf[ima_hash_from_bpf_idx++] = *((u64 *)data);
 	return 0;
 }
 
+static void test_init(struct ima__bss *bss)
+{
+	ima_hash_from_bpf_idx = 0;
+
+	bss->use_ima_file_hash = false;
+	bss->enable_bprm_creds_for_exec = false;
+	bss->enable_kernel_read_file = false;
+	bss->test_deny = 
false; +} +  void test_test_ima(void)  {  	char measured_dir_template[] = "/tmp/ima_measuredXXXXXX";  	struct ring_buffer *ringbuf = NULL;  	const char *measured_dir; +	u64 bin_true_sample;  	char cmd[256];  	int err, duration = 0; @@ -72,13 +95,127 @@ void test_test_ima(void)  	if (CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno))  		goto close_clean; +	/* +	 * Test #1 +	 * - Goal: obtain a sample with the bpf_ima_inode_hash() helper +	 * - Expected result:  1 sample (/bin/true) +	 */ +	test_init(skel->bss);  	err = run_measured_process(measured_dir, &skel->bss->monitored_pid); -	if (CHECK(err, "run_measured_process", "err = %d\n", err)) +	if (CHECK(err, "run_measured_process #1", "err = %d\n", err))  		goto close_clean;  	err = ring_buffer__consume(ringbuf);  	ASSERT_EQ(err, 1, "num_samples_or_err"); -	ASSERT_NEQ(ima_hash_from_bpf, 0, "ima_hash"); +	ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + +	/* +	 * Test #2 +	 * - Goal: obtain samples with the bpf_ima_file_hash() helper +	 * - Expected result: 2 samples (./ima_setup.sh, /bin/true) +	 */ +	test_init(skel->bss); +	skel->bss->use_ima_file_hash = true; +	err = run_measured_process(measured_dir, &skel->bss->monitored_pid); +	if (CHECK(err, "run_measured_process #2", "err = %d\n", err)) +		goto close_clean; + +	err = ring_buffer__consume(ringbuf); +	ASSERT_EQ(err, 2, "num_samples_or_err"); +	ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); +	ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); +	bin_true_sample = ima_hash_from_bpf[1]; + +	/* +	 * Test #3 +	 * - Goal: confirm that bpf_ima_inode_hash() returns a non-fresh digest +	 * - Expected result: 2 samples (/bin/true: non-fresh, fresh) +	 */ +	test_init(skel->bss); + +	err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, +				    "modify-bin"); +	if (CHECK(err, "modify-bin #3", "err = %d\n", err)) +		goto close_clean; + +	skel->bss->enable_bprm_creds_for_exec = true; +	err = run_measured_process(measured_dir, &skel->bss->monitored_pid); +	if (CHECK(err, "run_measured_process #3", "err = %d\n", err)) +		goto close_clean; + +	err = ring_buffer__consume(ringbuf); +	ASSERT_EQ(err, 2, "num_samples_or_err"); +	ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); +	ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); +	ASSERT_EQ(ima_hash_from_bpf[0], bin_true_sample, "sample_equal_or_err"); +	/* IMA refreshed the digest. 
*/ +	ASSERT_NEQ(ima_hash_from_bpf[1], bin_true_sample, +		   "sample_different_or_err"); + +	/* +	 * Test #4 +	 * - Goal: verify that bpf_ima_file_hash() returns a fresh digest +	 * - Expected result: 4 samples (./ima_setup.sh: fresh, fresh; +	 *                               /bin/true: fresh, fresh) +	 */ +	test_init(skel->bss); +	skel->bss->use_ima_file_hash = true; +	skel->bss->enable_bprm_creds_for_exec = true; +	err = run_measured_process(measured_dir, &skel->bss->monitored_pid); +	if (CHECK(err, "run_measured_process #4", "err = %d\n", err)) +		goto close_clean; + +	err = ring_buffer__consume(ringbuf); +	ASSERT_EQ(err, 4, "num_samples_or_err"); +	ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); +	ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); +	ASSERT_NEQ(ima_hash_from_bpf[2], 0, "ima_hash"); +	ASSERT_NEQ(ima_hash_from_bpf[3], 0, "ima_hash"); +	ASSERT_NEQ(ima_hash_from_bpf[2], bin_true_sample, +		   "sample_different_or_err"); +	ASSERT_EQ(ima_hash_from_bpf[3], ima_hash_from_bpf[2], +		  "sample_equal_or_err"); + +	skel->bss->use_ima_file_hash = false; +	skel->bss->enable_bprm_creds_for_exec = false; +	err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, +				    "restore-bin"); +	if (CHECK(err, "restore-bin #3", "err = %d\n", err)) +		goto close_clean; + +	/* +	 * Test #5 +	 * - Goal: obtain a sample from the kernel_read_file hook +	 * - Expected result: 2 samples (./ima_setup.sh, policy_test) +	 */ +	test_init(skel->bss); +	skel->bss->use_ima_file_hash = true; +	skel->bss->enable_kernel_read_file = true; +	err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, +				    "load-policy"); +	if (CHECK(err, "run_measured_process #5", "err = %d\n", err)) +		goto close_clean; + +	err = ring_buffer__consume(ringbuf); +	ASSERT_EQ(err, 2, "num_samples_or_err"); +	ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); +	ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + +	/* +	 * Test #6 +	 * - Goal: ensure that the kernel_read_file hook denies an operation +	 * - Expected result: 0 samples +	 */ +	test_init(skel->bss); +	skel->bss->enable_kernel_read_file = true; +	skel->bss->test_deny = true; +	err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, +				    "load-policy"); +	if (CHECK(!err, "run_measured_process #6", "err = %d\n", err)) +		goto close_clean; + +	err = ring_buffer__consume(ringbuf); +	ASSERT_EQ(err, 0, "num_samples_or_err");  close_clean:  	snprintf(cmd, sizeof(cmd), "./ima_setup.sh cleanup %s", measured_dir); diff --git a/tools/testing/selftests/bpf/prog_tests/test_profiler.c b/tools/testing/selftests/bpf/prog_tests/test_profiler.c index 4ca275101ee0..de24e8f0e738 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_profiler.c +++ b/tools/testing/selftests/bpf/prog_tests/test_profiler.c @@ -8,20 +8,20 @@  static int sanity_run(struct bpf_program *prog)  { -	struct bpf_prog_test_run_attr test_attr = {}; +	LIBBPF_OPTS(bpf_test_run_opts, test_attr);  	__u64 args[] = {1, 2, 3}; -	__u32 duration = 0;  	int err, prog_fd;  	prog_fd = bpf_program__fd(prog); -	test_attr.prog_fd = prog_fd;  	test_attr.ctx_in = args;  	test_attr.ctx_size_in = sizeof(args); -	err = bpf_prog_test_run_xattr(&test_attr); -	if (CHECK(err || test_attr.retval, "test_run", -		  "err %d errno %d retval %d duration %d\n", -		  err, errno, test_attr.retval, duration)) +	err = bpf_prog_test_run_opts(prog_fd, &test_attr); +	if (!ASSERT_OK(err, "test_run")) +		return -1; + +	if (!ASSERT_OK(test_attr.retval, "test_run retval"))  		return -1; +  	return 0;  } diff --git 
a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c index cf1215531920..ae93411fd582 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c +++ b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c @@ -6,15 +6,18 @@  static int sanity_run(struct bpf_program *prog)  { -	__u32 duration, retval;  	int err, prog_fd; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	);  	prog_fd = bpf_program__fd(prog); -	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), -				NULL, NULL, &retval, &duration); -	if (CHECK(err || retval != 123, "test_run", -		  "err %d errno %d retval %d duration %d\n", -		  err, errno, retval, duration)) +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	if (!ASSERT_OK(err, "test_run")) +		return -1; +	if (!ASSERT_EQ(topts.retval, 123, "test_run retval"))  		return -1;  	return 0;  } diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c index 0f4e49e622cd..7eb049214859 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer.c +++ b/tools/testing/selftests/bpf/prog_tests/timer.c @@ -6,7 +6,7 @@  static int timer(struct timer *timer_skel)  {  	int err, prog_fd; -	__u32 duration = 0, retval; +	LIBBPF_OPTS(bpf_test_run_opts, topts);  	err = timer__attach(timer_skel);  	if (!ASSERT_OK(err, "timer_attach")) @@ -16,10 +16,9 @@ static int timer(struct timer *timer_skel)  	ASSERT_EQ(timer_skel->data->callback2_check, 52, "callback2_check1");  	prog_fd = bpf_program__fd(timer_skel->progs.test1); -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "test_run"); -	ASSERT_EQ(retval, 0, "test_run"); +	ASSERT_EQ(topts.retval, 0, "test_run");  	timer__detach(timer_skel);  	usleep(50); /* 10 usecs should be enough, but give it extra */ diff --git a/tools/testing/selftests/bpf/prog_tests/timer_mim.c b/tools/testing/selftests/bpf/prog_tests/timer_mim.c index 949a0617869d..2ee5f5ae11d4 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer_mim.c +++ b/tools/testing/selftests/bpf/prog_tests/timer_mim.c @@ -6,19 +6,18 @@  static int timer_mim(struct timer_mim *timer_skel)  { -	__u32 duration = 0, retval;  	__u64 cnt1, cnt2;  	int err, prog_fd, key1 = 1; +	LIBBPF_OPTS(bpf_test_run_opts, topts);  	err = timer_mim__attach(timer_skel);  	if (!ASSERT_OK(err, "timer_attach"))  		return err;  	prog_fd = bpf_program__fd(timer_skel->progs.test1); -	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, -				NULL, NULL, &retval, &duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "test_run"); -	ASSERT_EQ(retval, 0, "test_run"); +	ASSERT_EQ(topts.retval, 0, "test_run");  	timer_mim__detach(timer_skel);  	/* check that timer_cb[12] are incrementing 'cnt' */ diff --git a/tools/testing/selftests/bpf/prog_tests/trace_ext.c b/tools/testing/selftests/bpf/prog_tests/trace_ext.c index 924441d4362d..aabdff7bea3e 100644 --- a/tools/testing/selftests/bpf/prog_tests/trace_ext.c +++ b/tools/testing/selftests/bpf/prog_tests/trace_ext.c @@ -23,8 +23,12 @@ void test_trace_ext(void)  	int err, pkt_fd, ext_fd;  	struct bpf_program *prog;  	char buf[100]; -	__u32 retval;  	__u64 len; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.repeat = 1, +	);  	/* open/load/attach test_pkt_md_access */  	skel_pkt = test_pkt_md_access__open_and_load(); @@ 
-77,32 +81,32 @@ void test_trace_ext(void)  	/* load/attach tracing */  	err = test_trace_ext_tracing__load(skel_trace); -	if (CHECK(err, "setup", "tracing/test_pkt_md_access_new load failed\n")) { +	if (!ASSERT_OK(err, "tracing/test_pkt_md_access_new load")) {  		libbpf_strerror(err, buf, sizeof(buf));  		fprintf(stderr, "%s\n", buf);  		goto cleanup;  	}  	err = test_trace_ext_tracing__attach(skel_trace); -	if (CHECK(err, "setup", "tracing/test_pkt_md_access_new attach failed: %d\n", err)) +	if (!ASSERT_OK(err, "tracing/test_pkt_md_access_new attach"))  		goto cleanup;  	/* trigger the test */ -	err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4), -				NULL, NULL, &retval, &duration); -	CHECK(err || retval, "run", "err %d errno %d retval %d\n", err, errno, retval); +	err = bpf_prog_test_run_opts(pkt_fd, &topts); +	ASSERT_OK(err, "test_run_opts err"); +	ASSERT_OK(topts.retval, "test_run_opts retval");  	bss_ext = skel_ext->bss;  	bss_trace = skel_trace->bss;  	len = bss_ext->ext_called; -	CHECK(bss_ext->ext_called == 0, -		"check", "failed to trigger freplace/test_pkt_md_access\n"); -	CHECK(bss_trace->fentry_called != len, -		"check", "failed to trigger fentry/test_pkt_md_access_new\n"); -	CHECK(bss_trace->fexit_called != len, -		"check", "failed to trigger fexit/test_pkt_md_access_new\n"); +	ASSERT_NEQ(bss_ext->ext_called, 0, +		  "failed to trigger freplace/test_pkt_md_access"); +	ASSERT_EQ(bss_trace->fentry_called, len, +		  "failed to trigger fentry/test_pkt_md_access_new"); +	ASSERT_EQ(bss_trace->fexit_called, len, +		   "failed to trigger fexit/test_pkt_md_access_new");  cleanup:  	test_trace_ext_tracing__destroy(skel_trace); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c index ac65456b7ab8..ec21c53cb1da 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp.c @@ -13,8 +13,14 @@ void test_xdp(void)  	char buf[128];  	struct ipv6hdr iph6;  	struct iphdr iph; -	__u32 duration, retval, size;  	int err, prog_fd, map_fd; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		.data_in = &pkt_v4, +		.data_size_in = sizeof(pkt_v4), +		.data_out = buf, +		.data_size_out = sizeof(buf), +		.repeat = 1, +	);  	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);  	if (CHECK_FAIL(err)) @@ -26,21 +32,23 @@ void test_xdp(void)  	bpf_map_update_elem(map_fd, &key4, &value4, 0);  	bpf_map_update_elem(map_fd, &key6, &value6, 0); -	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), -				buf, &size, &retval, &duration); +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph)); -	CHECK(err || retval != XDP_TX || size != 74 || -	      iph.protocol != IPPROTO_IPIP, "ipv4", -	      "err %d errno %d retval %d size %d\n", -	      err, errno, retval, size); +	ASSERT_OK(err, "test_run"); +	ASSERT_EQ(topts.retval, XDP_TX, "ipv4 test_run retval"); +	ASSERT_EQ(topts.data_size_out, 74, "ipv4 test_run data_size_out"); +	ASSERT_EQ(iph.protocol, IPPROTO_IPIP, "ipv4 test_run iph.protocol"); -	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), -				buf, &size, &retval, &duration); +	topts.data_in = &pkt_v6; +	topts.data_size_in = sizeof(pkt_v6); +	topts.data_size_out = sizeof(buf); + +	err = bpf_prog_test_run_opts(prog_fd, &topts);  	memcpy(&iph6, buf + sizeof(struct ethhdr), sizeof(iph6)); -	CHECK(err || retval != XDP_TX || size != 114 || -	      iph6.nexthdr != IPPROTO_IPV6, "ipv6", -	      "err %d errno %d retval %d size %d\n", -	   
   err, errno, retval, size); +	ASSERT_OK(err, "test_run"); +	ASSERT_EQ(topts.retval, XDP_TX, "ipv6 test_run retval"); +	ASSERT_EQ(topts.data_size_out, 114, "ipv6 test_run data_size_out"); +	ASSERT_EQ(iph6.nexthdr, IPPROTO_IPV6, "ipv6 test_run iph6.nexthdr");  out:  	bpf_object__close(obj);  } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c new file mode 100644 index 000000000000..2f033da4cd45 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +static void test_xdp_update_frags(void) +{ +	const char *file = "./test_xdp_update_frags.o"; +	int err, prog_fd, max_skb_frags, buf_size, num; +	struct bpf_program *prog; +	struct bpf_object *obj; +	__u32 *offset; +	__u8 *buf; +	FILE *f; +	LIBBPF_OPTS(bpf_test_run_opts, topts); + +	obj = bpf_object__open(file); +	if (libbpf_get_error(obj)) +		return; + +	prog = bpf_object__next_program(obj, NULL); +	if (bpf_object__load(obj)) +		return; + +	prog_fd = bpf_program__fd(prog); + +	buf = malloc(128); +	if (!ASSERT_OK_PTR(buf, "alloc buf 128b")) +		goto out; + +	memset(buf, 0, 128); +	offset = (__u32 *)buf; +	*offset = 16; +	buf[*offset] = 0xaa;		/* marker at offset 16 (head) */ +	buf[*offset + 15] = 0xaa;	/* marker at offset 31 (head) */ + +	topts.data_in = buf; +	topts.data_out = buf; +	topts.data_size_in = 128; +	topts.data_size_out = 128; + +	err = bpf_prog_test_run_opts(prog_fd, &topts); + +	/* test_xdp_update_frags: buf[16,31]: 0xaa -> 0xbb */ +	ASSERT_OK(err, "xdp_update_frag"); +	ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval"); +	ASSERT_EQ(buf[16], 0xbb, "xdp_update_frag buf[16]"); +	ASSERT_EQ(buf[31], 0xbb, "xdp_update_frag buf[31]"); + +	free(buf); + +	buf = malloc(9000); +	if (!ASSERT_OK_PTR(buf, "alloc buf 9Kb")) +		goto out; + +	memset(buf, 0, 9000); +	offset = (__u32 *)buf; +	*offset = 5000; +	buf[*offset] = 0xaa;		/* marker at offset 5000 (frag0) */ +	buf[*offset + 15] = 0xaa;	/* marker at offset 5015 (frag0) */ + +	topts.data_in = buf; +	topts.data_out = buf; +	topts.data_size_in = 9000; +	topts.data_size_out = 9000; + +	err = bpf_prog_test_run_opts(prog_fd, &topts); + +	/* test_xdp_update_frags: buf[5000,5015]: 0xaa -> 0xbb */ +	ASSERT_OK(err, "xdp_update_frag"); +	ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval"); +	ASSERT_EQ(buf[5000], 0xbb, "xdp_update_frag buf[5000]"); +	ASSERT_EQ(buf[5015], 0xbb, "xdp_update_frag buf[5015]"); + +	memset(buf, 0, 9000); +	offset = (__u32 *)buf; +	*offset = 3510; +	buf[*offset] = 0xaa;		/* marker at offset 3510 (head) */ +	buf[*offset + 15] = 0xaa;	/* marker at offset 3525 (frag0) */ + +	err = bpf_prog_test_run_opts(prog_fd, &topts); + +	/* test_xdp_update_frags: buf[3510,3525]: 0xaa -> 0xbb */ +	ASSERT_OK(err, "xdp_update_frag"); +	ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval"); +	ASSERT_EQ(buf[3510], 0xbb, "xdp_update_frag buf[3510]"); +	ASSERT_EQ(buf[3525], 0xbb, "xdp_update_frag buf[3525]"); + +	memset(buf, 0, 9000); +	offset = (__u32 *)buf; +	*offset = 7606; +	buf[*offset] = 0xaa;		/* marker at offset 7606 (frag0) */ +	buf[*offset + 15] = 0xaa;	/* marker at offset 7621 (frag1) */ + +	err = bpf_prog_test_run_opts(prog_fd, &topts); + +	/* test_xdp_update_frags: buf[7606,7621]: 0xaa -> 0xbb */ +	ASSERT_OK(err, "xdp_update_frag"); +	ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval"); +	ASSERT_EQ(buf[7606], 0xbb, "xdp_update_frag buf[7606]"); 
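/* Editor's note on the magic offsets, assuming 4 KiB pages: the
 * bpf_prog_test_run_xdp code backs the linear area with one 4096-byte
 * page, of which XDP_PACKET_HEADROOM (256) and the skb_shared_info
 * tailroom (320) are reserved, leaving 4096 - 256 - 320 = 3520 bytes of
 * head; each further frag is a full 4096-byte page. Hence offsets
 * 0..3519 land in the head (16, 31, 3510 above), 3520..7615 in frag0
 * (3525, 5000, 5015, 7606), and 7616 onward in frag1 (7621 below).
 */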
+	ASSERT_EQ(buf[7621], 0xbb, "xdp_update_frag buf[7621]");
+
+	free(buf);
+
+	/* test_xdp_update_frags: unsupported buffer size */
+	f = fopen("/proc/sys/net/core/max_skb_frags", "r");
+	if (!ASSERT_OK_PTR(f, "max_skb_frag file pointer"))
+		goto out;
+
+	num = fscanf(f, "%d", &max_skb_frags);
+	fclose(f);
+
+	if (!ASSERT_EQ(num, 1, "max_skb_frags read failed"))
+		goto out;
+
+	/* xdp_buff linear area size is always set to 4096 in the
+	 * bpf_prog_test_run_xdp routine.
+	 */
+	buf_size = 4096 + (max_skb_frags + 1) * sysconf(_SC_PAGE_SIZE);
+	buf = malloc(buf_size);
+	if (!ASSERT_OK_PTR(buf, "alloc buf"))
+		goto out;
+
+	memset(buf, 0, buf_size);
+	offset = (__u32 *)buf;
+	*offset = 16;
+	buf[*offset] = 0xaa;
+	buf[*offset + 15] = 0xaa;
+
+	topts.data_in = buf;
+	topts.data_out = buf;
+	topts.data_size_in = buf_size;
+	topts.data_size_out = buf_size;
+
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_EQ(err, -ENOMEM,
+		  "unsupported buf size, possible non-default /proc/sys/net/core/max_skb_frags?");
+	free(buf);
+out:
+	bpf_object__close(obj);
+}
+
+void test_xdp_adjust_frags(void)
+{
+	if (test__start_subtest("xdp_adjust_frags"))
+		test_xdp_update_frags();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
index 3f5a17c38be5..21ceac24e174 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -5,28 +5,35 @@
 static void test_xdp_adjust_tail_shrink(void)
 {
 	const char *file = "./test_xdp_adjust_tail_shrink.o";
-	__u32 duration, retval, size, expect_sz;
+	__u32 expect_sz;
 	struct bpf_object *obj;
 	int err, prog_fd;
 	char buf[128];
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.data_out = buf,
+		.data_size_out = sizeof(buf),
+		.repeat = 1,
+	);
 
 	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
-	if (CHECK_FAIL(err))
+	if (!ASSERT_OK(err, "test_xdp_adjust_tail_shrink"))
 		return;
 
-	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
-				buf, &size, &retval, &duration);
-
-	CHECK(err || retval != XDP_DROP,
-	      "ipv4", "err %d errno %d retval %d size %d\n",
-	      err, errno, retval, size);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "ipv4");
+	ASSERT_EQ(topts.retval, XDP_DROP, "ipv4 retval");
 
 	expect_sz = sizeof(pkt_v6) - 20;  /* Test shrink with 20 bytes */
-	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
-				buf, &size, &retval, &duration);
-	CHECK(err || retval != XDP_TX || size != expect_sz,
-	      "ipv6", "err %d errno %d retval %d size %d expect-size %d\n",
-	      err, errno, retval, size, expect_sz);
+	topts.data_in = &pkt_v6;
+	topts.data_size_in = sizeof(pkt_v6);
+	topts.data_size_out = sizeof(buf);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "ipv6");
+	ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval");
+	ASSERT_EQ(topts.data_size_out, expect_sz, "ipv6 size");
+
 	bpf_object__close(obj);
 }
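One migration pitfall visible throughout this file: with bpf_prog_test_run_opts(), data_size_out is an in/out field. It passes the output buffer capacity in and is overwritten with the actual output length, so it has to be restored before every reuse of the same opts struct (the frags tests below comment this as "reset from previous invocation"). A small sketch of the pattern, assuming prog_fd is a loaded XDP program and pkt_v4/pkt_v6 the usual test packets from network_helpers.h:

	char buf[128];
	int err;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.data_out = buf,
		.data_size_out = sizeof(buf),	/* capacity on the way in */
	);

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	/* topts.data_size_out now holds the produced packet length */

	topts.data_in = &pkt_v6;
	topts.data_size_in = sizeof(pkt_v6);
	topts.data_size_out = sizeof(buf);	/* reset capacity before reuse */
	err = bpf_prog_test_run_opts(prog_fd, &topts);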
@@ -35,25 +42,31 @@ static void test_xdp_adjust_tail_grow(void)
 	const char *file = "./test_xdp_adjust_tail_grow.o";
 	struct bpf_object *obj;
 	char buf[4096]; /* avoid segfault: large buf to hold grow results */
-	__u32 duration, retval, size, expect_sz;
+	__u32 expect_sz;
 	int err, prog_fd;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.data_out = buf,
+		.data_size_out = sizeof(buf),
+		.repeat = 1,
+	);
 
 	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
-	if (CHECK_FAIL(err))
+	if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
 		return;
 
-	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
-				buf, &size, &retval, &duration);
-	CHECK(err || retval != XDP_DROP,
-	      "ipv4", "err %d errno %d retval %d size %d\n",
-	      err, errno, retval, size);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "ipv4");
+	ASSERT_EQ(topts.retval, XDP_DROP, "ipv4 retval");
 
 	expect_sz = sizeof(pkt_v6) + 40; /* Test grow with 40 bytes */
-	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6) /* 74 */,
-				buf, &size, &retval, &duration);
-	CHECK(err || retval != XDP_TX || size != expect_sz,
-	      "ipv6", "err %d errno %d retval %d size %d expect-size %d\n",
-	      err, errno, retval, size, expect_sz);
+	topts.data_in = &pkt_v6;
+	topts.data_size_in = sizeof(pkt_v6);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "ipv6");
+	ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval");
+	ASSERT_EQ(topts.data_size_out, expect_sz, "ipv6 size");
 
 	bpf_object__close(obj);
 }
@@ -65,18 +78,18 @@ static void test_xdp_adjust_tail_grow2(void)
 	int tailroom = 320; /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info))*/;
 	struct bpf_object *obj;
 	int err, cnt, i;
-	int max_grow;
+	int max_grow, prog_fd;
 
-	struct bpf_prog_test_run_attr tattr = {
+	LIBBPF_OPTS(bpf_test_run_opts, tattr,
 		.repeat		= 1,
 		.data_in	= &buf,
 		.data_out	= &buf,
 		.data_size_in	= 0, /* Per test */
 		.data_size_out	= 0, /* Per test */
-	};
+	);
 
-	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &tattr.prog_fd);
-	if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
+	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+	if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
 		return;
 
 	/* Test case-64 */
@@ -84,49 +97,171 @@ static void test_xdp_adjust_tail_grow2(void)
 	tattr.data_size_in  =  64; /* Determine test case via pkt size */
 	tattr.data_size_out = 128; /* Limit copy_size */
 	/* Kernel side alloc packet memory area that is zero init */
-	err = bpf_prog_test_run_xattr(&tattr);
+	err = bpf_prog_test_run_opts(prog_fd, &tattr);
 
-	CHECK_ATTR(errno != ENOSPC /* Due limit copy_size in bpf_test_finish */
-		   || tattr.retval != XDP_TX
-		   || tattr.data_size_out != 192, /* Expected grow size */
-		   "case-64",
-		   "err %d errno %d retval %d size %d\n",
-		   err, errno, tattr.retval, tattr.data_size_out);
+	ASSERT_EQ(errno, ENOSPC, "case-64 errno"); /* Due limit copy_size in bpf_test_finish */
+	ASSERT_EQ(tattr.retval, XDP_TX, "case-64 retval");
+	ASSERT_EQ(tattr.data_size_out, 192, "case-64 data_size_out"); /* Expected grow size */
 
 	/* Extra checks for data contents */
-	CHECK_ATTR(tattr.data_size_out != 192
-		   || buf[0]   != 1 ||  buf[63]  != 1  /*  0-63  memset to 1 */
-		   || buf[64]  != 0 ||  buf[127] != 0  /* 64-127 memset to 0 */
-		   || buf[128] != 1 ||  buf[191] != 1, /*128-191 memset to 1 */
-		   "case-64-data",
-		   "err %d errno %d retval %d size %d\n",
-		   err, errno, tattr.retval, tattr.data_size_out);
+	ASSERT_EQ(buf[0], 1, "case-64-data buf[0]"); /*  0-63  memset to 1 */
+	ASSERT_EQ(buf[63], 1, "case-64-data buf[63]");
+	ASSERT_EQ(buf[64], 0, "case-64-data buf[64]"); /* 64-127 memset to 0 */
+	ASSERT_EQ(buf[127], 0, "case-64-data buf[127]");
+	ASSERT_EQ(buf[128], 1, "case-64-data buf[128]"); /* 128-191 memset to 1 */
+	ASSERT_EQ(buf[191], 1, "case-64-data buf[191]");
 
 	/* Test case-128 */
 	memset(buf, 2, sizeof(buf));
 	
tattr.data_size_in  = 128; /* Determine test case via pkt size */  	tattr.data_size_out = sizeof(buf);   /* Copy everything */ -	err = bpf_prog_test_run_xattr(&tattr); +	err = bpf_prog_test_run_opts(prog_fd, &tattr);  	max_grow = 4096 - XDP_PACKET_HEADROOM -	tailroom; /* 3520 */ -	CHECK_ATTR(err -		   || tattr.retval != XDP_TX -		   || tattr.data_size_out != max_grow,/* Expect max grow size */ -		   "case-128", -		   "err %d errno %d retval %d size %d expect-size %d\n", -		   err, errno, tattr.retval, tattr.data_size_out, max_grow); +	ASSERT_OK(err, "case-128"); +	ASSERT_EQ(tattr.retval, XDP_TX, "case-128 retval"); +	ASSERT_EQ(tattr.data_size_out, max_grow, "case-128 data_size_out"); /* Expect max grow */  	/* Extra checks for data content: Count grow size, will contain zeros */  	for (i = 0, cnt = 0; i < sizeof(buf); i++) {  		if (buf[i] == 0)  			cnt++;  	} -	CHECK_ATTR((cnt != (max_grow - tattr.data_size_in)) /* Grow increase */ -		   || tattr.data_size_out != max_grow, /* Total grow size */ -		   "case-128-data", -		   "err %d errno %d retval %d size %d grow-size %d\n", -		   err, errno, tattr.retval, tattr.data_size_out, cnt); +	ASSERT_EQ(cnt, max_grow - tattr.data_size_in, "case-128-data cnt"); /* Grow increase */ +	ASSERT_EQ(tattr.data_size_out, max_grow, "case-128-data data_size_out"); /* Total grow */ + +	bpf_object__close(obj); +} + +static void test_xdp_adjust_frags_tail_shrink(void) +{ +	const char *file = "./test_xdp_adjust_tail_shrink.o"; +	__u32 exp_size; +	struct bpf_program *prog; +	struct bpf_object *obj; +	int err, prog_fd; +	__u8 *buf; +	LIBBPF_OPTS(bpf_test_run_opts, topts); + +	/* For the individual test cases, the first byte in the packet +	 * indicates which test will be run. +	 */ +	obj = bpf_object__open(file); +	if (libbpf_get_error(obj)) +		return; + +	prog = bpf_object__next_program(obj, NULL); +	if (bpf_object__load(obj)) +		return; + +	prog_fd = bpf_program__fd(prog); + +	buf = malloc(9000); +	if (!ASSERT_OK_PTR(buf, "alloc buf 9Kb")) +		goto out; + +	memset(buf, 0, 9000); + +	/* Test case removing 10 bytes from last frag, NOT freeing it */ +	exp_size = 8990; /* 9000 - 10 */ +	topts.data_in = buf; +	topts.data_out = buf; +	topts.data_size_in = 9000; +	topts.data_size_out = 9000; +	err = bpf_prog_test_run_opts(prog_fd, &topts); + +	ASSERT_OK(err, "9Kb-10b"); +	ASSERT_EQ(topts.retval, XDP_TX, "9Kb-10b retval"); +	ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-10b size"); + +	/* Test case removing one of two pages, assuming 4K pages */ +	buf[0] = 1; +	exp_size = 4900; /* 9000 - 4100 */ + +	topts.data_size_out = 9000; /* reset from previous invocation */ +	err = bpf_prog_test_run_opts(prog_fd, &topts); + +	ASSERT_OK(err, "9Kb-4Kb"); +	ASSERT_EQ(topts.retval, XDP_TX, "9Kb-4Kb retval"); +	ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-4Kb size"); + +	/* Test case removing two pages resulting in a linear xdp_buff */ +	buf[0] = 2; +	exp_size = 800; /* 9000 - 8200 */ +	topts.data_size_out = 9000; /* reset from previous invocation */ +	err = bpf_prog_test_run_opts(prog_fd, &topts); + +	ASSERT_OK(err, "9Kb-9Kb"); +	ASSERT_EQ(topts.retval, XDP_TX, "9Kb-9Kb retval"); +	ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-9Kb size"); + +	free(buf); +out: +	bpf_object__close(obj); +} + +static void test_xdp_adjust_frags_tail_grow(void) +{ +	const char *file = "./test_xdp_adjust_tail_grow.o"; +	__u32 exp_size; +	struct bpf_program *prog; +	struct bpf_object *obj; +	int err, i, prog_fd; +	__u8 *buf; +	LIBBPF_OPTS(bpf_test_run_opts, topts); + +	obj = bpf_object__open(file); +	if 
(libbpf_get_error(obj)) +		return; + +	prog = bpf_object__next_program(obj, NULL); +	if (bpf_object__load(obj)) +		return; + +	prog_fd = bpf_program__fd(prog); + +	buf = malloc(16384); +	if (!ASSERT_OK_PTR(buf, "alloc buf 16Kb")) +		goto out; + +	/* Test case add 10 bytes to last frag */ +	memset(buf, 1, 16384); +	exp_size = 9000 + 10; + +	topts.data_in = buf; +	topts.data_out = buf; +	topts.data_size_in = 9000; +	topts.data_size_out = 16384; +	err = bpf_prog_test_run_opts(prog_fd, &topts); + +	ASSERT_OK(err, "9Kb+10b"); +	ASSERT_EQ(topts.retval, XDP_TX, "9Kb+10b retval"); +	ASSERT_EQ(topts.data_size_out, exp_size, "9Kb+10b size"); + +	for (i = 0; i < 9000; i++) +		ASSERT_EQ(buf[i], 1, "9Kb+10b-old"); + +	for (i = 9000; i < 9010; i++) +		ASSERT_EQ(buf[i], 0, "9Kb+10b-new"); + +	for (i = 9010; i < 16384; i++) +		ASSERT_EQ(buf[i], 1, "9Kb+10b-untouched"); + +	/* Test a too large grow */ +	memset(buf, 1, 16384); +	exp_size = 9001; + +	topts.data_in = topts.data_out = buf; +	topts.data_size_in = 9001; +	topts.data_size_out = 16384; +	err = bpf_prog_test_run_opts(prog_fd, &topts); + +	ASSERT_OK(err, "9Kb+10b"); +	ASSERT_EQ(topts.retval, XDP_DROP, "9Kb+10b retval"); +	ASSERT_EQ(topts.data_size_out, exp_size, "9Kb+10b size"); +	free(buf); +out:  	bpf_object__close(obj);  } @@ -138,4 +273,8 @@ void test_xdp_adjust_tail(void)  		test_xdp_adjust_tail_grow();  	if (test__start_subtest("xdp_adjust_tail_grow2"))  		test_xdp_adjust_tail_grow2(); +	if (test__start_subtest("xdp_adjust_frags_tail_shrink")) +		test_xdp_adjust_frags_tail_shrink(); +	if (test__start_subtest("xdp_adjust_frags_tail_grow")) +		test_xdp_adjust_frags_tail_grow();  } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c index c6fa390e3aa1..62aa3edda5e6 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c @@ -11,8 +11,7 @@ void serial_test_xdp_attach(void)  	const char *file = "./test_xdp.o";  	struct bpf_prog_info info = {};  	int err, fd1, fd2, fd3; -	DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts, -			    .old_fd = -1); +	LIBBPF_OPTS(bpf_xdp_attach_opts, opts);  	len = sizeof(info); @@ -38,49 +37,47 @@ void serial_test_xdp_attach(void)  	if (CHECK_FAIL(err))  		goto out_2; -	err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd1, XDP_FLAGS_REPLACE, -				       &opts); +	err = bpf_xdp_attach(IFINDEX_LO, fd1, XDP_FLAGS_REPLACE, &opts);  	if (CHECK(err, "load_ok", "initial load failed"))  		goto out_close; -	err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0); +	err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);  	if (CHECK(err || id0 != id1, "id1_check",  		  "loaded prog id %u != id1 %u, err %d", id0, id1, err))  		goto out_close; -	err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, XDP_FLAGS_REPLACE, -				       &opts); +	err = bpf_xdp_attach(IFINDEX_LO, fd2, XDP_FLAGS_REPLACE, &opts);  	if (CHECK(!err, "load_fail", "load with expected id didn't fail"))  		goto out; -	opts.old_fd = fd1; -	err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, 0, &opts); +	opts.old_prog_fd = fd1; +	err = bpf_xdp_attach(IFINDEX_LO, fd2, 0, &opts);  	if (CHECK(err, "replace_ok", "replace valid old_fd failed"))  		goto out; -	err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0); +	err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);  	if (CHECK(err || id0 != id2, "id2_check",  		  "loaded prog id %u != id2 %u, err %d", id0, id2, err))  		goto out_close; -	err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd3, 0, &opts); +	err = bpf_xdp_attach(IFINDEX_LO, fd3, 0, 
&opts);  	if (CHECK(!err, "replace_fail", "replace invalid old_fd didn't fail"))  		goto out; -	err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, 0, &opts); +	err = bpf_xdp_detach(IFINDEX_LO, 0, &opts);  	if (CHECK(!err, "remove_fail", "remove invalid old_fd didn't fail"))  		goto out; -	opts.old_fd = fd2; -	err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, 0, &opts); +	opts.old_prog_fd = fd2; +	err = bpf_xdp_detach(IFINDEX_LO, 0, &opts);  	if (CHECK(err, "remove_ok", "remove valid old_fd failed"))  		goto out; -	err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0); +	err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);  	if (CHECK(err || id0 != 0, "unload_check",  		  "loaded prog id %u != 0, err %d", id0, err))  		goto out_close;  out: -	bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0); +	bpf_xdp_detach(IFINDEX_LO, 0, NULL);  out_close:  	bpf_object__close(obj3);  out_2: diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c index c98a897ad692..76967d8ace9c 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c @@ -10,40 +10,101 @@ struct meta {  	int pkt_len;  }; +struct test_ctx_s { +	bool passed; +	int pkt_size; +}; + +struct test_ctx_s test_ctx; +  static void on_sample(void *ctx, int cpu, void *data, __u32 size)  { -	int duration = 0;  	struct meta *meta = (struct meta *)data;  	struct ipv4_packet *trace_pkt_v4 = data + sizeof(*meta); +	unsigned char *raw_pkt = data + sizeof(*meta); +	struct test_ctx_s *tst_ctx = ctx; + +	ASSERT_GE(size, sizeof(pkt_v4) + sizeof(*meta), "check_size"); +	ASSERT_EQ(meta->ifindex, if_nametoindex("lo"), "check_meta_ifindex"); +	ASSERT_EQ(meta->pkt_len, tst_ctx->pkt_size, "check_meta_pkt_len"); +	ASSERT_EQ(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)), 0, +		  "check_packet_content"); + +	if (meta->pkt_len > sizeof(pkt_v4)) { +		for (int i = 0; i < meta->pkt_len - sizeof(pkt_v4); i++) +			ASSERT_EQ(raw_pkt[i + sizeof(pkt_v4)], (unsigned char)i, +				  "check_packet_content"); +	} + +	tst_ctx->passed = true; +} -	if (CHECK(size < sizeof(pkt_v4) + sizeof(*meta), -		  "check_size", "size %u < %zu\n", -		  size, sizeof(pkt_v4) + sizeof(*meta))) -		return; +#define BUF_SZ	9000 -	if (CHECK(meta->ifindex != if_nametoindex("lo"), "check_meta_ifindex", -		  "meta->ifindex = %d\n", meta->ifindex)) +static void run_xdp_bpf2bpf_pkt_size(int pkt_fd, struct perf_buffer *pb, +				     struct test_xdp_bpf2bpf *ftrace_skel, +				     int pkt_size) +{ +	__u8 *buf, *buf_in; +	int err; +	LIBBPF_OPTS(bpf_test_run_opts, topts); + +	if (!ASSERT_LE(pkt_size, BUF_SZ, "pkt_size") || +	    !ASSERT_GE(pkt_size, sizeof(pkt_v4), "pkt_size"))  		return; -	if (CHECK(meta->pkt_len != sizeof(pkt_v4), "check_meta_pkt_len", -		  "meta->pkt_len = %zd\n", sizeof(pkt_v4))) +	buf_in = malloc(BUF_SZ); +	if (!ASSERT_OK_PTR(buf_in, "buf_in malloc()"))  		return; -	if (CHECK(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)), -		  "check_packet_content", "content not the same\n")) +	buf = malloc(BUF_SZ); +	if (!ASSERT_OK_PTR(buf, "buf malloc()")) { +		free(buf_in);  		return; +	} + +	test_ctx.passed = false; +	test_ctx.pkt_size = pkt_size; + +	memcpy(buf_in, &pkt_v4, sizeof(pkt_v4)); +	if (pkt_size > sizeof(pkt_v4)) { +		for (int i = 0; i < (pkt_size - sizeof(pkt_v4)); i++) +			buf_in[i + sizeof(pkt_v4)] = i; +	} + +	/* Run test program */ +	topts.data_in = buf_in; +	topts.data_size_in = pkt_size; +	topts.data_out = buf; +	topts.data_size_out = BUF_SZ; + +	err = bpf_prog_test_run_opts(pkt_fd, 
&topts); + +	ASSERT_OK(err, "ipv4"); +	ASSERT_EQ(topts.retval, XDP_PASS, "ipv4 retval"); +	ASSERT_EQ(topts.data_size_out, pkt_size, "ipv4 size"); + +	/* Make sure bpf_xdp_output() was triggered and it sent the expected +	 * data to the perf ring buffer. +	 */ +	err = perf_buffer__poll(pb, 100); + +	ASSERT_GE(err, 0, "perf_buffer__poll"); +	ASSERT_TRUE(test_ctx.passed, "test passed"); +	/* Verify test results */ +	ASSERT_EQ(ftrace_skel->bss->test_result_fentry, if_nametoindex("lo"), +		  "fentry result"); +	ASSERT_EQ(ftrace_skel->bss->test_result_fexit, XDP_PASS, "fexit result"); -	*(bool *)ctx = true; +	free(buf); +	free(buf_in);  }  void test_xdp_bpf2bpf(void)  { -	__u32 duration = 0, retval, size; -	char buf[128];  	int err, pkt_fd, map_fd; -	bool passed = false; -	struct iphdr iph; -	struct iptnl_info value4 = {.family = AF_INET}; +	int pkt_sizes[] = {sizeof(pkt_v4), 1024, 4100, 8200}; +	struct iptnl_info value4 = {.family = AF_INET6};  	struct test_xdp *pkt_skel = NULL;  	struct test_xdp_bpf2bpf *ftrace_skel = NULL;  	struct vip key4 = {.protocol = 6, .family = AF_INET}; @@ -52,7 +113,7 @@ void test_xdp_bpf2bpf(void)  	/* Load XDP program to introspect */  	pkt_skel = test_xdp__open_and_load(); -	if (CHECK(!pkt_skel, "pkt_skel_load", "test_xdp skeleton failed\n")) +	if (!ASSERT_OK_PTR(pkt_skel, "test_xdp__open_and_load"))  		return;  	pkt_fd = bpf_program__fd(pkt_skel->progs._xdp_tx_iptunnel); @@ -62,7 +123,7 @@ void test_xdp_bpf2bpf(void)  	/* Load trace program */  	ftrace_skel = test_xdp_bpf2bpf__open(); -	if (CHECK(!ftrace_skel, "__open", "ftrace skeleton failed\n")) +	if (!ASSERT_OK_PTR(ftrace_skel, "test_xdp_bpf2bpf__open"))  		goto out;  	/* Demonstrate the bpf_program__set_attach_target() API rather than @@ -77,50 +138,24 @@ void test_xdp_bpf2bpf(void)  	bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");  	err = test_xdp_bpf2bpf__load(ftrace_skel); -	if (CHECK(err, "__load", "ftrace skeleton failed\n")) +	if (!ASSERT_OK(err, "test_xdp_bpf2bpf__load"))  		goto out;  	err = test_xdp_bpf2bpf__attach(ftrace_skel); -	if (CHECK(err, "ftrace_attach", "ftrace attach failed: %d\n", err)) +	if (!ASSERT_OK(err, "test_xdp_bpf2bpf__attach"))  		goto out;  	/* Set up perf buffer */ -	pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 1, -			      on_sample, NULL, &passed, NULL); +	pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 8, +			      on_sample, NULL, &test_ctx, NULL);  	if (!ASSERT_OK_PTR(pb, "perf_buf__new"))  		goto out; -	/* Run test program */ -	err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4), -				buf, &size, &retval, &duration); -	memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph)); -	if (CHECK(err || retval != XDP_TX || size != 74 || -		  iph.protocol != IPPROTO_IPIP, "ipv4", -		  "err %d errno %d retval %d size %d\n", -		  err, errno, retval, size)) -		goto out; - -	/* Make sure bpf_xdp_output() was triggered and it sent the expected -	 * data to the perf ring buffer. 
-	 */ -	err = perf_buffer__poll(pb, 100); -	if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err)) -		goto out; - -	CHECK_FAIL(!passed); - -	/* Verify test results */ -	if (CHECK(ftrace_skel->bss->test_result_fentry != if_nametoindex("lo"), -		  "result", "fentry failed err %llu\n", -		  ftrace_skel->bss->test_result_fentry)) -		goto out; - -	CHECK(ftrace_skel->bss->test_result_fexit != XDP_TX, "result", -	      "fexit failed err %llu\n", ftrace_skel->bss->test_result_fexit); - +	for (int i = 0; i < ARRAY_SIZE(pkt_sizes); i++) +		run_xdp_bpf2bpf_pkt_size(pkt_fd, pb, ftrace_skel, +					 pkt_sizes[i]);  out: -	if (pb) -		perf_buffer__free(pb); +	perf_buffer__free(pb);  	test_xdp__destroy(pkt_skel);  	test_xdp_bpf2bpf__destroy(ftrace_skel);  } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c index fd812bd43600..f775a1613833 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c @@ -3,11 +3,12 @@  #include <linux/if_link.h>  #include <test_progs.h> +#include "test_xdp_with_cpumap_frags_helpers.skel.h"  #include "test_xdp_with_cpumap_helpers.skel.h"  #define IFINDEX_LO	1 -void serial_test_xdp_cpumap_attach(void) +static void test_xdp_with_cpumap_helpers(void)  {  	struct test_xdp_with_cpumap_helpers *skel;  	struct bpf_prog_info info = {}; @@ -23,11 +24,11 @@ void serial_test_xdp_cpumap_attach(void)  		return;  	prog_fd = bpf_program__fd(skel->progs.xdp_redir_prog); -	err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE); +	err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);  	if (!ASSERT_OK(err, "Generic attach of program with 8-byte CPUMAP"))  		goto out_close; -	err = bpf_set_link_xdp_fd(IFINDEX_LO, -1, XDP_FLAGS_SKB_MODE); +	err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);  	ASSERT_OK(err, "XDP program detach");  	prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm); @@ -45,15 +46,76 @@ void serial_test_xdp_cpumap_attach(void)  	ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to cpumap entry prog_id");  	/* can not attach BPF_XDP_CPUMAP program to a device */ -	err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE); +	err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);  	if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_CPUMAP program")) -		bpf_set_link_xdp_fd(IFINDEX_LO, -1, XDP_FLAGS_SKB_MODE); +		bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);  	val.qsize = 192;  	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog);  	err = bpf_map_update_elem(map_fd, &idx, &val, 0);  	ASSERT_NEQ(err, 0, "Add non-BPF_XDP_CPUMAP program to cpumap entry"); +	/* Try to attach BPF_XDP program with frags to cpumap when we have +	 * already loaded a BPF_XDP program on the map +	 */ +	idx = 1; +	val.qsize = 192; +	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags); +	err = bpf_map_update_elem(map_fd, &idx, &val, 0); +	ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to cpumap entry"); +  out_close:  	test_xdp_with_cpumap_helpers__destroy(skel);  } + +static void test_xdp_with_cpumap_frags_helpers(void) +{ +	struct test_xdp_with_cpumap_frags_helpers *skel; +	struct bpf_prog_info info = {}; +	__u32 len = sizeof(info); +	struct bpf_cpumap_val val = { +		.qsize = 192, +	}; +	int err, frags_prog_fd, map_fd; +	__u32 idx = 0; + +	skel = test_xdp_with_cpumap_frags_helpers__open_and_load(); +	if (!ASSERT_OK_PTR(skel, 
"test_xdp_with_cpumap_helpers__open_and_load")) +		return; + +	frags_prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags); +	map_fd = bpf_map__fd(skel->maps.cpu_map); +	err = bpf_obj_get_info_by_fd(frags_prog_fd, &info, &len); +	if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) +		goto out_close; + +	val.bpf_prog.fd = frags_prog_fd; +	err = bpf_map_update_elem(map_fd, &idx, &val, 0); +	ASSERT_OK(err, "Add program to cpumap entry"); + +	err = bpf_map_lookup_elem(map_fd, &idx, &val); +	ASSERT_OK(err, "Read cpumap entry"); +	ASSERT_EQ(info.id, val.bpf_prog.id, +		  "Match program id to cpumap entry prog_id"); + +	/* Try to attach BPF_XDP program to cpumap when we have +	 * already loaded a BPF_XDP program with frags on the map +	 */ +	idx = 1; +	val.qsize = 192; +	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_cm); +	err = bpf_map_update_elem(map_fd, &idx, &val, 0); +	ASSERT_NEQ(err, 0, "Add BPF_XDP program to cpumap entry"); + +out_close: +	test_xdp_with_cpumap_frags_helpers__destroy(skel); +} + +void serial_test_xdp_cpumap_attach(void) +{ +	if (test__start_subtest("CPUMAP with programs in entries")) +		test_xdp_with_cpumap_helpers(); + +	if (test__start_subtest("CPUMAP with frags programs in entries")) +		test_xdp_with_cpumap_frags_helpers(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c index 3079d5568f8f..ead40016c324 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c @@ -4,6 +4,7 @@  #include <test_progs.h>  #include "test_xdp_devmap_helpers.skel.h" +#include "test_xdp_with_devmap_frags_helpers.skel.h"  #include "test_xdp_with_devmap_helpers.skel.h"  #define IFINDEX_LO 1 @@ -25,11 +26,11 @@ static void test_xdp_with_devmap_helpers(void)  		return;  	dm_fd = bpf_program__fd(skel->progs.xdp_redir_prog); -	err = bpf_set_link_xdp_fd(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE); +	err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL);  	if (!ASSERT_OK(err, "Generic attach of program with 8-byte devmap"))  		goto out_close; -	err = bpf_set_link_xdp_fd(IFINDEX_LO, -1, XDP_FLAGS_SKB_MODE); +	err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);  	ASSERT_OK(err, "XDP program detach");  	dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm); @@ -47,15 +48,24 @@ static void test_xdp_with_devmap_helpers(void)  	ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to devmap entry prog_id");  	/* can not attach BPF_XDP_DEVMAP program to a device */ -	err = bpf_set_link_xdp_fd(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE); +	err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL);  	if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_DEVMAP program")) -		bpf_set_link_xdp_fd(IFINDEX_LO, -1, XDP_FLAGS_SKB_MODE); +		bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);  	val.ifindex = 1;  	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog);  	err = bpf_map_update_elem(map_fd, &idx, &val, 0);  	ASSERT_NEQ(err, 0, "Add non-BPF_XDP_DEVMAP program to devmap entry"); +	/* Try to attach BPF_XDP program with frags to devmap when we have +	 * already loaded a BPF_XDP program on the map +	 */ +	idx = 1; +	val.ifindex = 1; +	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_dm_frags); +	err = bpf_map_update_elem(map_fd, &idx, &val, 0); +	ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to devmap entry"); +  out_close:  	test_xdp_with_devmap_helpers__destroy(skel);  } @@ -71,12 +81,57 @@ static void 
test_neg_xdp_devmap_helpers(void)  	}  } +static void test_xdp_with_devmap_frags_helpers(void) +{ +	struct test_xdp_with_devmap_frags_helpers *skel; +	struct bpf_prog_info info = {}; +	struct bpf_devmap_val val = { +		.ifindex = IFINDEX_LO, +	}; +	__u32 len = sizeof(info); +	int err, dm_fd_frags, map_fd; +	__u32 idx = 0; + +	skel = test_xdp_with_devmap_frags_helpers__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load")) +		return; + +	dm_fd_frags = bpf_program__fd(skel->progs.xdp_dummy_dm_frags); +	map_fd = bpf_map__fd(skel->maps.dm_ports); +	err = bpf_obj_get_info_by_fd(dm_fd_frags, &info, &len); +	if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) +		goto out_close; + +	val.bpf_prog.fd = dm_fd_frags; +	err = bpf_map_update_elem(map_fd, &idx, &val, 0); +	ASSERT_OK(err, "Add frags program to devmap entry"); + +	err = bpf_map_lookup_elem(map_fd, &idx, &val); +	ASSERT_OK(err, "Read devmap entry"); +	ASSERT_EQ(info.id, val.bpf_prog.id, +		  "Match program id to devmap entry prog_id"); + +	/* Try to attach BPF_XDP program to devmap when we have +	 * already loaded a BPF_XDP program with frags on the map +	 */ +	idx = 1; +	val.ifindex = 1; +	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_dm); +	err = bpf_map_update_elem(map_fd, &idx, &val, 0); +	ASSERT_NEQ(err, 0, "Add BPF_XDP program to devmap entry"); + +out_close: +	test_xdp_with_devmap_frags_helpers__destroy(skel); +}  void serial_test_xdp_devmap_attach(void)  {  	if (test__start_subtest("DEVMAP with programs in entries"))  		test_xdp_with_devmap_helpers(); +	if (test__start_subtest("DEVMAP with frags programs in entries")) +		test_xdp_with_devmap_frags_helpers(); +  	if (test__start_subtest("Verifier check of DEVMAP programs"))  		test_neg_xdp_devmap_helpers();  } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c new file mode 100644 index 000000000000..a50971c6cf4a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include <net/if.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/ipv6.h> +#include <linux/in6.h> +#include <linux/udp.h> +#include <bpf/bpf_endian.h> +#include "test_xdp_do_redirect.skel.h" + +#define SYS(fmt, ...)						
+#define SYS(fmt, ...)						\
+	({							\
+		char cmd[1024];					\
+		snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__);	\
+		if (!ASSERT_OK(system(cmd), cmd))		\
+			goto out;				\
+	})
+
+struct udp_packet {
+	struct ethhdr eth;
+	struct ipv6hdr iph;
+	struct udphdr udp;
+	__u8 payload[64 - sizeof(struct udphdr)
+		     - sizeof(struct ethhdr) - sizeof(struct ipv6hdr)];
+} __packed;
+
+static struct udp_packet pkt_udp = {
+	.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
+	.eth.h_dest = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
+	.eth.h_source = {0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb},
+	.iph.version = 6,
+	.iph.nexthdr = IPPROTO_UDP,
+	.iph.payload_len = bpf_htons(sizeof(struct udp_packet)
+				     - offsetof(struct udp_packet, udp)),
+	.iph.hop_limit = 2,
+	.iph.saddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(1)},
+	.iph.daddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(2)},
+	.udp.source = bpf_htons(1),
+	.udp.dest = bpf_htons(1),
+	.udp.len = bpf_htons(sizeof(struct udp_packet)
+			     - offsetof(struct udp_packet, udp)),
+	.payload = {0x42}, /* receiver XDP program matches on this */
+};
+
+static int attach_tc_prog(struct bpf_tc_hook *hook, int fd)
+{
+	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd);
+	int ret;
+
+	ret = bpf_tc_hook_create(hook);
+	if (!ASSERT_OK(ret, "create tc hook"))
+		return ret;
+
+	ret = bpf_tc_attach(hook, &opts);
+	if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+		bpf_tc_hook_destroy(hook);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) -
+ * sizeof(struct skb_shared_info) - XDP_PACKET_HEADROOM = 3368 bytes
+ */
+#define MAX_PKT_SIZE 3368
+static void test_max_pkt_size(int fd)
+{
+	char data[MAX_PKT_SIZE + 1] = {};
+	int err;
+	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+			    .data_in = &data,
+			    .data_size_in = MAX_PKT_SIZE,
+			    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+			    .repeat = 1,
+		);
+	err = bpf_prog_test_run_opts(fd, &opts);
+	ASSERT_OK(err, "prog_run_max_size");
+
+	opts.data_size_in += 1;
+	err = bpf_prog_test_run_opts(fd, &opts);
+	ASSERT_EQ(err, -EINVAL, "prog_run_too_big");
+}
+
+#define NUM_PKTS 10000
+void test_xdp_do_redirect(void)
+{
+	int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst;
+	char data[sizeof(pkt_udp) + sizeof(__u32)];
+	struct test_xdp_do_redirect *skel = NULL;
+	struct nstoken *nstoken = NULL;
+	struct bpf_link *link;
+
+	struct xdp_md ctx_in = { .data = sizeof(__u32),
+				 .data_end = sizeof(data) };
+	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+			    .data_in = &data,
+			    .data_size_in = sizeof(data),
+			    .ctx_in = &ctx_in,
+			    .ctx_size_in = sizeof(ctx_in),
+			    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+			    .repeat = NUM_PKTS,
+			    .batch_size = 64,
+		);
+	DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook,
+			    .attach_point = BPF_TC_INGRESS);
+
+	memcpy(&data[sizeof(__u32)], &pkt_udp, sizeof(pkt_udp));
+	*((__u32 *)data) = 0x42; /* metadata test value */
+
+	skel = test_xdp_do_redirect__open();
+	if (!ASSERT_OK_PTR(skel, "skel"))
+		return;
+
+	/* The XDP program we run with bpf_prog_run() will cycle through all
+	 * three xmit (PASS/TX/REDIRECT) return codes starting from above, and
+	 * ending up with PASS, so we should end up with two packets on the dst
+	 * iface and NUM_PKTS-2 in the TC hook. We match the packets on the UDP
+	 * payload.
+	 */
+	SYS("ip netns add testns");
+	nstoken = open_netns("testns");
+	if (!ASSERT_OK_PTR(nstoken, "setns"))
+		goto out;
+
+	SYS("ip link add veth_src type veth peer name veth_dst");
+	SYS("ip link set dev veth_src address 00:11:22:33:44:55");
+	SYS("ip link set dev veth_dst address 66:77:88:99:aa:bb");
+	SYS("ip link set dev veth_src up");
+	SYS("ip link set dev veth_dst up");
+	SYS("ip addr add dev veth_src fc00::1/64");
+	SYS("ip addr add dev veth_dst fc00::2/64");
+	SYS("ip neigh add fc00::2 dev veth_src lladdr 66:77:88:99:aa:bb");
+
+	/* We enable forwarding in the test namespace because that will cause
+	 * the packets that go through the kernel stack (with XDP_PASS) to be
+	 * forwarded back out the same interface (because of the packet dst
+	 * combined with the interface addresses). When this happens, the
+	 * regular forwarding path will end up going through the same
+	 * veth_xdp_xmit() call as the XDP_REDIRECT code, which can cause a
+	 * deadlock if it happens on the same CPU. There's a local_bh_disable()
+	 * in the test_run code to prevent this, but an earlier version of the
+	 * code didn't have this, so we keep the test behaviour to make sure the
+	 * bug doesn't resurface.
+	 */
+	SYS("sysctl -qw net.ipv6.conf.all.forwarding=1");
+
+	ifindex_src = if_nametoindex("veth_src");
+	ifindex_dst = if_nametoindex("veth_dst");
+	if (!ASSERT_NEQ(ifindex_src, 0, "ifindex_src") ||
+	    !ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst"))
+		goto out;
+
+	memcpy(skel->rodata->expect_dst, &pkt_udp.eth.h_dest, ETH_ALEN);
+	skel->rodata->ifindex_out = ifindex_src; /* redirect back to the same iface */
+	skel->rodata->ifindex_in = ifindex_src;
+	ctx_in.ingress_ifindex = ifindex_src;
+	tc_hook.ifindex = ifindex_src;
+
+	if (!ASSERT_OK(test_xdp_do_redirect__load(skel), "load"))
+		goto out;
+
+	link = bpf_program__attach_xdp(skel->progs.xdp_count_pkts, ifindex_dst);
+	if (!ASSERT_OK_PTR(link, "prog_attach"))
+		goto out;
+	skel->links.xdp_count_pkts = link;
+
+	tc_prog_fd = bpf_program__fd(skel->progs.tc_count_pkts);
+	if (attach_tc_prog(&tc_hook, tc_prog_fd))
+		goto out;
+
+	xdp_prog_fd = bpf_program__fd(skel->progs.xdp_redirect);
+	err = bpf_prog_test_run_opts(xdp_prog_fd, &opts);
+	if (!ASSERT_OK(err, "prog_run"))
+		goto out_tc;
+
+	/* wait for the packets to be flushed */
+	kern_sync_rcu();
+
+	/* There will be one packet sent through XDP_REDIRECT and one through
+	 * XDP_TX; these will show up on the XDP counting program, while the
+	 * rest will be counted at the TC ingress hook (and the counting program
+	 * resets the packet payload so they don't get counted twice even though
+	 * they are re-transmitted out the veth device).
+	 */
+	ASSERT_EQ(skel->bss->pkts_seen_xdp, 2, "pkt_count_xdp");
+	ASSERT_EQ(skel->bss->pkts_seen_zero, 2, "pkt_count_zero");
+	ASSERT_EQ(skel->bss->pkts_seen_tc, NUM_PKTS - 2, "pkt_count_tc");
+
+	test_max_pkt_size(bpf_program__fd(skel->progs.xdp_count_pkts));
+
+out_tc:
+	bpf_tc_hook_destroy(&tc_hook);
+out:
+	if (nstoken)
+		close_netns(nstoken);
+	system("ip netns del testns");
+	test_xdp_do_redirect__destroy(skel);
+}
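The new test drives real frames through the kernel with BPF_F_TEST_XDP_LIVE_FRAMES rather than the default run-and-copy-back mode of BPF_PROG_RUN. Stripped of the test scaffolding, the core invocation reduces to the sketch below; prog_fd and the packet buffer are assumed to exist, and in live mode XDP_TX/XDP_REDIRECT verdicts hit real devices, so there is no data_out or retval to inspect afterwards:

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <linux/bpf.h>	/* BPF_F_TEST_XDP_LIVE_FRAMES */

/* Sketch: inject pkt into prog_fd repeatedly as live XDP frames. */
static int xdp_run_live(int prog_fd, void *pkt, __u32 pkt_len)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
		.repeat = 10000,	/* each repetition injects the frame again */
		.batch_size = 64,	/* 0 lets the kernel pick a batch size */
	);

	return bpf_prog_test_run_opts(prog_fd, &opts);
}

The test above then observes the effects out-of-band, via counters bumped by the receiving XDP and TC programs, which is why it asserts on skel->bss counters instead of a return value.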
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_info.c b/tools/testing/selftests/bpf/prog_tests/xdp_info.c
index abe48e82e1dc..0d01ff6cb91a 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_info.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_info.c
@@ -14,13 +14,13 @@ void serial_test_xdp_info(void)
 
 	/* Get prog_id for XDP_ATTACHED_NONE mode */
-	err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, 0);
+	err = bpf_xdp_query_id(IFINDEX_LO, 0, &prog_id);
 	if (CHECK(err, "get_xdp_none", "errno=%d\n", errno))
 		return;
 	if (CHECK(prog_id, "prog_id_none", "unexpected prog_id=%u\n", prog_id))
 		return;
 
-	err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_SKB_MODE);
+	err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_SKB_MODE, &prog_id);
 	if (CHECK(err, "get_xdp_none_skb", "errno=%d\n", errno))
 		return;
 	if (CHECK(prog_id, "prog_id_none_skb", "unexpected prog_id=%u\n",
@@ -37,32 +37,32 @@ void serial_test_xdp_info(void)
 	if (CHECK(err, "get_prog_info", "errno=%d\n", errno))
 		goto out_close;
 
-	err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE);
+	err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
 	if (CHECK(err, "set_xdp_skb", "errno=%d\n", errno))
 		goto out_close;
 
 	/* Get prog_id for single prog mode */
-	err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, 0);
+	err = bpf_xdp_query_id(IFINDEX_LO, 0, &prog_id);
 	if (CHECK(err, "get_xdp", "errno=%d\n", errno))
 		goto out;
 	if (CHECK(prog_id != info.id, "prog_id", "prog_id not available\n"))
 		goto out;
 
-	err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_SKB_MODE);
+	err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_SKB_MODE, &prog_id);
 	if (CHECK(err, "get_xdp_skb", "errno=%d\n", errno))
 		goto out;
 	if (CHECK(prog_id != info.id, "prog_id_skb", "prog_id not available\n"))
 		goto out;
 
-	err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_DRV_MODE);
+	err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_DRV_MODE, &prog_id);
 	if (CHECK(err, "get_xdp_drv", "errno=%d\n", errno))
 		goto out;
 	if (CHECK(prog_id, "prog_id_drv", "unexpected prog_id=%u\n", prog_id))
 		goto out;
 
 out:
-	bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
+	bpf_xdp_detach(IFINDEX_LO, 0, NULL);
 
 out_close:
 	bpf_object__close(obj);
 }
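The xdp_link.c conversion below swaps bpf_xdp_set_link_opts/.old_fd for bpf_xdp_attach_opts/.old_prog_fd while keeping the same compare-and-swap semantics: with XDP_FLAGS_REPLACE, the kernel only applies the change if the currently attached program matches old_prog_fd. A hedged sketch of that flow, with hypothetical ifindex and fds:

#include <bpf/libbpf.h>
#include <linux/if_link.h>	/* XDP_FLAGS_REPLACE */

/* Atomically replace old_fd with new_fd on ifindex; sketch only. */
static int xdp_swap_prog(int ifindex, int old_fd, int new_fd)
{
	LIBBPF_OPTS(bpf_xdp_attach_opts, opts, .old_prog_fd = old_fd);

	/* Fails (typically -EEXIST) if the attached program is no longer
	 * old_fd, e.g. because another process raced the update.
	 */
	return bpf_xdp_attach(ifindex, new_fd, XDP_FLAGS_REPLACE, &opts);
}

The detach side uses the same guard, as the test exercises: bpf_xdp_detach(ifindex, XDP_FLAGS_REPLACE, &opts) with opts.old_prog_fd set.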
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_link.c b/tools/testing/selftests/bpf/prog_tests/xdp_link.c
index b2b357f8c74c..3e9d5c5521f0 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_link.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_link.c
@@ -8,9 +8,9 @@
 
 void serial_test_xdp_link(void)
 {
-	DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts, .old_fd = -1);
 	struct test_xdp_link *skel1 = NULL, *skel2 = NULL;
 	__u32 id1, id2, id0 = 0, prog_fd1, prog_fd2;
+	LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
 	struct bpf_link_info link_info;
 	struct bpf_prog_info prog_info;
 	struct bpf_link *link;
@@ -41,12 +41,12 @@ void serial_test_xdp_link(void)
 	id2 = prog_info.id;
 
 	/* set initial prog attachment */
-	err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, prog_fd1, XDP_FLAGS_REPLACE, &opts);
+	err = bpf_xdp_attach(IFINDEX_LO, prog_fd1, XDP_FLAGS_REPLACE, &opts);
 	if (!ASSERT_OK(err, "fd_attach"))
 		goto cleanup;
 
 	/* validate prog ID */
-	err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+	err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
 	if (!ASSERT_OK(err, "id1_check_err") || !ASSERT_EQ(id0, id1, "id1_check_val"))
 		goto cleanup;
 
@@ -55,14 +55,14 @@ void serial_test_xdp_link(void)
 	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
 		bpf_link__destroy(link);
 		/* best-effort detach prog */
-		opts.old_fd = prog_fd1;
-		bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, XDP_FLAGS_REPLACE, &opts);
+		opts.old_prog_fd = prog_fd1;
+		bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_REPLACE, &opts);
 		goto cleanup;
 	}
 
 	/* detach BPF program */
-	opts.old_fd = prog_fd1;
-	err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, XDP_FLAGS_REPLACE, &opts);
+	opts.old_prog_fd = prog_fd1;
+	err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_REPLACE, &opts);
 	if (!ASSERT_OK(err, "prog_detach"))
 		goto cleanup;
@@ -73,23 +73,23 @@ void serial_test_xdp_link(void)
 	skel1->links.xdp_handler = link;
 
 	/* validate prog ID */
-	err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+	err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
 	if (!ASSERT_OK(err, "id1_check_err") || !ASSERT_EQ(id0, id1, "id1_check_val"))
 		goto cleanup;
 
 	/* BPF prog attach is not allowed to replace BPF link */
-	opts.old_fd = prog_fd1;
-	err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, prog_fd2, XDP_FLAGS_REPLACE, &opts);
+	opts.old_prog_fd = prog_fd1;
+	err = bpf_xdp_attach(IFINDEX_LO, prog_fd2, XDP_FLAGS_REPLACE, &opts);
 	if (!ASSERT_ERR(err, "prog_attach_fail"))
 		goto cleanup;
 
 	/* Can't force-update when BPF link is active */
-	err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd2, 0);
+	err = bpf_xdp_attach(IFINDEX_LO, prog_fd2, 0, NULL);
 	if (!ASSERT_ERR(err, "prog_update_fail"))
 		goto cleanup;
 
 	/* Can't force-detach when BPF link is active */
-	err = bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
+	err = bpf_xdp_detach(IFINDEX_LO, 0, NULL);
 	if (!ASSERT_ERR(err, "prog_detach_fail"))
 		goto cleanup;
 
@@ -109,7 +109,7 @@ void serial_test_xdp_link(void)
 		goto cleanup;
 	skel2->links.xdp_handler = link;
 
-	err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+	err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
 	if (!ASSERT_OK(err, "id2_check_err") || !ASSERT_EQ(id0, id2, "id2_check_val"))
 		goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
index 0281095de266..92ef0aa50866 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
@@ -25,43 +25,49 @@ void test_xdp_noinline(void)
 		__u8 flags;
 	} real_def = {.dst = MAGIC_VAL};
 	__u32 ch_key = 11, real_num = 3;
-	__u32 duration = 0, retval, size;
 	int err, i;
 	__u64 bytes = 0, pkts = 0;
 	char buf[128];
 	u32 *magic = (u32 *)buf;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.data_out = buf,
+		.data_size_out = sizeof(buf),
+		.repeat = NUM_ITER,
+	);
 
 	skel = test_xdp_noinline__open_and_load();
-	if (CHECK(!skel, "skel_open_and_load", "failed\n"))
+	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
 		return;
 
 	bpf_map_update_elem(bpf_map__fd(skel->maps.vip_map), &key, &value, 0);
 	bpf_map_update_elem(bpf_map__fd(skel->maps.ch_rings), &ch_key, &real_num, 0);
 	bpf_map_update_elem(bpf_map__fd(skel->maps.reals), &real_num, &real_def, 0);
-	err = bpf_prog_test_run(bpf_program__fd(skel->progs.balancer_ingress_v4),
-				NUM_ITER, &pkt_v4, sizeof(pkt_v4),
-				buf, &size, &retval, &duration);
-	CHECK(err || retval != 1 || size != 54 ||
-	      *magic != MAGIC_VAL, "ipv4",
-	      "err %d errno %d retval %d size %d magic %x\n",
-	      err, errno, retval, size, *magic);
+	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.balancer_ingress_v4), &topts);
+	ASSERT_OK(err, "ipv4 test_run");
+	ASSERT_EQ(topts.retval, 1, "ipv4 test_run retval");
+	ASSERT_EQ(topts.data_size_out, 54, "ipv4 test_run data_size_out");
+	ASSERT_EQ(*magic, MAGIC_VAL, "ipv4 test_run magic");
-	err = bpf_prog_test_run(bpf_program__fd(skel->progs.balancer_ingress_v6),
-				NUM_ITER, &pkt_v6, sizeof(pkt_v6),
-				buf, &size, &retval, &duration);
-	CHECK(err || retval != 1 || size != 74 ||
-	      *magic != MAGIC_VAL, "ipv6",
-	      "err %d errno %d retval %d size %d magic %x\n",
-	      err, errno, retval, size, *magic);
+	topts.data_in = &pkt_v6;
+	topts.data_size_in = sizeof(pkt_v6);
+	topts.data_out = buf;
+	topts.data_size_out = sizeof(buf);
+
+	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.balancer_ingress_v6), &topts);
+	ASSERT_OK(err, "ipv6 test_run");
+	ASSERT_EQ(topts.retval, 1, "ipv6 test_run retval");
+	ASSERT_EQ(topts.data_size_out, 74, "ipv6 test_run data_size_out");
+	ASSERT_EQ(*magic, MAGIC_VAL, "ipv6 test_run magic");
 
 	bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), &stats_key, stats);
 	for (i = 0; i < nr_cpus; i++) {
 		bytes += stats[i].bytes;
 		pkts += stats[i].pkts;
 	}
-	CHECK(bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2,
-	      "stats", "bytes %lld pkts %lld\n",
-	      (unsigned long long)bytes, (unsigned long long)pkts);
+	ASSERT_EQ(bytes, MAGIC_BYTES * NUM_ITER * 2, "stats bytes");
+	ASSERT_EQ(pkts, NUM_ITER * 2, "stats pkts");
+
 	test_xdp_noinline__destroy(skel);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
index 15a3900e4370..f543d1bd21b8 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
@@ -4,22 +4,25 @@
 
 void test_xdp_perf(void)
 {
 	const char *file = "./xdp_dummy.o";
-	__u32 duration, retval, size;
 	struct bpf_object *obj;
 	char in[128], out[128];
 	int err, prog_fd;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = in,
+		.data_size_in = sizeof(in),
+		.data_out = out,
+		.data_size_out = sizeof(out),
+		.repeat = 1000000,
+	);
 
 	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
 	if (CHECK_FAIL(err))
 		return;
 
-	err = bpf_prog_test_run(prog_fd, 1000000, &in[0], 128,
-				out, &size, &retval, &duration);
-
-	CHECK(err || retval != XDP_PASS || size != 128,
-	      "xdp-perf",
-	      "err %d errno %d retval %d size %d\n",
-	      err, errno, retval, size);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(topts.retval, XDP_PASS, "test_run retval");
+	ASSERT_EQ(topts.data_size_out, 128, "test_run data_size_out");
 
 	bpf_object__close(obj);
 }
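The xdp_noinline.c and xdp_perf.c conversions above follow the same recipe for retiring bpf_prog_test_run(): inputs and outputs move into struct bpf_test_run_opts, and retval/data_size_out are read back from the opts struct instead of out-parameters. A condensed, standalone sketch of that pattern; prog_fd and the packet buffer are assumed to exist:

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <linux/bpf.h>	/* XDP_PASS */

/* Sketch: run an XDP program once and check it passes the packet through. */
static int xdp_run_once(int prog_fd, void *pkt, __u32 pkt_len)
{
	char out[256];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.data_out = out,
		.data_size_out = sizeof(out),	/* in: capacity, out: bytes written */
		.repeat = 1,
	);
	int err = bpf_prog_test_run_opts(prog_fd, &topts);

	if (err)
		return err;	/* syscall-level failure */

	/* topts.retval and topts.data_size_out replace the old
	 * retval/size out-parameters of bpf_prog_test_run().
	 */
	return topts.retval == XDP_PASS ? 0 : -1;
}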