-rw-r--r--  Documentation/networking/dns_resolver.rst | 4
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 25
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 7
-rw-r--r--  arch/loongarch/net/bpf_jit.c | 22
-rw-r--r--  arch/mips/net/bpf_jit_comp.c | 3
-rw-r--r--  arch/parisc/net/bpf_jit_core.c | 8
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 6
-rw-r--r--  arch/sparc/net/bpf_jit_comp_64.c | 6
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 10
-rw-r--r--  arch/x86/net/bpf_jit_comp32.c | 3
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 1
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 2
-rw-r--r--  include/linux/bpf.h | 9
-rw-r--r--  include/linux/filter.h | 20
-rw-r--r--  include/linux/netdevice.h | 1
-rw-r--r--  include/linux/skbuff.h | 6
-rw-r--r--  include/linux/trace_events.h | 36
-rw-r--r--  include/trace/bpf_probe.h | 3
-rw-r--r--  include/trace/events/net_probe_common.h | 29
-rw-r--r--  include/trace/events/sock.h | 35
-rw-r--r--  include/trace/events/tcp.h | 29
-rw-r--r--  include/uapi/linux/bpf.h | 6
-rw-r--r--  kernel/bpf/bpf_struct_ops.c | 10
-rw-r--r--  kernel/bpf/cgroup.c | 2
-rw-r--r--  kernel/bpf/core.c | 32
-rw-r--r--  kernel/bpf/helpers.c | 4
-rw-r--r--  kernel/bpf/lpm_trie.c | 18
-rw-r--r--  kernel/bpf/syscall.c | 22
-rw-r--r--  kernel/bpf/trampoline.c | 15
-rw-r--r--  kernel/bpf/verifier.c | 31
-rw-r--r--  kernel/trace/bpf_trace.c | 52
-rw-r--r--  net/bpf/bpf_dummy_struct_ops.c | 4
-rw-r--r--  net/core/datagram.c | 19
-rw-r--r--  net/core/dev.c | 219
-rw-r--r--  net/core/filter.c | 2
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  tools/bpf/bpftool/Makefile | 14
-rw-r--r--  tools/bpf/bpftool/gen.c | 3
-rw-r--r--  tools/bpf/bpftool/pids.c | 19
-rw-r--r--  tools/bpf/bpftool/skeleton/pid_iter.bpf.c | 4
-rw-r--r--  tools/include/linux/compiler.h | 4
-rw-r--r--  tools/include/uapi/linux/bpf.h | 6
-rw-r--r--  tools/lib/bpf/bpf.c | 16
-rw-r--r--  tools/lib/bpf/bpf.h | 9
-rw-r--r--  tools/lib/bpf/libbpf.c | 111
-rw-r--r--  tools/lib/bpf/libbpf.h | 11
-rw-r--r--  tools/lib/bpf/libbpf.map | 2
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_local_storage_create.c | 2
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_trigger.c | 48
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_cookie.c | 114
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c | 13
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c | 26
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c | 214
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c | 47
-rw-r--r--  tools/testing/selftests/bpf/progs/iters.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/struct_ops_module.c | 16
-rw-r--r--  tools/testing/selftests/bpf/progs/test_bpf_cookie.c | 16
-rw-r--r--  tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c | 31
-rw-r--r--  tools/testing/selftests/bpf/progs/trigger_bench.c | 39
-rw-r--r--  tools/testing/selftests/bpf/test_cpp.cpp | 5
-rwxr-xr-x  tools/testing/selftests/bpf/test_tc_tunnel.sh | 13
63 files changed, 1059 insertions, 434 deletions
diff --git a/Documentation/networking/dns_resolver.rst b/Documentation/networking/dns_resolver.rst
index add4d59a99a5..c0364f7070af 100644
--- a/Documentation/networking/dns_resolver.rst
+++ b/Documentation/networking/dns_resolver.rst
@@ -118,7 +118,7 @@ Keys of dns_resolver type can be read from userspace using keyctl_read() or
Mechanism
=========
-The dnsresolver module registers a key type called "dns_resolver". Keys of
+The dns_resolver module registers a key type called "dns_resolver". Keys of
this type are used to transport and cache DNS lookup results from userspace.
When dns_query() is invoked, it calls request_key() to search the local
@@ -152,4 +152,4 @@ Debugging
Debugging messages can be turned on dynamically by writing a 1 into the
following file::
- /sys/module/dnsresolver/parameters/debug
+ /sys/module/dns_resolver/parameters/debug
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 1d672457d02f..01516f83a95a 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -2222,28 +2222,21 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* If building the body of the JITed code fails somehow,
* we fall back to the interpretation.
*/
- if (build_body(&ctx) < 0) {
- image_ptr = NULL;
- bpf_jit_binary_free(header);
- prog = orig_prog;
- goto out_imms;
- }
+ if (build_body(&ctx) < 0)
+ goto out_free;
build_epilogue(&ctx);
/* 3.) Extra pass to validate JITed Code */
- if (validate_code(&ctx)) {
- image_ptr = NULL;
- bpf_jit_binary_free(header);
- prog = orig_prog;
- goto out_imms;
- }
+ if (validate_code(&ctx))
+ goto out_free;
flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
if (bpf_jit_enable > 1)
/* there are 2 passes here */
bpf_jit_dump(prog->len, image_size, 2, ctx.target);
- bpf_jit_binary_lock_ro(header);
+ if (bpf_jit_binary_lock_ro(header))
+ goto out_free;
prog->bpf_func = (void *)ctx.target;
prog->jited = 1;
prog->jited_len = image_size;
@@ -2260,5 +2253,11 @@ out:
bpf_jit_prog_release_other(prog, prog == orig_prog ?
tmp : orig_prog);
return prog;
+
+out_free:
+ image_ptr = NULL;
+ bpf_jit_binary_free(header);
+ prog = orig_prog;
+ goto out_imms;
}
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 122021f9bdfc..82d6c562e8c7 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -2176,12 +2176,9 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
bpf_prog_pack_free(image, size);
}
-void arch_protect_bpf_trampoline(void *image, unsigned int size)
-{
-}
-
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+int arch_protect_bpf_trampoline(void *image, unsigned int size)
{
+ return 0;
}
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index e73323d759d0..7dbefd4ba210 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1294,16 +1294,19 @@ skip_init_ctx:
flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));
if (!prog->is_func || extra_pass) {
+ int err;
+
if (extra_pass && ctx.idx != jit_data->ctx.idx) {
pr_err_once("multi-func JIT bug %d != %d\n",
ctx.idx, jit_data->ctx.idx);
- bpf_jit_binary_free(header);
- prog->bpf_func = NULL;
- prog->jited = 0;
- prog->jited_len = 0;
- goto out_offset;
+ goto out_free;
+ }
+ err = bpf_jit_binary_lock_ro(header);
+ if (err) {
+ pr_err_once("bpf_jit_binary_lock_ro() returned %d\n",
+ err);
+ goto out_free;
}
- bpf_jit_binary_lock_ro(header);
} else {
jit_data->ctx = ctx;
jit_data->image = image_ptr;
@@ -1334,6 +1337,13 @@ out:
out_offset = -1;
return prog;
+
+out_free:
+ bpf_jit_binary_free(header);
+ prog->bpf_func = NULL;
+ prog->jited = 0;
+ prog->jited_len = 0;
+ goto out_offset;
}
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
diff --git a/arch/mips/net/bpf_jit_comp.c b/arch/mips/net/bpf_jit_comp.c
index a40d926b6513..e355dfca4400 100644
--- a/arch/mips/net/bpf_jit_comp.c
+++ b/arch/mips/net/bpf_jit_comp.c
@@ -1012,7 +1012,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]);
/* Set as read-only exec and flush instruction cache */
- bpf_jit_binary_lock_ro(header);
+ if (bpf_jit_binary_lock_ro(header))
+ goto out_err;
flush_icache_range((unsigned long)header,
(unsigned long)&ctx.target[ctx.jit_index]);
diff --git a/arch/parisc/net/bpf_jit_core.c b/arch/parisc/net/bpf_jit_core.c
index d6ee2fd45550..979f45d4d1fb 100644
--- a/arch/parisc/net/bpf_jit_core.c
+++ b/arch/parisc/net/bpf_jit_core.c
@@ -167,7 +167,13 @@ skip_init_ctx:
bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
if (!prog->is_func || extra_pass) {
- bpf_jit_binary_lock_ro(jit_data->header);
+ if (bpf_jit_binary_lock_ro(jit_data->header)) {
+ bpf_jit_binary_free(jit_data->header);
+ prog->bpf_func = NULL;
+ prog->jited = 0;
+ prog->jited_len = 0;
+ goto out_offset;
+ }
prologue_len = ctx->epilogue_offset - ctx->body_len;
for (i = 0; i < prog->len; i++)
ctx->offset[i] += prologue_len;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 5af0402e94b8..fa2f824e3b06 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -2108,7 +2108,11 @@ skip_init_ctx:
print_fn_code(jit.prg_buf, jit.size_prg);
}
if (!fp->is_func || extra_pass) {
- bpf_jit_binary_lock_ro(header);
+ if (bpf_jit_binary_lock_ro(header)) {
+ bpf_jit_binary_free(header);
+ fp = orig_fp;
+ goto free_addrs;
+ }
} else {
jit_data->header = header;
jit_data->ctx = jit;
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index fa0759bfe498..73bf0aea8baf 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1602,7 +1602,11 @@ skip_init_ctx:
bpf_flush_icache(header, (u8 *)header + header->size);
if (!prog->is_func || extra_pass) {
- bpf_jit_binary_lock_ro(header);
+ if (bpf_jit_binary_lock_ro(header)) {
+ bpf_jit_binary_free(header);
+ prog = orig_prog;
+ goto out_off;
+ }
} else {
jit_data->ctx = ctx;
jit_data->image = image_ptr;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a7ba8e178645..3b639d6f2f54 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1351,8 +1351,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
break;
case BPF_ALU64 | BPF_MOV | BPF_X:
- if (insn->off == BPF_ADDR_SPACE_CAST &&
- insn->imm == 1U << 16) {
+ if (insn_is_cast_user(insn)) {
if (dst_reg != src_reg)
/* 32-bit mov */
emit_mov_reg(&prog, false, dst_reg, src_reg);
@@ -3004,12 +3003,9 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
bpf_prog_pack_free(image, size);
}
-void arch_protect_bpf_trampoline(void *image, unsigned int size)
-{
-}
-
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+int arch_protect_bpf_trampoline(void *image, unsigned int size)
{
+ return 0;
}
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index c10083a8e68e..de0f9e5f9f73 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -2600,8 +2600,7 @@ out_image:
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, proglen, pass + 1, image);
- if (image) {
- bpf_jit_binary_lock_ro(header);
+ if (image && !bpf_jit_binary_lock_ro(header)) {
prog->bpf_func = (void *)image;
prog->jited = 1;
prog->jited_len = proglen;
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index ea6cfc2095e1..c1b5e9a94308 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1796,7 +1796,6 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
lp = netdev_priv(dev);
lp->pci_dev = pdev;
lp->amd8111e_net_dev = dev;
- lp->pm_cap = pdev->pm_cap;
spin_lock_init(&lp->lock);
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 9d570adb295b..305232f5476d 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -764,7 +764,6 @@ struct amd8111e_priv{
u32 ext_phy_id;
struct amd8111e_link_config link_config;
- int pm_cap;
struct net_device *next;
int mii;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 17faac715882..5c13bcb4550d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -406,7 +406,7 @@ free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
for (i = 0; i < nq; i++) {
struct sge_uld_txq *txq = &txq_info->uldtxq[i];
- if (txq && txq->q.desc) {
+ if (txq->q.desc) {
tasklet_kill(&txq->qresume_tsk);
t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
txq->q.cntxt_id);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 382e8de1255d..7ae04d8d291c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -12,10 +12,8 @@
#include <linux/clk.h>
#include <linux/phy.h>
#include <linux/of_net.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4f20f62f9d63..62762390c93d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1116,8 +1116,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
void *func_addr);
void *arch_alloc_bpf_trampoline(unsigned int size);
void arch_free_bpf_trampoline(void *image, unsigned int size);
-void arch_protect_bpf_trampoline(void *image, unsigned int size);
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
+int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size);
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
struct bpf_tramp_links *tlinks, void *func_addr);
@@ -1608,6 +1607,12 @@ struct bpf_tracing_link {
struct bpf_prog *tgt_prog;
};
+struct bpf_raw_tp_link {
+ struct bpf_link link;
+ struct bpf_raw_event_map *btp;
+ u64 cookie;
+};
+
struct bpf_link_primer {
struct bpf_link *link;
struct file *file;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index c99bc3df2d28..44934b968b57 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -228,6 +228,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
}
+/* addr_space_cast from as(0) to as(1) is for converting bpf arena pointers
+ * to pointers in user vma.
+ */
+static inline bool insn_is_cast_user(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
+ insn->off == BPF_ADDR_SPACE_CAST &&
+ insn->imm == 1U << 16;
+}
+
/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
BPF_LD_IMM64_RAW(DST, 0, IMM)
@@ -887,20 +897,22 @@ bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
+static inline int __must_check bpf_prog_lock_ro(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
if (!fp->jited) {
set_vm_flush_reset_perms(fp);
- set_memory_ro((unsigned long)fp, fp->pages);
+ return set_memory_ro((unsigned long)fp, fp->pages);
}
#endif
+ return 0;
}
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+static inline int __must_check
+bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
set_vm_flush_reset_perms(hdr);
- set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
+ return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cb37817d6382..e41d30ebaca6 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3287,6 +3287,7 @@ static inline void dev_xmit_recursion_dec(void)
__this_cpu_dec(softnet_data.xmit.recursion);
}
+void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9d24aec064e8..517e546a120a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4058,12 +4058,6 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
-static inline void skb_free_datagram_locked(struct sock *sk,
- struct sk_buff *skb)
-{
- __skb_free_datagram_locked(sk, skb, 0);
-}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 6f9bdfb09d1d..9df3e2973626 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -765,8 +765,11 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
-int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
-int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
+
+struct bpf_raw_tp_link;
+int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);
+int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);
+
struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
@@ -794,11 +797,12 @@ perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
return -EOPNOTSUPP;
}
-static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
+struct bpf_raw_tp_link;
+static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
return -EOPNOTSUPP;
}
-static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
+static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
return -EOPNOTSUPP;
}
@@ -909,31 +913,31 @@ void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_free_bpf_prog(struct perf_event *event);
-void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
-void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
-void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run1(struct bpf_raw_tp_link *link, u64 arg1);
+void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2);
+void bpf_trace_run3(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3);
-void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run4(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4);
-void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run5(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5);
-void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run6(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6);
-void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run7(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
-void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run8(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8);
-void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run9(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8, u64 arg9);
-void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run10(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8, u64 arg9, u64 arg10);
-void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run11(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8, u64 arg9, u64 arg10, u64 arg11);
-void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
+void bpf_trace_run12(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index e609cd7da47e..a2ea11cc912e 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -46,8 +46,7 @@
static notrace void \
__bpf_trace_##call(void *__data, proto) \
{ \
- struct bpf_prog *prog = __data; \
- CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args)); \
+ CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(__data, CAST_TO_U64(args)); \
}
#undef DECLARE_EVENT_CLASS
diff --git a/include/trace/events/net_probe_common.h b/include/trace/events/net_probe_common.h
index 3930119cab08..b1f9a4d3ee13 100644
--- a/include/trace/events/net_probe_common.h
+++ b/include/trace/events/net_probe_common.h
@@ -41,4 +41,33 @@
#endif
+#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \
+ do { \
+ struct in6_addr *pin6; \
+ \
+ pin6 = (struct in6_addr *)__entry->saddr_v6; \
+ ipv6_addr_set_v4mapped(saddr, pin6); \
+ pin6 = (struct in6_addr *)__entry->daddr_v6; \
+ ipv6_addr_set_v4mapped(daddr, pin6); \
+ } while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \
+ do { \
+ if (sk->sk_family == AF_INET6) { \
+ struct in6_addr *pin6; \
+ \
+ pin6 = (struct in6_addr *)__entry->saddr_v6; \
+ *pin6 = saddr6; \
+ pin6 = (struct in6_addr *)__entry->daddr_v6; \
+ *pin6 = daddr6; \
+ } else { \
+ TP_STORE_V4MAPPED(__entry, saddr, daddr); \
+ } \
+ } while (0)
+#else
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \
+ TP_STORE_V4MAPPED(__entry, saddr, daddr)
+#endif
+
#endif
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index fd206a6ab5b8..0d1c5ce4e6a6 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -10,6 +10,7 @@
#include <linux/tracepoint.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
+#include <trace/events/net_probe_common.h>
#define family_names \
EM(AF_INET) \
@@ -159,7 +160,6 @@ TRACE_EVENT(inet_sock_set_state,
TP_fast_assign(
const struct inet_sock *inet = inet_sk(sk);
- struct in6_addr *pin6;
__be32 *p32;
__entry->skaddr = sk;
@@ -177,20 +177,8 @@ TRACE_EVENT(inet_sock_set_state,
p32 = (__be32 *) __entry->daddr;
*p32 = inet->inet_daddr;
-#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == AF_INET6) {
- pin6 = (struct in6_addr *)__entry->saddr_v6;
- *pin6 = sk->sk_v6_rcv_saddr;
- pin6 = (struct in6_addr *)__entry->daddr_v6;
- *pin6 = sk->sk_v6_daddr;
- } else
-#endif
- {
- pin6 = (struct in6_addr *)__entry->saddr_v6;
- ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
- pin6 = (struct in6_addr *)__entry->daddr_v6;
- ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
- }
+ TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+ sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
),
TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
@@ -223,7 +211,6 @@ TRACE_EVENT(inet_sk_error_report,
TP_fast_assign(
const struct inet_sock *inet = inet_sk(sk);
- struct in6_addr *pin6;
__be32 *p32;
__entry->error = sk->sk_err;
@@ -238,20 +225,8 @@ TRACE_EVENT(inet_sk_error_report,
p32 = (__be32 *) __entry->daddr;
*p32 = inet->inet_daddr;
-#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == AF_INET6) {
- pin6 = (struct in6_addr *)__entry->saddr_v6;
- *pin6 = sk->sk_v6_rcv_saddr;
- pin6 = (struct in6_addr *)__entry->daddr_v6;
- *pin6 = sk->sk_v6_daddr;
- } else
-#endif
- {
- pin6 = (struct in6_addr *)__entry->saddr_v6;
- ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
- pin6 = (struct in6_addr *)__entry->daddr_v6;
- ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
- }
+ TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+ sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
),
TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c error=%d",
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 699dafd204ea..3c08a0846c47 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -12,35 +12,6 @@
#include <net/tcp.h>
#include <linux/sock_diag.h>
-#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \
- do { \
- struct in6_addr *pin6; \
- \
- pin6 = (struct in6_addr *)__entry->saddr_v6; \
- ipv6_addr_set_v4mapped(saddr, pin6); \
- pin6 = (struct in6_addr *)__entry->daddr_v6; \
- ipv6_addr_set_v4mapped(daddr, pin6); \
- } while (0)
-
-#if IS_ENABLED(CONFIG_IPV6)
-#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \
- do { \
- if (sk->sk_family == AF_INET6) { \
- struct in6_addr *pin6; \
- \
- pin6 = (struct in6_addr *)__entry->saddr_v6; \
- *pin6 = saddr6; \
- pin6 = (struct in6_addr *)__entry->daddr_v6; \
- *pin6 = daddr6; \
- } else { \
- TP_STORE_V4MAPPED(__entry, saddr, daddr); \
- } \
- } while (0)
-#else
-#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \
- TP_STORE_V4MAPPED(__entry, saddr, daddr)
-#endif
-
/*
* tcp event with arguments sk and skb
*
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 3c42b9f1bada..9585f5345353 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1662,8 +1662,10 @@ union bpf_attr {
} query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
- __u64 name;
- __u32 prog_fd;
+ __u64 name;
+ __u32 prog_fd;
+ __u32 :32;
+ __aligned_u64 cookie;
} raw_tracepoint;
struct { /* anonymous struct for BPF_BTF_LOAD */
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 43356faaa057..86c7884abaf8 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -728,8 +728,6 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
cur_image = image;
trampoline_start = 0;
}
- if (err < 0)
- goto reset_unlock;
*(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();
@@ -742,8 +740,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
if (err)
goto reset_unlock;
}
- for (i = 0; i < st_map->image_pages_cnt; i++)
- arch_protect_bpf_trampoline(st_map->image_pages[i], PAGE_SIZE);
+ for (i = 0; i < st_map->image_pages_cnt; i++) {
+ err = arch_protect_bpf_trampoline(st_map->image_pages[i],
+ PAGE_SIZE);
+ if (err)
+ goto reset_unlock;
+ }
if (st_map->map.map_flags & BPF_F_LINK) {
err = 0;
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 82243cb6c54d..8ba73042a239 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -2575,8 +2575,6 @@ cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
switch (func_id) {
case BPF_FUNC_get_current_uid_gid:
return &bpf_get_current_uid_gid_proto;
- case BPF_FUNC_get_current_pid_tgid:
- return &bpf_get_current_pid_tgid_proto;
case BPF_FUNC_get_current_comm:
return &bpf_get_current_comm_proto;
#ifdef CONFIG_CGROUP_NET_CLASSID
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 696bc55de8e8..5aacb1d3c4cc 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -908,23 +908,30 @@ static LIST_HEAD(pack_list);
static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
struct bpf_prog_pack *pack;
+ int err;
pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
GFP_KERNEL);
if (!pack)
return NULL;
pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
- if (!pack->ptr) {
- kfree(pack);
- return NULL;
- }
+ if (!pack->ptr)
+ goto out;
bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
- list_add_tail(&pack->list, &pack_list);
set_vm_flush_reset_perms(pack->ptr);
- set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
+ err = set_memory_rox((unsigned long)pack->ptr,
+ BPF_PROG_PACK_SIZE / PAGE_SIZE);
+ if (err)
+ goto out;
+ list_add_tail(&pack->list, &pack_list);
return pack;
+
+out:
+ bpf_jit_free_exec(pack->ptr);
+ kfree(pack);
+ return NULL;
}
void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
@@ -939,9 +946,16 @@ void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
size = round_up(size, PAGE_SIZE);
ptr = bpf_jit_alloc_exec(size);
if (ptr) {
+ int err;
+
bpf_fill_ill_insns(ptr, size);
set_vm_flush_reset_perms(ptr);
- set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
+ err = set_memory_rox((unsigned long)ptr,
+ size / PAGE_SIZE);
+ if (err) {
+ bpf_jit_free_exec(ptr);
+ ptr = NULL;
+ }
}
goto out;
}
@@ -2403,7 +2417,9 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
}
finalize:
- bpf_prog_lock_ro(fp);
+ *err = bpf_prog_lock_ro(fp);
+ if (*err)
+ return fp;
/* The tail call compatibility check can only be done at
* this late stage as we need to determine, if we deal
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 449b9a5d3fe3..f860adab3eb9 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1730,6 +1730,10 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_strtol_proto;
case BPF_FUNC_strtoul:
return &bpf_strtoul_proto;
+ case BPF_FUNC_get_current_pid_tgid:
+ return &bpf_get_current_pid_tgid_proto;
+ case BPF_FUNC_get_ns_current_pid_tgid:
+ return &bpf_get_ns_current_pid_tgid_proto;
default:
break;
}
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 050fe1ebf0f7..939620b91c0e 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -155,16 +155,17 @@ static inline int extract_bit(const u8 *data, size_t index)
}
/**
- * longest_prefix_match() - determine the longest prefix
+ * __longest_prefix_match() - determine the longest prefix
* @trie: The trie to get internal sizes from
* @node: The node to operate on
* @key: The key to compare to @node
*
* Determine the longest prefix of @node that matches the bits in @key.
*/
-static size_t longest_prefix_match(const struct lpm_trie *trie,
- const struct lpm_trie_node *node,
- const struct bpf_lpm_trie_key_u8 *key)
+static __always_inline
+size_t __longest_prefix_match(const struct lpm_trie *trie,
+ const struct lpm_trie_node *node,
+ const struct bpf_lpm_trie_key_u8 *key)
{
u32 limit = min(node->prefixlen, key->prefixlen);
u32 prefixlen = 0, i = 0;
@@ -224,6 +225,13 @@ static size_t longest_prefix_match(const struct lpm_trie *trie,
return prefixlen;
}
+static size_t longest_prefix_match(const struct lpm_trie *trie,
+ const struct lpm_trie_node *node,
+ const struct bpf_lpm_trie_key_u8 *key)
+{
+ return __longest_prefix_match(trie, node, key);
+}
+
/* Called from syscall or from eBPF program */
static void *trie_lookup_elem(struct bpf_map *map, void *_key)
{
@@ -245,7 +253,7 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
* If it's the maximum possible prefix for this trie, we have
* an exact match and can return it directly.
*/
- matchlen = longest_prefix_match(trie, node, key);
+ matchlen = __longest_prefix_match(trie, node, key);
if (matchlen == trie->max_prefixlen) {
found = node;
break;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ae2ff73bde7e..e44c276e8617 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3469,17 +3469,12 @@ out_put_prog:
return err;
}
-struct bpf_raw_tp_link {
- struct bpf_link link;
- struct bpf_raw_event_map *btp;
-};
-
static void bpf_raw_tp_link_release(struct bpf_link *link)
{
struct bpf_raw_tp_link *raw_tp =
container_of(link, struct bpf_raw_tp_link, link);
- bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
+ bpf_probe_unregister(raw_tp->btp, raw_tp);
bpf_put_raw_tracepoint(raw_tp->btp);
}
@@ -3779,7 +3774,7 @@ static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *pro
#endif /* CONFIG_PERF_EVENTS */
static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
- const char __user *user_tp_name)
+ const char __user *user_tp_name, u64 cookie)
{
struct bpf_link_primer link_primer;
struct bpf_raw_tp_link *link;
@@ -3826,6 +3821,7 @@ static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
&bpf_raw_tp_link_lops, prog);
link->btp = btp;
+ link->cookie = cookie;
err = bpf_link_prime(&link->link, &link_primer);
if (err) {
@@ -3833,7 +3829,7 @@ static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
goto out_put_btp;
}
- err = bpf_probe_register(link->btp, prog);
+ err = bpf_probe_register(link->btp, link);
if (err) {
bpf_link_cleanup(&link_primer);
goto out_put_btp;
@@ -3846,11 +3842,13 @@ out_put_btp:
return err;
}
-#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
+#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie
static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
{
struct bpf_prog *prog;
+ void __user *tp_name;
+ __u64 cookie;
int fd;
if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
@@ -3860,7 +3858,9 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
if (IS_ERR(prog))
return PTR_ERR(prog);
- fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name));
+ tp_name = u64_to_user_ptr(attr->raw_tracepoint.name);
+ cookie = attr->raw_tracepoint.cookie;
+ fd = bpf_raw_tp_link_attach(prog, tp_name, cookie);
if (fd < 0)
bpf_prog_put(prog);
return fd;
@@ -5198,7 +5198,7 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
goto out;
}
if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
- ret = bpf_raw_tp_link_attach(prog, NULL);
+ ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie);
else if (prog->expected_attach_type == BPF_TRACE_ITER)
ret = bpf_iter_link_attach(attr, uattr, prog);
else if (prog->expected_attach_type == BPF_LSM_CGROUP)
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index db7599c59c78..cc50607f8d8c 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -456,7 +456,9 @@ again:
if (err < 0)
goto out_free;
- arch_protect_bpf_trampoline(im->image, im->size);
+ err = arch_protect_bpf_trampoline(im->image, im->size);
+ if (err)
+ goto out_free;
WARN_ON(tr->cur_image && total == 0);
if (tr->cur_image)
@@ -1072,17 +1074,10 @@ void __weak arch_free_bpf_trampoline(void *image, unsigned int size)
bpf_jit_free_exec(image);
}
-void __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
-{
- WARN_ON_ONCE(size > PAGE_SIZE);
- set_memory_rox((long)image, 1);
-}
-
-void __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+int __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
{
WARN_ON_ONCE(size > PAGE_SIZE);
- set_memory_nx((long)image, 1);
- set_memory_rw((long)image, 1);
+ return set_memory_rox((long)image, 1);
}
int __weak arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 353985b2b6a2..17f26ba1a9e0 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -14564,7 +14564,19 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state
struct tnum t;
u64 val;
-again:
+	/* In case of GE/GT/SGE/SGT, reuse LE/LT/SLE/SLT logic from below */
+ switch (opcode) {
+ case BPF_JGE:
+ case BPF_JGT:
+ case BPF_JSGE:
+ case BPF_JSGT:
+ opcode = flip_opcode(opcode);
+ swap(reg1, reg2);
+ break;
+ default:
+ break;
+ }
+
switch (opcode) {
case BPF_JEQ:
if (is_jmp32) {
@@ -14707,14 +14719,6 @@ again:
reg2->smin_value = max(reg1->smin_value + 1, reg2->smin_value);
}
break;
- case BPF_JGE:
- case BPF_JGT:
- case BPF_JSGE:
- case BPF_JSGT:
- /* just reuse LE/LT logic above */
- opcode = flip_opcode(opcode);
- swap(reg1, reg2);
- goto again;
default:
return;
}
@@ -19178,6 +19182,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
if (bpf_prog_calc_tag(func[i]))
goto out_free;
func[i]->is_func = 1;
+ func[i]->sleepable = prog->sleepable;
func[i]->aux->func_idx = i;
/* Below members will be freed only at prog->aux */
func[i]->aux->btf = prog->aux->btf;
@@ -19282,10 +19287,14 @@ static int jit_subprogs(struct bpf_verifier_env *env)
* bpf_prog_load will add the kallsyms for the main program.
*/
for (i = 1; i < env->subprog_cnt; i++) {
- bpf_prog_lock_ro(func[i]);
- bpf_prog_kallsyms_add(func[i]);
+ err = bpf_prog_lock_ro(func[i]);
+ if (err)
+ goto out_free;
}
+ for (i = 1; i < env->subprog_cnt; i++)
+ bpf_prog_kallsyms_add(func[i]);
+
/* Last step: make now unused interpreter insns from main
* prog consistent for later dump requests, so they can
* later look the same as if they were interpreted only.
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 0a5c4efc73c3..6d0c95638e1b 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1053,9 +1053,15 @@ static unsigned long get_entry_ip(unsigned long fentry_ip)
{
u32 instr;
- /* Being extra safe in here in case entry ip is on the page-edge. */
- if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
- return fentry_ip;
+ /* We want to be extra safe in case entry ip is on the page edge,
+ * but otherwise we need to avoid get_kernel_nofault()'s overhead.
+ */
+ if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
+ if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
+ return fentry_ip;
+ } else {
+ instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
+ }
if (is_endbr(instr))
fentry_ip -= ENDBR_INSN_SIZE;
return fentry_ip;
@@ -1525,8 +1531,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_ktime_get_boot_ns_proto;
case BPF_FUNC_tail_call:
return &bpf_tail_call_proto;
- case BPF_FUNC_get_current_pid_tgid:
- return &bpf_get_current_pid_tgid_proto;
case BPF_FUNC_get_current_task:
return &bpf_get_current_task_proto;
case BPF_FUNC_get_current_task_btf:
@@ -1582,8 +1586,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_send_signal_thread_proto;
case BPF_FUNC_perf_event_read_value:
return &bpf_perf_event_read_value_proto;
- case BPF_FUNC_get_ns_current_pid_tgid:
- return &bpf_get_ns_current_pid_tgid_proto;
case BPF_FUNC_ringbuf_output:
return &bpf_ringbuf_output_proto;
case BPF_FUNC_ringbuf_reserve:
@@ -2008,6 +2010,8 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_stackid_proto_raw_tp;
case BPF_FUNC_get_stack:
return &bpf_get_stack_proto_raw_tp;
+ case BPF_FUNC_get_attach_cookie:
+ return &bpf_get_attach_cookie_proto_tracing;
default:
return bpf_tracing_func_proto(func_id, prog);
}
@@ -2070,6 +2074,9 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_func_arg_cnt:
return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
case BPF_FUNC_get_attach_cookie:
+ if (prog->type == BPF_PROG_TYPE_TRACING &&
+ prog->expected_attach_type == BPF_TRACE_RAW_TP)
+ return &bpf_get_attach_cookie_proto_tracing;
return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
default:
fn = raw_tp_prog_func_proto(func_id, prog);
@@ -2370,16 +2377,26 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
}
static __always_inline
-void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
+void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
{
+ struct bpf_prog *prog = link->link.prog;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_trace_run_ctx run_ctx;
+
cant_sleep();
if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
bpf_prog_inc_misses_counter(prog);
goto out;
}
+
+ run_ctx.bpf_cookie = link->cookie;
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+
rcu_read_lock();
(void) bpf_prog_run(prog, args);
rcu_read_unlock();
+
+ bpf_reset_run_ctx(old_run_ctx);
out:
this_cpu_dec(*(prog->active));
}
@@ -2408,12 +2425,12 @@ out:
#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
#define BPF_TRACE_DEFN_x(x) \
- void bpf_trace_run##x(struct bpf_prog *prog, \
+ void bpf_trace_run##x(struct bpf_raw_tp_link *link, \
REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
{ \
u64 args[x]; \
REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
- __bpf_trace_run(prog, args); \
+ __bpf_trace_run(link, args); \
} \
EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
@@ -2429,9 +2446,10 @@ BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
-static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
+int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
struct tracepoint *tp = btp->tp;
+ struct bpf_prog *prog = link->link.prog;
/*
* check that program doesn't access arguments beyond what's
@@ -2443,18 +2461,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
if (prog->aux->max_tp_access > btp->writable_size)
return -EINVAL;
- return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
- prog);
-}
-
-int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
-{
- return __bpf_probe_register(btp, prog);
+ return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link);
}
-int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
+int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
- return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
+ return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link);
}
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index de33dc1b0daa..25b75844891a 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -133,7 +133,9 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
if (err < 0)
goto out;
- arch_protect_bpf_trampoline(image, PAGE_SIZE);
+ err = arch_protect_bpf_trampoline(image, PAGE_SIZE);
+ if (err)
+ goto out;
prog_ret = dummy_ops_call_op(image, args);
err = dummy_ops_copy_args(args);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index a8b625abe242..e614cfd8e14a 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -324,25 +324,6 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(skb_free_datagram);
-void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
-{
- bool slow;
-
- if (!skb_unref(skb)) {
- sk_peek_offset_bwd(sk, len);
- return;
- }
-
- slow = lock_sock_fast(sk);
- sk_peek_offset_bwd(sk, len);
- skb_orphan(skb);
- unlock_sock_fast(sk, slow);
-
- /* skb is now orphaned, can be freed outside of locked section */
- __kfree_skb(skb);
-}
-EXPORT_SYMBOL(__skb_free_datagram_locked);
-
int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
struct sk_buff *skb, unsigned int flags,
void (*destructor)(struct sock *sk,
diff --git a/net/core/dev.c b/net/core/dev.c
index 9a67003e49db..5d36a634f468 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -78,6 +78,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
+#include <linux/smpboot.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
@@ -197,35 +198,60 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
-static inline void rps_lock_irqsave(struct softnet_data *sd,
- unsigned long *flags)
+#ifndef CONFIG_PREEMPT_RT
+
+static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);
+
+static int __init setup_backlog_napi_threads(char *arg)
+{
+ static_branch_enable(&use_backlog_threads_key);
+ return 0;
+}
+early_param("thread_backlog_napi", setup_backlog_napi_threads);
+
+static bool use_backlog_threads(void)
+{
+ return static_branch_unlikely(&use_backlog_threads_key);
+}
+
+#else
+
+static bool use_backlog_threads(void)
{
- if (IS_ENABLED(CONFIG_RPS))
+ return true;
+}
+
+#endif
+
+static inline void backlog_lock_irq_save(struct softnet_data *sd,
+ unsigned long *flags)
+{
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_save(*flags);
}
-static inline void rps_lock_irq_disable(struct softnet_data *sd)
+static inline void backlog_lock_irq_disable(struct softnet_data *sd)
{
- if (IS_ENABLED(CONFIG_RPS))
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_lock_irq(&sd->input_pkt_queue.lock);
else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_disable();
}
-static inline void rps_unlock_irq_restore(struct softnet_data *sd,
- unsigned long *flags)
+static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
+ unsigned long *flags)
{
- if (IS_ENABLED(CONFIG_RPS))
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_restore(*flags);
}
-static inline void rps_unlock_irq_enable(struct softnet_data *sd)
+static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
{
- if (IS_ENABLED(CONFIG_RPS))
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_unlock_irq(&sd->input_pkt_queue.lock);
else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_enable();
@@ -4404,6 +4430,7 @@ EXPORT_SYMBOL(__dev_direct_xmit);
/*************************************************************************
* Receiver routines
*************************************************************************/
+static DEFINE_PER_CPU(struct task_struct *, backlog_napi);
unsigned int sysctl_skb_defer_max __read_mostly = 64;
int weight_p __read_mostly = 64; /* old backlog weight */
@@ -4427,18 +4454,16 @@ static inline void ____napi_schedule(struct softnet_data *sd,
*/
thread = READ_ONCE(napi->thread);
if (thread) {
- /* Avoid doing set_bit() if the thread is in
- * INTERRUPTIBLE state, cause napi_thread_wait()
- * makes sure to proceed with napi polling
- * if the thread is explicitly woken from here.
- */
- if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
- set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
+ if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
+ goto use_local_napi;
+
+ set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
wake_up_process(thread);
return;
}
}
+use_local_napi:
list_add_tail(&napi->poll_list, &sd->poll_list);
WRITE_ONCE(napi->list_owner, smp_processor_id());
/* If not called from net_rx_action()
@@ -4678,6 +4703,11 @@ static void napi_schedule_rps(struct softnet_data *sd)
#ifdef CONFIG_RPS
if (sd != mysd) {
+ if (use_backlog_threads()) {
+ __napi_schedule_irqoff(&sd->backlog);
+ return;
+ }
+
sd->rps_ipi_next = mysd->rps_ipi_list;
mysd->rps_ipi_list = sd;
@@ -4692,6 +4722,23 @@ static void napi_schedule_rps(struct softnet_data *sd)
__napi_schedule_irqoff(&mysd->backlog);
}
+void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
+{
+ unsigned long flags;
+
+ if (use_backlog_threads()) {
+ backlog_lock_irq_save(sd, &flags);
+
+ if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
+ __napi_schedule_irqoff(&sd->backlog);
+
+ backlog_unlock_irq_restore(sd, &flags);
+
+ } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
+ smp_call_function_single_async(cpu, &sd->defer_csd);
+ }
+}
+
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif
@@ -4747,7 +4794,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
reason = SKB_DROP_REASON_NOT_SPECIFIED;
sd = &per_cpu(softnet_data, cpu);
- rps_lock_irqsave(sd, &flags);
+ backlog_lock_irq_save(sd, &flags);
if (!netif_running(skb->dev))
goto drop;
qlen = skb_queue_len(&sd->input_pkt_queue);
@@ -4757,7 +4804,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
enqueue:
__skb_queue_tail(&sd->input_pkt_queue, skb);
input_queue_tail_incr_save(sd, qtail);
- rps_unlock_irq_restore(sd, &flags);
+ backlog_unlock_irq_restore(sd, &flags);
return NET_RX_SUCCESS;
}
@@ -4772,7 +4819,7 @@ enqueue:
drop:
sd->dropped++;
- rps_unlock_irq_restore(sd, &flags);
+ backlog_unlock_irq_restore(sd, &flags);
dev_core_stats_rx_dropped_inc(skb->dev);
kfree_skb_reason(skb, reason);
@@ -5838,7 +5885,7 @@ static void flush_backlog(struct work_struct *work)
local_bh_disable();
sd = this_cpu_ptr(&softnet_data);
- rps_lock_irq_disable(sd);
+ backlog_lock_irq_disable(sd);
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -5846,7 +5893,7 @@ static void flush_backlog(struct work_struct *work)
input_queue_head_incr(sd);
}
}
- rps_unlock_irq_enable(sd);
+ backlog_unlock_irq_enable(sd);
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
@@ -5864,14 +5911,14 @@ static bool flush_required(int cpu)
struct softnet_data *sd = &per_cpu(softnet_data, cpu);
bool do_flush;
- rps_lock_irq_disable(sd);
+ backlog_lock_irq_disable(sd);
/* as insertion into process_queue happens with the rps lock held,
* process_queue access may race only with dequeue
*/
do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
!skb_queue_empty_lockless(&sd->process_queue);
- rps_unlock_irq_enable(sd);
+ backlog_unlock_irq_enable(sd);
return do_flush;
#endif
@@ -5937,7 +5984,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
#ifdef CONFIG_RPS
struct softnet_data *remsd = sd->rps_ipi_list;
- if (remsd) {
+ if (!use_backlog_threads() && remsd) {
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -5952,7 +5999,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- return sd->rps_ipi_list != NULL;
+ return !use_backlog_threads() && sd->rps_ipi_list;
#else
return false;
#endif
@@ -5986,7 +6033,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
}
- rps_lock_irq_disable(sd);
+ backlog_lock_irq_disable(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
* Inline a custom version of __napi_complete().
@@ -5996,13 +6043,13 @@ static int process_backlog(struct napi_struct *napi, int quota)
* We can use a plain write instead of clear_bit(),
* and we dont need an smp_mb() memory barrier.
*/
- napi->state = 0;
+ napi->state &= NAPIF_STATE_THREADED;
again = false;
} else {
skb_queue_splice_tail_init(&sd->input_pkt_queue,
&sd->process_queue);
}
- rps_unlock_irq_enable(sd);
+ backlog_unlock_irq_enable(sd);
}
return work;
@@ -6710,8 +6757,6 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
static int napi_thread_wait(struct napi_struct *napi)
{
- bool woken = false;
-
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
@@ -6720,15 +6765,13 @@ static int napi_thread_wait(struct napi_struct *napi)
* Testing SCHED bit is not enough because SCHED bit might be
* set by some other busy poll thread or by napi_disable().
*/
- if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
+ if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
WARN_ON(!list_empty(&napi->poll_list));
__set_current_state(TASK_RUNNING);
return 0;
}
schedule();
- /* woken being true indicates this thread owns this napi. */
- woken = true;
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
@@ -6736,43 +6779,48 @@ static int napi_thread_wait(struct napi_struct *napi)
return -1;
}
-static int napi_threaded_poll(void *data)
+static void napi_threaded_poll_loop(struct napi_struct *napi)
{
- struct napi_struct *napi = data;
struct softnet_data *sd;
- void *have;
+ unsigned long last_qs = jiffies;
- while (!napi_thread_wait(napi)) {
- unsigned long last_qs = jiffies;
-
- for (;;) {
- bool repoll = false;
+ for (;;) {
+ bool repoll = false;
+ void *have;
- local_bh_disable();
- sd = this_cpu_ptr(&softnet_data);
- sd->in_napi_threaded_poll = true;
+ local_bh_disable();
+ sd = this_cpu_ptr(&softnet_data);
+ sd->in_napi_threaded_poll = true;
- have = netpoll_poll_lock(napi);
- __napi_poll(napi, &repoll);
- netpoll_poll_unlock(have);
+ have = netpoll_poll_lock(napi);
+ __napi_poll(napi, &repoll);
+ netpoll_poll_unlock(have);
- sd->in_napi_threaded_poll = false;
- barrier();
+ sd->in_napi_threaded_poll = false;
+ barrier();
- if (sd_has_rps_ipi_waiting(sd)) {
- local_irq_disable();
- net_rps_action_and_irq_enable(sd);
- }
- skb_defer_free_flush(sd);
- local_bh_enable();
+ if (sd_has_rps_ipi_waiting(sd)) {
+ local_irq_disable();
+ net_rps_action_and_irq_enable(sd);
+ }
+ skb_defer_free_flush(sd);
+ local_bh_enable();
- if (!repoll)
- break;
+ if (!repoll)
+ break;
- rcu_softirq_qs_periodic(last_qs);
- cond_resched();
- }
+ rcu_softirq_qs_periodic(last_qs);
+ cond_resched();
}
+}
+
+static int napi_threaded_poll(void *data)
+{
+ struct napi_struct *napi = data;
+
+ while (!napi_thread_wait(napi))
+ napi_threaded_poll_loop(napi);
+
return 0;
}
@@ -11373,7 +11421,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
list_del_init(&napi->poll_list);
if (napi->poll == process_backlog)
- napi->state = 0;
+ napi->state &= NAPIF_STATE_THREADED;
else
____napi_schedule(sd, napi);
}
@@ -11381,12 +11429,14 @@ static int dev_cpu_dead(unsigned int oldcpu)
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
+ if (!use_backlog_threads()) {
#ifdef CONFIG_RPS
- remsd = oldsd->rps_ipi_list;
- oldsd->rps_ipi_list = NULL;
+ remsd = oldsd->rps_ipi_list;
+ oldsd->rps_ipi_list = NULL;
#endif
- /* send out pending IPI's on offline CPU */
- net_rps_send_ipi(remsd);
+ /* send out pending IPI's on offline CPU */
+ net_rps_send_ipi(remsd);
+ }
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
@@ -11712,7 +11762,7 @@ static int net_page_pool_create(int cpuid)
struct page_pool_params page_pool_params = {
.pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
.flags = PP_FLAG_SYSTEM_POOL,
- .nid = NUMA_NO_NODE,
+ .nid = cpu_to_mem(cpuid),
};
struct page_pool *pp_ptr;
@@ -11725,6 +11775,38 @@ static int net_page_pool_create(int cpuid)
return 0;
}
+static int backlog_napi_should_run(unsigned int cpu)
+{
+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
+ struct napi_struct *napi = &sd->backlog;
+
+ return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
+}
+
+static void run_backlog_napi(unsigned int cpu)
+{
+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
+
+ napi_threaded_poll_loop(&sd->backlog);
+}
+
+static void backlog_napi_setup(unsigned int cpu)
+{
+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
+ struct napi_struct *napi = &sd->backlog;
+
+ napi->thread = this_cpu_read(backlog_napi);
+ set_bit(NAPI_STATE_THREADED, &napi->state);
+}
+
+static struct smp_hotplug_thread backlog_threads = {
+ .store = &backlog_napi,
+ .thread_should_run = backlog_napi_should_run,
+ .thread_fn = run_backlog_napi,
+ .thread_comm = "backlog_napi/%u",
+ .setup = backlog_napi_setup,
+};
+
/*
* This is called single threaded during boot, so no need
* to take the rtnl semaphore.
@@ -11776,10 +11858,13 @@ static int __init net_dev_init(void)
init_gro_hash(&sd->backlog);
sd->backlog.poll = process_backlog;
sd->backlog.weight = weight_p;
+ INIT_LIST_HEAD(&sd->backlog.poll_list);
if (net_page_pool_create(i))
goto out;
}
+ if (use_backlog_threads())
+ smpboot_register_percpu_thread(&backlog_threads);
dev_boot_phase = 0;
diff --git a/net/core/filter.c b/net/core/filter.c
index 8adf95765cdd..0c66e4a3fc5b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8342,8 +8342,6 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_event_output_data_proto;
case BPF_FUNC_get_current_uid_gid:
return &bpf_get_current_uid_gid_proto;
- case BPF_FUNC_get_current_pid_tgid:
- return &bpf_get_current_pid_tgid_proto;
case BPF_FUNC_sk_storage_get:
return &bpf_sk_storage_get_proto;
case BPF_FUNC_sk_storage_delete:
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b99127712e67..17617c29be2d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -7039,8 +7039,8 @@ nodefer: __kfree_skb(skb);
/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
* if we are unlucky enough (this seems very unlikely).
*/
- if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
- smp_call_function_single_async(cpu, &sd->defer_csd);
+ if (unlikely(kick))
+ kick_defer_list_purge(sd, cpu);
}
static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index e9154ace80ff..b67454b45a49 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -89,6 +89,10 @@ ifneq ($(EXTRA_LDFLAGS),)
LDFLAGS += $(EXTRA_LDFLAGS)
endif
+HOST_CFLAGS := $(subst -I$(LIBBPF_INCLUDE),-I$(LIBBPF_BOOTSTRAP_INCLUDE),\
+ $(subst $(CLANG_CROSS_FLAGS),,$(CFLAGS)))
+HOST_LDFLAGS := $(LDFLAGS)
+
INSTALL ?= install
RM ?= rm -f
@@ -178,12 +182,9 @@ ifeq ($(filter -DHAVE_LLVM_SUPPORT -DHAVE_LIBBFD_SUPPORT,$(CFLAGS)),)
SRCS := $(filter-out jit_disasm.c,$(SRCS))
endif
-HOST_CFLAGS = $(subst -I$(LIBBPF_INCLUDE),-I$(LIBBPF_BOOTSTRAP_INCLUDE),\
- $(subst $(CLANG_CROSS_FLAGS),,$(CFLAGS)))
-
BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool
-BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o xlated_dumper.o btf_dumper.o disasm.o)
+BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o)
$(BOOTSTRAP_OBJS): $(LIBBPF_BOOTSTRAP)
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
@@ -231,14 +232,11 @@ endif
CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS)
-$(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
- $(QUIET_CC)$(HOSTCC) $(HOST_CFLAGS) -c -MMD $< -o $@
-
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@
$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP)
- $(QUIET_LINK)$(HOSTCC) $(HOST_CFLAGS) $(LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@
+ $(QUIET_LINK)$(HOSTCC) $(HOST_CFLAGS) $(HOST_LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@
$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 540c0f2c4fda..786268f1a483 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -1131,7 +1131,8 @@ static void gen_st_ops_shadow_init(struct btf *btf, struct bpf_object *obj)
continue;
codegen("\
\n\
- obj->struct_ops.%1$s = bpf_map__initial_value(obj->maps.%1$s, NULL);\n\
+ obj->struct_ops.%1$s = (typeof(obj->struct_ops.%1$s))\n\
+ bpf_map__initial_value(obj->maps.%1$s, NULL);\n\
\n\
", ident);
}
diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c
index 00c77edb6331..9b898571b49e 100644
--- a/tools/bpf/bpftool/pids.c
+++ b/tools/bpf/bpftool/pids.c
@@ -101,7 +101,6 @@ int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
char buf[4096 / sizeof(*e) * sizeof(*e)];
struct pid_iter_bpf *skel;
int err, ret, fd = -1, i;
- libbpf_print_fn_t default_print;
*map = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL);
if (IS_ERR(*map)) {
@@ -118,12 +117,18 @@ int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
skel->rodata->obj_type = type;
- /* we don't want output polluted with libbpf errors if bpf_iter is not
- * supported
- */
- default_print = libbpf_set_print(libbpf_print_none);
- err = pid_iter_bpf__load(skel);
- libbpf_set_print(default_print);
+ if (!verifier_logs) {
+ libbpf_print_fn_t default_print;
+
+ /* Unless debug information is on, we don't want the output to
+ * be polluted with libbpf errors if bpf_iter is not supported.
+ */
+ default_print = libbpf_set_print(libbpf_print_none);
+ err = pid_iter_bpf__load(skel);
+ libbpf_set_print(default_print);
+ } else {
+ err = pid_iter_bpf__load(skel);
+ }
if (err) {
/* too bad, kernel doesn't support BPF iterators yet */
err = 0;
diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
index 26004f0c5a6a..7bdbcac3cf62 100644
--- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
@@ -102,8 +102,8 @@ int iter(struct bpf_iter__task_file *ctx)
BPF_LINK_TYPE_PERF_EVENT___local)) {
struct bpf_link *link = (struct bpf_link *) file->private_data;
- if (link->type == bpf_core_enum_value(enum bpf_link_type___local,
- BPF_LINK_TYPE_PERF_EVENT___local)) {
+ if (BPF_CORE_READ(link, type) == bpf_core_enum_value(enum bpf_link_type___local,
+ BPF_LINK_TYPE_PERF_EVENT___local)) {
e.has_bpf_cookie = true;
e.bpf_cookie = get_bpf_cookie(link);
}
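For reference, the change above swaps a plain pointer dereference for a CO-RE read, so the offset of the "type" field is relocated against the running kernel's BTF at load time rather than baked in at compile time. A minimal sketch of that pattern, assuming a BPF object built against vmlinux.h; the helper name is illustrative and not part of bpftool:

/* sketch only: CO-RE field read, offset fixed up from kernel BTF */
#include "vmlinux.h"
#include <bpf/bpf_core_read.h>

static inline __u32 link_type_of(struct bpf_link *link)
{
	return BPF_CORE_READ(link, type);
}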
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 7b65566f3e42..8a63a9913495 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -58,6 +58,10 @@
#define noinline
#endif
+#ifndef __nocf_check
+#define __nocf_check __attribute__((nocf_check))
+#endif
+
/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 3c42b9f1bada..9585f5345353 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1662,8 +1662,10 @@ union bpf_attr {
} query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
- __u64 name;
- __u32 prog_fd;
+ __u64 name;
+ __u32 prog_fd;
+ __u32 :32;
+ __aligned_u64 cookie;
} raw_tracepoint;
struct { /* anonymous struct for BPF_BTF_LOAD */
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 97ec005c3c47..c9f4e04f38fe 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -785,6 +785,7 @@ int bpf_link_create(int prog_fd, int target_fd,
if (!OPTS_ZEROED(opts, uprobe_multi))
return libbpf_err(-EINVAL);
break;
+ case BPF_TRACE_RAW_TP:
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
case BPF_MODIFY_RETURN:
@@ -1173,20 +1174,31 @@ int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info
return bpf_obj_get_info_by_fd(link_fd, info, info_len);
}
-int bpf_raw_tracepoint_open(const char *name, int prog_fd)
+int bpf_raw_tracepoint_open_opts(int prog_fd, struct bpf_raw_tp_opts *opts)
{
const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
union bpf_attr attr;
int fd;
+ if (!OPTS_VALID(opts, bpf_raw_tp_opts))
+ return libbpf_err(-EINVAL);
+
memset(&attr, 0, attr_sz);
- attr.raw_tracepoint.name = ptr_to_u64(name);
attr.raw_tracepoint.prog_fd = prog_fd;
+ attr.raw_tracepoint.name = ptr_to_u64(OPTS_GET(opts, tp_name, NULL));
+ attr.raw_tracepoint.cookie = OPTS_GET(opts, cookie, 0);
fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
return libbpf_err_errno(fd);
}
+int bpf_raw_tracepoint_open(const char *name, int prog_fd)
+{
+ LIBBPF_OPTS(bpf_raw_tp_opts, opts, .tp_name = name);
+
+ return bpf_raw_tracepoint_open_opts(prog_fd, &opts);
+}
+
int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
{
const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd);
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index df0db2f0cdb7..972e17ec0c09 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -617,6 +617,15 @@ LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
__u32 query_flags, __u32 *attach_flags,
__u32 *prog_ids, __u32 *prog_cnt);
+struct bpf_raw_tp_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ const char *tp_name;
+ __u64 cookie;
+ size_t :0;
+};
+#define bpf_raw_tp_opts__last_field cookie
+
+LIBBPF_API int bpf_raw_tracepoint_open_opts(int prog_fd, struct bpf_raw_tp_opts *opts);
LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
__u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
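A minimal sketch of how the new low-level wrapper might be called from userspace; prog_fd and the cookie value are placeholders and error handling is trimmed:

#include <bpf/bpf.h>

/* attach an already-loaded raw_tp program with a cookie;
 * returns the raw_tp link FD (close() detaches) or a negative error
 */
static int attach_sys_enter(int prog_fd)
{
	LIBBPF_OPTS(bpf_raw_tp_opts, opts,
		.tp_name = "sys_enter",
		.cookie = 0x10,	/* readable later via bpf_get_attach_cookie() */
	);

	return bpf_raw_tracepoint_open_opts(prog_fd, &opts);
}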
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index a2061fcd612d..d7d8f78f8846 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1132,8 +1132,26 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
const char *mname;
mname = btf__name_by_offset(btf, member->name_off);
+ moff = member->offset / 8;
+ mdata = data + moff;
+ msize = btf__resolve_size(btf, member->type);
+ if (msize < 0) {
+ pr_warn("struct_ops init_kern %s: failed to resolve the size of member %s\n",
+ map->name, mname);
+ return msize;
+ }
+
kern_member = find_member_by_name(kern_btf, kern_type, mname);
if (!kern_member) {
+ /* Skip all zeros or null fields if they are not
+ * present in the kernel BTF.
+ */
+ if (libbpf_is_mem_zeroed(mdata, msize)) {
+ pr_info("struct_ops %s: member %s not found in kernel, skipping it as it's set to zero\n",
+ map->name, mname);
+ continue;
+ }
+
pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
map->name, mname);
return -ENOTSUP;
@@ -1147,10 +1165,7 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
return -ENOTSUP;
}
- moff = member->offset / 8;
kern_moff = kern_member->offset / 8;
-
- mdata = data + moff;
kern_mdata = kern_data + kern_moff;
mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
@@ -1230,9 +1245,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
continue;
}
- msize = btf__resolve_size(btf, mtype_id);
kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
- if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
+ if (kern_msize < 0 || msize != kern_msize) {
pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
map->name, mname, (ssize_t)msize,
(ssize_t)kern_msize);
@@ -7321,9 +7335,9 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
char *cp, errmsg[STRERR_BUFSIZE];
size_t log_buf_size = 0;
char *log_buf = NULL, *tmp;
- int btf_fd, ret, err;
bool own_log_buf = true;
__u32 log_level = prog->log_level;
+ int ret, err;
if (prog->type == BPF_PROG_TYPE_UNSPEC) {
/*
@@ -7347,9 +7361,8 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
load_attr.prog_ifindex = prog->prog_ifindex;
/* specify func_info/line_info only if kernel supports them */
- btf_fd = btf__fd(obj->btf);
- if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
- load_attr.prog_btf_fd = btf_fd;
+ if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
+ load_attr.prog_btf_fd = btf__fd(obj->btf);
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
load_attr.func_info_cnt = prog->func_info_cnt;
@@ -8563,6 +8576,11 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
return libbpf_err(-EINVAL);
}
+ if (map->fd < 0) {
+ pr_warn("map '%s': can't pin BPF map without FD (was it created?)\n", map->name);
+ return libbpf_err(-EINVAL);
+ }
+
if (map->pin_path) {
if (path && strcmp(path, map->pin_path)) {
pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
@@ -9298,6 +9316,7 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT),
SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT),
SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT),
+ SEC_DEF("sk_skb/verdict", SK_SKB, BPF_SK_SKB_VERDICT, SEC_ATTACHABLE_OPT),
SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE),
SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT),
SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT),
@@ -10307,6 +10326,11 @@ static int validate_map_op(const struct bpf_map *map, size_t key_sz,
return -EINVAL;
}
+ if (map->fd < 0) {
+ pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
+ return -EINVAL;
+ }
+
if (!check_value_sz)
return 0;
@@ -10419,8 +10443,15 @@ long libbpf_get_error(const void *ptr)
int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
{
int ret;
+ int prog_fd = bpf_program__fd(prog);
- ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
+ if (prog_fd < 0) {
+ pr_warn("prog '%s': can't use BPF program without FD (was it loaded?)\n",
+ prog->name);
+ return libbpf_err(-EINVAL);
+ }
+
+ ret = bpf_link_update(bpf_link__fd(link), prog_fd, NULL);
return libbpf_err_errno(ret);
}
@@ -10614,7 +10645,7 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p
}
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
+ pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
prog->name);
return libbpf_err_ptr(-EINVAL);
}
@@ -11338,6 +11369,13 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
return libbpf_err_ptr(-EINVAL);
+ prog_fd = bpf_program__fd(prog);
+ if (prog_fd < 0) {
+ pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
+ prog->name);
+ return libbpf_err_ptr(-EINVAL);
+ }
+
syms = OPTS_GET(opts, syms, false);
addrs = OPTS_GET(opts, addrs, false);
cnt = OPTS_GET(opts, cnt, false);
@@ -11378,7 +11416,6 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
}
link->detach = &bpf_link__detach_fd;
- prog_fd = bpf_program__fd(prog);
link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts);
if (link_fd < 0) {
err = -errno;
@@ -11761,6 +11798,13 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
if (!OPTS_VALID(opts, bpf_uprobe_multi_opts))
return libbpf_err_ptr(-EINVAL);
+ prog_fd = bpf_program__fd(prog);
+ if (prog_fd < 0) {
+ pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
+ prog->name);
+ return libbpf_err_ptr(-EINVAL);
+ }
+
syms = OPTS_GET(opts, syms, NULL);
offsets = OPTS_GET(opts, offsets, NULL);
ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL);
@@ -11836,7 +11880,6 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
}
link->detach = &bpf_link__detach_fd;
- prog_fd = bpf_program__fd(prog);
link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts);
if (link_fd < 0) {
err = -errno;
@@ -12080,7 +12123,7 @@ struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
return libbpf_err_ptr(-EINVAL);
if (bpf_program__fd(prog) < 0) {
- pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
+ pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
prog->name);
return libbpf_err_ptr(-EINVAL);
}
@@ -12271,13 +12314,19 @@ static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_lin
return libbpf_get_error(*link);
}
-struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
- const char *tp_name)
+struct bpf_link *
+bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog,
+ const char *tp_name,
+ struct bpf_raw_tracepoint_opts *opts)
{
+ LIBBPF_OPTS(bpf_raw_tp_opts, raw_opts);
char errmsg[STRERR_BUFSIZE];
struct bpf_link *link;
int prog_fd, pfd;
+ if (!OPTS_VALID(opts, bpf_raw_tracepoint_opts))
+ return libbpf_err_ptr(-EINVAL);
+
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
pr_warn("prog '%s': can't attach before loaded\n", prog->name);
@@ -12289,7 +12338,9 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *pr
return libbpf_err_ptr(-ENOMEM);
link->detach = &bpf_link__detach_fd;
- pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
+ raw_opts.tp_name = tp_name;
+ raw_opts.cookie = OPTS_GET(opts, cookie, 0);
+ pfd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_opts);
if (pfd < 0) {
pfd = -errno;
free(link);
@@ -12301,6 +12352,12 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *pr
return link;
}
+struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
+ const char *tp_name)
+{
+ return bpf_program__attach_raw_tracepoint_opts(prog, tp_name, NULL);
+}
+
static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
static const char *const prefixes[] = {
@@ -12662,6 +12719,12 @@ struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
return libbpf_err_ptr(-EOPNOTSUPP);
+ if (bpf_program__fd(prog) < 0) {
+ pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
+ prog->name);
+ return libbpf_err_ptr(-EINVAL);
+ }
+
err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
if (err)
return libbpf_err_ptr(err);
@@ -12702,8 +12765,13 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
__u32 zero = 0;
int err, fd;
- if (!bpf_map__is_struct_ops(map) || map->fd == -1)
+ if (!bpf_map__is_struct_ops(map))
+ return libbpf_err_ptr(-EINVAL);
+
+ if (map->fd < 0) {
+ pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
return libbpf_err_ptr(-EINVAL);
+ }
link = calloc(1, sizeof(*link));
if (!link)
@@ -12751,9 +12819,14 @@ int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
__u32 zero = 0;
int err;
- if (!bpf_map__is_struct_ops(map) || !map_is_created(map))
+ if (!bpf_map__is_struct_ops(map))
return -EINVAL;
+ if (map->fd < 0) {
+ pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
+ return -EINVAL;
+ }
+
st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
/* Ensure the type of a link is correct */
if (st_ops_link->map_fd < 0)
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 7b510761f545..f88ab50c0229 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -760,9 +760,20 @@ bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
const char *tp_name,
const struct bpf_tracepoint_opts *opts);
+struct bpf_raw_tracepoint_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u64 cookie;
+ size_t :0;
+};
+#define bpf_raw_tracepoint_opts__last_field cookie
+
LIBBPF_API struct bpf_link *
bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
const char *tp_name);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog,
+ const char *tp_name,
+ struct bpf_raw_tracepoint_opts *opts);
struct bpf_trace_opts {
/* size of this struct, for forward/backward compatibility */
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 86804fd90dd1..51732ecb1385 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -410,6 +410,8 @@ LIBBPF_1.3.0 {
LIBBPF_1.4.0 {
global:
+ bpf_program__attach_raw_tracepoint_opts;
+ bpf_raw_tracepoint_open_opts;
bpf_token_create;
btf__new_split;
btf_ext__raw_data;
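The same cookie can also be passed through the new high-level link-based API. A hedged sketch against an already-loaded program; the function name and cookie value are illustrative:

#include <bpf/libbpf.h>

/* 'prog' is assumed to be a loaded SEC("raw_tp/sys_enter") program */
static struct bpf_link *attach_with_cookie(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_raw_tracepoint_opts, opts, .cookie = 0x20);

	return bpf_program__attach_raw_tracepoint_opts(prog, "sys_enter", &opts);
}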
diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
index b36de42ee4d9..e2ff8ea1cb79 100644
--- a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
+++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
@@ -186,7 +186,7 @@ static void *task_producer(void *input)
for (i = 0; i < batch_sz; i++) {
if (!pthd_results[i])
- pthread_join(pthds[i], NULL);;
+ pthread_join(pthds[i], NULL);
}
}
diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c
index ace0d1011a8e..d66eddacd642 100644
--- a/tools/testing/selftests/bpf/benchs/bench_trigger.c
+++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c
@@ -1,15 +1,47 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
+#define _GNU_SOURCE
+#include <unistd.h>
#include "bench.h"
#include "trigger_bench.skel.h"
#include "trace_helpers.h"
+/* adjust slot shift in inc_counter() if changing */
+#define MAX_BUCKETS 256
+
+#pragma GCC diagnostic ignored "-Wattributes"
+
/* BPF triggering benchmarks */
static struct trigger_ctx {
struct trigger_bench *skel;
} ctx;
-static struct counter base_hits;
+static struct counter base_hits[MAX_BUCKETS];
+
+static __always_inline void inc_counter(struct counter *counters)
+{
+ static __thread int tid = 0;
+ unsigned slot;
+
+ if (unlikely(tid == 0))
+ tid = syscall(SYS_gettid);
+
+ /* multiplicative hashing, it's fast */
+ slot = 2654435769U * tid;
+ slot >>= 24;
+
+ atomic_inc(&counters[slot].value); /* use the hash's highest byte as the index */
+}
+
+static long sum_and_reset_counters(struct counter *counters)
+{
+ int i;
+ long sum = 0;
+
+ for (i = 0; i < MAX_BUCKETS; i++)
+ sum += atomic_swap(&counters[i].value, 0);
+ return sum;
+}
static void trigger_validate(void)
{
@@ -23,14 +55,14 @@ static void *trigger_base_producer(void *input)
{
while (true) {
(void)syscall(__NR_getpgid);
- atomic_inc(&base_hits.value);
+ inc_counter(base_hits);
}
return NULL;
}
static void trigger_base_measure(struct bench_res *res)
{
- res->hits = atomic_swap(&base_hits.value, 0);
+ res->hits = sum_and_reset_counters(base_hits);
}
static void *trigger_producer(void *input)
@@ -42,7 +74,7 @@ static void *trigger_producer(void *input)
static void trigger_measure(struct bench_res *res)
{
- res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
+ res->hits = sum_and_reset_counters(ctx.skel->bss->hits);
}
static void setup_ctx(void)
@@ -137,7 +169,7 @@ static void trigger_fmodret_setup(void)
* GCC doesn't generate stack setup preamble for these functions due to them
* having no input arguments and doing nothing in the body.
*/
-__weak void uprobe_target_nop(void)
+__nocf_check __weak void uprobe_target_nop(void)
{
asm volatile ("nop");
}
@@ -146,7 +178,7 @@ __weak void opaque_noop_func(void)
{
}
-__weak int uprobe_target_push(void)
+__nocf_check __weak int uprobe_target_push(void)
{
/* overhead of function call is negligible compared to uprobe
* triggering, so this shouldn't affect benchmark results much
@@ -155,7 +187,7 @@ __weak int uprobe_target_push(void)
return 1;
}
-__weak void uprobe_target_ret(void)
+__nocf_check __weak void uprobe_target_ret(void)
{
asm volatile ("");
}
@@ -164,7 +196,7 @@ static void *uprobe_base_producer(void *input)
{
while (true) {
uprobe_target_nop();
- atomic_inc(&base_hits.value);
+ inc_counter(base_hits);
}
return NULL;
}
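The bucket selection above uses Fibonacci (multiplicative) hashing of the TID and keeps only the top byte, so concurrent threads rarely bounce the same cache line. A standalone illustration of the slot computation, not part of the selftest sources:

#include <stdio.h>

#define MAX_BUCKETS 256

static unsigned int slot_for_tid(int tid)
{
	unsigned int slot = 2654435769U * (unsigned int)tid;

	return slot >> 24;	/* keep the top 8 bits: 0..MAX_BUCKETS-1 */
}

int main(void)
{
	int tid;

	for (tid = 4000; tid < 4008; tid++)
		printf("tid %d -> bucket %u\n", tid, slot_for_tid(tid));
	return 0;
}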
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index 1454cebc262b..4407ea428e77 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -573,6 +573,115 @@ cleanup:
close(lsm_fd);
}
+static void tp_btf_subtest(struct test_bpf_cookie *skel)
+{
+ __u64 cookie;
+ int prog_fd, link_fd = -1;
+ struct bpf_link *link = NULL;
+ LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+ LIBBPF_OPTS(bpf_raw_tp_opts, raw_tp_opts);
+ LIBBPF_OPTS(bpf_trace_opts, trace_opts);
+
+ /* There are three different ways to attach tp_btf (BTF-aware raw
+ * tracepoint) programs. Let's test all of them.
+ */
+ prog_fd = bpf_program__fd(skel->progs.handle_tp_btf);
+
+ /* low-level BPF_RAW_TRACEPOINT_OPEN command wrapper */
+ skel->bss->tp_btf_res = 0;
+
+ raw_tp_opts.cookie = cookie = 0x11000000000000L;
+ link_fd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_tp_opts);
+ if (!ASSERT_GE(link_fd, 0, "bpf_raw_tracepoint_open_opts"))
+ goto cleanup;
+
+ usleep(1); /* trigger */
+ close(link_fd); /* detach */
+ link_fd = -1;
+
+ ASSERT_EQ(skel->bss->tp_btf_res, cookie, "raw_tp_open_res");
+
+ /* low-level generic bpf_link_create() API */
+ skel->bss->tp_btf_res = 0;
+
+ link_opts.tracing.cookie = cookie = 0x22000000000000L;
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_RAW_TP, &link_opts);
+ if (!ASSERT_GE(link_fd, 0, "bpf_link_create"))
+ goto cleanup;
+
+ usleep(1); /* trigger */
+ close(link_fd); /* detach */
+ link_fd = -1;
+
+ ASSERT_EQ(skel->bss->tp_btf_res, cookie, "link_create_res");
+
+ /* high-level bpf_link-based bpf_program__attach_trace_opts() API */
+ skel->bss->tp_btf_res = 0;
+
+ trace_opts.cookie = cookie = 0x33000000000000L;
+ link = bpf_program__attach_trace_opts(skel->progs.handle_tp_btf, &trace_opts);
+ if (!ASSERT_OK_PTR(link, "attach_trace_opts"))
+ goto cleanup;
+
+ usleep(1); /* trigger */
+ bpf_link__destroy(link); /* detach */
+ link = NULL;
+
+ ASSERT_EQ(skel->bss->tp_btf_res, cookie, "attach_trace_opts_res");
+
+cleanup:
+ if (link_fd >= 0)
+ close(link_fd);
+ bpf_link__destroy(link);
+}
+
+static void raw_tp_subtest(struct test_bpf_cookie *skel)
+{
+ __u64 cookie;
+ int prog_fd, link_fd = -1;
+ struct bpf_link *link = NULL;
+ LIBBPF_OPTS(bpf_raw_tp_opts, raw_tp_opts);
+ LIBBPF_OPTS(bpf_raw_tracepoint_opts, opts);
+
+ /* There are two different ways to attach raw_tp programs */
+ prog_fd = bpf_program__fd(skel->progs.handle_raw_tp);
+
+ /* low-level BPF_RAW_TRACEPOINT_OPEN command wrapper */
+ skel->bss->raw_tp_res = 0;
+
+ raw_tp_opts.tp_name = "sys_enter";
+ raw_tp_opts.cookie = cookie = 0x55000000000000L;
+ link_fd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_tp_opts);
+ if (!ASSERT_GE(link_fd, 0, "bpf_raw_tracepoint_open_opts"))
+ goto cleanup;
+
+ usleep(1); /* trigger */
+ close(link_fd); /* detach */
+ link_fd = -1;
+
+ ASSERT_EQ(skel->bss->raw_tp_res, cookie, "raw_tp_open_res");
+
+ /* high-level bpf_link-based bpf_program__attach_raw_tracepoint_opts() API */
+ skel->bss->raw_tp_res = 0;
+
+ opts.cookie = cookie = 0x66000000000000L;
+ link = bpf_program__attach_raw_tracepoint_opts(skel->progs.handle_raw_tp,
+ "sys_enter", &opts);
+ if (!ASSERT_OK_PTR(link, "attach_raw_tp_opts"))
+ goto cleanup;
+
+ usleep(1); /* trigger */
+ bpf_link__destroy(link); /* detach */
+ link = NULL;
+
+ ASSERT_EQ(skel->bss->raw_tp_res, cookie, "attach_raw_tp_opts_res");
+
+cleanup:
+ if (link_fd >= 0)
+ close(link_fd);
+ bpf_link__destroy(link);
+}
+
void test_bpf_cookie(void)
{
struct test_bpf_cookie *skel;
@@ -601,6 +710,9 @@ void test_bpf_cookie(void)
tracing_subtest(skel);
if (test__start_subtest("lsm"))
lsm_subtest(skel);
-
+ if (test__start_subtest("tp_btf"))
+ tp_btf_subtest(skel);
+ if (test__start_subtest("raw_tp"))
+ raw_tp_subtest(skel);
test_bpf_cookie__destroy(skel);
}
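On the BPF side, the cookies installed by these subtests are read back with the bpf_get_attach_cookie() helper. A minimal sketch of a raw_tp program doing so, assuming a kernel with this series applied; the section, variable, and program names are illustrative rather than the selftest's:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

__u64 seen_cookie;

SEC("raw_tp/sys_enter")
int read_cookie(void *ctx)
{
	/* returns the u64 passed at attach time, or 0 if none was set */
	seen_cookie = bpf_get_attach_cookie(ctx);
	return 0;
}

char _license[] SEC("license") = "GPL";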
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index a88e6e07e4f5..94cb22b01482 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -89,7 +89,7 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
WRITE_ONCE(stop, 0);
- lfd = socket(AF_INET6, SOCK_STREAM, 0);
+ lfd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_NEQ(lfd, -1, "socket"))
return;
@@ -103,21 +103,10 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
settimeo(lfd, 0) || settimeo(fd, 0))
goto done;
- /* bind, listen and start server thread to accept */
- sa6.sin6_family = AF_INET6;
- sa6.sin6_addr = in6addr_loopback;
- err = bind(lfd, (struct sockaddr *)&sa6, addrlen);
- if (!ASSERT_NEQ(err, -1, "bind"))
- goto done;
-
err = getsockname(lfd, (struct sockaddr *)&sa6, &addrlen);
if (!ASSERT_NEQ(err, -1, "getsockname"))
goto done;
- err = listen(lfd, 1);
- if (!ASSERT_NEQ(err, -1, "listen"))
- goto done;
-
if (sk_stg_map) {
err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd,
&expected_stg, BPF_NOEXIST);
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
index a8b53b8736f0..f66ceccd7029 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
@@ -25,7 +25,7 @@ static void test_lookup_update(void)
int map1_fd, map2_fd, map3_fd, map4_fd, map5_fd, map1_id, map2_id;
int outer_arr_fd, outer_hash_fd, outer_arr_dyn_fd;
struct test_btf_map_in_map *skel;
- int err, key = 0, val, i, fd;
+ int err, key = 0, val, i;
skel = test_btf_map_in_map__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
@@ -102,30 +102,6 @@ static void test_lookup_update(void)
CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
- test_btf_map_in_map__destroy(skel);
- skel = NULL;
-
- /* we need to either wait for or force synchronize_rcu(), before
- * checking for "still exists" condition, otherwise map could still be
- * resolvable by ID, causing false positives.
- *
- * Older kernels (5.8 and earlier) freed map only after two
- * synchronize_rcu()s, so trigger two, to be entirely sure.
- */
- CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
- CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
-
- fd = bpf_map_get_fd_by_id(map1_id);
- if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
- close(fd);
- goto cleanup;
- }
- fd = bpf_map_get_fd_by_id(map2_id);
- if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
- close(fd);
- goto cleanup;
- }
-
cleanup:
test_btf_map_in_map__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
index 24d493482ffc..e72d75d6baa7 100644
--- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
+++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
@@ -12,77 +12,229 @@
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
+#include "network_helpers.h"
#define STACK_SIZE (1024 * 1024)
static char child_stack[STACK_SIZE];
-static int test_current_pid_tgid(void *args)
+static int get_pid_tgid(pid_t *pid, pid_t *tgid,
+ struct test_ns_current_pid_tgid__bss *bss)
{
- struct test_ns_current_pid_tgid__bss *bss;
- struct test_ns_current_pid_tgid *skel;
- int err = -1, duration = 0;
- pid_t tgid, pid;
struct stat st;
+ int err;
- skel = test_ns_current_pid_tgid__open_and_load();
- if (CHECK(!skel, "skel_open_load", "failed to load skeleton\n"))
- goto cleanup;
-
- pid = syscall(SYS_gettid);
- tgid = getpid();
+ *pid = syscall(SYS_gettid);
+ *tgid = getpid();
err = stat("/proc/self/ns/pid", &st);
- if (CHECK(err, "stat", "failed /proc/self/ns/pid: %d\n", err))
- goto cleanup;
+ if (!ASSERT_OK(err, "stat /proc/self/ns/pid"))
+ return err;
- bss = skel->bss;
bss->dev = st.st_dev;
bss->ino = st.st_ino;
bss->user_pid = 0;
bss->user_tgid = 0;
+ return 0;
+}
+
+static int test_current_pid_tgid_tp(void *args)
+{
+ struct test_ns_current_pid_tgid__bss *bss;
+ struct test_ns_current_pid_tgid *skel;
+ int ret = -1, err;
+ pid_t tgid, pid;
+
+ skel = test_ns_current_pid_tgid__open();
+ if (!ASSERT_OK_PTR(skel, "test_ns_current_pid_tgid__open"))
+ return ret;
+
+ bpf_program__set_autoload(skel->progs.tp_handler, true);
+
+ err = test_ns_current_pid_tgid__load(skel);
+ if (!ASSERT_OK(err, "test_ns_current_pid_tgid__load"))
+ goto cleanup;
+
+ bss = skel->bss;
+ if (get_pid_tgid(&pid, &tgid, bss))
+ goto cleanup;
err = test_ns_current_pid_tgid__attach(skel);
- if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ if (!ASSERT_OK(err, "test_ns_current_pid_tgid__attach"))
goto cleanup;
/* trigger tracepoint */
usleep(1);
- ASSERT_EQ(bss->user_pid, pid, "pid");
- ASSERT_EQ(bss->user_tgid, tgid, "tgid");
- err = 0;
+ if (!ASSERT_EQ(bss->user_pid, pid, "pid"))
+ goto cleanup;
+ if (!ASSERT_EQ(bss->user_tgid, tgid, "tgid"))
+ goto cleanup;
+ ret = 0;
+
+cleanup:
+ test_ns_current_pid_tgid__destroy(skel);
+ return ret;
+}
+
+static int test_current_pid_tgid_cgrp(void *args)
+{
+ struct test_ns_current_pid_tgid__bss *bss;
+ struct test_ns_current_pid_tgid *skel;
+ int server_fd = -1, ret = -1, err;
+ int cgroup_fd = *(int *)args;
+ pid_t tgid, pid;
+
+ skel = test_ns_current_pid_tgid__open();
+ if (!ASSERT_OK_PTR(skel, "test_ns_current_pid_tgid__open"))
+ return ret;
+
+ bpf_program__set_autoload(skel->progs.cgroup_bind4, true);
+
+ err = test_ns_current_pid_tgid__load(skel);
+ if (!ASSERT_OK(err, "test_ns_current_pid_tgid__load"))
+ goto cleanup;
+
+ bss = skel->bss;
+ if (get_pid_tgid(&pid, &tgid, bss))
+ goto cleanup;
+
+ skel->links.cgroup_bind4 = bpf_program__attach_cgroup(
+ skel->progs.cgroup_bind4, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.cgroup_bind4, "bpf_program__attach_cgroup"))
+ goto cleanup;
+
+ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_GE(server_fd, 0, "start_server"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(bss->user_pid, pid, "pid"))
+ goto cleanup;
+ if (!ASSERT_EQ(bss->user_tgid, tgid, "tgid"))
+ goto cleanup;
+ ret = 0;
cleanup:
- test_ns_current_pid_tgid__destroy(skel);
+ if (server_fd >= 0)
+ close(server_fd);
+ test_ns_current_pid_tgid__destroy(skel);
+ return ret;
+}
+
+static int test_current_pid_tgid_sk_msg(void *args)
+{
+ int verdict, map, server_fd = -1, client_fd = -1;
+ struct test_ns_current_pid_tgid__bss *bss;
+ static const char send_msg[] = "message";
+ struct test_ns_current_pid_tgid *skel;
+ int ret = -1, err, key = 0;
+ pid_t tgid, pid;
+
+ skel = test_ns_current_pid_tgid__open();
+ if (!ASSERT_OK_PTR(skel, "test_ns_current_pid_tgid__open"))
+ return ret;
+
+ bpf_program__set_autoload(skel->progs.sk_msg, true);
+
+ err = test_ns_current_pid_tgid__load(skel);
+ if (!ASSERT_OK(err, "test_ns_current_pid_tgid__load"))
+ goto cleanup;
+
+ bss = skel->bss;
+ if (get_pid_tgid(&pid, &tgid, skel->bss))
+ goto cleanup;
+
+ verdict = bpf_program__fd(skel->progs.sk_msg);
+ map = bpf_map__fd(skel->maps.sock_map);
+ err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
+ if (!ASSERT_OK(err, "prog_attach"))
+ goto cleanup;
+
+ server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+ if (!ASSERT_GE(server_fd, 0, "start_server"))
+ goto cleanup;
- return err;
+ client_fd = connect_to_fd(server_fd, 0);
+ if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+ goto cleanup;
+
+ err = bpf_map_update_elem(map, &key, &client_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
+ goto cleanup;
+
+ err = send(client_fd, send_msg, sizeof(send_msg), 0);
+ if (!ASSERT_EQ(err, sizeof(send_msg), "send(msg)"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(bss->user_pid, pid, "pid"))
+ goto cleanup;
+ if (!ASSERT_EQ(bss->user_tgid, tgid, "tgid"))
+ goto cleanup;
+ ret = 0;
+
+cleanup:
+ if (server_fd >= 0)
+ close(server_fd);
+ if (client_fd >= 0)
+ close(client_fd);
+ test_ns_current_pid_tgid__destroy(skel);
+ return ret;
}
-static void test_ns_current_pid_tgid_new_ns(void)
+static void test_ns_current_pid_tgid_new_ns(int (*fn)(void *), void *arg)
{
- int wstatus, duration = 0;
+ int wstatus;
pid_t cpid;
/* Create a process in a new namespace, this process
* will be the init process of this new namespace hence will be pid 1.
*/
- cpid = clone(test_current_pid_tgid, child_stack + STACK_SIZE,
- CLONE_NEWPID | SIGCHLD, NULL);
+ cpid = clone(fn, child_stack + STACK_SIZE,
+ CLONE_NEWPID | SIGCHLD, arg);
- if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno)))
+ if (!ASSERT_NEQ(cpid, -1, "clone"))
return;
- if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", "%s\n", strerror(errno)))
+ if (!ASSERT_NEQ(waitpid(cpid, &wstatus, 0), -1, "waitpid"))
return;
- if (CHECK(WEXITSTATUS(wstatus) != 0, "newns_pidtgid", "failed"))
+ if (!ASSERT_OK(WEXITSTATUS(wstatus), "newns_pidtgid"))
return;
}
+static void test_in_netns(int (*fn)(void *), void *arg)
+{
+ struct nstoken *nstoken = NULL;
+
+ SYS(cleanup, "ip netns add ns_current_pid_tgid");
+ SYS(cleanup, "ip -net ns_current_pid_tgid link set dev lo up");
+
+ nstoken = open_netns("ns_current_pid_tgid");
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto cleanup;
+
+ test_ns_current_pid_tgid_new_ns(fn, arg);
+
+cleanup:
+ if (nstoken)
+ close_netns(nstoken);
+ SYS_NOFAIL("ip netns del ns_current_pid_tgid");
+}
+
/* TODO: use a different tracepoint */
void serial_test_ns_current_pid_tgid(void)
{
- if (test__start_subtest("ns_current_pid_tgid_root_ns"))
- test_current_pid_tgid(NULL);
- if (test__start_subtest("ns_current_pid_tgid_new_ns"))
- test_ns_current_pid_tgid_new_ns();
+ if (test__start_subtest("root_ns_tp"))
+ test_current_pid_tgid_tp(NULL);
+ if (test__start_subtest("new_ns_tp"))
+ test_ns_current_pid_tgid_new_ns(test_current_pid_tgid_tp, NULL);
+ if (test__start_subtest("new_ns_cgrp")) {
+ int cgroup_fd = -1;
+
+ cgroup_fd = test__join_cgroup("/sock_addr");
+ if (ASSERT_GE(cgroup_fd, 0, "join_cgroup")) {
+ test_in_netns(test_current_pid_tgid_cgrp, &cgroup_fd);
+ close(cgroup_fd);
+ }
+ }
+ if (test__start_subtest("new_ns_sk_msg"))
+ test_in_netns(test_current_pid_tgid_sk_msg, NULL);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
index ee5372c7f2c7..098776d00ab4 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
@@ -93,9 +93,56 @@ cleanup:
struct_ops_module__destroy(skel);
}
+static void test_struct_ops_not_zeroed(void)
+{
+ struct struct_ops_module *skel;
+ int err;
+
+ /* zeroed is 0, and zeroed_op is null */
+ skel = struct_ops_module__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
+ return;
+
+ err = struct_ops_module__load(skel);
+ ASSERT_OK(err, "struct_ops_module_load");
+
+ struct_ops_module__destroy(skel);
+
+ /* zeroed is not 0 */
+ skel = struct_ops_module__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_not_zeroed"))
+ return;
+
+ /* libbpf should reject the testmod_zeroed since struct
+ * bpf_testmod_ops in the kernel has no "zeroed" field and the
+ * value of "zeroed" is non-zero.
+ */
+ skel->struct_ops.testmod_zeroed->zeroed = 0xdeadbeef;
+ err = struct_ops_module__load(skel);
+ ASSERT_ERR(err, "struct_ops_module_load_not_zeroed");
+
+ struct_ops_module__destroy(skel);
+
+ /* zeroed_op is not null */
+ skel = struct_ops_module__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_not_zeroed_op"))
+ return;
+
+ /* libbpf should reject the testmod_zeroed since the value of its
+ * "zeroed_op" is not null.
+ */
+ skel->struct_ops.testmod_zeroed->zeroed_op = skel->progs.test_3;
+ err = struct_ops_module__load(skel);
+ ASSERT_ERR(err, "struct_ops_module_load_not_zeroed_op");
+
+ struct_ops_module__destroy(skel);
+}
+
void serial_test_struct_ops_module(void)
{
if (test__start_subtest("test_struct_ops_load"))
test_struct_ops_load();
+ if (test__start_subtest("test_struct_ops_not_zeroed"))
+ test_struct_ops_not_zeroed();
}
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index 3db416606f2f..fe65e0952a1e 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -673,7 +673,7 @@ static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul)
static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
{
- int *t, i, sum = 0;;
+ int *t, i, sum = 0;
while ((t = bpf_iter_num_next(it))) {
i = *t;
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_module.c b/tools/testing/selftests/bpf/progs/struct_ops_module.c
index 026cabfa7f1f..86e1e50c5531 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_module.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_module.c
@@ -23,7 +23,7 @@ void BPF_PROG(test_2, int a, int b)
test_2_result = a + b;
}
-SEC("struct_ops/test_3")
+SEC("?struct_ops/test_3")
int BPF_PROG(test_3, int a, int b)
{
test_2_result = a + b + 3;
@@ -54,3 +54,17 @@ struct bpf_testmod_ops___v2 testmod_2 = {
.test_1 = (void *)test_1,
.test_2 = (void *)test_2_v2,
};
+
+struct bpf_testmod_ops___zeroed {
+ int (*test_1)(void);
+ void (*test_2)(int a, int b);
+ int (*test_maybe_null)(int dummy, struct task_struct *task);
+ void (*zeroed_op)(int a, int b);
+ int zeroed;
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___zeroed testmod_zeroed = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2_v2,
+};
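From userspace, the extra ___zeroed members are reachable through the skeleton's shadow struct, which is how the not_zeroed subtest above pokes them. A hedged sketch of the happy path, reusing the selftest's skeleton names as placeholders; error handling is trimmed:

#include "struct_ops_module.skel.h"

static int load_zeroed_variant(void)
{
	struct struct_ops_module *skel;
	int err;

	skel = struct_ops_module__open();
	if (!skel)
		return -1;

	/* left as 0/NULL, members unknown to the kernel BTF are skipped */
	skel->struct_ops.testmod_zeroed->zeroed = 0;
	skel->struct_ops.testmod_zeroed->zeroed_op = NULL;

	err = struct_ops_module__load(skel);
	struct_ops_module__destroy(skel);
	return err;
}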
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_cookie.c b/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
index 5a3a80f751c4..c83142b55f47 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
@@ -15,6 +15,8 @@ __u64 uprobe_res;
__u64 uretprobe_res;
__u64 tp_res;
__u64 pe_res;
+__u64 raw_tp_res;
+__u64 tp_btf_res;
__u64 fentry_res;
__u64 fexit_res;
__u64 fmod_ret_res;
@@ -87,6 +89,20 @@ int handle_pe(struct pt_regs *ctx)
return 0;
}
+SEC("raw_tp/sys_enter")
+int handle_raw_tp(void *ctx)
+{
+ update(ctx, &raw_tp_res);
+ return 0;
+}
+
+SEC("tp_btf/sys_enter")
+int handle_tp_btf(void *ctx)
+{
+ update(ctx, &tp_btf_res);
+ return 0;
+}
+
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(fentry_test1, int a)
{
diff --git a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
index 0763d49f9c42..386315afad65 100644
--- a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
+++ b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
@@ -5,23 +5,48 @@
#include <stdint.h>
#include <bpf/bpf_helpers.h>
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u32);
+} sock_map SEC(".maps");
+
__u64 user_pid = 0;
__u64 user_tgid = 0;
__u64 dev = 0;
__u64 ino = 0;
-SEC("tracepoint/syscalls/sys_enter_nanosleep")
-int handler(const void *ctx)
+static void get_pid_tgid(void)
{
struct bpf_pidns_info nsdata;
if (bpf_get_ns_current_pid_tgid(dev, ino, &nsdata, sizeof(struct bpf_pidns_info)))
- return 0;
+ return;
user_pid = nsdata.pid;
user_tgid = nsdata.tgid;
+}
+SEC("?tracepoint/syscalls/sys_enter_nanosleep")
+int tp_handler(const void *ctx)
+{
+ get_pid_tgid();
return 0;
}
+SEC("?cgroup/bind4")
+int cgroup_bind4(struct bpf_sock_addr *ctx)
+{
+ get_pid_tgid();
+ return 1;
+}
+
+SEC("?sk_msg")
+int sk_msg(struct sk_msg_md *msg)
+{
+ get_pid_tgid();
+ return SK_PASS;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c
index 5fda43901033..42ec202015ed 100644
--- a/tools/testing/selftests/bpf/progs/trigger_bench.c
+++ b/tools/testing/selftests/bpf/progs/trigger_bench.c
@@ -9,12 +9,27 @@
char _license[] SEC("license") = "GPL";
-long hits = 0;
+#define CPU_MASK 255
+#define MAX_CPUS (CPU_MASK + 1) /* should match MAX_BUCKETS in benchs/bench_trigger.c */
+
+/* matches struct counter in bench.h */
+struct counter {
+ long value;
+} __attribute__((aligned(128)));
+
+struct counter hits[MAX_CPUS];
+
+static __always_inline void inc_counter(void)
+{
+ int cpu = bpf_get_smp_processor_id();
+
+ __sync_add_and_fetch(&hits[cpu & CPU_MASK].value, 1);
+}
SEC("tp/syscalls/sys_enter_getpgid")
int bench_trigger_tp(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return 0;
}
@@ -22,69 +37,69 @@ SEC("raw_tp/sys_enter")
int BPF_PROG(bench_trigger_raw_tp, struct pt_regs *regs, long id)
{
if (id == __NR_getpgid)
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return 0;
}
SEC("kprobe/" SYS_PREFIX "sys_getpgid")
int bench_trigger_kprobe(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return 0;
}
SEC("kretprobe/" SYS_PREFIX "sys_getpgid")
int bench_trigger_kretprobe(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return 0;
}
SEC("kprobe.multi/" SYS_PREFIX "sys_getpgid")
int bench_trigger_kprobe_multi(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return 0;
}
SEC("kretprobe.multi/" SYS_PREFIX "sys_getpgid")
int bench_trigger_kretprobe_multi(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int bench_trigger_fentry(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return 0;
}
SEC("fexit/" SYS_PREFIX "sys_getpgid")
int bench_trigger_fexit(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return 0;
}
SEC("fentry.s/" SYS_PREFIX "sys_getpgid")
int bench_trigger_fentry_sleep(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return 0;
}
SEC("fmod_ret/" SYS_PREFIX "sys_getpgid")
int bench_trigger_fmodret(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return -22;
}
SEC("uprobe")
int bench_trigger_uprobe(void *ctx)
{
- __sync_add_and_fetch(&hits, 1);
+ inc_counter();
return 0;
}
diff --git a/tools/testing/selftests/bpf/test_cpp.cpp b/tools/testing/selftests/bpf/test_cpp.cpp
index f4936834f76f..dde0bb16e782 100644
--- a/tools/testing/selftests/bpf/test_cpp.cpp
+++ b/tools/testing/selftests/bpf/test_cpp.cpp
@@ -7,6 +7,7 @@
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include "test_core_extern.skel.h"
+#include "struct_ops_module.skel.h"
template <typename T>
class Skeleton {
@@ -98,6 +99,7 @@ int main(int argc, char *argv[])
{
struct btf_dump_opts opts = { };
struct test_core_extern *skel;
+ struct struct_ops_module *skel2;
struct btf *btf;
int fd;
@@ -118,6 +120,9 @@ int main(int argc, char *argv[])
skel = test_core_extern__open_and_load();
test_core_extern__destroy(skel);
+ skel2 = struct_ops_module__open_and_load();
+ struct_ops_module__destroy(skel2);
+
fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
if (fd < 0)
std::cout << "FAILED to enable stats: " << fd << std::endl;
diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
index 910044f08908..7989ec608454 100755
--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
@@ -72,7 +72,6 @@ cleanup() {
server_listen() {
ip netns exec "${ns2}" nc "${netcat_opt}" -l "${port}" > "${outfile}" &
server_pid=$!
- sleep 0.2
}
client_connect() {
@@ -93,6 +92,16 @@ verify_data() {
fi
}
+wait_for_port() {
+ for i in $(seq 20); do
+ if ip netns exec "${ns2}" ss ${2:--4}OHntl | grep -q "$1"; then
+ return 0
+ fi
+ sleep 0.1
+ done
+ return 1
+}
+
set -e
# no arguments: automated test, run all
@@ -193,6 +202,7 @@ setup
# basic communication works
echo "test basic connectivity"
server_listen
+wait_for_port ${port} ${netcat_opt}
client_connect
verify_data
@@ -204,6 +214,7 @@ ip netns exec "${ns1}" tc filter add dev veth1 egress \
section "encap_${tuntype}_${mac}"
echo "test bpf encap without decap (expect failure)"
server_listen
+wait_for_port ${port} ${netcat_opt}
! client_connect
if [[ "$tuntype" =~ "udp" ]]; then