path: root/kernel/bpf/sockmap.c
author    David S. Miller <davem@davemloft.net>  2017-10-20 13:01:30 +0100
committer David S. Miller <davem@davemloft.net>  2017-10-20 13:01:30 +0100
commit    e95c6cf447ecac5ab5bc38600e1d2ac7b3d54aae (patch)
tree      52d1bcf9c1257a973d3a78cdf54d0d6f005df705 /kernel/bpf/sockmap.c
parent    1cc276cec9ec574d41cf47dfc0f51406b6f26ab4 (diff)
parent    9ef2a8cd5c0dcb8e1f1534615c56eb13b630c363 (diff)
Merge branch 'sockmap-fixes'
John Fastabend says:

====================
sockmap fixes for net

The following implements a set of fixes for sockmap and changes the API
slightly in a few places to reduce preempt_disable/enable scope. We do
this here in net because it requires an API change, and fixing it now
avoids getting stuck with a legacy API going forward.

The short description: access to skb mark is removed; it is problematic
when we add features in the future because mark is a union and is used
by the TCP/socket code internally. We don't want to expose this to BPF
programs or let programs change the values.

The other change is caching metadata in the skb itself between when the
BPF program returns a redirect code and the core code implements the
redirect. This avoids having per-CPU metadata.

Finally, tighten the restrictions on using sockmap to CAP_NET_ADMIN and
SOCK_STREAM sockets only.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
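As a rough illustration of the tightened update path described above, the
sketch below shows a minimal userspace call that ends up in
sock_map_update_elem(). It is not part of this series; sockmap_add() and its
parameters are hypothetical names, and it assumes map_fd was created as a
BPF_MAP_TYPE_SOCKMAP by a process with CAP_NET_ADMIN and that sock_fd is a
connected TCP (SOCK_STREAM/IPPROTO_TCP) socket.

/* Hypothetical userspace sketch, not from this series: adding a socket
 * to a sockmap slot via the raw bpf(2) syscall.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int sockmap_add(int map_fd, int sock_fd, unsigned int key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (unsigned long long)(unsigned long)&key;      /* 4-byte key */
	attr.value  = (unsigned long long)(unsigned long)&sock_fd;  /* 4-byte value: the socket fd */
	attr.flags  = BPF_ANY;

	/* BPF_MAP_UPDATE_ELEM lands in sock_map_update_elem() for sockmaps */
	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

With the checks added in this diff, passing a non-TCP socket is rejected with
-EOPNOTSUPP, and creating the map without CAP_NET_ADMIN fails with -EPERM.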
Diffstat (limited to 'kernel/bpf/sockmap.c')
-rw-r--r--  kernel/bpf/sockmap.c  28
1 file changed, 18 insertions, 10 deletions
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 6424ce0e4969..2b6eb35ae5d3 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -39,6 +39,7 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <net/strparser.h>
+#include <net/tcp.h>
struct bpf_stab {
struct bpf_map map;
@@ -101,9 +102,16 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
return SK_DROP;
skb_orphan(skb);
+ /* We need to ensure that BPF metadata for maps is also cleared
+ * when we orphan the skb so that we don't have the possibility
+ * to reference a stale map.
+ */
+ TCP_SKB_CB(skb)->bpf.map = NULL;
skb->sk = psock->sock;
bpf_compute_data_end(skb);
+ preempt_disable();
rc = (*prog->bpf_func)(skb, prog->insnsi);
+ preempt_enable();
skb->sk = NULL;
return rc;
@@ -114,17 +122,10 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
struct sock *sk;
int rc;
- /* Because we use per cpu values to feed input from sock redirect
- * in BPF program to do_sk_redirect_map() call we need to ensure we
- * are not preempted. RCU read lock is not sufficient in this case
- * with CONFIG_PREEMPT_RCU enabled so we must be explicit here.
- */
- preempt_disable();
rc = smap_verdict_func(psock, skb);
switch (rc) {
case SK_REDIRECT:
- sk = do_sk_redirect_map();
- preempt_enable();
+ sk = do_sk_redirect_map(skb);
if (likely(sk)) {
struct smap_psock *peer = smap_psock_sk(sk);
@@ -141,8 +142,6 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
/* Fall through and free skb otherwise */
case SK_DROP:
default:
- if (rc != SK_REDIRECT)
- preempt_enable();
kfree_skb(skb);
}
}
@@ -487,6 +486,9 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
int err = -EINVAL;
u64 cost;
+ if (!capable(CAP_NET_ADMIN))
+ return ERR_PTR(-EPERM);
+
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
@@ -840,6 +842,12 @@ static int sock_map_update_elem(struct bpf_map *map,
return -EINVAL;
}
+ if (skops.sk->sk_type != SOCK_STREAM ||
+ skops.sk->sk_protocol != IPPROTO_TCP) {
+ fput(socket->file);
+ return -EOPNOTSUPP;
+ }
+
err = sock_map_ctx_update_elem(&skops, map, key, flags);
fput(socket->file);
return err;
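For context on the per-CPU-to-skb change above: instead of keeping the
redirect target in per-CPU state between the BPF program's return and
do_sk_redirect_map(), the verdict path now carries it in the skb's control
block; the patch reuses TCP_SKB_CB(skb)->bpf for this (hence the new
<net/tcp.h> include and the clearing of the map pointer after skb_orphan()).
The sketch below shows only the general pattern; struct redir_scratch, its
fields, and the helper names are made-up, not the actual layout added to
tcp_skb_cb.

/* Illustrative only: the general "stash redirect state in skb->cb" pattern.
 * All names here are hypothetical; the real patch uses TCP_SKB_CB(skb)->bpf.
 */
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>

struct redir_scratch {
	struct bpf_map *map;	/* map chosen by the BPF program, or NULL */
	u32 key;		/* slot within that map */
};

/* Cleared before the program runs (cf. TCP_SKB_CB(skb)->bpf.map = NULL in
 * the hunk above) so leftover state can never reference a stale map.
 */
static void redir_scratch_clear(struct sk_buff *skb)
{
	((struct redir_scratch *)skb->cb)->map = NULL;
}

/* Written while the BPF program runs... */
static void redir_scratch_set(struct sk_buff *skb, struct bpf_map *map, u32 key)
{
	struct redir_scratch *r = (struct redir_scratch *)skb->cb;

	r->map = map;
	r->key = key;
}

/* ...and read by the core code when it performs the redirect.  Because the
 * state travels with the skb rather than living in per-CPU variables, the
 * wide preempt_disable() region formerly held across smap_do_verdict() is
 * no longer needed; only the narrow one around the program call remains.
 */
static struct bpf_map *redir_scratch_get(struct sk_buff *skb, u32 *key)
{
	struct redir_scratch *r = (struct redir_scratch *)skb->cb;

	if (!r->map)
		return NULL;	/* the program did not ask for a redirect */
	*key = r->key;
	return r->map;		/* caller looks up the target socket here */
}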