author     David S. Miller <[email protected]>  2019-06-14 20:18:28 -0700
committer  David S. Miller <[email protected]>  2019-06-14 20:18:28 -0700
commit     35fc07aee8f6d55aeacdbfdccc425e684737f741 (patch)
tree       220daaf7fb72acc2cfe6fab06c1ac0e0f4384157 /include
parent     9a33629ba6b26caebd73e3c581ba1e6068c696a7 (diff)
parent     ce27ec60648d8e066227cb2f58b1d3d4f7253d08 (diff)
Merge branch 'tcp-add-three-static-keys'
Eric Dumazet says:

====================
tcp: add three static keys

Recent addition of per TCP socket rx/tx cache brought regressions
for some workloads, as reported by Feng Tang.

It seems better to make them opt-in, before we adopt better heuristics.

The last patch adds high_order_alloc_disable sysctl to ask TCP sendmsg()
to exclusively use order-0 allocations, as mm layer has specific
optimizations.
====================

Signed-off-by: David S. Miller <[email protected]>
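For readers unfamiliar with the mechanism: a static key compiles to a
run-time-patchable branch, so a disabled feature costs a single no-op on
the hot path. A minimal sketch of the opt-in pattern the series relies
on, with illustrative names (demo_feature_key, demo_hot_path and
demo_opt_in are not from the patches):

#include <linux/jump_label.h>

/* Off by default, matching the opt-in behaviour described above. */
DEFINE_STATIC_KEY_FALSE(demo_feature_key);

static inline void demo_hot_path(void)
{
	/* Compiles to a patched no-op while the key is disabled. */
	if (static_branch_unlikely(&demo_feature_key)) {
		/* feature work, only reached once the key is enabled */
	}
}

static void demo_opt_in(void)
{
	static_branch_enable(&demo_feature_key); /* patches the branch in */
}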
Diffstat (limited to 'include')
-rw-r--r--  include/linux/bpf.h     |  1 -
-rw-r--r--  include/linux/sysctl.h  |  3 +++
-rw-r--r--  include/net/sock.h      | 12 +++++++-----
3 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5df8e9e2a393..b92ef9f73e42 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -600,7 +600,6 @@ void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 
 extern int sysctl_unprivileged_bpf_disabled;
-extern int sysctl_bpf_stats_enabled;
 
 int bpf_map_new_fd(struct bpf_map *map, int flags);
 int bpf_prog_new_fd(struct bpf_prog *prog);
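The shadow integer can go because the sysctl now operates on the
existing static key (bpf_stats_enabled_key, declared in linux/filter.h)
through proc_do_static_key(). An illustrative sketch of the consumer
side; the helper name is hypothetical, only the key name is real:

#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);

static inline bool demo_stats_enabled(void)
{
	/* Before the series: if (sysctl_bpf_stats_enabled) ...
	 * (a memory load plus test). The key version is a patched
	 * no-op branch while stats collection is off.
	 */
	return static_branch_unlikely(&bpf_stats_enabled_key);
}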
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index b769ecfcc3bd..aadd310769d0 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -63,6 +63,9 @@ extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
 				      void __user *, size_t *, loff_t *);
 extern int proc_do_large_bitmap(struct ctl_table *, int,
 				void __user *, size_t *, loff_t *);
+extern int proc_do_static_key(struct ctl_table *table, int write,
+			      void __user *buffer, size_t *lenp,
+			      loff_t *ppos);
 
 /*
  * Register a set of sysctl names by calling register_sysctl_table
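proc_do_static_key() lets a sysctl entry flip a static key directly
instead of shadowing it with an int. A hedged sketch of how a ctl_table
entry can be wired to it, modeled on the high_order_alloc_disable sysctl
the commit message mentions (field values are a best-effort
reconstruction, not quoted from the patch):

#include <linux/sysctl.h>
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);

static struct ctl_table demo_table[] = {
	{
		/* .data points at the embedded struct static_key; the
		 * handler enables/disables the key on write.
		 */
		.procname	= "high_order_alloc_disable",
		.data		= &net_high_order_alloc_disable_key.key,
		.maxlen		= sizeof(net_high_order_alloc_disable_key),
		.mode		= 0644,
		.proc_handler	= proc_do_static_key,
	},
	{ }
};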
diff --git a/include/net/sock.h b/include/net/sock.h
index e9d769c04637..6cbc16136357 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1463,12 +1463,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
 		__sk_mem_reclaim(sk, 1 << 20);
 }
 
+DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 {
 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 	sk->sk_wmem_queued -= skb->truesize;
 	sk_mem_uncharge(sk, skb->truesize);
-	if (!sk->sk_tx_skb_cache && !skb_cloned(skb)) {
+	if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
+	    !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
 		skb_zcopy_clear(skb, true);
 		sk->sk_tx_skb_cache = skb;
 		return;
@@ -2433,13 +2435,11 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
  * This routine must be called with interrupts disabled or with the socket
  * locked so that the sk_buff queue operation is ok.
  */
+DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
-	if (
-#ifdef CONFIG_RPS
-	    !static_branch_unlikely(&rps_needed) &&
-#endif
+	if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
 	    !sk->sk_rx_skb_cache) {
 		sk->sk_rx_skb_cache = skb;
 		skb_orphan(skb);
@@ -2534,6 +2534,8 @@ extern int sysctl_optmem_max;
 extern __u32 sysctl_wmem_default;
 extern __u32 sysctl_rmem_default;
 
+DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
+
 static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
 {
 	/* Does this proto have per netns sysctl_wmem ? */
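All three keys default to false, so the skb caches and the order-0
restriction stay off until the corresponding sysctls are written. A
hedged sketch of how net_high_order_alloc_disable_key is meant to be
consumed on the sendmsg() allocation path, simplified from the pattern
the series describes (the helper name is illustrative, not from the
patch):

#include <linux/jump_label.h>
#include <linux/gfp.h>

DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);

static struct page *demo_alloc_frag_page(gfp_t gfp, unsigned int order)
{
	if (order &&
	    !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
		struct page *page;

		/* Try a high-order page first, without direct reclaim. */
		page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
				   __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY,
				   order);
		if (page)
			return page;
	}
	/* Key enabled (or high-order failed): use an order-0 page,
	 * for which the mm layer has specific optimizations.
	 */
	return alloc_pages(gfp, 0);
}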