Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/bpf/hashtab.c   | 23
 -rw-r--r--  kernel/bpf/sockmap.c   | 11
 -rw-r--r--  kernel/cpu.c           | 26
 -rw-r--r--  kernel/printk/printk.c |  1
 -rw-r--r--  kernel/watchdog.c      |  4
 -rw-r--r--  kernel/watchdog_hld.c  |  2
 -rw-r--r--  kernel/workqueue.c     |  2

7 files changed, 30 insertions(+), 39 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 04b8eda94e7d..03cc59ee9c95 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -15,6 +15,7 @@
 #include <linux/jhash.h>
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
+#include <linux/random.h>
 #include <uapi/linux/btf.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
@@ -41,6 +42,7 @@ struct bpf_htab {
 	atomic_t count;	/* number of elements in this hashtable */
 	u32 n_buckets;	/* number of hash buckets */
 	u32 elem_size;	/* size of each element in bytes */
+	u32 hashrnd;
 };
 
 /* each htab element is struct htab_elem + key + value */
@@ -371,6 +373,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (!htab->buckets)
 		goto free_htab;
 
+	htab->hashrnd = get_random_int();
 	for (i = 0; i < htab->n_buckets; i++) {
 		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
 		raw_spin_lock_init(&htab->buckets[i].lock);
@@ -402,9 +405,9 @@ free_htab:
 	return ERR_PTR(err);
 }
 
-static inline u32 htab_map_hash(const void *key, u32 key_len)
+static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
 {
-	return jhash(key, key_len, 0);
+	return jhash(key, key_len, hashrnd);
 }
 
 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
@@ -470,7 +473,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	head = select_bucket(htab, hash);
 
@@ -597,7 +600,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	if (!key)
 		goto find_first_elem;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	head = select_bucket(htab, hash);
 
@@ -824,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -880,7 +883,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -945,7 +948,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -998,7 +1001,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -1071,7 +1074,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -1103,7 +1106,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
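The hashtab.c change replaces the constant jhash seed of 0 with a per-map hashrnd drawn from get_random_int() at map creation. The usual motivation for this pattern is that a fixed seed lets whoever controls the keys precompute collisions and pile every element into one bucket; a per-table random seed makes bucket placement unpredictable. Below is a minimal userspace sketch of the same idea -- the seeded FNV-1a mixer and all names in it are illustrative stand-ins, not the kernel's jhash or htab API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct table {
	uint32_t n_buckets;	/* power of two, as in the kernel htab */
	uint32_t hashrnd;	/* per-table seed, cf. htab->hashrnd */
};

/* Seeded FNV-1a; stands in for jhash(key, key_len, hashrnd). */
static uint32_t hash_key(const void *key, uint32_t len, uint32_t seed)
{
	const unsigned char *p = key;
	uint32_t h = 2166136261u ^ seed;

	while (len--) {
		h ^= *p++;
		h *= 16777619u;
	}
	return h;
}

static uint32_t select_bucket(const struct table *t, const void *key,
			      uint32_t len)
{
	return hash_key(key, len, t->hashrnd) & (t->n_buckets - 1);
}

int main(void)
{
	struct table a = { 1024, 0 }, b = { 1024, 0 };

	/* rand() is a stand-in for the kernel's get_random_int(). */
	srand((unsigned)time(NULL));
	a.hashrnd = (uint32_t)rand();
	b.hashrnd = (uint32_t)rand();

	/* The same key lands in different buckets per table, so a
	 * collision set computed against one map instance is useless
	 * against another. */
	printf("table a: bucket %u, table b: bucket %u\n",
	       select_bucket(&a, "key", 3),
	       select_bucket(&b, "key", 3));
	return 0;
}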
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 98e621a29e8e..cf5195c7c331 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1427,12 +1427,15 @@ out:
 static void smap_write_space(struct sock *sk)
 {
 	struct smap_psock *psock;
+	void (*write_space)(struct sock *sk);
 
 	rcu_read_lock();
 	psock = smap_psock_sk(sk);
 	if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
 		schedule_work(&psock->tx_work);
+	write_space = psock->save_write_space;
 	rcu_read_unlock();
+	write_space(sk);
 }
 
 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
@@ -2140,7 +2143,9 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 		return ERR_PTR(-EPERM);
 
 	/* check sanity of attributes */
-	if (attr->max_entries == 0 || attr->value_size != 4 ||
+	if (attr->max_entries == 0 ||
+	    attr->key_size == 0 ||
+	    attr->value_size != 4 ||
 	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
 		return ERR_PTR(-EINVAL);
 
@@ -2267,8 +2272,10 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
 	}
 	l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
			     htab->map.numa_node);
-	if (!l_new)
+	if (!l_new) {
+		atomic_dec(&htab->count);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	memcpy(l_new->key, key, key_size);
 	l_new->sk = sk;
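The smap_write_space() change makes the wrapper chain to the socket's original write-space callback, which sockmap stashes in psock->save_write_space when it takes over the socket: the pointer is snapshotted under rcu_read_lock() and invoked after the unlock. (The alloc_sock_hash_elem() hunk is simpler bookkeeping: the element count bumped before the allocation is now decremented again on the failure path.) Below is a hedged userspace sketch of the save-and-chain callback pattern; my_sock, my_psock and friends are invented for illustration, and the RCU locking is reduced to comments:

#include <stdio.h>

struct my_sock {
	void (*write_space)(struct my_sock *sk);	/* current callback */
};

struct my_psock {
	void (*save_write_space)(struct my_sock *sk);	/* original callback */
};

static struct my_psock psock;	/* stands in for the smap_psock_sk() lookup */

static void original_write_space(struct my_sock *sk)
{
	printf("original write_space: wake writers\n");
}

/* Wrapper installed in place of the original. It does its own work
 * (sockmap schedules tx_work here), then chains to the saved callback
 * so the socket's normal wakeup semantics still happen. */
static void wrapped_write_space(struct my_sock *sk)
{
	void (*write_space)(struct my_sock *sk);

	/* In the kernel the snapshot is taken under rcu_read_lock()
	 * and the call is made only after rcu_read_unlock(). */
	write_space = psock.save_write_space;

	printf("wrapper: schedule tx work\n");
	write_space(sk);
}

int main(void)
{
	struct my_sock sk = { original_write_space };

	/* Take over the socket: save the original, install the wrapper. */
	psock.save_write_space = sk.write_space;
	sk.write_space = wrapped_write_space;

	sk.write_space(&sk);	/* runs the wrapper, then the original */
	return 0;
}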
diff --git a/kernel/cpu.c b/kernel/cpu.c
index ed44d7d34c2d..aa7fe85ad62e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -102,8 +102,6 @@ static inline void cpuhp_lock_release(bool bringup) { }
  * @name:	Name of the step
  * @startup:	Startup function of the step
  * @teardown:	Teardown function of the step
- * @skip_onerr:	Do not invoke the functions on error rollback
- *		Will go away once the notifiers are gone
  * @cant_stop:	Bringup/teardown can't be stopped at this step
  */
 struct cpuhp_step {
@@ -119,7 +117,6 @@ struct cpuhp_step {
 					 struct hlist_node *node);
 	} teardown;
 	struct hlist_head	list;
-	bool			skip_onerr;
 	bool			cant_stop;
 	bool			multi_instance;
 };
@@ -550,12 +547,8 @@ static int bringup_cpu(unsigned int cpu)
 
 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
-	for (st->state--; st->state > st->target; st->state--) {
-		struct cpuhp_step *step = cpuhp_get_step(st->state);
-
-		if (!step->skip_onerr)
-			cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
-	}
+	for (st->state--; st->state > st->target; st->state--)
+		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 }
 
 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
@@ -644,12 +637,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
 
 	WARN_ON_ONCE(!cpuhp_is_ap_state(state));
 
-	if (st->rollback) {
-		struct cpuhp_step *step = cpuhp_get_step(state);
-		if (step->skip_onerr)
-			goto next;
-	}
-
 	if (cpuhp_is_atomic_state(state)) {
 		local_irq_disable();
 		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
@@ -673,7 +660,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
 		st->should_run = false;
 	}
 
-next:
 	cpuhp_lock_release(bringup);
 
 	if (!st->should_run)
@@ -916,12 +902,8 @@ void cpuhp_report_idle_dead(void)
 
 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
-	for (st->state++; st->state < st->target; st->state++) {
-		struct cpuhp_step *step = cpuhp_get_step(st->state);
-
-		if (!step->skip_onerr)
-			cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
-	}
+	for (st->state++; st->state < st->target; st->state++)
+		cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
 }
 
 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 924e37fb1620..fd6f8ed28e01 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -38,7 +38,6 @@
 #include <linux/kmsg_dump.h>
 #include <linux/syslog.h>
 #include <linux/cpu.h>
-#include <linux/notifier.h>
 #include <linux/rculist.h>
 #include <linux/poll.h>
 #include <linux/irq_work.h>
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 5470dce212c0..977918d5d350 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -261,7 +261,7 @@ static void __touch_watchdog(void)
  * entering idle state.  This should only be used for scheduler events.
  * Use touch_softlockup_watchdog() for everything else.
  */
-void touch_softlockup_watchdog_sched(void)
+notrace void touch_softlockup_watchdog_sched(void)
 {
 	/*
 	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
@@ -270,7 +270,7 @@ void touch_softlockup_watchdog_sched(void)
 	raw_cpu_write(watchdog_touch_ts, 0);
 }
 
-void touch_softlockup_watchdog(void)
+notrace void touch_softlockup_watchdog(void)
 {
 	touch_softlockup_watchdog_sched();
 	wq_watchdog_touch(raw_smp_processor_id());
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 1f7020d65d0a..71381168dede 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -29,7 +29,7 @@ static struct cpumask dead_events_mask;
 static unsigned long hardlockup_allcpu_dumped;
 static atomic_t watchdog_cpus = ATOMIC_INIT(0);
 
-void arch_touch_nmi_watchdog(void)
+notrace void arch_touch_nmi_watchdog(void)
 {
 	/*
 	 * Using __raw here because some code paths have
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 60e80198c3df..0280deac392e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5574,7 +5574,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
 }
 
-void wq_watchdog_touch(int cpu)
+notrace void wq_watchdog_touch(int cpu)
 {
 	if (cpu >= 0)
 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
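In the cpu.c hunks above, removing skip_onerr leaves the rollback loops with a single job: walk the hotplug state index back toward the target and invoke each intervening step's callback unconditionally. A toy sketch of that rollback shape follows, with an invented four-state table (the real cpuhp state machine is far larger):

#include <stdio.h>

enum { STATE_OFFLINE, STATE_PREPARE, STATE_BRING_UP, STATE_ONLINE };

struct hp_state {
	int state;	/* step the machine has reached */
	int target;	/* step it is heading for */
};

static void invoke_callback(int state, int bringup)
{
	printf("%s callback for step %d\n",
	       bringup ? "startup" : "teardown", state);
}

/* Mirrors the shape of undo_cpu_up() after the patch: no per-step
 * skip_onerr check, every step strictly above the target is undone. */
static void undo_up(struct hp_state *st)
{
	for (st->state--; st->state > st->target; st->state--)
		invoke_callback(st->state, 0);
}

int main(void)
{
	/* Bring-up failed at STATE_ONLINE; roll back to STATE_OFFLINE. */
	struct hp_state st = {
		.state = STATE_ONLINE,
		.target = STATE_OFFLINE,
	};

	undo_up(&st);	/* tears down STATE_BRING_UP, then STATE_PREPARE */
	return 0;
}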
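The watchdog, watchdog_hld and workqueue hunks mark the watchdog-touch helpers notrace, which keeps the function tracer from instrumenting them; the page carries no commit message, but such annotations are typically needed when a function can be reached from the tracing path itself, where instrumenting it would recurse. In the kernel (with gcc), notrace is defined as __attribute__((no_instrument_function)); the sketch below shows the same attribute cooperating with gcc's -finstrument-functions in userspace. The hook bodies and function names are illustrative:

#include <stdio.h>

/* Userspace stand-in for the kernel's notrace macro. */
#define notrace __attribute__((no_instrument_function))

/* Hooks emitted around every instrumented function when building with
 * -finstrument-functions; they must not be instrumented themselves or
 * each call would recurse into the hook forever. */
notrace void __cyg_profile_func_enter(void *fn, void *site) { }
notrace void __cyg_profile_func_exit(void *fn, void *site) { }

/* Marked notrace, so the hooks above never wrap it -- the userspace
 * analogue of keeping touch_softlockup_watchdog() and friends out of
 * the function tracer. */
static notrace void touch_watchdog(void)
{
	puts("watchdog touched, no enter/exit hooks fired");
}

int main(void)
{
	touch_watchdog();
	return 0;
}

Built with gcc -finstrument-functions, every function except the annotated ones gets enter/exit hooks; built without the flag, the program still compiles and runs unchanged.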