Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c | 33
1 file changed, 31 insertions, 2 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 6c5967e80132..0f72ff5d34ba 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1184,6 +1184,18 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
 			return -ENOMEM;
 
 		for_each_netdev(net, d) {
+			struct netdev_name_node *name_node;
+			list_for_each_entry(name_node, &d->name_node->list, list) {
+				if (!sscanf(name_node->name, name, &i))
+					continue;
+				if (i < 0 || i >= max_netdevices)
+					continue;
+
+				/*  avoid cases where sscanf is not exact inverse of printf */
+				snprintf(buf, IFNAMSIZ, name, i);
+				if (!strncmp(buf, name_node->name, IFNAMSIZ))
+					set_bit(i, inuse);
+			}
 			if (!sscanf(d->name, name, &i))
 				continue;
 			if (i < 0 || i >= max_netdevices)
@@ -4294,6 +4306,13 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 		 */
 		thread = READ_ONCE(napi->thread);
 		if (thread) {
+			/* Avoid doing set_bit() if the thread is in
+			 * INTERRUPTIBLE state, cause napi_thread_wait()
+			 * makes sure to proceed with napi polling
+			 * if the thread is explicitly woken from here.
+			 */
+			if (READ_ONCE(thread->state) != TASK_INTERRUPTIBLE)
+				set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
 			wake_up_process(thread);
 			return;
 		}
@@ -6486,6 +6505,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
 
 		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
+			      NAPIF_STATE_SCHED_THREADED |
 			      NAPIF_STATE_PREFER_BUSY_POLL);
 
 		/* If STATE_MISSED was set, leave STATE_SCHED set,
@@ -6968,16 +6988,25 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 
 static int napi_thread_wait(struct napi_struct *napi)
 {
+	bool woken = false;
+
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	while (!kthread_should_stop() && !napi_disable_pending(napi)) {
-		if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
+		/* Testing SCHED_THREADED bit here to make sure the current
+		 * kthread owns this napi and could poll on this napi.
+		 * Testing SCHED bit is not enough because SCHED bit might be
+		 * set by some other busy poll thread or by napi_disable().
+		 */
+		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
 			WARN_ON(!list_empty(&napi->poll_list));
 			__set_current_state(TASK_RUNNING);
 			return 0;
 		}
 
 		schedule();
+		/* woken being true indicates this thread owns this napi. */
+		woken = true;
 		set_current_state(TASK_INTERRUPTIBLE);
 	}
 	__set_current_state(TASK_RUNNING);
@@ -11346,7 +11375,7 @@ static void __net_exit default_device_exit(struct net *net)
 			continue;
 
 		/* Leave virtual devices for the generic cleanup */
-		if (dev->rtnl_link_ops)
+		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
			continue;
 
 		/* Push remaining network devices to init_net */
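
For reference, the __dev_alloc_name() hunk above extends an existing pattern: parse each existing name with sscanf() against the requested format (e.g. "eth%d"), then print the parsed index back with snprintf() and only mark the index as in use if the round trip reproduces the original string exactly. The standalone userspace sketch below is not the kernel code; it only illustrates that round-trip check, with a hypothetical alloc_name_idx() helper, a fixed MAX_IFS bound standing in for max_netdevices, and a plain array standing in for the kernel's inuse bitmap.

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ	16
#define MAX_IFS		256	/* stand-in for max_netdevices */

/* Illustrative only: return the lowest index not claimed by an existing
 * name that exactly matches the pattern, mirroring the sscanf()/snprintf()
 * round trip used in __dev_alloc_name().
 */
static int alloc_name_idx(const char *pattern, const char **names, int n)
{
	unsigned char inuse[MAX_IFS] = { 0 };
	char buf[IFNAMSIZ];
	int i, j;

	for (j = 0; j < n; j++) {
		if (sscanf(names[j], pattern, &i) != 1)
			continue;
		if (i < 0 || i >= MAX_IFS)
			continue;

		/* avoid cases where sscanf is not an exact inverse of printf */
		snprintf(buf, IFNAMSIZ, pattern, i);
		if (!strncmp(buf, names[j], IFNAMSIZ))
			inuse[i] = 1;
	}

	for (i = 0; i < MAX_IFS; i++)
		if (!inuse[i])
			return i;
	return -1;
}

int main(void)
{
	const char *existing[] = { "eth0", "eth1", "eth01", "wlan0" };

	printf("next: eth%d\n", alloc_name_idx("eth%d", existing, 4));
	return 0;
}

With the sample list above this prints "next: eth2": "eth01" parses to index 1 but fails the round-trip comparison, so only "eth0" and "eth1" claim indices. The new list_for_each_entry() loop in the hunk applies this same check to each device's alternative names (netdev_name_node entries), not just dev->name.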