Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	106
1 file changed, 75 insertions, 31 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 6778a9999d52..877c84834d81 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -677,10 +677,6 @@ int dev_get_iflink(const struct net_device *dev)
 	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
 		return dev->netdev_ops->ndo_get_iflink(dev);
 
-	/* If dev->rtnl_link_ops is set, it's a virtual interface. */
-	if (dev->rtnl_link_ops)
-		return 0;
-
 	return dev->ifindex;
 }
 EXPORT_SYMBOL(dev_get_iflink);
@@ -3065,6 +3061,16 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 	else
 		skb_dst_force(skb);
 
+#ifdef CONFIG_NET_SWITCHDEV
+	/* Don't forward if offload device already forwarded */
+	if (skb->offload_fwd_mark &&
+	    skb->offload_fwd_mark == dev->offload_fwd_mark) {
+		consume_skb(skb);
+		rc = NET_XMIT_SUCCESS;
+		goto out;
+	}
+#endif
+
 	txq = netdev_pick_tx(dev, skb, accel_priv);
 	q = rcu_dereference_bh(txq->qdisc);
 
@@ -3452,6 +3458,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	local_irq_save(flags);
 
 	rps_lock(sd);
+	if (!netif_running(skb->dev))
+		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
 		if (qlen) {
@@ -3473,6 +3481,7 @@ enqueue:
 		goto enqueue;
 	}
 
+drop:
 	sd->dropped++;
 	rps_unlock(sd);
 
@@ -3646,15 +3655,15 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 
 	qdisc_skb_cb(skb)->pkt_len = skb->len;
 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
-	qdisc_bstats_update_cpu(cl->q, skb);
+	qdisc_bstats_cpu_update(cl->q, skb);
 
-	switch (tc_classify(skb, cl, &cl_res)) {
+	switch (tc_classify(skb, cl, &cl_res, false)) {
 	case TC_ACT_OK:
 	case TC_ACT_RECLASSIFY:
 		skb->tc_index = TC_H_MIN(cl_res.classid);
 		break;
 	case TC_ACT_SHOT:
-		qdisc_qstats_drop_cpu(cl->q);
+		qdisc_qstats_cpu_drop(cl->q);
 	case TC_ACT_STOLEN:
 	case TC_ACT_QUEUED:
 		kfree_skb(skb);
@@ -3775,8 +3784,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 
 	pt_prev = NULL;
 
-	rcu_read_lock();
-
 another_round:
 	skb->skb_iif = skb->dev->ifindex;
 
@@ -3786,7 +3793,7 @@ another_round:
 	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
 		skb = skb_vlan_untag(skb);
 		if (unlikely(!skb))
-			goto unlock;
+			goto out;
 	}
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -3816,10 +3823,10 @@ skip_taps:
 	if (static_key_false(&ingress_needed)) {
 		skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
 		if (!skb)
-			goto unlock;
+			goto out;
 
 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
-			goto unlock;
+			goto out;
 	}
 #endif
 #ifdef CONFIG_NET_CLS_ACT
@@ -3837,7 +3844,7 @@ ncls:
 		if (vlan_do_receive(&skb))
 			goto another_round;
 		else if (unlikely(!skb))
-			goto unlock;
+			goto out;
 	}
 
 	rx_handler = rcu_dereference(skb->dev->rx_handler);
@@ -3849,7 +3856,7 @@ ncls:
 		switch (rx_handler(&skb)) {
 		case RX_HANDLER_CONSUMED:
 			ret = NET_RX_SUCCESS;
-			goto unlock;
+			goto out;
 		case RX_HANDLER_ANOTHER:
 			goto another_round;
 		case RX_HANDLER_EXACT:
@@ -3903,8 +3910,7 @@ drop:
 		ret = NET_RX_DROP;
 	}
 
-unlock:
-	rcu_read_unlock();
+out:
 	return ret;
 }
 
@@ -3935,29 +3941,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
 
 static int netif_receive_skb_internal(struct sk_buff *skb)
 {
+	int ret;
+
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
 
+	rcu_read_lock();
+
 #ifdef CONFIG_RPS
 	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
-		int cpu, ret;
-
-		rcu_read_lock();
-
-		cpu = get_rps_cpu(skb->dev, skb, &rflow);
+		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
 			return ret;
 		}
-		rcu_read_unlock();
 	}
 #endif
-	return __netif_receive_skb(skb);
+	ret = __netif_receive_skb(skb);
+	rcu_read_unlock();
+	return ret;
 }
 
 /**
@@ -4502,8 +4509,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		struct sk_buff *skb;
 
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
+			rcu_read_lock();
 			local_irq_enable();
 			__netif_receive_skb(skb);
+			rcu_read_unlock();
 			local_irq_disable();
 			input_queue_head_incr(sd);
 			if (++work >= quota) {
@@ -4986,7 +4995,7 @@ EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
  * Gets the next netdev_adjacent->private from the dev's lower neighbour
  * list, starting from iter position. The caller must hold either hold the
  * RTNL lock or its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
  */
 void *netdev_lower_get_next_private(struct net_device *dev,
 				    struct list_head **iter)
@@ -5041,7 +5050,7 @@ EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
  * Gets the next netdev_adjacent from the dev's lower neighbour
  * list, starting from iter position. The caller must hold RTNL lock or
  * its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
 */
 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
 {
@@ -5302,6 +5311,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 				   struct net_device *upper_dev, bool master,
 				   void *private)
 {
+	struct netdev_notifier_changeupper_info changeupper_info;
 	struct netdev_adjacent *i, *j, *to_i, *to_j;
 	int ret = 0;
 
@@ -5320,6 +5330,10 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 	if (master && netdev_master_upper_dev_get(dev))
 		return -EBUSY;
 
+	changeupper_info.upper_dev = upper_dev;
+	changeupper_info.master = master;
+	changeupper_info.linking = true;
+
 	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
 						   master);
 	if (ret)
@@ -5358,7 +5372,8 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 			goto rollback_lower_mesh;
 	}
 
-	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
+	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
+				      &changeupper_info.info);
 	return 0;
 
 rollback_lower_mesh:
@@ -5453,9 +5468,14 @@ EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
 void netdev_upper_dev_unlink(struct net_device *dev,
 			     struct net_device *upper_dev)
 {
+	struct netdev_notifier_changeupper_info changeupper_info;
 	struct netdev_adjacent *i, *j;
 	ASSERT_RTNL();
 
+	changeupper_info.upper_dev = upper_dev;
+	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
+	changeupper_info.linking = false;
+
 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
 
 	/* Here is the tricky part. We must remove all dev's lower
@@ -5475,7 +5495,8 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
 		__netdev_adjacent_dev_unlink(dev, i->dev);
 
-	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
+	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
+				      &changeupper_info.info);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
@@ -6076,6 +6097,26 @@ int dev_get_phys_port_name(struct net_device *dev,
 EXPORT_SYMBOL(dev_get_phys_port_name);
 
 /**
+ *	dev_change_proto_down - update protocol port state information
+ *	@dev: device
+ *	@proto_down: new value
+ *
+ *	This info can be used by switch drivers to set the phys state of the
+ *	port.
+ */
+int dev_change_proto_down(struct net_device *dev, bool proto_down)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	if (!ops->ndo_change_proto_down)
+		return -EOPNOTSUPP;
+	if (!netif_device_present(dev))
+		return -ENODEV;
+	return ops->ndo_change_proto_down(dev, proto_down);
+}
+EXPORT_SYMBOL(dev_change_proto_down);
+
+/**
  *	dev_new_index	-	allocate an ifindex
  *	@net: the applicable net namespace
  *
@@ -6139,6 +6180,7 @@ static void rollback_registered_many(struct list_head *head)
 		unlist_netdevice(dev);
 
 		dev->reg_state = NETREG_UNREGISTERING;
+		on_each_cpu(flush_backlog, dev, 1);
 	}
 
 	synchronize_net();
@@ -6409,7 +6451,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
 	struct netdev_queue *tx;
 	size_t sz = count * sizeof(*tx);
 
-	BUG_ON(count < 1 || count > 0xffff);
+	if (count < 1 || count > 0xffff)
+		return -EINVAL;
 
 	tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
 	if (!tx) {
@@ -6773,8 +6816,6 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
-		on_each_cpu(flush_backlog, dev, 1);
-
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */
@@ -6968,6 +7009,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
 	setup(dev);
 
+	if (!dev->tx_queue_len)
+		dev->priv_flags |= IFF_NO_QUEUE;
+
 	dev->num_tx_queues = txqs;
 	dev->real_num_tx_queues = txqs;
 	if (netif_alloc_netdev_queues(dev))
@@ -7640,7 +7684,7 @@ static int __init net_dev_init(void)
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
 	hotcpu_notifier(dev_cpu_callback, 0);
-	dst_init();
+	dst_subsys_init();
 	rc = 0;
 out:
 	return rc;
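The CONFIG_NET_SWITCHDEV hunk in __dev_queue_xmit() treats an skb as already delivered when its offload_fwd_mark matches the egress device's mark, i.e. when the switch ASIC has forwarded the frame in hardware. That check only fires if the switch driver tags skbs on ingress. A minimal sketch of that driver side, assuming a hypothetical foo driver (struct foo_port and all foo_* names are illustrative, not part of this diff):

struct foo_port {
	struct net_device *netdev;
};

/* Switch driver rx path: stamp the skb with the ingress port's
 * forwarding-domain mark so __dev_queue_xmit() can suppress a
 * duplicate software forward to ports sharing that domain.
 */
static void foo_port_rx(struct foo_port *port, struct sk_buff *skb)
{
	skb->dev = port->netdev;
	skb->offload_fwd_mark = port->netdev->offload_fwd_mark;
	netif_receive_skb(skb);
}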
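Since __netdev_upper_dev_link() and netdev_upper_dev_unlink() now go through call_netdevice_notifiers_info(), a NETDEV_CHANGEUPPER subscriber can read the upper device, master flag, and link/unlink direction from the notifier payload instead of re-walking the adjacency lists. A sketch of such a subscriber, using the netdev_notifier_changeupper_info fields populated above (the foo_* name is hypothetical):

static int foo_netdevice_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_CHANGEUPPER)
		return NOTIFY_DONE;

	/* info->linking distinguishes link from unlink; info->master
	 * says whether upper_dev is (or was) dev's master device.
	 */
	netdev_dbg(dev, "%s %s (master: %d)\n",
		   info->linking ? "linked to" : "unlinked from",
		   netdev_name(info->upper_dev), info->master);
	return NOTIFY_OK;
}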
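dev_change_proto_down() above is a thin dispatcher: it returns -EOPNOTSUPP unless the driver provides ndo_change_proto_down, so existing drivers are unaffected. What a driver-side implementation could look like, as a sketch only (the foo_* names and the proto_down field in the private struct are hypothetical):

struct foo_priv {
	bool proto_down;	/* last state requested by the stack */
};

static int foo_change_proto_down(struct net_device *dev, bool proto_down)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* A real switch driver would also program the port's phys
	 * state here, as the kernel-doc above suggests.
	 */
	priv->proto_down = proto_down;
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_change_proto_down	= foo_change_proto_down,
};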
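The alloc_netdev_mqs() hunk flags any device whose setup() callback leaves tx_queue_len at zero as IFF_NO_QUEUE. A virtual device that wants no qdisc could opt in like this (foo_setup is a hypothetical setup callback, sketch only):

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* ether_setup() installs a default tx_queue_len; clearing it
	 * here, before alloc_netdev_mqs() inspects it, makes the core
	 * mark this qdisc-less virtual device IFF_NO_QUEUE.
	 */
	dev->tx_queue_len = 0;
}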