Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--	drivers/net/virtio_net.c | 37 ++++++++++++++++++++++++++++++++++---
1 file changed, 34 insertions(+), 3 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 356cf8dd4164..ec8e1b3108c3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -242,9 +242,15 @@ struct virtnet_info {
 	/* Packet virtio header size */
 	u8 hdr_len;
 
-	/* Work struct for refilling if we run low on memory. */
+	/* Work struct for delayed refilling if we run low on memory. */
 	struct delayed_work refill;
 
+	/* Is delayed refill enabled? */
+	bool refill_enabled;
+
+	/* The lock to synchronize the access to refill_enabled */
+	spinlock_t refill_lock;
+
 	/* Work struct for config space updates */
 	struct work_struct config_work;
 
@@ -348,6 +354,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
 	return p;
 }
 
+static void enable_delayed_refill(struct virtnet_info *vi)
+{
+	spin_lock_bh(&vi->refill_lock);
+	vi->refill_enabled = true;
+	spin_unlock_bh(&vi->refill_lock);
+}
+
+static void disable_delayed_refill(struct virtnet_info *vi)
+{
+	spin_lock_bh(&vi->refill_lock);
+	vi->refill_enabled = false;
+	spin_unlock_bh(&vi->refill_lock);
+}
+
 static void virtqueue_napi_schedule(struct napi_struct *napi,
 				    struct virtqueue *vq)
 {
@@ -1527,8 +1547,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 	}
 
 	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
-		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
-			schedule_delayed_work(&vi->refill, 0);
+		if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
+			spin_lock(&vi->refill_lock);
+			if (vi->refill_enabled)
+				schedule_delayed_work(&vi->refill, 0);
+			spin_unlock(&vi->refill_lock);
+		}
 	}
 
 	u64_stats_update_begin(&rq->stats.syncp);
@@ -1651,6 +1675,8 @@ static int virtnet_open(struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i, err;
 
+	enable_delayed_refill(vi);
+
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		if (i < vi->curr_queue_pairs)
 			/* Make sure we have some buffers: if oom use wq. */
@@ -2033,6 +2059,8 @@ static int virtnet_close(struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i;
 
+	/* Make sure NAPI doesn't schedule refill work */
+	disable_delayed_refill(vi);
 	/* Make sure refill_work doesn't re-enable napi! */
 	cancel_delayed_work_sync(&vi->refill);
 
@@ -2792,6 +2820,8 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
+	enable_delayed_refill(vi);
+
 	if (netif_running(vi->dev)) {
 		err = virtnet_open(vi->dev);
 		if (err)
@@ -3535,6 +3565,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vdev->priv = vi;
 
 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+	spin_lock_init(&vi->refill_lock);
 
 	/* If we can receive ANY GSO packets, we must allocate large ones. */
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
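
The change above boils down to a flag guarded by a lock: the receive path only re-arms the refill work while refill_enabled is true, and virtnet_close() clears the flag before cancel_delayed_work_sync(), so the work cannot be re-scheduled behind its back. The following is a minimal userspace sketch of that same pattern, not driver code: a pthread mutex stands in for vi->refill_lock, and schedule_refill(), rx_out_of_buffers(), open_path() and close_path() are hypothetical names chosen for illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the fields the patch adds to struct virtnet_info. */
static pthread_mutex_t refill_lock = PTHREAD_MUTEX_INITIALIZER;
static bool refill_enabled;

static void schedule_refill(void)
{
	/* In the driver this would be schedule_delayed_work(&vi->refill, 0). */
	printf("refill work scheduled\n");
}

/* Receive path: only re-arm the refill work while it is still allowed. */
static void rx_out_of_buffers(void)
{
	pthread_mutex_lock(&refill_lock);
	if (refill_enabled)
		schedule_refill();
	pthread_mutex_unlock(&refill_lock);
}

/* Open path: allow refill work before buffers are posted. */
static void open_path(void)
{
	pthread_mutex_lock(&refill_lock);
	refill_enabled = true;
	pthread_mutex_unlock(&refill_lock);
}

/* Close path: clear the flag first, so the receive path can no longer
 * re-arm the work; only then is it safe to cancel it. */
static void close_path(void)
{
	pthread_mutex_lock(&refill_lock);
	refill_enabled = false;
	pthread_mutex_unlock(&refill_lock);
	/* cancel_delayed_work_sync(&vi->refill) would follow here. */
}

int main(void)
{
	open_path();
	rx_out_of_buffers();	/* schedules the refill work */
	close_path();
	rx_out_of_buffers();	/* skipped: refill is disabled */
	return 0;
}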