Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c  66
1 file changed, 32 insertions(+), 34 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ce07f52d89e7..21b71148c532 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -63,6 +63,11 @@ static const unsigned long guest_offloads[] = {
VIRTIO_NET_F_GUEST_CSUM
};
+#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
+ (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
+ (1ULL << VIRTIO_NET_F_GUEST_UFO))
+
struct virtnet_stat_desc {
char desc[ETH_GSTRING_LEN];
size_t offset;
@@ -689,6 +694,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data_end = xdp.data + len;
xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
+ xdp.frame_sz = buflen;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
@@ -702,7 +708,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
break;
case XDP_TX:
stats->xdp_tx++;
- xdpf = convert_to_xdp_frame(&xdp);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
@@ -723,7 +729,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
case XDP_DROP:
@@ -797,10 +803,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
int offset = buf - page_address(page);
struct sk_buff *head_skb, *curr_skb;
struct bpf_prog *xdp_prog;
- unsigned int truesize;
+ unsigned int truesize = mergeable_ctx_to_truesize(ctx);
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
- int err;
unsigned int metasize = 0;
+ unsigned int frame_sz;
+ int err;
head_skb = NULL;
stats->bytes += len - vi->hdr_len;
@@ -821,6 +828,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
+ /* Buffers with headroom use PAGE_SIZE as alloc size,
+ * see add_recvbuf_mergeable() + get_mergeable_buf_len()
+ */
+ frame_sz = headroom ? PAGE_SIZE : truesize;
+
/* This happens when rx buffer size is underestimated
* or headroom is not enough because of the buffer
* was refilled before XDP is set. This should only
@@ -834,6 +846,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
page, offset,
VIRTIO_XDP_HEADROOM,
&len);
+ frame_sz = PAGE_SIZE;
+
if (!xdp_page)
goto err_xdp;
offset = VIRTIO_XDP_HEADROOM;
@@ -850,6 +864,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp.data_end = xdp.data + (len - vi->hdr_len);
xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
+ xdp.frame_sz = frame_sz - vi->hdr_len;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
@@ -882,7 +897,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
break;
case XDP_TX:
stats->xdp_tx++;
- xdpf = convert_to_xdp_frame(&xdp);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
@@ -912,10 +927,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
if (unlikely(xdp_page != page))
__free_pages(xdp_page, 0);
@@ -924,7 +939,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
rcu_read_unlock();
- truesize = mergeable_ctx_to_truesize(ctx);
if (unlikely(len > truesize)) {
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
dev->name, len, (unsigned long)ctx);
@@ -2255,12 +2269,13 @@ static void virtnet_update_settings(struct virtnet_info *vi)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
return;
- speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config,
- speed));
+ virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
+
if (ethtool_validate_speed(speed))
vi->speed = speed;
- duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config,
- duplex));
+
+ virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
+
if (ethtool_validate_duplex(duplex))
vi->duplex = duplex;
}
@@ -2481,28 +2496,11 @@ err:
return err;
}
-static u32 virtnet_xdp_query(struct net_device *dev)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- const struct bpf_prog *xdp_prog;
- int i;
-
- for (i = 0; i < vi->max_queue_pairs; i++) {
- xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog);
- if (xdp_prog)
- return xdp_prog->aux->id;
- }
- return 0;
-}
-
static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
- case XDP_QUERY_PROG:
- xdp->prog_id = virtnet_xdp_query(dev);
- return 0;
default:
return -EINVAL;
}
@@ -2538,7 +2536,8 @@ static int virtnet_set_features(struct net_device *dev,
if (features & NETIF_F_LRO)
offloads = vi->guest_offloads_capable;
else
- offloads = 0;
+ offloads = vi->guest_offloads_capable &
+ ~GUEST_OFFLOAD_LRO_MASK;
err = virtnet_set_guest_offloads(vi, offloads);
if (err)
@@ -2611,12 +2610,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
int i;
for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_hash_del(&vi->rq[i].napi);
- netif_napi_del(&vi->rq[i].napi);
- netif_napi_del(&vi->sq[i].napi);
+ __netif_napi_del(&vi->rq[i].napi);
+ __netif_napi_del(&vi->sq[i].napi);
}
- /* We called napi_hash_del() before netif_napi_del(),
+ /* We called __netif_napi_del(),
* we need to respect an RCU grace period before freeing vi->rq
*/
synchronize_net();
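
Note: below is a minimal, self-contained userspace sketch (not driver code; the helper name and the sample numbers are hypothetical) of the frame_sz selection that the receive_mergeable() hunks above introduce: buffers allocated with XDP headroom are backed by a full page, everything else uses the truesize recorded in the buffer context, and the virtio-net header length is subtracted before the value is stored in xdp.frame_sz.

#include <stdio.h>

#define PAGE_SIZE 4096u   /* assumption: 4 KiB pages */

/* Stand-in for the logic added above: buffers that carry XDP headroom
 * were allocated as a full page (see add_recvbuf_mergeable() +
 * get_mergeable_buf_len()), otherwise the truesize from the buffer
 * context is the real allocation size; the virtio-net header is not
 * part of the area handed to the XDP program.
 */
static unsigned int xdp_frame_sz(unsigned int headroom,
                                 unsigned int truesize,
                                 unsigned int hdr_len)
{
        unsigned int frame_sz = headroom ? PAGE_SIZE : truesize;

        return frame_sz - hdr_len;
}

int main(void)
{
        /* hypothetical values: 256 B headroom, 1536 B truesize,
         * 12 B virtio_net_hdr_mrg_rxbuf */
        printf("with headroom:    %u\n", xdp_frame_sz(256, 1536, 12));
        printf("without headroom: %u\n", xdp_frame_sz(0, 1536, 12));
        return 0;
}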