Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--   drivers/net/virtio_net.c | 952
1 file changed, 584 insertions(+), 368 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 765c2d6358da..99a26a9efec1 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -23,41 +23,37 @@ #include <linux/virtio.h> #include <linux/virtio_net.h> #include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <linux/scatterlist.h> #include <linux/if_vlan.h> #include <linux/slab.h> #include <linux/cpu.h> #include <linux/average.h> -#include <net/busy_poll.h> +#include <net/route.h> static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); -static bool csum = true, gso = true; +static bool csum = true, gso = true, napi_tx; module_param(csum, bool, 0444); module_param(gso, bool, 0444); +module_param(napi_tx, bool, 0644); /* FIXME: MTU in config. */ #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 +#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) + +/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ +#define VIRTIO_XDP_HEADROOM 256 + /* RX packet size EWMA. The average packet size is used to determine the packet * buffer size when refilling RX rings. As the entire RX ring may be refilled * at once, the weight is chosen so that the EWMA will be insensitive to short- * term, transient changes in packet size. */ -DECLARE_EWMA(pkt_len, 1, 64) - -/* With mergeable buffers we align buffer address and use the low bits to - * encode its true size. Buffer size is up to 1 page so we need to align to - * square root of page size to ensure we reserve enough bits to encode the true - * size. - */ -#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2) - -/* Minimum alignment for mergeable packet buffers. */ -#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \ - 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT) +DECLARE_EWMA(pkt_len, 0, 64) #define VIRTNET_DRIVER_VERSION "1.0.0" @@ -81,6 +77,8 @@ struct send_queue { /* Name of the send queue: output.$index */ char name[40]; + + struct napi_struct napi; }; /* Internal representation of a receive virtqueue */ @@ -104,6 +102,9 @@ struct receive_queue { /* RX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; + /* Min single buffer size for mergeable buffers case. */ + unsigned int min_buf_len; + /* Name of this receive queue: input.$index */ char name[40]; }; @@ -234,33 +235,39 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) return p; } -static void skb_xmit_done(struct virtqueue *vq) +static void virtqueue_napi_schedule(struct napi_struct *napi, + struct virtqueue *vq) { - struct virtnet_info *vi = vq->vdev->priv; - - /* Suppress further interrupts. */ - virtqueue_disable_cb(vq); - - /* We were probably waiting for more output buffers. 
*/ - netif_wake_subqueue(vi->dev, vq2txq(vq)); + if (napi_schedule_prep(napi)) { + virtqueue_disable_cb(vq); + __napi_schedule(napi); + } } -static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx) +static void virtqueue_napi_complete(struct napi_struct *napi, + struct virtqueue *vq, int processed) { - unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1); - return (truesize + 1) * MERGEABLE_BUFFER_ALIGN; + int opaque; + + opaque = virtqueue_enable_cb_prepare(vq); + if (napi_complete_done(napi, processed) && + unlikely(virtqueue_poll(vq, opaque))) + virtqueue_napi_schedule(napi, vq); } -static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx) +static void skb_xmit_done(struct virtqueue *vq) { - return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN); + struct virtnet_info *vi = vq->vdev->priv; + struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; -} + /* Suppress further interrupts. */ + virtqueue_disable_cb(vq); -static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize) -{ - unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN; - return (unsigned long)buf | (size - 1); + if (napi->weight) + virtqueue_napi_schedule(napi, vq); + else + /* We were probably waiting for more output buffers. */ + netif_wake_subqueue(vi->dev, vq2txq(vq)); } /* Called from bottom half context */ @@ -298,7 +305,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, copy = len; if (copy > skb_tailroom(skb)) copy = skb_tailroom(skb); - memcpy(skb_put(skb, copy), p, copy); + skb_put_data(skb, p, copy); len -= copy; offset += copy; @@ -338,105 +345,49 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, return skb; } -static void virtnet_xdp_xmit(struct virtnet_info *vi, +static bool virtnet_xdp_xmit(struct virtnet_info *vi, struct receive_queue *rq, - struct send_queue *sq, - struct xdp_buff *xdp, - void *data) + struct xdp_buff *xdp) { struct virtio_net_hdr_mrg_rxbuf *hdr; - unsigned int num_sg, len; + unsigned int len; + struct send_queue *sq; + unsigned int qp; void *xdp_sent; int err; + qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); + sq = &vi->sq[qp]; + /* Free up any pending old buffers before queueing new ones. 
*/ while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) { - if (vi->mergeable_rx_bufs) { - struct page *sent_page = virt_to_head_page(xdp_sent); + struct page *sent_page = virt_to_head_page(xdp_sent); - put_page(sent_page); - } else { /* small buffer */ - struct sk_buff *skb = xdp_sent; - - kfree_skb(skb); - } + put_page(sent_page); } - if (vi->mergeable_rx_bufs) { - /* Zero header and leave csum up to XDP layers */ - hdr = xdp->data; - memset(hdr, 0, vi->hdr_len); + xdp->data -= vi->hdr_len; + /* Zero header and leave csum up to XDP layers */ + hdr = xdp->data; + memset(hdr, 0, vi->hdr_len); - num_sg = 1; - sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); - } else { /* small buffer */ - struct sk_buff *skb = data; - - /* Zero header and leave csum up to XDP layers */ - hdr = skb_vnet_hdr(skb); - memset(hdr, 0, vi->hdr_len); + sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); - num_sg = 2; - sg_init_table(sq->sg, 2); - sg_set_buf(sq->sg, hdr, vi->hdr_len); - skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); - } - err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, - data, GFP_ATOMIC); + err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC); if (unlikely(err)) { - if (vi->mergeable_rx_bufs) { - struct page *page = virt_to_head_page(xdp->data); + struct page *page = virt_to_head_page(xdp->data); - put_page(page); - } else /* small buffer */ - kfree_skb(data); - return; // On error abort to avoid unnecessary kick + put_page(page); + return false; } virtqueue_kick(sq->vq); + return true; } -static u32 do_xdp_prog(struct virtnet_info *vi, - struct receive_queue *rq, - struct bpf_prog *xdp_prog, - void *data, int len) +static unsigned int virtnet_get_headroom(struct virtnet_info *vi) { - int hdr_padded_len; - struct xdp_buff xdp; - void *buf; - unsigned int qp; - u32 act; - - if (vi->mergeable_rx_bufs) { - hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); - xdp.data = data + hdr_padded_len; - xdp.data_end = xdp.data + (len - vi->hdr_len); - buf = data; - } else { /* small buffers */ - struct sk_buff *skb = data; - - xdp.data = skb->data; - xdp.data_end = xdp.data + len; - buf = skb->data; - } - - act = bpf_prog_run_xdp(xdp_prog, &xdp); - switch (act) { - case XDP_PASS: - return XDP_PASS; - case XDP_TX: - qp = vi->curr_queue_pairs - - vi->xdp_queue_pairs + - smp_processor_id(); - xdp.data = buf; - virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data); - return XDP_TX; - default: - bpf_warn_invalid_xdp_action(act); - case XDP_ABORTED: - case XDP_DROP: - return XDP_DROP; - } + return vi->xdp_queue_pairs ? 
VIRTIO_XDP_HEADROOM : 0; } static struct sk_buff *receive_small(struct net_device *dev, @@ -444,40 +395,72 @@ static struct sk_buff *receive_small(struct net_device *dev, struct receive_queue *rq, void *buf, unsigned int len) { - struct sk_buff * skb = buf; + struct sk_buff *skb; struct bpf_prog *xdp_prog; - + unsigned int xdp_headroom = virtnet_get_headroom(vi); + unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom; + unsigned int headroom = vi->hdr_len + header_offset; + unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + unsigned int delta = 0; len -= vi->hdr_len; - skb_trim(skb, len); rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { - struct virtio_net_hdr_mrg_rxbuf *hdr = buf; + struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset; + struct xdp_buff xdp; + void *orig_data; u32 act; if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) goto err_xdp; - act = do_xdp_prog(vi, rq, xdp_prog, skb, len); + + xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len; + xdp.data = xdp.data_hard_start + xdp_headroom; + xdp.data_end = xdp.data + len; + orig_data = xdp.data; + act = bpf_prog_run_xdp(xdp_prog, &xdp); + switch (act) { case XDP_PASS: + /* Recalculate length in case bpf program changed it */ + delta = orig_data - xdp.data; break; case XDP_TX: + if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp))) + trace_xdp_exception(vi->dev, xdp_prog, act); rcu_read_unlock(); goto xdp_xmit; - case XDP_DROP: default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(vi->dev, xdp_prog, act); + case XDP_DROP: goto err_xdp; } } rcu_read_unlock(); + skb = build_skb(buf, buflen); + if (!skb) { + put_page(virt_to_head_page(buf)); + goto err; + } + skb_reserve(skb, headroom - delta); + skb_put(skb, len + delta); + if (!delta) { + buf += header_offset; + memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); + } /* keep zeroed vnet hdr since packet was changed by bpf */ + +err: return skb; err_xdp: rcu_read_unlock(); dev->stats.rx_dropped++; - kfree_skb(skb); + put_page(virt_to_head_page(buf)); xdp_xmit: return NULL; } @@ -520,7 +503,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, unsigned int *len) { struct page *page = alloc_page(GFP_ATOMIC); - unsigned int page_off = 0; + unsigned int page_off = VIRTIO_XDP_HEADROOM; if (!page) return NULL; @@ -530,15 +513,13 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, while (--*num_buf) { unsigned int buflen; - unsigned long ctx; void *buf; int off; - ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen); - if (unlikely(!ctx)) + buf = virtqueue_get_buf(rq->vq, &buflen); + if (unlikely(!buf)) goto err_buf; - buf = mergeable_ctx_to_buf_address(ctx); p = virt_to_head_page(buf); off = buf - page_address(p); @@ -556,7 +537,8 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, put_page(p); } - *len = page_off; + /* Headroom does not contribute to packet length */ + *len = page_off - VIRTIO_XDP_HEADROOM; return page; err_buf: __free_pages(page, 0); @@ -566,10 +548,10 @@ err_buf: static struct sk_buff *receive_mergeable(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, - unsigned long ctx, + void *buf, + void *ctx, unsigned int len) { - void *buf = mergeable_ctx_to_buf_address(ctx); struct virtio_net_hdr_mrg_rxbuf *hdr = buf; u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); struct page *page = virt_to_head_page(buf); @@ -584,6 +566,8 @@ static struct sk_buff 
*receive_mergeable(struct net_device *dev, xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { struct page *xdp_page; + struct xdp_buff xdp; + void *data; u32 act; /* This happens when rx buffer size is underestimated */ @@ -593,7 +577,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, page, offset, &len); if (!xdp_page) goto err_xdp; - offset = 0; + offset = VIRTIO_XDP_HEADROOM; } else { xdp_page = page; } @@ -606,28 +590,47 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, if (unlikely(hdr->hdr.gso_type)) goto err_xdp; - act = do_xdp_prog(vi, rq, xdp_prog, - page_address(xdp_page) + offset, len); + /* Allow consuming headroom but reserve enough space to push + * the descriptor on if we get an XDP_TX return code. + */ + data = page_address(xdp_page) + offset; + xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len; + xdp.data = data + vi->hdr_len; + xdp.data_end = xdp.data + (len - vi->hdr_len); + act = bpf_prog_run_xdp(xdp_prog, &xdp); + switch (act) { case XDP_PASS: + /* recalculate offset to account for any header + * adjustments. Note other cases do not build an + * skb and avoid using offset + */ + offset = xdp.data - + page_address(xdp_page) - vi->hdr_len; + /* We can only create skb based on xdp_page. */ if (unlikely(xdp_page != page)) { rcu_read_unlock(); put_page(page); head_skb = page_to_skb(vi, rq, xdp_page, - 0, len, PAGE_SIZE); + offset, len, PAGE_SIZE); ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); return head_skb; } break; case XDP_TX: + if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp))) + trace_xdp_exception(vi->dev, xdp_prog, act); ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); if (unlikely(xdp_page != page)) goto err_xdp; rcu_read_unlock(); goto xdp_xmit; - case XDP_DROP: default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(vi->dev, xdp_prog, act); + case XDP_DROP: if (unlikely(xdp_page != page)) __free_pages(xdp_page, 0); ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); @@ -636,7 +639,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } rcu_read_unlock(); - truesize = max(len, mergeable_ctx_to_buf_truesize(ctx)); + if (unlikely(len > (unsigned long)ctx)) { + pr_debug("%s: rx error: len %u exceeds truesize %lu\n", + dev->name, len, (unsigned long)ctx); + dev->stats.rx_length_errors++; + goto err_skb; + } + truesize = (unsigned long)ctx; head_skb = page_to_skb(vi, rq, page, offset, len, truesize); curr_skb = head_skb; @@ -645,7 +654,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, while (--num_buf) { int num_skb_frags; - ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); + buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); if (unlikely(!ctx)) { pr_debug("%s: rx error: %d buffers out of %d missing\n", dev->name, num_buf, @@ -655,8 +664,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_buf; } - buf = mergeable_ctx_to_buf_address(ctx); page = virt_to_head_page(buf); + if (unlikely(len > (unsigned long)ctx)) { + pr_debug("%s: rx error: len %u exceeds truesize %lu\n", + dev->name, len, (unsigned long)ctx); + dev->stats.rx_length_errors++; + goto err_skb; + } + truesize = (unsigned long)ctx; num_skb_frags = skb_shinfo(curr_skb)->nr_frags; if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { @@ -672,7 +687,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, head_skb->truesize += nskb->truesize; num_skb_frags = 0; } - truesize = max(len, mergeable_ctx_to_buf_truesize(ctx)); if (curr_skb != head_skb) { head_skb->data_len += 
len; head_skb->len += len; @@ -697,14 +711,14 @@ err_xdp: err_skb: put_page(page); while (--num_buf) { - ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); - if (unlikely(!ctx)) { + buf = virtqueue_get_buf(rq->vq, &len); + if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers missing\n", dev->name, num_buf); dev->stats.rx_length_errors++; break; } - page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx)); + page = virt_to_head_page(buf); put_page(page); } err_buf: @@ -714,45 +728,40 @@ xdp_xmit: return NULL; } -static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, - void *buf, unsigned int len) +static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, + void *buf, unsigned int len, void **ctx) { struct net_device *dev = vi->dev; - struct virtnet_stats *stats = this_cpu_ptr(vi->stats); struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; + int ret; if (unlikely(len < vi->hdr_len + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); dev->stats.rx_length_errors++; if (vi->mergeable_rx_bufs) { - unsigned long ctx = (unsigned long)buf; - void *base = mergeable_ctx_to_buf_address(ctx); - put_page(virt_to_head_page(base)); + put_page(virt_to_head_page(buf)); } else if (vi->big_packets) { give_pages(rq, buf); } else { - dev_kfree_skb(buf); + put_page(virt_to_head_page(buf)); } - return; + return 0; } if (vi->mergeable_rx_bufs) - skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len); + skb = receive_mergeable(dev, vi, rq, buf, ctx, len); else if (vi->big_packets) skb = receive_big(dev, vi, rq, buf, len); else skb = receive_small(dev, vi, rq, buf, len); if (unlikely(!skb)) - return; + return 0; hdr = skb_vnet_hdr(skb); - u64_stats_update_begin(&stats->rx_syncp); - stats->rx_bytes += skb->len; - stats->rx_packets++; - u64_stats_update_end(&stats->rx_syncp); + ret = skb->len; if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -770,34 +779,36 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, ntohs(skb->protocol), skb->len, skb->pkt_type); napi_gro_receive(&rq->napi, skb); - return; + return ret; frame_err: dev->stats.rx_frame_errors++; dev_kfree_skb(skb); + return 0; } static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) { - struct sk_buff *skb; - struct virtio_net_hdr_mrg_rxbuf *hdr; + struct page_frag *alloc_frag = &rq->alloc_frag; + char *buf; + unsigned int xdp_headroom = virtnet_get_headroom(vi); + int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; int err; - skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp); - if (unlikely(!skb)) + len = SKB_DATA_ALIGN(len) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) return -ENOMEM; - skb_put(skb, GOOD_PACKET_LEN); - - hdr = skb_vnet_hdr(skb); - sg_init_table(rq->sg, 2); - sg_set_buf(rq->sg, hdr, vi->hdr_len); - skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); - - err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); + buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; + get_page(alloc_frag->page); + alloc_frag->offset += len; + sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom, + vi->hdr_len + GOOD_PACKET_LEN); + err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp); if (err < 0) - dev_kfree_skb(skb); + put_page(virt_to_head_page(buf)); return err; } @@ -851,34 +862,38 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, return err; 
} -static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len) +static unsigned int get_mergeable_buf_len(struct receive_queue *rq, + struct ewma_pkt_len *avg_pkt_len) { const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); unsigned int len; len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), - GOOD_PACKET_LEN, PAGE_SIZE - hdr_len); - return ALIGN(len, MERGEABLE_BUFFER_ALIGN); + rq->min_buf_len, PAGE_SIZE - hdr_len); + return ALIGN(len, L1_CACHE_BYTES); } -static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) +static int add_recvbuf_mergeable(struct virtnet_info *vi, + struct receive_queue *rq, gfp_t gfp) { struct page_frag *alloc_frag = &rq->alloc_frag; + unsigned int headroom = virtnet_get_headroom(vi); char *buf; - unsigned long ctx; + void *ctx; int err; unsigned int len, hole; - len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len); - if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) + len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len); + if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp))) return -ENOMEM; buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; - ctx = mergeable_buf_to_ctx(buf, len); + buf += headroom; /* advance address leaving hole at front of pkt */ + ctx = (void *)(unsigned long)len; get_page(alloc_frag->page); - alloc_frag->offset += len; + alloc_frag->offset += len + headroom; hole = alloc_frag->size - alloc_frag->offset; - if (hole < len) { + if (hole < len + headroom) { /* To avoid internal fragmentation, if there is very likely not * enough space for another buffer, add the remaining space to * the current buffer. This extra space is not included in @@ -889,7 +904,7 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) } sg_init_one(rq->sg, buf, len); - err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp); + err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) put_page(virt_to_head_page(buf)); @@ -912,7 +927,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, gfp |= __GFP_COLD; do { if (vi->mergeable_rx_bufs) - err = add_recvbuf_mergeable(rq, gfp); + err = add_recvbuf_mergeable(vi, rq, gfp); else if (vi->big_packets) err = add_recvbuf_big(vi, rq, gfp); else @@ -931,27 +946,44 @@ static void skb_recv_done(struct virtqueue *rvq) struct virtnet_info *vi = rvq->vdev->priv; struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; - /* Schedule NAPI, Suppress further interrupts if successful. */ - if (napi_schedule_prep(&rq->napi)) { - virtqueue_disable_cb(rvq); - __napi_schedule(&rq->napi); - } + virtqueue_napi_schedule(&rq->napi, rvq); } -static void virtnet_napi_enable(struct receive_queue *rq) +static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) { - napi_enable(&rq->napi); + napi_enable(napi); /* If all buffers were filled by other side before we napi_enabled, we - * won't get another interrupt, so process any outstanding packets - * now. virtnet_poll wants re-enable the queue, so we disable here. - * We synchronize against interrupts via NAPI_STATE_SCHED */ - if (napi_schedule_prep(&rq->napi)) { - virtqueue_disable_cb(rq->vq); - local_bh_disable(); - __napi_schedule(&rq->napi); - local_bh_enable(); + * won't get another interrupt, so process any outstanding packets now. + * Call local_bh_enable after to trigger softIRQ processing. 
+ */ + local_bh_disable(); + virtqueue_napi_schedule(napi, vq); + local_bh_enable(); +} + +static void virtnet_napi_tx_enable(struct virtnet_info *vi, + struct virtqueue *vq, + struct napi_struct *napi) +{ + if (!napi->weight) + return; + + /* Tx napi touches cachelines on the cpu handling tx interrupts. Only + * enable the feature if this is likely affine with the transmit path. + */ + if (!vi->affinity_hint_set) { + napi->weight = 0; + return; } + + return virtnet_napi_enable(vq, napi); +} + +static void virtnet_napi_tx_disable(struct napi_struct *napi) +{ + if (napi->weight) + napi_disable(napi); } static void refill_work(struct work_struct *work) @@ -966,7 +998,7 @@ static void refill_work(struct work_struct *work) napi_disable(&rq->napi); still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); - virtnet_napi_enable(rq); + virtnet_napi_enable(rq->vq, &rq->napi); /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. @@ -979,13 +1011,24 @@ static void refill_work(struct work_struct *work) static int virtnet_receive(struct receive_queue *rq, int budget) { struct virtnet_info *vi = rq->vq->vdev->priv; - unsigned int len, received = 0; + unsigned int len, received = 0, bytes = 0; void *buf; + struct virtnet_stats *stats = this_cpu_ptr(vi->stats); - while (received < budget && - (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { - receive_buf(vi, rq, buf, len); - received++; + if (vi->mergeable_rx_bufs) { + void *ctx; + + while (received < budget && + (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { + bytes += receive_buf(vi, rq, buf, len, ctx); + received++; + } + } else { + while (received < budget && + (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { + bytes += receive_buf(vi, rq, buf, len, NULL); + received++; + } } if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { @@ -993,67 +1036,79 @@ static int virtnet_receive(struct receive_queue *rq, int budget) schedule_delayed_work(&vi->refill, 0); } + u64_stats_update_begin(&stats->rx_syncp); + stats->rx_bytes += bytes; + stats->rx_packets += received; + u64_stats_update_end(&stats->rx_syncp); + return received; } -static int virtnet_poll(struct napi_struct *napi, int budget) +static void free_old_xmit_skbs(struct send_queue *sq) { - struct receive_queue *rq = - container_of(napi, struct receive_queue, napi); - unsigned int r, received; + struct sk_buff *skb; + unsigned int len; + struct virtnet_info *vi = sq->vq->vdev->priv; + struct virtnet_stats *stats = this_cpu_ptr(vi->stats); + unsigned int packets = 0; + unsigned int bytes = 0; - received = virtnet_receive(rq, budget); + while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { + pr_debug("Sent skb %p\n", skb); - /* Out of packets? */ - if (received < budget) { - r = virtqueue_enable_cb_prepare(rq->vq); - napi_complete_done(napi, received); - if (unlikely(virtqueue_poll(rq->vq, r)) && - napi_schedule_prep(napi)) { - virtqueue_disable_cb(rq->vq); - __napi_schedule(napi); - } + bytes += skb->len; + packets++; + + dev_kfree_skb_any(skb); } - return received; + /* Avoid overhead when no packets have been processed + * happens when called speculatively from start_xmit. 
+ */ + if (!packets) + return; + + u64_stats_update_begin(&stats->tx_syncp); + stats->tx_bytes += bytes; + stats->tx_packets += packets; + u64_stats_update_end(&stats->tx_syncp); } -#ifdef CONFIG_NET_RX_BUSY_POLL -/* must be called with local_bh_disable()d */ -static int virtnet_busy_poll(struct napi_struct *napi) +static void virtnet_poll_cleantx(struct receive_queue *rq) { - struct receive_queue *rq = - container_of(napi, struct receive_queue, napi); struct virtnet_info *vi = rq->vq->vdev->priv; - int r, received = 0, budget = 4; + unsigned int index = vq2rxq(rq->vq); + struct send_queue *sq = &vi->sq[index]; + struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); - if (!(vi->status & VIRTIO_NET_S_LINK_UP)) - return LL_FLUSH_FAILED; + if (!sq->napi.weight) + return; - if (!napi_schedule_prep(napi)) - return LL_FLUSH_BUSY; + if (__netif_tx_trylock(txq)) { + free_old_xmit_skbs(sq); + __netif_tx_unlock(txq); + } - virtqueue_disable_cb(rq->vq); + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) + netif_tx_wake_queue(txq); +} -again: - received += virtnet_receive(rq, budget); +static int virtnet_poll(struct napi_struct *napi, int budget) +{ + struct receive_queue *rq = + container_of(napi, struct receive_queue, napi); + unsigned int received; - r = virtqueue_enable_cb_prepare(rq->vq); - clear_bit(NAPI_STATE_SCHED, &napi->state); - if (unlikely(virtqueue_poll(rq->vq, r)) && - napi_schedule_prep(napi)) { - virtqueue_disable_cb(rq->vq); - if (received < budget) { - budget -= received; - goto again; - } else { - __napi_schedule(napi); - } - } + virtnet_poll_cleantx(rq); + + received = virtnet_receive(rq, budget); + + /* Out of packets? */ + if (received < budget) + virtqueue_napi_complete(napi, rq->vq, received); return received; } -#endif /* CONFIG_NET_RX_BUSY_POLL */ static int virtnet_open(struct net_device *dev) { @@ -1065,29 +1120,29 @@ static int virtnet_open(struct net_device *dev) /* Make sure we have some buffers: if oom use wq. 
*/ if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); - virtnet_napi_enable(&vi->rq[i]); + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); } return 0; } -static void free_old_xmit_skbs(struct send_queue *sq) +static int virtnet_poll_tx(struct napi_struct *napi, int budget) { - struct sk_buff *skb; - unsigned int len; + struct send_queue *sq = container_of(napi, struct send_queue, napi); struct virtnet_info *vi = sq->vq->vdev->priv; - struct virtnet_stats *stats = this_cpu_ptr(vi->stats); + struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); - while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { - pr_debug("Sent skb %p\n", skb); + __netif_tx_lock(txq, raw_smp_processor_id()); + free_old_xmit_skbs(sq); + __netif_tx_unlock(txq); - u64_stats_update_begin(&stats->tx_syncp); - stats->tx_bytes += skb->len; - stats->tx_packets++; - u64_stats_update_end(&stats->tx_syncp); + virtqueue_napi_complete(napi, sq->vq, 0); - dev_kfree_skb_any(skb); - } + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) + netif_tx_wake_queue(txq); + + return 0; } static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) @@ -1095,7 +1150,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) struct virtio_net_hdr_mrg_rxbuf *hdr; const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; struct virtnet_info *vi = sq->vq->vdev->priv; - unsigned num_sg; + int num_sg; unsigned hdr_len = vi->hdr_len; bool can_push; @@ -1122,11 +1177,16 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) if (can_push) { __skb_push(skb, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); + if (unlikely(num_sg < 0)) + return num_sg; /* Pull header back to avoid skew in tx bytes calculations. */ __skb_pull(skb, hdr_len); } else { sg_set_buf(sq->sg, hdr, hdr_len); - num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; + num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); + if (unlikely(num_sg < 0)) + return num_sg; + num_sg++; } return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); } @@ -1139,10 +1199,14 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) int err; struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); bool kick = !skb->xmit_more; + bool use_napi = sq->napi.weight; /* Free up any pending old buffers before queueing new ones. */ free_old_xmit_skbs(sq); + if (use_napi && kick) + virtqueue_enable_cb_delayed(sq->vq); + /* timestamp packet in software */ skb_tx_timestamp(skb); @@ -1161,8 +1225,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) } /* Don't wait up for transmitted skbs to be freed. */ - skb_orphan(skb); - nf_reset(skb); + if (!use_napi) { + skb_orphan(skb); + nf_reset(skb); + } /* If running out of space, stop queue to avoid getting packets that we * are then unable to transmit. @@ -1176,7 +1242,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) */ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { netif_stop_subqueue(dev, qnum); - if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { + if (!use_napi && + unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { /* More just got used, free them then recheck. 
*/ free_old_xmit_skbs(sq); if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { @@ -1244,10 +1311,9 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p) struct sockaddr *addr; struct scatterlist sg; - addr = kmalloc(sizeof(*addr), GFP_KERNEL); + addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); if (!addr) return -ENOMEM; - memcpy(addr, p, sizeof(*addr)); ret = eth_prepare_mac_addr_change(dev, addr); if (ret) @@ -1281,8 +1347,8 @@ out: return ret; } -static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, - struct rtnl_link_stats64 *tot) +static void virtnet_stats(struct net_device *dev, + struct rtnl_link_stats64 *tot) { struct virtnet_info *vi = netdev_priv(dev); int cpu; @@ -1315,8 +1381,6 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, tot->rx_dropped = dev->stats.rx_dropped; tot->rx_length_errors = dev->stats.rx_length_errors; tot->rx_frame_errors = dev->stats.rx_frame_errors; - - return tot; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1339,7 +1403,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi) rtnl_unlock(); } -static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) +static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) { struct scatterlist sg; struct net_device *dev = vi->dev; @@ -1365,6 +1429,16 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) return 0; } +static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) +{ + int err; + + rtnl_lock(); + err = _virtnet_set_queues(vi, queue_pairs); + rtnl_unlock(); + return err; +} + static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); @@ -1373,8 +1447,10 @@ static int virtnet_close(struct net_device *dev) /* Make sure refill_work doesn't re-enable napi! 
*/ cancel_delayed_work_sync(&vi->refill); - for (i = 0; i < vi->max_queue_pairs; i++) + for (i = 0; i < vi->max_queue_pairs; i++) { napi_disable(&vi->rq[i].napi); + virtnet_napi_tx_disable(&vi->sq[i].napi); + } return 0; } @@ -1617,7 +1693,7 @@ static int virtnet_set_channels(struct net_device *dev, return -EINVAL; get_online_cpus(); - err = virtnet_set_queues(vi, queue_pairs); + err = _virtnet_set_queues(vi, queue_pairs); if (!err) { netif_set_real_num_tx_queues(dev, queue_pairs); netif_set_real_num_rx_queues(dev, queue_pairs); @@ -1643,47 +1719,57 @@ static void virtnet_get_channels(struct net_device *dev, } /* Check if the user is trying to change anything besides speed/duplex */ -static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd) +static bool +virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd) { - struct ethtool_cmd diff1 = *cmd; - struct ethtool_cmd diff2 = {}; + struct ethtool_link_ksettings diff1 = *cmd; + struct ethtool_link_ksettings diff2 = {}; /* cmd is always set so we need to clear it, validate the port type * and also without autonegotiation we can ignore advertising */ - ethtool_cmd_speed_set(&diff1, 0); - diff2.port = PORT_OTHER; - diff1.advertising = 0; - diff1.duplex = 0; - diff1.cmd = 0; + diff1.base.speed = 0; + diff2.base.port = PORT_OTHER; + ethtool_link_ksettings_zero_link_mode(&diff1, advertising); + diff1.base.duplex = 0; + diff1.base.cmd = 0; + diff1.base.link_mode_masks_nwords = 0; - return !memcmp(&diff1, &diff2, sizeof(diff1)); + return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) && + bitmap_empty(diff1.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS) && + bitmap_empty(diff1.link_modes.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS) && + bitmap_empty(diff1.link_modes.lp_advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); } -static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int virtnet_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct virtnet_info *vi = netdev_priv(dev); u32 speed; - speed = ethtool_cmd_speed(cmd); + speed = cmd->base.speed; /* don't allow custom speed and duplex */ if (!ethtool_validate_speed(speed) || - !ethtool_validate_duplex(cmd->duplex) || + !ethtool_validate_duplex(cmd->base.duplex) || !virtnet_validate_ethtool_cmd(cmd)) return -EINVAL; vi->speed = speed; - vi->duplex = cmd->duplex; + vi->duplex = cmd->base.duplex; return 0; } -static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int virtnet_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct virtnet_info *vi = netdev_priv(dev); - ethtool_cmd_speed_set(cmd, vi->speed); - cmd->duplex = vi->duplex; - cmd->port = PORT_OTHER; + cmd->base.speed = vi->speed; + cmd->base.duplex = vi->duplex; + cmd->base.port = PORT_OTHER; return 0; } @@ -1703,11 +1789,95 @@ static const struct ethtool_ops virtnet_ethtool_ops = { .set_channels = virtnet_set_channels, .get_channels = virtnet_get_channels, .get_ts_info = ethtool_op_get_ts_info, - .get_settings = virtnet_get_settings, - .set_settings = virtnet_set_settings, + .get_link_ksettings = virtnet_get_link_ksettings, + .set_link_ksettings = virtnet_set_link_ksettings, }; -static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) +static void virtnet_freeze_down(struct virtio_device *vdev) +{ + struct virtnet_info *vi = vdev->priv; + int i; + + /* Make sure no work handler is accessing the device */ + flush_work(&vi->config_work); + + 
netif_device_detach(vi->dev); + netif_tx_disable(vi->dev); + cancel_delayed_work_sync(&vi->refill); + + if (netif_running(vi->dev)) { + for (i = 0; i < vi->max_queue_pairs; i++) { + napi_disable(&vi->rq[i].napi); + virtnet_napi_tx_disable(&vi->sq[i].napi); + } + } +} + +static int init_vqs(struct virtnet_info *vi); +static void _remove_vq_common(struct virtnet_info *vi); + +static int virtnet_restore_up(struct virtio_device *vdev) +{ + struct virtnet_info *vi = vdev->priv; + int err, i; + + err = init_vqs(vi); + if (err) + return err; + + virtio_device_ready(vdev); + + if (netif_running(vi->dev)) { + for (i = 0; i < vi->curr_queue_pairs; i++) + if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) + schedule_delayed_work(&vi->refill, 0); + + for (i = 0; i < vi->max_queue_pairs; i++) { + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + virtnet_napi_tx_enable(vi, vi->sq[i].vq, + &vi->sq[i].napi); + } + } + + netif_device_attach(vi->dev); + return err; +} + +static int virtnet_reset(struct virtnet_info *vi, int curr_qp, int xdp_qp) +{ + struct virtio_device *dev = vi->vdev; + int ret; + + virtio_config_disable(dev); + dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED; + virtnet_freeze_down(dev); + _remove_vq_common(vi); + + virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); + + ret = virtio_finalize_features(dev); + if (ret) + goto err; + + vi->xdp_queue_pairs = xdp_qp; + ret = virtnet_restore_up(dev); + if (ret) + goto err; + ret = _virtnet_set_queues(vi, curr_qp); + if (ret) + goto err; + + virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); + virtio_config_enable(dev); + return 0; +err: + virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED); + return ret; +} + +static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) { unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); struct virtnet_info *vi = netdev_priv(dev); @@ -1715,25 +1885,21 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) u16 xdp_qp = 0, curr_qp; int i, err; - if (prog && prog->xdp_adjust_head) { - netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n"); - return -EOPNOTSUPP; - } - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) { - netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n"); + NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first"); return -EOPNOTSUPP; } if (vi->mergeable_rx_bufs && !vi->any_header_sg) { - netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n"); + NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); return -EINVAL; } if (dev->mtu > max_sz) { + NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP"); netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); return -EINVAL; } @@ -1744,26 +1910,31 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) /* XDP requires extra queues for XDP_TX */ if (curr_qp + xdp_qp > vi->max_queue_pairs) { + NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available"); netdev_warn(dev, "request %i queues but max is %i\n", curr_qp + xdp_qp, vi->max_queue_pairs); return -ENOMEM; } - err = virtnet_set_queues(vi, curr_qp + xdp_qp); - if (err) { - dev_warn(&dev->dev, "XDP 
Device queue allocation failure.\n"); - return err; - } - if (prog) { prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); - if (IS_ERR(prog)) { - virtnet_set_queues(vi, curr_qp); + if (IS_ERR(prog)) return PTR_ERR(prog); + } + + /* Changing the headroom in buffers is a disruptive operation because + * existing buffers must be flushed and reallocated. This will happen + * when a xdp program is initially added or xdp is disabled by removing + * the xdp program resulting in number of XDP queues changing. + */ + if (vi->xdp_queue_pairs != xdp_qp) { + err = virtnet_reset(vi, curr_qp + xdp_qp, xdp_qp); + if (err) { + dev_warn(&dev->dev, "XDP reset failure.\n"); + goto virtio_reset_err; } } - vi->xdp_queue_pairs = xdp_qp; netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); for (i = 0; i < vi->max_queue_pairs; i++) { @@ -1774,27 +1945,39 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) } return 0; + +virtio_reset_err: + /* On reset error do our best to unwind XDP changes inflight and return + * error up to user space for resolution. The underlying reset hung on + * us so not much we can do here. + */ + if (prog) + bpf_prog_sub(prog, vi->max_queue_pairs - 1); + return err; } -static bool virtnet_xdp_query(struct net_device *dev) +static u32 virtnet_xdp_query(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); + const struct bpf_prog *xdp_prog; int i; for (i = 0; i < vi->max_queue_pairs; i++) { - if (vi->rq[i].xdp_prog) - return true; + xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog); + if (xdp_prog) + return xdp_prog->aux->id; } - return false; + return 0; } static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: - return virtnet_xdp_set(dev, xdp->prog); + return virtnet_xdp_set(dev, xdp->prog, xdp->extack); case XDP_QUERY_PROG: - xdp->prog_attached = virtnet_xdp_query(dev); + xdp->prog_id = virtnet_xdp_query(dev); + xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; @@ -1814,10 +1997,8 @@ static const struct net_device_ops virtnet_netdev = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = virtnet_netpoll, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = virtnet_busy_poll, -#endif .ndo_xdp = virtnet_xdp, + .ndo_features_check = passthru_features_check, }; static void virtnet_config_changed_work(struct work_struct *work) @@ -1866,6 +2047,7 @@ static void virtnet_free_queues(struct virtnet_info *vi) for (i = 0; i < vi->max_queue_pairs; i++) { napi_hash_del(&vi->rq[i].napi); netif_napi_del(&vi->rq[i].napi); + netif_napi_del(&vi->sq[i].napi); } /* We called napi_hash_del() before netif_napi_del(), @@ -1877,12 +2059,11 @@ static void virtnet_free_queues(struct virtnet_info *vi) kfree(vi->sq); } -static void free_receive_bufs(struct virtnet_info *vi) +static void _free_receive_bufs(struct virtnet_info *vi) { struct bpf_prog *old_prog; int i; - rtnl_lock(); for (i = 0; i < vi->max_queue_pairs; i++) { while (vi->rq[i].pages) __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); @@ -1892,6 +2073,12 @@ static void free_receive_bufs(struct virtnet_info *vi) if (old_prog) bpf_prog_put(old_prog); } +} + +static void free_receive_bufs(struct virtnet_info *vi) +{ + rtnl_lock(); + _free_receive_bufs(vi); rtnl_unlock(); } @@ -1905,10 +2092,6 @@ static void free_receive_page_frags(struct virtnet_info *vi) static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) { - /* For small receive mode always use kfree_skb variants */ - if (!vi->mergeable_rx_bufs) - return 
false; - if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) return false; else if (q < vi->curr_queue_pairs) @@ -1937,13 +2120,11 @@ static void free_unused_bufs(struct virtnet_info *vi) while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { if (vi->mergeable_rx_bufs) { - unsigned long ctx = (unsigned long)buf; - void *base = mergeable_ctx_to_buf_address(ctx); - put_page(virt_to_head_page(base)); + put_page(virt_to_head_page(buf)); } else if (vi->big_packets) { give_pages(&vi->rq[i], buf); } else { - dev_kfree_skb(buf); + put_page(virt_to_head_page(buf)); } } } @@ -1960,6 +2141,22 @@ static void virtnet_del_vqs(struct virtnet_info *vi) virtnet_free_queues(vi); } +/* How large should a single buffer be so a queue full of these can fit at + * least one full packet? + * Logic below assumes the mergeable buffer header is used. + */ +static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) +{ + const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); + unsigned int rq_size = virtqueue_get_vring_size(vq); + unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; + unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; + unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); + + return max(max(min_buf_len, hdr_len) - hdr_len, + (unsigned int)GOOD_PACKET_LEN); +} + static int virtnet_find_vqs(struct virtnet_info *vi) { vq_callback_t **callbacks; @@ -1967,6 +2164,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi) int ret = -ENOMEM; int i, total_vqs; const char **names; + bool *ctx; /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by @@ -1985,6 +2183,13 @@ static int virtnet_find_vqs(struct virtnet_info *vi) names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); if (!names) goto err_names; + if (vi->mergeable_rx_bufs) { + ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL); + if (!ctx) + goto err_ctx; + } else { + ctx = NULL; + } /* Parameters for control virtqueue, if any */ if (vi->has_cvq) { @@ -2000,10 +2205,12 @@ static int virtnet_find_vqs(struct virtnet_info *vi) sprintf(vi->sq[i].name, "output.%d", i); names[rxq2vq(i)] = vi->rq[i].name; names[txq2vq(i)] = vi->sq[i].name; + if (ctx) + ctx[rxq2vq(i)] = true; } ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, - names); + names, ctx, NULL); if (ret) goto err_find; @@ -2015,16 +2222,20 @@ static int virtnet_find_vqs(struct virtnet_info *vi) for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].vq = vqs[rxq2vq(i)]; + vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); vi->sq[i].vq = vqs[txq2vq(i)]; } kfree(names); kfree(callbacks); kfree(vqs); + kfree(ctx); return 0; err_find: + kfree(ctx); +err_ctx: kfree(names); err_names: kfree(callbacks); @@ -2050,6 +2261,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) vi->rq[i].pages = NULL; netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, napi_weight); + netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, + napi_tx ? 
napi_weight : 0); sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); @@ -2099,7 +2312,8 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, BUG_ON(queue_index >= vi->max_queue_pairs); avg = &vi->rq[queue_index].mrg_avg_pkt_len; - return sprintf(buf, "%u\n", get_mergeable_buf_len(avg)); + return sprintf(buf, "%u\n", + get_mergeable_buf_len(&vi->rq[queue_index], avg)); } static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = @@ -2153,14 +2367,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev) #define MIN_MTU ETH_MIN_MTU #define MAX_MTU ETH_MAX_MTU -static int virtnet_probe(struct virtio_device *vdev) +static int virtnet_validate(struct virtio_device *vdev) { - int i, err; - struct net_device *dev; - struct virtnet_info *vi; - u16 max_queue_pairs; - int mtu; - if (!vdev->config->get) { dev_err(&vdev->dev, "%s failure: config access disabled\n", __func__); @@ -2170,6 +2378,25 @@ static int virtnet_probe(struct virtio_device *vdev) if (!virtnet_validate_features(vdev)) return -EINVAL; + if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { + int mtu = virtio_cread16(vdev, + offsetof(struct virtio_net_config, + mtu)); + if (mtu < MIN_MTU) + __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); + } + + return 0; +} + +static int virtnet_probe(struct virtio_device *vdev) +{ + int i, err; + struct net_device *dev; + struct virtnet_info *vi; + u16 max_queue_pairs; + int mtu; + /* Find if host supports multiqueue virtio_net device */ err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, struct virtio_net_config, @@ -2285,11 +2512,20 @@ static int virtnet_probe(struct virtio_device *vdev) offsetof(struct virtio_net_config, mtu)); if (mtu < dev->min_mtu) { - __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); - } else { - dev->mtu = mtu; - dev->max_mtu = mtu; + /* Should never trigger: MTU was previously validated + * in virtnet_validate. + */ + dev_err(&vdev->dev, "device MTU appears to have changed " + "it is now %d < %d", mtu, dev->min_mtu); + goto free_stats; } + + dev->mtu = mtu; + dev->max_mtu = mtu; + + /* TODO: size buffers correctly in this case. */ + if (dev->mtu > ETH_DATA_LEN) + vi->big_packets = true; } if (vi->any_header_sg) @@ -2330,9 +2566,7 @@ static int virtnet_probe(struct virtio_device *vdev) goto free_unregister_netdev; } - rtnl_lock(); virtnet_set_queues(vi, vi->curr_queue_pairs); - rtnl_unlock(); /* Assume link up if device can't report link status, otherwise get link status from config. 
*/ @@ -2364,6 +2598,15 @@ free: return err; } +static void _remove_vq_common(struct virtnet_info *vi) +{ + vi->vdev->config->reset(vi->vdev); + free_unused_bufs(vi); + _free_receive_bufs(vi); + free_receive_page_frags(vi); + virtnet_del_vqs(vi); +} + static void remove_vq_common(struct virtnet_info *vi) { vi->vdev->config->reset(vi->vdev); @@ -2399,21 +2642,9 @@ static void virtnet_remove(struct virtio_device *vdev) static int virtnet_freeze(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - int i; virtnet_cpu_notif_remove(vi); - - /* Make sure no work handler is accessing the device */ - flush_work(&vi->config_work); - - netif_device_detach(vi->dev); - cancel_delayed_work_sync(&vi->refill); - - if (netif_running(vi->dev)) { - for (i = 0; i < vi->max_queue_pairs; i++) - napi_disable(&vi->rq[i].napi); - } - + virtnet_freeze_down(vdev); remove_vq_common(vi); return 0; @@ -2422,28 +2653,12 @@ static int virtnet_freeze(struct virtio_device *vdev) static int virtnet_restore(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - int err, i; + int err; - err = init_vqs(vi); + err = virtnet_restore_up(vdev); if (err) return err; - - virtio_device_ready(vdev); - - if (netif_running(vi->dev)) { - for (i = 0; i < vi->curr_queue_pairs; i++) - if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) - schedule_delayed_work(&vi->refill, 0); - - for (i = 0; i < vi->max_queue_pairs; i++) - virtnet_napi_enable(&vi->rq[i]); - } - - netif_device_attach(vi->dev); - - rtnl_lock(); virtnet_set_queues(vi, vi->curr_queue_pairs); - rtnl_unlock(); err = virtnet_cpu_notif_add(vi); if (err) @@ -2488,6 +2703,7 @@ static struct virtio_driver virtio_net_driver = { .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, + .validate = virtnet_validate, .probe = virtnet_probe, .remove = virtnet_remove, .config_changed = virtnet_config_changed,
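
The two helpers added near the top of the diff carry the interrupt-suppression discipline the rx path already used into a form both rx and the new tx NAPI can share. This is an annotated copy of the helpers from the hunks above, with the race they close spelled out in comments:

    static void virtqueue_napi_schedule(struct napi_struct *napi,
                                        struct virtqueue *vq)
    {
            if (napi_schedule_prep(napi)) {
                    /* We own the NAPI instance now; suppress further
                     * interrupts until the poll routine completes.
                     */
                    virtqueue_disable_cb(vq);
                    __napi_schedule(napi);
            }
    }

    static void virtqueue_napi_complete(struct napi_struct *napi,
                                        struct virtqueue *vq, int processed)
    {
            int opaque;

            /* Re-arm the callback first... */
            opaque = virtqueue_enable_cb_prepare(vq);
            /* ...then recheck the ring: a buffer that arrived between the
             * last virtqueue_get_buf() and the re-arm above would otherwise
             * sit unprocessed until some later, unrelated interrupt.
             */
            if (napi_complete_done(napi, processed) &&
                unlikely(virtqueue_poll(vq, opaque)))
                    virtqueue_napi_schedule(napi, vq);
    }

Tx NAPI stays opt-in: netif_tx_napi_add() registers virtnet_poll_tx() with a weight of 0 unless the napi_tx module parameter is set, and virtnet_napi_tx_enable() also forces the weight to 0 when no interrupt affinity hint is set, since a tx NAPI running on a different CPU than the transmit path mostly bounces cachelines. The parameter is 0644, but the weight is latched when the queues are allocated, so writing /sys/module/virtio_net/parameters/napi_tx only affects devices probed afterwards.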
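
With XDP attached, receive_small() now backs each packet with a page-frag buffer laid out so that build_skb() can be used directly and bpf_xdp_adjust_head() has real headroom to grow into. The following is a condensed, reordered sketch of the offsets computed in the add_recvbuf_small() and receive_small() hunks; the diagram is an illustration, not code from the patch:

    /* buf
     *  |- VIRTNET_RX_PAD          NET_IP_ALIGN + NET_SKB_PAD
     *  |- VIRTIO_XDP_HEADROOM     256 bytes, present only while XDP is on
     *  |- virtio-net header       vi->hdr_len, written by the device
     *  |- packet data             up to GOOD_PACKET_LEN
     *  |- skb_shared_info         tailroom required by build_skb()
     */
    xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
    xdp.data = xdp.data_hard_start + xdp_headroom;
    xdp.data_end = xdp.data + len;
    orig_data = xdp.data;
    act = bpf_prog_run_xdp(xdp_prog, &xdp);

    /* On XDP_PASS the program may have moved the packet start with
     * bpf_xdp_adjust_head(); delta shifts the skb to match. Note that
     * data_hard_start stops hdr_len short of the padding, which is what
     * lets virtnet_xdp_xmit() push the virtio-net header back on for
     * XDP_TX even when the program consumed all 256 bytes of headroom.
     */
    delta = orig_data - xdp.data;
    skb = build_skb(buf, buflen);
    skb_reserve(skb, headroom - delta);
    skb_put(skb, len + delta);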
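
The mergeable-buffer rework drops the old pointer-tagging scheme, where truesize was packed into the low bits of a MERGEABLE_BUFFER_ALIGN-aligned address, in favor of the opaque per-buffer context that the virtio core now threads through rx for the queues flagged in the ctx array passed to find_vqs(). A minimal sketch of the new round trip, condensed from the add_recvbuf_mergeable() and receive_mergeable() hunks:

    /* Post: the buffer's truesize travels as the context "pointer", so
     * the buffer address itself needs no special alignment.
     */
    len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len);
    ctx = (void *)(unsigned long)len;
    sg_init_one(rq->sg, buf, len);
    err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);

    /* Complete: len is what the device actually wrote, ctx returns the
     * truesize promised at post time; a device writing past it is a bug.
     */
    buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
    truesize = (unsigned long)ctx;
    if (unlikely(len > truesize)) {
            dev->stats.rx_length_errors++;
            goto err_skb;
    }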
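
mergeable_min_buf_len() gives get_mergeable_buf_len() a floor that scales with the ring: buffers must be large enough that a whole ring of them can hold at least one maximum-size packet, no matter how small the EWMA of recent packet sizes gets. Worked numbers, assuming a hypothetical 8-entry vring with guest TSO negotiated (big_packets, so packet_len = IP_MAX_MTU = 65535):

    /* hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf) = 12          */
    buf_len     = 12 + 14 + 4 + 65535;      /* hdr + ETH_HLEN + VLAN_HLEN
                                             * + packet_len = 65565    */
    min_buf_len = DIV_ROUND_UP(65565, 8);   /* = 8196 bytes per buffer */
    /* Strip the header share again, but never go below GOOD_PACKET_LEN
     * (14 + 4 + 1500 = 1518):  max(max(8196, 12) - 12, 1518) = 8184.
     */

With a typical 256-entry ring the same computation bottoms out at GOOD_PACKET_LEN (DIV_ROUND_UP(65565, 256) - 12 = 245, clamped to 1518), so common configurations keep their old buffer sizes.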