Diffstat (limited to 'drivers/net/virtio_net.c'):
 drivers/net/virtio_net.c | 415
 1 file changed, 270 insertions(+), 145 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f36584616e7d..9320d96a1632 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -29,13 +29,15 @@
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
+#include <net/route.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
-static bool csum = true, gso = true;
+static bool csum = true, gso = true, napi_tx;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
+module_param(napi_tx, bool, 0644);
/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
@@ -53,17 +55,6 @@ module_param(gso, bool, 0444);
*/
DECLARE_EWMA(pkt_len, 0, 64)
-/* With mergeable buffers we align buffer address and use the low bits to
- * encode its true size. Buffer size is up to 1 page so we need to align to
- * square root of page size to ensure we reserve enough bits to encode the true
- * size.
- */
-#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
-
-/* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
- 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
-
#define VIRTNET_DRIVER_VERSION "1.0.0"
struct virtnet_stats {
@@ -86,6 +77,8 @@ struct send_queue {
/* Name of the send queue: output.$index */
char name[40];
+
+ struct napi_struct napi;
};
/* Internal representation of a receive virtqueue */
@@ -109,6 +102,9 @@ struct receive_queue {
/* RX: fragments + linear part + virtio header */
struct scatterlist sg[MAX_SKB_FRAGS + 2];
+ /* Min single buffer size for mergeable buffers case. */
+ unsigned int min_buf_len;
+
/* Name of this receive queue: input.$index */
char name[40];
};
@@ -239,33 +235,39 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p;
}
-static void skb_xmit_done(struct virtqueue *vq)
+static void virtqueue_napi_schedule(struct napi_struct *napi,
+ struct virtqueue *vq)
{
- struct virtnet_info *vi = vq->vdev->priv;
-
- /* Suppress further interrupts. */
- virtqueue_disable_cb(vq);
-
- /* We were probably waiting for more output buffers. */
- netif_wake_subqueue(vi->dev, vq2txq(vq));
+ if (napi_schedule_prep(napi)) {
+ virtqueue_disable_cb(vq);
+ __napi_schedule(napi);
+ }
}
-static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
+static void virtqueue_napi_complete(struct napi_struct *napi,
+ struct virtqueue *vq, int processed)
{
- unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);
- return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
+ int opaque;
+
+ opaque = virtqueue_enable_cb_prepare(vq);
+ if (napi_complete_done(napi, processed) &&
+ unlikely(virtqueue_poll(vq, opaque)))
+ virtqueue_napi_schedule(napi, vq);
}
-static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
+static void skb_xmit_done(struct virtqueue *vq)
{
- return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
+ struct virtnet_info *vi = vq->vdev->priv;
+ struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
-}
+ /* Suppress further interrupts. */
+ virtqueue_disable_cb(vq);
-static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
-{
- unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;
- return (unsigned long)buf | (size - 1);
+ if (napi->weight)
+ virtqueue_napi_schedule(napi, vq);
+ else
+ /* We were probably waiting for more output buffers. */
+ netif_wake_subqueue(vi->dev, vq2txq(vq));
}
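/*
 * Illustrative sketch (not part of the patch) of how the two helpers above
 * are meant to pair up: the interrupt callback only disables further
 * callbacks and schedules NAPI, while the poll routine finishes with
 * virtqueue_napi_complete(), which re-arms callbacks and reschedules itself
 * if buffers slipped in after the final check.  "struct my_queue",
 * vq_to_my_queue() and my_process_buffers() are hypothetical names.
 */
struct my_queue {
	struct virtqueue *vq;
	struct napi_struct napi;
};

static void my_intr(struct virtqueue *vq)
{
	struct my_queue *q = vq_to_my_queue(vq);	/* hypothetical lookup */

	virtqueue_napi_schedule(&q->napi, vq);
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_queue *q = container_of(napi, struct my_queue, napi);
	int done = my_process_buffers(q, budget);	/* hypothetical work loop */

	if (done < budget)
		virtqueue_napi_complete(napi, q->vq, done);
	return done;
}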
/* Called from bottom half context */
@@ -511,15 +513,13 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
while (--*num_buf) {
unsigned int buflen;
- unsigned long ctx;
void *buf;
int off;
- ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
- if (unlikely(!ctx))
+ buf = virtqueue_get_buf(rq->vq, &buflen);
+ if (unlikely(!buf))
goto err_buf;
- buf = mergeable_ctx_to_buf_address(ctx);
p = virt_to_head_page(buf);
off = buf - page_address(p);
@@ -548,10 +548,10 @@ err_buf:
static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
- unsigned long ctx,
+ void *buf,
+ void *ctx,
unsigned int len)
{
- void *buf = mergeable_ctx_to_buf_address(ctx);
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
struct page *page = virt_to_head_page(buf);
@@ -639,7 +639,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
rcu_read_unlock();
- truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
+ if (unlikely(len > (unsigned long)ctx)) {
+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+ dev->name, len, (unsigned long)ctx);
+ dev->stats.rx_length_errors++;
+ goto err_skb;
+ }
+ truesize = (unsigned long)ctx;
head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
curr_skb = head_skb;
@@ -648,7 +654,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
while (--num_buf) {
int num_skb_frags;
- ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
+ buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
if (unlikely(!ctx)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, num_buf,
@@ -658,8 +664,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto err_buf;
}
- buf = mergeable_ctx_to_buf_address(ctx);
page = virt_to_head_page(buf);
+ if (unlikely(len > (unsigned long)ctx)) {
+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+ dev->name, len, (unsigned long)ctx);
+ dev->stats.rx_length_errors++;
+ goto err_skb;
+ }
+ truesize = (unsigned long)ctx;
num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
@@ -675,7 +687,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
head_skb->truesize += nskb->truesize;
num_skb_frags = 0;
}
- truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
if (curr_skb != head_skb) {
head_skb->data_len += len;
head_skb->len += len;
@@ -700,14 +711,14 @@ err_xdp:
err_skb:
put_page(page);
while (--num_buf) {
- ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
- if (unlikely(!ctx)) {
+ buf = virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
dev->name, num_buf);
dev->stats.rx_length_errors++;
break;
}
- page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
+ page = virt_to_head_page(buf);
put_page(page);
}
err_buf:
@@ -718,7 +729,7 @@ xdp_xmit:
}
static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
- void *buf, unsigned int len)
+ void *buf, unsigned int len, void **ctx)
{
struct net_device *dev = vi->dev;
struct sk_buff *skb;
@@ -729,9 +740,7 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
if (vi->mergeable_rx_bufs) {
- unsigned long ctx = (unsigned long)buf;
- void *base = mergeable_ctx_to_buf_address(ctx);
- put_page(virt_to_head_page(base));
+ put_page(virt_to_head_page(buf));
} else if (vi->big_packets) {
give_pages(rq, buf);
} else {
@@ -741,7 +750,7 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
}
if (vi->mergeable_rx_bufs)
- skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
+ skb = receive_mergeable(dev, vi, rq, buf, ctx, len);
else if (vi->big_packets)
skb = receive_big(dev, vi, rq, buf, len);
else
@@ -853,14 +862,15 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
return err;
}
-static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
+static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
+ struct ewma_pkt_len *avg_pkt_len)
{
const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
unsigned int len;
len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
- GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
- return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
+ rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len);
+ return ALIGN(len, L1_CACHE_BYTES);
}
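/*
 * Worked example for get_mergeable_buf_len() above, with assumed values not
 * taken from the patch: hdr_len = 12, rq->min_buf_len = 12, PAGE_SIZE = 4096,
 * L1_CACHE_BYTES = 64, and an EWMA average packet length of 256 bytes:
 *
 *	len = 12 + clamp(256, 12 - 12, 4096 - 12) = 268
 *	ALIGN(268, 64) = 320 bytes per posted receive buffer
 *
 * i.e. the buffer size tracks the observed packet size, bounded below by the
 * per-queue minimum computed in mergeable_min_buf_len() later in this patch,
 * and is now aligned to a cache line instead of MERGEABLE_BUFFER_ALIGN.
 */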
static int add_recvbuf_mergeable(struct virtnet_info *vi,
@@ -869,17 +879,17 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
struct page_frag *alloc_frag = &rq->alloc_frag;
unsigned int headroom = virtnet_get_headroom(vi);
char *buf;
- unsigned long ctx;
+ void *ctx;
int err;
unsigned int len, hole;
- len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
+ len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len);
if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp)))
return -ENOMEM;
buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
buf += headroom; /* advance address leaving hole at front of pkt */
- ctx = mergeable_buf_to_ctx(buf, len);
+ ctx = (void *)(unsigned long)len;
get_page(alloc_frag->page);
alloc_frag->offset += len + headroom;
hole = alloc_frag->size - alloc_frag->offset;
@@ -894,7 +904,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
}
sg_init_one(rq->sg, buf, len);
- err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
+ err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0)
put_page(virt_to_head_page(buf));
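/*
 * Sketch of the new per-buffer context convention used above (helper names
 * are hypothetical, not part of the patch): instead of stealing low address
 * bits via MERGEABLE_BUFFER_ALIGN, the opaque ctx token handed to
 * virtqueue_add_inbuf_ctx() is simply the buffer's truesize cast to a
 * pointer, and receive_mergeable() casts it straight back.
 */
static inline void *truesize_to_ctx(unsigned int truesize)
{
	return (void *)(unsigned long)truesize;
}

static inline unsigned int ctx_to_truesize(void *ctx)
{
	return (unsigned int)(unsigned long)ctx;
}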
@@ -936,27 +946,44 @@ static void skb_recv_done(struct virtqueue *rvq)
struct virtnet_info *vi = rvq->vdev->priv;
struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
- /* Schedule NAPI, Suppress further interrupts if successful. */
- if (napi_schedule_prep(&rq->napi)) {
- virtqueue_disable_cb(rvq);
- __napi_schedule(&rq->napi);
- }
+ virtqueue_napi_schedule(&rq->napi, rvq);
}
-static void virtnet_napi_enable(struct receive_queue *rq)
+static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
- napi_enable(&rq->napi);
+ napi_enable(napi);
/* If all buffers were filled by other side before we napi_enabled, we
- * won't get another interrupt, so process any outstanding packets
- * now. virtnet_poll wants re-enable the queue, so we disable here.
- * We synchronize against interrupts via NAPI_STATE_SCHED */
- if (napi_schedule_prep(&rq->napi)) {
- virtqueue_disable_cb(rq->vq);
- local_bh_disable();
- __napi_schedule(&rq->napi);
- local_bh_enable();
+ * won't get another interrupt, so process any outstanding packets now.
+ * Call local_bh_enable after to trigger softIRQ processing.
+ */
+ local_bh_disable();
+ virtqueue_napi_schedule(napi, vq);
+ local_bh_enable();
+}
+
+static void virtnet_napi_tx_enable(struct virtnet_info *vi,
+ struct virtqueue *vq,
+ struct napi_struct *napi)
+{
+ if (!napi->weight)
+ return;
+
+ /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
+ * enable the feature if this is likely affine with the transmit path.
+ */
+ if (!vi->affinity_hint_set) {
+ napi->weight = 0;
+ return;
}
+
+ return virtnet_napi_enable(vq, napi);
+}
+
+static void virtnet_napi_tx_disable(struct napi_struct *napi)
+{
+ if (napi->weight)
+ napi_disable(napi);
}
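/*
 * Note on the helpers above: a napi weight of 0 doubles as the "tx napi
 * disabled" flag.  The weight is seeded from the new napi_tx module parameter
 * when the queues are allocated (see the netif_tx_napi_add() call later in
 * this patch), and virtnet_napi_tx_enable() additionally clears it when no
 * CPU affinity hint is set, falling back to the old interrupt-driven cleanup.
 */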
static void refill_work(struct work_struct *work)
@@ -971,7 +998,7 @@ static void refill_work(struct work_struct *work)
napi_disable(&rq->napi);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable(rq);
+ virtnet_napi_enable(rq->vq, &rq->napi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again.
@@ -988,10 +1015,20 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
void *buf;
struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
- while (received < budget &&
- (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
- bytes += receive_buf(vi, rq, buf, len);
- received++;
+ if (vi->mergeable_rx_bufs) {
+ void *ctx;
+
+ while (received < budget &&
+ (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
+ bytes += receive_buf(vi, rq, buf, len, ctx);
+ received++;
+ }
+ } else {
+ while (received < budget &&
+ (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+ bytes += receive_buf(vi, rq, buf, len, NULL);
+ received++;
+ }
}
if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
@@ -1007,25 +1044,68 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
return received;
}
+static void free_old_xmit_skbs(struct send_queue *sq)
+{
+ struct sk_buff *skb;
+ unsigned int len;
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
+ unsigned int packets = 0;
+ unsigned int bytes = 0;
+
+ while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ pr_debug("Sent skb %p\n", skb);
+
+ bytes += skb->len;
+ packets++;
+
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Avoid overhead when no packets have been processed
+ * happens when called speculatively from start_xmit.
+ */
+ if (!packets)
+ return;
+
+ u64_stats_update_begin(&stats->tx_syncp);
+ stats->tx_bytes += bytes;
+ stats->tx_packets += packets;
+ u64_stats_update_end(&stats->tx_syncp);
+}
+
+static void virtnet_poll_cleantx(struct receive_queue *rq)
+{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ unsigned int index = vq2rxq(rq->vq);
+ struct send_queue *sq = &vi->sq[index];
+ struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
+
+ if (!sq->napi.weight)
+ return;
+
+ if (__netif_tx_trylock(txq)) {
+ free_old_xmit_skbs(sq);
+ __netif_tx_unlock(txq);
+ }
+
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ netif_tx_wake_queue(txq);
+}
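/*
 * Descriptive note on virtnet_poll_cleantx() above (reasoning inferred, not
 * stated in the patch): rx NAPI opportunistically reaps completed tx buffers
 * for the same-index send queue.  __netif_tx_trylock() is used rather than a
 * blocking lock so the rx path never spins against a concurrent start_xmit()
 * on another CPU; if the lock is busy, cleanup simply happens on the next tx
 * interrupt, tx NAPI poll, or transmit.
 */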
+
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
container_of(napi, struct receive_queue, napi);
- unsigned int r, received;
+ unsigned int received;
+
+ virtnet_poll_cleantx(rq);
received = virtnet_receive(rq, budget);
/* Out of packets? */
- if (received < budget) {
- r = virtqueue_enable_cb_prepare(rq->vq);
- if (napi_complete_done(napi, received)) {
- if (unlikely(virtqueue_poll(rq->vq, r)) &&
- napi_schedule_prep(napi)) {
- virtqueue_disable_cb(rq->vq);
- __napi_schedule(napi);
- }
- }
- }
+ if (received < budget)
+ virtqueue_napi_complete(napi, rq->vq, received);
return received;
}
@@ -1040,40 +1120,29 @@ static int virtnet_open(struct net_device *dev)
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
- virtnet_napi_enable(&vi->rq[i]);
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
}
return 0;
}
-static void free_old_xmit_skbs(struct send_queue *sq)
+static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
- struct sk_buff *skb;
- unsigned int len;
+ struct send_queue *sq = container_of(napi, struct send_queue, napi);
struct virtnet_info *vi = sq->vq->vdev->priv;
- struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
- unsigned int packets = 0;
- unsigned int bytes = 0;
-
- while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- pr_debug("Sent skb %p\n", skb);
+ struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
- bytes += skb->len;
- packets++;
+ __netif_tx_lock(txq, raw_smp_processor_id());
+ free_old_xmit_skbs(sq);
+ __netif_tx_unlock(txq);
- dev_kfree_skb_any(skb);
- }
+ virtqueue_napi_complete(napi, sq->vq, 0);
- /* Avoid overhead when no packets have been processed
- * happens when called speculatively from start_xmit.
- */
- if (!packets)
- return;
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ netif_tx_wake_queue(txq);
- u64_stats_update_begin(&stats->tx_syncp);
- stats->tx_bytes += bytes;
- stats->tx_packets += packets;
- u64_stats_update_end(&stats->tx_syncp);
+ return 0;
}
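/*
 * Note (inferred): virtnet_poll_tx() completes after a single pass and
 * reports 0 packets to NAPI, since tx completions are not accounted against
 * the rx-oriented budget; virtqueue_napi_complete() reschedules the poll if
 * more used buffers showed up while callbacks were being re-enabled.
 */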
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
@@ -1125,10 +1194,14 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
int err;
struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
bool kick = !skb->xmit_more;
+ bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(sq);
+ if (use_napi && kick)
+ virtqueue_enable_cb_delayed(sq->vq);
+
/* timestamp packet in software */
skb_tx_timestamp(skb);
@@ -1147,8 +1220,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Don't wait up for transmitted skbs to be freed. */
- skb_orphan(skb);
- nf_reset(skb);
+ if (!use_napi) {
+ skb_orphan(skb);
+ nf_reset(skb);
+ }
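/*
 * Note (inferred): with tx napi the skb is no longer orphaned at transmit
 * time, so it stays charged to the sending socket until the tx completion
 * frees it, preserving socket send-buffer backpressure that skb_orphan()
 * used to defeat.
 */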
/* If running out of space, stop queue to avoid getting packets that we
* are then unable to transmit.
@@ -1162,7 +1237,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
netif_stop_subqueue(dev, qnum);
- if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+ if (!use_napi &&
+ unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
free_old_xmit_skbs(sq);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
@@ -1366,8 +1442,10 @@ static int virtnet_close(struct net_device *dev)
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
napi_disable(&vi->rq[i].napi);
+ virtnet_napi_tx_disable(&vi->sq[i].napi);
+ }
return 0;
}
@@ -1636,47 +1714,57 @@ static void virtnet_get_channels(struct net_device *dev,
}
/* Check if the user is trying to change anything besides speed/duplex */
-static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd)
+static bool
+virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd)
{
- struct ethtool_cmd diff1 = *cmd;
- struct ethtool_cmd diff2 = {};
+ struct ethtool_link_ksettings diff1 = *cmd;
+ struct ethtool_link_ksettings diff2 = {};
/* cmd is always set so we need to clear it, validate the port type
* and also without autonegotiation we can ignore advertising
*/
- ethtool_cmd_speed_set(&diff1, 0);
- diff2.port = PORT_OTHER;
- diff1.advertising = 0;
- diff1.duplex = 0;
- diff1.cmd = 0;
+ diff1.base.speed = 0;
+ diff2.base.port = PORT_OTHER;
+ ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
+ diff1.base.duplex = 0;
+ diff1.base.cmd = 0;
+ diff1.base.link_mode_masks_nwords = 0;
- return !memcmp(&diff1, &diff2, sizeof(diff1));
+ return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) &&
+ bitmap_empty(diff1.link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS) &&
+ bitmap_empty(diff1.link_modes.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS) &&
+ bitmap_empty(diff1.link_modes.lp_advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
}
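/*
 * Example of what the check above permits (assumed invocation, for
 * illustration): "ethtool -s eth0 speed 10000 duplex full" only touches
 * base.speed and base.duplex, so it passes and the values are recorded by
 * virtnet_set_link_ksettings(); a request that also changes e.g. the port
 * type or advertised link modes fails the memcmp/bitmap_empty checks and
 * returns -EINVAL.
 */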
-static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int virtnet_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct virtnet_info *vi = netdev_priv(dev);
u32 speed;
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
/* don't allow custom speed and duplex */
if (!ethtool_validate_speed(speed) ||
- !ethtool_validate_duplex(cmd->duplex) ||
+ !ethtool_validate_duplex(cmd->base.duplex) ||
!virtnet_validate_ethtool_cmd(cmd))
return -EINVAL;
vi->speed = speed;
- vi->duplex = cmd->duplex;
+ vi->duplex = cmd->base.duplex;
return 0;
}
-static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int virtnet_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct virtnet_info *vi = netdev_priv(dev);
- ethtool_cmd_speed_set(cmd, vi->speed);
- cmd->duplex = vi->duplex;
- cmd->port = PORT_OTHER;
+ cmd->base.speed = vi->speed;
+ cmd->base.duplex = vi->duplex;
+ cmd->base.port = PORT_OTHER;
return 0;
}
@@ -1696,8 +1784,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_channels = virtnet_set_channels,
.get_channels = virtnet_get_channels,
.get_ts_info = ethtool_op_get_ts_info,
- .get_settings = virtnet_get_settings,
- .set_settings = virtnet_set_settings,
+ .get_link_ksettings = virtnet_get_link_ksettings,
+ .set_link_ksettings = virtnet_set_link_ksettings,
};
static void virtnet_freeze_down(struct virtio_device *vdev)
@@ -1712,8 +1800,10 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
cancel_delayed_work_sync(&vi->refill);
if (netif_running(vi->dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
napi_disable(&vi->rq[i].napi);
+ virtnet_napi_tx_disable(&vi->sq[i].napi);
+ }
}
}
@@ -1736,8 +1826,11 @@ static int virtnet_restore_up(struct virtio_device *vdev)
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
- for (i = 0; i < vi->max_queue_pairs; i++)
- virtnet_napi_enable(&vi->rq[i]);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+ &vi->sq[i].napi);
+ }
}
netif_device_attach(vi->dev);
@@ -1754,7 +1847,6 @@ static int virtnet_reset(struct virtnet_info *vi, int curr_qp, int xdp_qp)
virtnet_freeze_down(dev);
_remove_vq_common(vi);
- dev->config->reset(dev);
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
@@ -1778,7 +1870,8 @@ err:
return ret;
}
-static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
+static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
struct virtnet_info *vi = netdev_priv(dev);
@@ -1790,16 +1883,17 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) {
- netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n");
+ NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first");
return -EOPNOTSUPP;
}
if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
- netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n");
+ NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
return -EINVAL;
}
if (dev->mtu > max_sz) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
return -EINVAL;
}
@@ -1810,6 +1904,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
/* XDP requires extra queues for XDP_TX */
if (curr_qp + xdp_qp > vi->max_queue_pairs) {
+ NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
netdev_warn(dev, "request %i queues but max is %i\n",
curr_qp + xdp_qp, vi->max_queue_pairs);
return -ENOMEM;
@@ -1871,7 +1966,7 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
- return virtnet_xdp_set(dev, xdp->prog);
+ return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
case XDP_QUERY_PROG:
xdp->prog_attached = virtnet_xdp_query(dev);
return 0;
@@ -1942,6 +2037,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
napi_hash_del(&vi->rq[i].napi);
netif_napi_del(&vi->rq[i].napi);
+ netif_napi_del(&vi->sq[i].napi);
}
/* We called napi_hash_del() before netif_napi_del(),
@@ -2014,9 +2110,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
if (vi->mergeable_rx_bufs) {
- unsigned long ctx = (unsigned long)buf;
- void *base = mergeable_ctx_to_buf_address(ctx);
- put_page(virt_to_head_page(base));
+ put_page(virt_to_head_page(buf));
} else if (vi->big_packets) {
give_pages(&vi->rq[i], buf);
} else {
@@ -2037,6 +2131,21 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
virtnet_free_queues(vi);
}
+/* How large should a single buffer be so a queue full of these can fit at
+ * least one full packet?
+ * Logic below assumes the mergeable buffer header is used.
+ */
+static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
+{
+ const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ unsigned int rq_size = virtqueue_get_vring_size(vq);
+ unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
+ unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
+ unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
+
+ return max(min_buf_len, hdr_len);
+}
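/*
 * Worked example for mergeable_min_buf_len() above (assumed values, not from
 * the patch): hdr_len = 12, ETH_HLEN + VLAN_HLEN = 18, and a 256-entry vring.
 *
 *   MTU 1500 (no big packets):        buf_len = 12 + 18 + 1500 = 1530
 *                                     DIV_ROUND_UP(1530, 256)  = 6
 *                                     min_buf_len = max(6, 12) = 12
 *
 *   big_packets (IP_MAX_MTU = 65535): buf_len = 12 + 18 + 65535 = 65565
 *                                     DIV_ROUND_UP(65565, 256)  = 257
 *                                     min_buf_len = max(257, 12) = 257
 *
 * so even a queue filled entirely with minimum-sized buffers can hold one
 * maximum-sized packet, which is what the comment above requires.
 */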
+
static int virtnet_find_vqs(struct virtnet_info *vi)
{
vq_callback_t **callbacks;
@@ -2044,6 +2153,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
int ret = -ENOMEM;
int i, total_vqs;
const char **names;
+ bool *ctx;
/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
* possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
@@ -2062,6 +2172,13 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
if (!names)
goto err_names;
+ if (vi->mergeable_rx_bufs) {
+ ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ goto err_ctx;
+ } else {
+ ctx = NULL;
+ }
/* Parameters for control virtqueue, if any */
if (vi->has_cvq) {
@@ -2077,10 +2194,12 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
sprintf(vi->sq[i].name, "output.%d", i);
names[rxq2vq(i)] = vi->rq[i].name;
names[txq2vq(i)] = vi->sq[i].name;
+ if (ctx)
+ ctx[rxq2vq(i)] = true;
}
ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
- names, NULL);
+ names, ctx, NULL);
if (ret)
goto err_find;
@@ -2092,6 +2211,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
vi->rq[i].vq = vqs[rxq2vq(i)];
+ vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
vi->sq[i].vq = vqs[txq2vq(i)];
}
@@ -2102,6 +2222,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
return 0;
err_find:
+ kfree(ctx);
+err_ctx:
kfree(names);
err_names:
kfree(callbacks);
@@ -2127,6 +2249,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
vi->rq[i].pages = NULL;
netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
napi_weight);
+ netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
+ napi_tx ? napi_weight : 0);
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
@@ -2176,7 +2300,8 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
BUG_ON(queue_index >= vi->max_queue_pairs);
avg = &vi->rq[queue_index].mrg_avg_pkt_len;
- return sprintf(buf, "%u\n", get_mergeable_buf_len(avg));
+ return sprintf(buf, "%u\n",
+ get_mergeable_buf_len(&vi->rq[queue_index], avg));
}
static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =