Diffstat (limited to 'drivers/net/ethernet/sun/sunvnet_common.c')
-rw-r--r--  drivers/net/ethernet/sun/sunvnet_common.c | 173
1 file changed, 101 insertions(+), 72 deletions(-)
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 8878b75d68b4..9e86833249d4 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1,7 +1,7 @@
/* sunvnet.c: Sun LDOM Virtual Network Driver.
*
* Copyright (C) 2007, 2008 David S. Miller <[email protected]>
- * Copyright (C) 2016 Oracle. All rights reserved.
+ * Copyright (C) 2016-2017 Oracle. All rights reserved.
*/
#include <linux/module.h>
@@ -37,8 +37,12 @@
*/
#define VNET_MAX_RETRIES 10
+MODULE_AUTHOR("David S. Miller ([email protected])");
+MODULE_DESCRIPTION("Sun LDOM virtual network support library");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.1");
+
static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
-static void vnet_port_reset(struct vnet_port *port);
static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
@@ -181,6 +185,7 @@ static int handle_attr_info(struct vio_driver_state *vio,
} else {
pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
pkt->ipv4_lso_maxlen = 0;
+ port->tsolen = 0;
}
/* for version >= 1.6, ACK packet mode we support */
@@ -404,8 +409,12 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
+ if (unlikely(is_multicast_ether_addr(eth_hdr(skb)->h_dest)))
+ dev->stats.multicast++;
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
+ port->stats.rx_packets++;
+ port->stats.rx_bytes += len;
napi_gro_receive(&port->napi, skb);
return 0;
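
The hunk above adds RX accounting: multicast frames are detected from the destination MAC and counted, and the netdev totals are mirrored into new per-port counters. A minimal sketch of the idiom, assuming the per-port stats struct this patch introduces elsewhere (vnet_rx_account is a hypothetical name, not a function in this driver):

    /* count one received frame against both the netdev and the port */
    static void vnet_rx_account(struct vnet_port *port, struct sk_buff *skb,
                                unsigned int len)
    {
            struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);

            /* eth_hdr() is valid here because eth_type_trans() already ran */
            if (unlikely(is_multicast_ether_addr(eth_hdr(skb)->h_dest)))
                    dev->stats.multicast++;

            dev->stats.rx_packets++;
            dev->stats.rx_bytes += len;
            port->stats.rx_packets++;   /* per-port mirror of the netdev totals */
            port->stats.rx_bytes += len;
    }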
@@ -714,12 +723,8 @@ static void maybe_tx_wakeup(struct vnet_port *port)
txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
port->q_index);
__netif_tx_lock(txq, smp_processor_id());
- if (likely(netif_tx_queue_stopped(txq))) {
- struct vio_dring_state *dr;
-
- dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ if (likely(netif_tx_queue_stopped(txq)))
netif_tx_wake_queue(txq);
- }
__netif_tx_unlock(txq);
}
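
This hunk only drops a dring pointer the wakeup never used; what remains is the stock lock-and-recheck wake idiom. A sketch, assuming q_index names a valid TX queue:

    static void example_tx_wakeup(struct net_device *dev, int q_index)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, q_index);

            /* the per-queue lock orders this test against the xmit path */
            __netif_tx_lock(txq, smp_processor_id());
            if (likely(netif_tx_queue_stopped(txq)))
                    netif_tx_wake_queue(txq);
            __netif_tx_unlock(txq);
    }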
@@ -737,41 +742,53 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
struct vio_driver_state *vio = &port->vio;
int tx_wakeup, err;
int npkts = 0;
- int event = (port->rx_event & LDC_EVENT_RESET);
-
-ldc_ctrl:
- if (unlikely(event == LDC_EVENT_RESET ||
- event == LDC_EVENT_UP)) {
- vio_link_state_change(vio, event);
-
- if (event == LDC_EVENT_RESET) {
- vnet_port_reset(port);
- vio_port_up(vio);
-
- /* If the device is running but its tx queue was
- * stopped (due to flow control), restart it.
- * This is necessary since vnet_port_reset()
- * clears the tx drings and thus we may never get
- * back a VIO_TYPE_DATA ACK packet - which is
- * the normal mechanism to restart the tx queue.
- */
- if (netif_running(dev))
- maybe_tx_wakeup(port);
+
+ /* we don't expect any other bits */
+ BUG_ON(port->rx_event & ~(LDC_EVENT_DATA_READY |
+ LDC_EVENT_RESET |
+ LDC_EVENT_UP));
+
+ /* RESET takes precedence over any other event */
+ if (port->rx_event & LDC_EVENT_RESET) {
+ /* a link went down */
+
+ if (port->vsw == 1) {
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
}
+
+ vio_link_state_change(vio, LDC_EVENT_RESET);
+ vnet_port_reset(port);
+ vio_port_up(vio);
+
+ /* If the device is running but its tx queue was
+ * stopped (due to flow control), restart it.
+ * This is necessary since vnet_port_reset()
+ * clears the tx drings and thus we may never get
+ * back a VIO_TYPE_DATA ACK packet - which is
+ * the normal mechanism to restart the tx queue.
+ */
+ if (netif_running(dev))
+ maybe_tx_wakeup(port);
+
port->rx_event = 0;
+ port->stats.event_reset++;
return 0;
}
- /* We may have multiple LDC events in rx_event. Unroll send_events() */
- event = (port->rx_event & LDC_EVENT_UP);
- port->rx_event &= ~(LDC_EVENT_RESET | LDC_EVENT_UP);
- if (event == LDC_EVENT_UP)
- goto ldc_ctrl;
- event = port->rx_event;
- if (!(event & LDC_EVENT_DATA_READY))
- return 0;
- /* we dont expect any other bits than RESET, UP, DATA_READY */
- BUG_ON(event != LDC_EVENT_DATA_READY);
+ if (port->rx_event & LDC_EVENT_UP) {
+ /* a link came up */
+
+ if (port->vsw == 1) {
+ netif_carrier_on(port->dev);
+ netif_tx_start_all_queues(port->dev);
+ }
+
+ vio_link_state_change(vio, LDC_EVENT_UP);
+ port->rx_event = 0;
+ port->stats.event_up++;
+ return 0;
+ }
err = 0;
tx_wakeup = 0;
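
The rewrite above replaces the goto-based event unrolling with straight-line priority dispatch: RESET consumes everything, UP is handled next, and only DATA_READY reaches the descriptor-ring loop. Reduced to its control flow (handle_reset, handle_up, and process_rings are hypothetical stand-ins, not functions in this driver):

    static int event_dispatch_sketch(struct vnet_port *port, int budget)
    {
            if (port->rx_event & LDC_EVENT_RESET) {
                    handle_reset(port);     /* link down: reset port, re-arm LDC */
                    port->rx_event = 0;     /* RESET consumes all pending bits */
                    return 0;
            }
            if (port->rx_event & LDC_EVENT_UP) {
                    handle_up(port);        /* link up: restore carrier/queues */
                    port->rx_event = 0;
                    return 0;
            }
            /* only LDC_EVENT_DATA_READY can remain (see the BUG_ON above) */
            return process_rings(port, budget);
    }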
@@ -794,25 +811,25 @@ ldc_ctrl:
pkt->start_idx = vio_dring_next(dr,
port->napi_stop_idx);
pkt->end_idx = -1;
- goto napi_resume;
- }
- err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
- if (unlikely(err < 0)) {
- if (err == -ECONNRESET)
- vio_conn_reset(vio);
- break;
+ } else {
+ err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
+ if (unlikely(err < 0)) {
+ if (err == -ECONNRESET)
+ vio_conn_reset(vio);
+ break;
+ }
+ if (err == 0)
+ break;
+ viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
+ msgbuf.tag.type,
+ msgbuf.tag.stype,
+ msgbuf.tag.stype_env,
+ msgbuf.tag.sid);
+ err = vio_validate_sid(vio, &msgbuf.tag);
+ if (err < 0)
+ break;
}
- if (err == 0)
- break;
- viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
- msgbuf.tag.type,
- msgbuf.tag.stype,
- msgbuf.tag.stype_env,
- msgbuf.tag.sid);
- err = vio_validate_sid(vio, &msgbuf.tag);
- if (err < 0)
- break;
-napi_resume:
+
if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
if (!sunvnet_port_is_up_common(port)) {
@@ -860,7 +877,7 @@ int sunvnet_poll_common(struct napi_struct *napi, int budget)
int processed = vnet_event_napi(port, budget);
if (processed < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, processed);
port->rx_event &= ~LDC_EVENT_DATA_READY;
vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
}
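
napi_complete() becomes napi_complete_done(), which reports how many packets the poll actually handled so the NAPI core can apply gro_flush_timeout and busy-poll heuristics. The canonical poll tail then looks like this sketch (do_rx_work and reenable_rx_irq are hypothetical placeholders for this driver's worker and interrupt re-enable):

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int processed = do_rx_work(napi, budget);

            if (processed < budget) {
                    /* report the real work done, not just "complete" */
                    napi_complete_done(napi, processed);
                    /* only now is it safe to re-enable the RX interrupt */
                    reenable_rx_irq(napi);
            }
            return processed;
    }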
@@ -1256,10 +1273,8 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
rcu_read_lock();
port = vnet_tx_port(skb, dev);
- if (unlikely(!port)) {
- rcu_read_unlock();
+ if (unlikely(!port))
goto out_dropped;
- }
if (skb_is_gso(skb) && skb->len > port->tsolen) {
err = vnet_handle_offloads(port, skb, vnet_tx_port);
@@ -1284,7 +1299,6 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
fl4.saddr = ip_hdr(skb)->saddr;
rt = ip_route_output_key(dev_net(dev), &fl4);
- rcu_read_unlock();
if (!IS_ERR(rt)) {
skb_dst_set(skb, &rt->dst);
icmp_send(skb, ICMP_DEST_UNREACH,
@@ -1422,10 +1436,13 @@ ldc_start_done:
dev->stats.tx_packets++;
dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
+ port->stats.tx_packets++;
+ port->stats.tx_bytes += port->tx_bufs[txi].skb->len;
dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
netif_tx_stop_queue(txq);
+ smp_rmb();
if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
netif_tx_wake_queue(txq);
}
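
The added smp_rmb() completes the stop-then-recheck idiom: the queue is stopped first, then ring space is re-read, so a completion that freed descriptors between the first availability check and the stop is caught and the queue re-woken instead of staying stopped forever. The general shape, sketched with a hypothetical ring_space() helper:

    if (ring_space(dr) < 1) {
            netif_tx_stop_queue(txq);
            smp_rmb();      /* re-read ring state after the stop is visible */
            if (ring_space(dr) > wake_thresh)
                    netif_tx_wake_queue(txq);
    }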
@@ -1443,8 +1460,7 @@ out_dropped:
jiffies + VNET_CLEAN_TIMEOUT);
else if (port)
del_timer(&port->clean_timer);
- if (port)
- rcu_read_unlock();
+ rcu_read_unlock();
if (skb)
dev_kfree_skb(skb);
vnet_free_skbs(freeskbs);
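
Together, this hunk and the two earlier ones in sunvnet_start_xmit_common() settle on one locking discipline: rcu_read_lock() is taken once at entry and released exactly once per exit path, so the error path no longer needs the conditional "if (port) rcu_read_unlock()". The resulting shape, sketched (lookup_port stands in for the vnet_tx_port callback the real function receives):

    static netdev_tx_t xmit_shape(struct sk_buff *skb, struct net_device *dev)
    {
            struct vnet_port *port;

            rcu_read_lock();
            port = lookup_port(skb, dev);   /* hypothetical RCU-protected lookup */
            if (unlikely(!port))
                    goto out_dropped;       /* still under the read lock */

            /* ... queue the skb to the port's TX dring ... */

            rcu_read_unlock();
            return NETDEV_TX_OK;

    out_dropped:
            dev_kfree_skb(skb);
            rcu_read_unlock();              /* exactly one unlock on every path */
            return NETDEV_TX_OK;
    }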
@@ -1636,14 +1652,15 @@ void sunvnet_port_free_tx_bufs_common(struct vnet_port *port)
}
EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);
-static void vnet_port_reset(struct vnet_port *port)
+void vnet_port_reset(struct vnet_port *port)
{
del_timer(&port->clean_timer);
sunvnet_port_free_tx_bufs_common(port);
port->rmtu = 0;
- port->tso = true;
+ port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */
port->tsolen = 0;
}
+EXPORT_SYMBOL_GPL(vnet_port_reset);
static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
@@ -1713,20 +1730,32 @@ EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common);
void sunvnet_port_add_txq_common(struct vnet_port *port)
{
struct vnet *vp = port->vp;
- int n;
+ int smallest = 0;
+ int i;
+
+ /* find the first least-used queue
+ * When there are more ldoms than queues, we start to
+ * double up on ports per queue.
+ */
+ for (i = 0; i < VNET_MAX_TXQS; i++) {
+ if (vp->q_used[i] == 0) {
+ smallest = i;
+ break;
+ }
+ if (vp->q_used[i] < vp->q_used[smallest])
+ smallest = i;
+ }
- n = vp->nports++;
- n = n & (VNET_MAX_TXQS - 1);
- port->q_index = n;
- netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
- port->q_index));
+ vp->nports++;
+ vp->q_used[smallest]++;
+ port->q_index = smallest;
}
EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);
void sunvnet_port_rm_txq_common(struct vnet_port *port)
{
port->vp->nports--;
- netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
- port->q_index));
+ port->vp->q_used[port->q_index]--;
+ port->q_index = 0;
}
EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);
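
The old queue assignment masked a monotonically growing port count into q_index (n & (VNET_MAX_TXQS - 1)), so load never rebalanced when ports went away. The new code keeps a q_used[] occupancy array and picks the least-used slot, preferring the first completely free one. The selection, extracted into a standalone sketch (pick_least_used_txq is a hypothetical name):

    /* return the first empty queue, else the one with the fewest ports */
    static int pick_least_used_txq(const int *q_used, int nqueues)
    {
            int smallest = 0;
            int i;

            for (i = 0; i < nqueues; i++) {
                    if (q_used[i] == 0)
                            return i;
                    if (q_used[i] < q_used[smallest])
                            smallest = i;
            }
            return smallest;
    }

sunvnet_port_add_txq_common() then increments q_used[] for the chosen index and sunvnet_port_rm_txq_common() decrements it, so a removed ldom's slot becomes preferred again; queue start/stop itself now happens in the LDC UP/RESET handling shown earlier rather than at port add/remove.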