Diffstat (limited to 'drivers/net/ethernet/broadcom/genet/bcmgenet.c')
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 783
1 file changed, 497 insertions, 286 deletions
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index f92896835d2a..7b0b399aaedd 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1,7 +1,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -450,6 +450,22 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
+static int bcmgenet_begin(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ /* Turn on the clock */
+ return clk_prepare_enable(priv->clk);
+}
+
+static void bcmgenet_complete(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ /* Turn off the clock */
+ clk_disable_unprepare(priv->clk);
+}
+
static int bcmgenet_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -461,7 +477,9 @@ static int bcmgenet_get_link_ksettings(struct net_device *dev,
if (!priv->phydev)
return -ENODEV;
- return phy_ethtool_ksettings_get(priv->phydev, cmd);
+ phy_ethtool_ksettings_get(priv->phydev, cmd);
+
+ return 0;
}
static int bcmgenet_set_link_ksettings(struct net_device *dev,
@@ -605,7 +623,7 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
/* GENET TDMA hardware does not support a configurable timeout, but will
* always generate an interrupt either after MBDONE packets have been
- * transmitted, or when the ring is emtpy.
+ * transmitted, or when the ring is empty.
*/
if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
@@ -691,6 +709,19 @@ struct bcmgenet_stats {
.reg_offset = offset, \
}
+#define STAT_GENET_Q(num) \
+ STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
+ tx_rings[num].packets), \
+ STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
+ tx_rings[num].bytes), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
+ rx_rings[num].bytes), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
+ rx_rings[num].packets), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
+ rx_rings[num].errors), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
+ rx_rings[num].dropped)
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
* between the end of TX stats and the beginning of the RX RUNT
@@ -778,12 +809,19 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
/* Misc UniMAC counters */
STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
- UMAC_RBUF_OVFL_CNT),
- STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+ UMAC_RBUF_OVFL_CNT_V1),
+ STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
+ UMAC_RBUF_ERR_CNT_V1),
STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
+ /* Per TX queues */
+ STAT_GENET_Q(0),
+ STAT_GENET_Q(1),
+ STAT_GENET_Q(2),
+ STAT_GENET_Q(3),
+ STAT_GENET_Q(16),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -821,6 +859,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
}
}
+static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
+{
+ u16 new_offset;
+ u32 val;
+
+ switch (offset) {
+ case UMAC_RBUF_OVFL_CNT_V1:
+ if (GENET_IS_V2(priv))
+ new_offset = RBUF_OVFL_CNT_V2;
+ else
+ new_offset = RBUF_OVFL_CNT_V3PLUS;
+
+ val = bcmgenet_rbuf_readl(priv, new_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_rbuf_writel(priv, 0, new_offset);
+ break;
+ case UMAC_RBUF_ERR_CNT_V1:
+ if (GENET_IS_V2(priv))
+ new_offset = RBUF_ERR_CNT_V2;
+ else
+ new_offset = RBUF_ERR_CNT_V3PLUS;
+
+ val = bcmgenet_rbuf_readl(priv, new_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_rbuf_writel(priv, 0, new_offset);
+ break;
+ default:
+ val = bcmgenet_umac_readl(priv, offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_umac_writel(priv, 0, offset);
+ break;
+ }
+
+ return val;
+}
+
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
int i, j = 0;
@@ -836,19 +913,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
case BCMGENET_STAT_NETDEV:
case BCMGENET_STAT_SOFT:
continue;
- case BCMGENET_STAT_MIB_RX:
- case BCMGENET_STAT_MIB_TX:
case BCMGENET_STAT_RUNT:
- if (s->type != BCMGENET_STAT_MIB_RX)
- offset = BCMGENET_STAT_OFFSET;
+ offset += BCMGENET_STAT_OFFSET;
+ /* fall through */
+ case BCMGENET_STAT_MIB_TX:
+ offset += BCMGENET_STAT_OFFSET;
+ /* fall through */
+ case BCMGENET_STAT_MIB_RX:
val = bcmgenet_umac_readl(priv,
UMAC_MIB_START + j + offset);
+ offset = 0; /* Reset Offset */
break;
case BCMGENET_STAT_MISC:
- val = bcmgenet_umac_readl(priv, s->reg_offset);
- /* clear if overflowed */
- if (val == ~0)
- bcmgenet_umac_writel(priv, 0, s->reg_offset);
+ if (GENET_IS_V1(priv)) {
+ val = bcmgenet_umac_readl(priv, s->reg_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_umac_writel(priv, 0,
+ s->reg_offset);
+ } else {
+ val = bcmgenet_update_stat_misc(priv,
+ s->reg_offset);
+ }
break;
}
@@ -973,6 +1059,8 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
+ .begin = bcmgenet_begin,
+ .complete = bcmgenet_complete,
.get_strings = bcmgenet_get_strings,
.get_sset_count = bcmgenet_get_sset_count,
.get_ethtool_stats = bcmgenet_get_ethtool_stats,
@@ -1011,8 +1099,17 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
/* Power down LED */
if (priv->hw_params->flags & GENET_HAS_EXT) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= (EXT_PWR_DOWN_PHY |
- EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ if (GENET_IS_V5(priv))
+ reg |= EXT_PWR_DOWN_PHY_EN |
+ EXT_PWR_DOWN_PHY_RD |
+ EXT_PWR_DOWN_PHY_SD |
+ EXT_PWR_DOWN_PHY_RX |
+ EXT_PWR_DOWN_PHY_TX |
+ EXT_IDDQ_GLBL_PWR;
+ else
+ reg |= EXT_PWR_DOWN_PHY;
+
+ reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
bcmgenet_phy_power_set(priv->dev, false);
@@ -1037,12 +1134,34 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
switch (mode) {
case GENET_POWER_PASSIVE:
- reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
- EXT_PWR_DOWN_BIAS);
- /* fallthrough */
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ if (GENET_IS_V5(priv)) {
+ reg &= ~(EXT_PWR_DOWN_PHY_EN |
+ EXT_PWR_DOWN_PHY_RD |
+ EXT_PWR_DOWN_PHY_SD |
+ EXT_PWR_DOWN_PHY_RX |
+ EXT_PWR_DOWN_PHY_TX |
+ EXT_IDDQ_GLBL_PWR);
+ reg |= EXT_PHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ mdelay(1);
+
+ reg &= ~EXT_PHY_RESET;
+ } else {
+ reg &= ~EXT_PWR_DOWN_PHY;
+ reg |= EXT_PWR_DN_EN_LD;
+ }
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_phy_power_set(priv->dev, true);
+ bcmgenet_mii_reset(priv->dev);
+ break;
+
case GENET_POWER_CABLE_SENSE:
/* enable APD */
- reg |= EXT_PWR_DN_EN_LD;
+ if (!GENET_IS_V5(priv)) {
+ reg |= EXT_PWR_DN_EN_LD;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
break;
case GENET_POWER_WOL_MAGIC:
bcmgenet_wol_power_up_cfg(priv, mode);
@@ -1050,39 +1169,20 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
default:
break;
}
-
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- if (mode == GENET_POWER_PASSIVE) {
- bcmgenet_phy_power_set(priv->dev, true);
- bcmgenet_mii_reset(priv->dev);
- }
}
/* ioctl handle special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int val = 0;
if (!netif_running(dev))
return -EINVAL;
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- if (!priv->phydev)
- val = -ENODEV;
- else
- val = phy_mii_ioctl(priv->phydev, rq, cmd);
- break;
-
- default:
- val = -EINVAL;
- break;
- }
+ if (!priv->phydev)
+ return -ENODEV;
- return val;
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1102,12 +1202,21 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
return tx_cb_ptr;
}
-/* Simple helper to free a control block's resources */
-static void bcmgenet_free_cb(struct enet_cb *cb)
+static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
{
- dev_kfree_skb_any(cb->skb);
- cb->skb = NULL;
- dma_unmap_addr_set(cb, dma_addr, 0);
+ struct enet_cb *tx_cb_ptr;
+
+ tx_cb_ptr = ring->cbs;
+ tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
+
+ /* Rewinding local write pointer */
+ if (ring->write_ptr == ring->cb_ptr)
+ ring->write_ptr = ring->end_ptr;
+ else
+ ring->write_ptr--;
+
+ return tx_cb_ptr;
}
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
@@ -1160,28 +1269,85 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
INTRL2_CPU_MASK_SET);
}
+/* Simple helper to free a transmit control block's resources
+ * Returns an skb when the last transmit control block associated with the
+ * skb is freed. The skb should be freed by the caller if necessary.
+ */
+static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
+ struct enet_cb *cb)
+{
+ struct sk_buff *skb;
+
+ skb = cb->skb;
+
+ if (skb) {
+ cb->skb = NULL;
+ if (cb == GENET_CB(skb)->first_cb)
+ dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+
+ if (cb == GENET_CB(skb)->last_cb)
+ return skb;
+
+ } else if (dma_unmap_addr(cb, dma_addr)) {
+ dma_unmap_page(dev,
+ dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+
+ return NULL;
+}
+
+/* Simple helper to free a receive control block's resources */
+static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
+ struct enet_cb *cb)
+{
+ struct sk_buff *skb;
+
+ skb = cb->skb;
+ cb->skb = NULL;
+
+ if (dma_unmap_addr(cb, dma_addr)) {
+ dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+
+ return skb;
+}
+
/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device *kdev = &priv->pdev->dev;
- struct enet_cb *tx_cb_ptr;
- struct netdev_queue *txq;
- unsigned int pkts_compl = 0;
+ unsigned int txbds_processed = 0;
unsigned int bytes_compl = 0;
- unsigned int c_index;
+ unsigned int pkts_compl = 0;
unsigned int txbds_ready;
- unsigned int txbds_processed = 0;
-
- /* Compute how many buffers are transmitted since last xmit call */
- c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
- c_index &= DMA_C_INDEX_MASK;
+ unsigned int c_index;
+ struct sk_buff *skb;
- if (likely(c_index >= ring->c_index))
- txbds_ready = c_index - ring->c_index;
+ /* Clear status before servicing to reduce spurious interrupts */
+ if (ring->index == DESC_INDEX)
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
+ INTRL2_CPU_CLEAR);
else
- txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+ INTRL2_CPU_CLEAR);
+
+ /* Compute how many buffers are transmitted since last xmit call */
+ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
+ & DMA_C_INDEX_MASK;
+ txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
netif_dbg(priv, tx_done, dev,
"%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
@@ -1189,21 +1355,12 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
/* Reclaim transmitted buffers */
while (txbds_processed < txbds_ready) {
- tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
- if (tx_cb_ptr->skb) {
+ skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
+ &priv->tx_cbs[ring->clean_ptr]);
+ if (skb) {
pkts_compl++;
- bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
- dma_unmap_single(kdev,
- dma_unmap_addr(tx_cb_ptr, dma_addr),
- dma_unmap_len(tx_cb_ptr, dma_len),
- DMA_TO_DEVICE);
- bcmgenet_free_cb(tx_cb_ptr);
- } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
- dma_unmap_page(kdev,
- dma_unmap_addr(tx_cb_ptr, dma_addr),
- dma_unmap_len(tx_cb_ptr, dma_len),
- DMA_TO_DEVICE);
- dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
+ bytes_compl += GENET_CB(skb)->bytes_sent;
+ dev_kfree_skb_any(skb);
}
txbds_processed++;
@@ -1214,20 +1371,15 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
}
ring->free_bds += txbds_processed;
- ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+ ring->c_index = c_index;
- dev->stats.tx_packets += pkts_compl;
- dev->stats.tx_bytes += bytes_compl;
+ ring->packets += pkts_compl;
+ ring->bytes += bytes_compl;
- txq = netdev_get_tx_queue(dev, ring->queue);
- netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
-
- if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- if (netif_tx_queue_stopped(txq))
- netif_tx_wake_queue(txq);
- }
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+ pkts_compl, bytes_compl);
- return pkts_compl;
+ return txbds_processed;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
@@ -1248,8 +1400,16 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
struct bcmgenet_tx_ring *ring =
container_of(napi, struct bcmgenet_tx_ring, napi);
unsigned int work_done = 0;
+ struct netdev_queue *txq;
+ unsigned long flags;
- work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+ spin_lock_irqsave(&ring->lock, flags);
+ work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
+ if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+ txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+ netif_tx_wake_queue(txq);
+ }
+ spin_unlock_irqrestore(&ring->lock, flags);
if (work_done == 0) {
napi_complete(napi);
@@ -1274,95 +1434,6 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
-/* Transmits a single SKB (either head of a fragment or a single SKB)
- * caller must hold priv->lock
- */
-static int bcmgenet_xmit_single(struct net_device *dev,
- struct sk_buff *skb,
- u16 dma_desc_flags,
- struct bcmgenet_tx_ring *ring)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device *kdev = &priv->pdev->dev;
- struct enet_cb *tx_cb_ptr;
- unsigned int skb_len;
- dma_addr_t mapping;
- u32 length_status;
- int ret;
-
- tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
-
- if (unlikely(!tx_cb_ptr))
- BUG();
-
- tx_cb_ptr->skb = skb;
-
- skb_len = skb_headlen(skb);
-
- mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
- ret = dma_mapping_error(kdev, mapping);
- if (ret) {
- priv->mib.tx_dma_failed++;
- netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
- dev_kfree_skb(skb);
- return ret;
- }
-
- dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
- length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
- (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
- DMA_TX_APPEND_CRC;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- length_status |= DMA_TX_DO_CSUM;
-
- dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
-
- return 0;
-}
-
-/* Transmit a SKB fragment */
-static int bcmgenet_xmit_frag(struct net_device *dev,
- skb_frag_t *frag,
- u16 dma_desc_flags,
- struct bcmgenet_tx_ring *ring)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device *kdev = &priv->pdev->dev;
- struct enet_cb *tx_cb_ptr;
- unsigned int frag_size;
- dma_addr_t mapping;
- int ret;
-
- tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
-
- if (unlikely(!tx_cb_ptr))
- BUG();
-
- tx_cb_ptr->skb = NULL;
-
- frag_size = skb_frag_size(frag);
-
- mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
- ret = dma_mapping_error(kdev, mapping);
- if (ret) {
- priv->mib.tx_dma_failed++;
- netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
- __func__);
- return ret;
- }
-
- dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
-
- dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
- (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
- (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
-
- return 0;
-}
-
/* Reallocate the SKB to put enough headroom in front of it and insert
* the transmit checksum offsets in the descriptors
*/
@@ -1429,11 +1500,16 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
struct bcmgenet_tx_ring *ring = NULL;
+ struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
unsigned long flags = 0;
int nr_frags, index;
- u16 dma_desc_flags;
+ dma_addr_t mapping;
+ unsigned int size;
+ skb_frag_t *frag;
+ u32 len_stat;
int ret;
int i;
@@ -1486,29 +1562,53 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- dma_desc_flags = DMA_SOP;
- if (nr_frags == 0)
- dma_desc_flags |= DMA_EOP;
+ for (i = 0; i <= nr_frags; i++) {
+ tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
- /* Transmit single SKB or head of fragment list */
- ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
- if (ret) {
- ret = NETDEV_TX_OK;
- goto out;
- }
+ if (unlikely(!tx_cb_ptr))
+ BUG();
- /* xmit fragment */
- for (i = 0; i < nr_frags; i++) {
- ret = bcmgenet_xmit_frag(dev,
- &skb_shinfo(skb)->frags[i],
- (i == nr_frags - 1) ? DMA_EOP : 0,
- ring);
+ if (!i) {
+ /* Transmit single SKB or head of fragment list */
+ GENET_CB(skb)->first_cb = tx_cb_ptr;
+ size = skb_headlen(skb);
+ mapping = dma_map_single(kdev, skb->data, size,
+ DMA_TO_DEVICE);
+ } else {
+ /* xmit fragment */
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ size = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(kdev, frag, 0, size,
+ DMA_TO_DEVICE);
+ }
+
+ ret = dma_mapping_error(kdev, mapping);
if (ret) {
+ priv->mib.tx_dma_failed++;
+ netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
ret = NETDEV_TX_OK;
- goto out;
+ goto out_unmap_frags;
}
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, size);
+
+ tx_cb_ptr->skb = skb;
+
+ len_stat = (size << DMA_BUFLENGTH_SHIFT) |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
+
+ if (!i) {
+ len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ len_stat |= DMA_TX_DO_CSUM;
+ }
+ if (i == nr_frags)
+ len_stat |= DMA_EOP;
+
+ dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
}
+ GENET_CB(skb)->last_cb = tx_cb_ptr;
skb_tx_timestamp(skb);
/* Decrement total BD count and advance our write pointer */
@@ -1529,6 +1629,19 @@ out:
spin_unlock_irqrestore(&ring->lock, flags);
return ret;
+
+out_unmap_frags:
+ /* Back up for failed control block mapping */
+ bcmgenet_put_txcb(priv, ring);
+
+ /* Unmap successfully mapped control blocks */
+ while (i-- > 0) {
+ tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
+ bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
+ }
+
+ dev_kfree_skb(skb);
+ goto out;
}
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
@@ -1560,14 +1673,12 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
}
/* Grab the current Rx skb from the ring and DMA-unmap it */
- rx_skb = cb->skb;
- if (likely(rx_skb))
- dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
- priv->rx_buf_len, DMA_FROM_DEVICE);
+ rx_skb = bcmgenet_free_rx_cb(kdev, cb);
/* Put the new Rx skb on the ring */
cb->skb = skb;
dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
dmadesc_set_addr(priv, cb->bd_addr, mapping);
/* Return the current Rx skb to caller */
@@ -1588,18 +1699,28 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned long dma_flag;
int len;
unsigned int rxpktprocessed = 0, rxpkttoprocess;
- unsigned int p_index;
+ unsigned int p_index, mask;
unsigned int discards;
unsigned int chksum_ok = 0;
+ /* Clear status before servicing to reduce spurious interrupts */
+ if (ring->index == DESC_INDEX) {
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
+ INTRL2_CPU_CLEAR);
+ } else {
+ mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+ bcmgenet_intrl2_1_writel(priv,
+ mask,
+ INTRL2_CPU_CLEAR);
+ }
+
p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
DMA_P_INDEX_DISCARD_CNT_MASK;
if (discards > ring->old_discards) {
discards = discards - ring->old_discards;
- dev->stats.rx_missed_errors += discards;
- dev->stats.rx_errors += discards;
+ ring->errors += discards;
ring->old_discards += discards;
/* Clear HW register when we reach 75% of maximum 0xFFFF */
@@ -1611,12 +1732,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
}
p_index &= DMA_P_INDEX_MASK;
-
- if (likely(p_index >= ring->c_index))
- rxpkttoprocess = p_index - ring->c_index;
- else
- rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
- p_index;
+ rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
netif_dbg(priv, rx_status, dev,
"RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
@@ -1627,7 +1743,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
skb = bcmgenet_rx_refill(priv, cb);
if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
+ ring->dropped++;
goto next;
}
@@ -1655,7 +1771,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- dev->stats.rx_errors++;
+ ring->errors++;
dev_kfree_skb_any(skb);
goto next;
}
@@ -1704,8 +1820,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
/*Finish setting up the received SKB and send it to the kernel*/
skb->protocol = eth_type_trans(skb, priv->dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ ring->packets++;
+ ring->bytes += len;
if (dma_flag & DMA_RX_MULT)
dev->stats.multicast++;
@@ -1769,22 +1885,16 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
- struct device *kdev = &priv->pdev->dev;
+ struct sk_buff *skb;
struct enet_cb *cb;
int i;
for (i = 0; i < priv->num_rx_bds; i++) {
cb = &priv->rx_cbs[i];
- if (dma_unmap_addr(cb, dma_addr)) {
- dma_unmap_single(kdev,
- dma_unmap_addr(cb, dma_addr),
- priv->rx_buf_len, DMA_FROM_DEVICE);
- dma_unmap_addr_set(cb, dma_addr, 0);
- }
-
- if (cb->skb)
- bcmgenet_free_cb(cb);
+ skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
+ if (skb)
+ dev_kfree_skb_any(skb);
}
}
@@ -1843,10 +1953,8 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
/* Mask all interrupts.*/
bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
@@ -1873,8 +1981,6 @@ static int init_umac(struct bcmgenet_priv *priv)
int ret;
u32 reg;
u32 int0_enable = 0;
- u32 int1_enable = 0;
- int i;
dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
@@ -1901,12 +2007,6 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intr_disable(priv);
- /* Enable Rx default queue 16 interrupts */
- int0_enable |= UMAC_IRQ_RXDMA_DONE;
-
- /* Enable Tx default queue 16 interrupts */
- int0_enable |= UMAC_IRQ_TXDMA_DONE;
-
/* Configure backpressure vectors for MoCA */
if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
reg = bcmgenet_bp_mc_get(priv);
@@ -1924,18 +2024,8 @@ static int init_umac(struct bcmgenet_priv *priv)
if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
- /* Enable Rx priority queue interrupts */
- for (i = 0; i < priv->hw_params->rx_queues; ++i)
- int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
-
- /* Enable Tx priority queue interrupts */
- for (i = 0; i < priv->hw_params->tx_queues; ++i)
- int1_enable |= (1 << i);
-
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
- bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
- /* Enable rx/tx engine.*/
dev_dbg(kdev, "done init umac\n");
return 0;
@@ -2067,22 +2157,33 @@ static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_enable = UMAC_IRQ_TXDMA_DONE;
+ u32 int1_enable = 0;
struct bcmgenet_tx_ring *ring;
for (i = 0; i < priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);
+ int1_enable |= (1 << i);
}
ring = &priv->tx_rings[DESC_INDEX];
napi_enable(&ring->napi);
+
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_disable = UMAC_IRQ_TXDMA_DONE;
+ u32 int1_disable = 0xffff;
struct bcmgenet_tx_ring *ring;
+ bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
for (i = 0; i < priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);
@@ -2195,22 +2296,33 @@ static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_enable = UMAC_IRQ_RXDMA_DONE;
+ u32 int1_enable = 0;
struct bcmgenet_rx_ring *ring;
for (i = 0; i < priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);
+ int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
}
ring = &priv->rx_rings[DESC_INDEX];
napi_enable(&ring->napi);
+
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_disable = UMAC_IRQ_RXDMA_DONE;
+ u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT;
struct bcmgenet_rx_ring *ring;
+ bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
for (i = 0; i < priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
@@ -2366,8 +2478,10 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
- int i;
struct netdev_queue *txq;
+ struct sk_buff *skb;
+ struct enet_cb *cb;
+ int i;
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
@@ -2376,10 +2490,10 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
bcmgenet_dma_teardown(priv);
for (i = 0; i < priv->num_tx_bds; i++) {
- if (priv->tx_cbs[i].skb != NULL) {
- dev_kfree_skb(priv->tx_cbs[i].skb);
- priv->tx_cbs[i].skb = NULL;
- }
+ cb = priv->tx_cbs + i;
+ skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
+ if (skb)
+ dev_kfree_skb(skb);
}
for (i = 0; i < priv->hw_params->tx_queues; i++) {
@@ -2457,24 +2571,28 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
+ unsigned long flags;
+ unsigned int status;
struct bcmgenet_priv *priv = container_of(
work, struct bcmgenet_priv, bcmgenet_irq_work);
netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
- if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
- priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->irq0_stat;
+ priv->irq0_stat = 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (status & UMAC_IRQ_MPD_R) {
netif_dbg(priv, wol, priv->dev,
"magic packet detected, waking up\n");
bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
}
/* Link UP/DOWN event */
- if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
+ if (status & UMAC_IRQ_LINK_EVENT)
phy_mac_interrupt(priv->phydev,
- !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
- priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
- }
+ !!(status & UMAC_IRQ_LINK_UP));
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
@@ -2483,22 +2601,21 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
struct bcmgenet_priv *priv = dev_id;
struct bcmgenet_rx_ring *rx_ring;
struct bcmgenet_tx_ring *tx_ring;
- unsigned int index;
+ unsigned int index, status;
- /* Save irq status for bottom-half processing. */
- priv->irq1_stat =
- bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+ /* Read irq status */
+ status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
/* clear interrupts */
- bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
- "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+ "%s: IRQ=0x%x\n", __func__, status);
/* Check Rx priority queue interrupts */
for (index = 0; index < priv->hw_params->rx_queues; index++) {
- if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+ if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
continue;
rx_ring = &priv->rx_rings[index];
@@ -2511,7 +2628,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
/* Check Tx priority queue interrupts */
for (index = 0; index < priv->hw_params->tx_queues; index++) {
- if (!(priv->irq1_stat & BIT(index)))
+ if (!(status & BIT(index)))
continue;
tx_ring = &priv->tx_rings[index];
@@ -2531,19 +2648,20 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
struct bcmgenet_priv *priv = dev_id;
struct bcmgenet_rx_ring *rx_ring;
struct bcmgenet_tx_ring *tx_ring;
+ unsigned int status;
+ unsigned long flags;
- /* Save irq status for bottom-half processing. */
- priv->irq0_stat =
- bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+ /* Read irq status */
+ status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
/* clear interrupts */
- bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
- "IRQ=0x%x\n", priv->irq0_stat);
+ "IRQ=0x%x\n", status);
- if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
+ if (status & UMAC_IRQ_RXDMA_DONE) {
rx_ring = &priv->rx_rings[DESC_INDEX];
if (likely(napi_schedule_prep(&rx_ring->napi))) {
@@ -2552,7 +2670,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
}
- if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
+ if (status & UMAC_IRQ_TXDMA_DONE) {
tx_ring = &priv->tx_rings[DESC_INDEX];
if (likely(napi_schedule_prep(&tx_ring->napi))) {
@@ -2565,18 +2683,28 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
UMAC_IRQ_PHY_DET_F |
UMAC_IRQ_LINK_EVENT |
UMAC_IRQ_HFB_SM |
- UMAC_IRQ_HFB_MM |
- UMAC_IRQ_MPD_R)) {
+ UMAC_IRQ_HFB_MM)) {
/* all other interested interrupts handled in bottom half */
schedule_work(&priv->bcmgenet_irq_work);
}
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
- priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
wake_up(&priv->wq);
}
+ /* all other interested interrupts handled in bottom half */
+ status &= (UMAC_IRQ_LINK_EVENT |
+ UMAC_IRQ_MPD_R);
+ if (status) {
+ /* Save irq status for bottom-half processing. */
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->irq0_stat |= status;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ schedule_work(&priv->bcmgenet_irq_work);
+ }
+
return IRQ_HANDLED;
}
@@ -2801,6 +2929,8 @@ err_irq0:
err_fini_dma:
bcmgenet_fini_dma(priv);
err_clk_disable:
+ if (priv->internal_phy)
+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
clk_disable_unprepare(priv->clk);
return ret;
}
@@ -2845,7 +2975,7 @@ static int bcmgenet_close(struct net_device *dev)
if (ret)
return ret;
- /* Disable MAC transmit. TX DMA disabled have to done before this */
+ /* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
/* tx reclaim */
@@ -3025,6 +3155,48 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
+static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned long tx_bytes = 0, tx_packets = 0;
+ unsigned long rx_bytes = 0, rx_packets = 0;
+ unsigned long rx_errors = 0, rx_dropped = 0;
+ struct bcmgenet_tx_ring *tx_ring;
+ struct bcmgenet_rx_ring *rx_ring;
+ unsigned int q;
+
+ for (q = 0; q < priv->hw_params->tx_queues; q++) {
+ tx_ring = &priv->tx_rings[q];
+ tx_bytes += tx_ring->bytes;
+ tx_packets += tx_ring->packets;
+ }
+ tx_ring = &priv->tx_rings[DESC_INDEX];
+ tx_bytes += tx_ring->bytes;
+ tx_packets += tx_ring->packets;
+
+ for (q = 0; q < priv->hw_params->rx_queues; q++) {
+ rx_ring = &priv->rx_rings[q];
+
+ rx_bytes += rx_ring->bytes;
+ rx_packets += rx_ring->packets;
+ rx_errors += rx_ring->errors;
+ rx_dropped += rx_ring->dropped;
+ }
+ rx_ring = &priv->rx_rings[DESC_INDEX];
+ rx_bytes += rx_ring->bytes;
+ rx_packets += rx_ring->packets;
+ rx_errors += rx_ring->errors;
+ rx_dropped += rx_ring->dropped;
+
+ dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_packets = tx_packets;
+ dev->stats.rx_bytes = rx_bytes;
+ dev->stats.rx_packets = rx_packets;
+ dev->stats.rx_errors = rx_errors;
+ dev->stats.rx_missed_errors = rx_errors;
+ return &dev->stats;
+}
+
static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_open = bcmgenet_open,
.ndo_stop = bcmgenet_close,
@@ -3037,6 +3209,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcmgenet_poll_controller,
#endif
+ .ndo_get_stats = bcmgenet_get_stats,
};
/* Array of GENET hardware parameters/characteristics */
@@ -3110,6 +3283,25 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
},
+ [GENET_V5] = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
+ },
};
/* Infer hardware parameters from the detected GENET version */
@@ -3120,26 +3312,22 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
u8 major;
u16 gphy_rev;
- if (GENET_IS_V4(priv)) {
+ if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v4;
priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
- priv->version = GENET_V4;
} else if (GENET_IS_V3(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
- priv->version = GENET_V3;
} else if (GENET_IS_V2(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
- priv->version = GENET_V2;
} else if (GENET_IS_V1(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
- priv->version = GENET_V1;
}
/* enum genet_version starts at 1 */
@@ -3149,7 +3337,9 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
/* Read GENET HW version */
reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
major = (reg >> 24 & 0x0f);
- if (major == 5)
+ if (major == 6)
+ major = 5;
+ else if (major == 5)
major = 4;
else if (major == 0)
major = 1;
@@ -3177,18 +3367,24 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
*/
gphy_rev = reg & 0xffff;
+ if (GENET_IS_V5(priv)) {
+ /* The EPHY revision should come from the MDIO registers of
+ * the PHY not from GENET.
+ */
+ if (gphy_rev != 0) {
+ pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
+ gphy_rev);
+ }
+ /* This is reserved so should require special treatment */
+ } else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+ pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+ return;
/* This is the good old scheme, just GPHY major, no minor nor patch */
- if ((gphy_rev & 0xf0) != 0)
+ } else if ((gphy_rev & 0xf0) != 0) {
priv->gphy_rev = gphy_rev << 8;
-
/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
- else if ((gphy_rev & 0xff00) != 0)
+ } else if ((gphy_rev & 0xff00) != 0) {
priv->gphy_rev = gphy_rev;
-
- /* This is reserved so should require special treatment */
- else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
- pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
- return;
}
#ifdef CONFIG_PHYS_ADDR_T_64BIT
@@ -3219,6 +3415,7 @@ static const struct of_device_id bcmgenet_match[] = {
{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+ { .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3233,6 +3430,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
const void *macaddr;
struct resource *r;
int err = -EIO;
+ const char *phy_mode_str;
/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
@@ -3276,6 +3474,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
+ spin_lock_init(&priv->lock);
+
SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
ether_addr_copy(dev->dev_addr, macaddr);
@@ -3338,6 +3538,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->clk_eee = NULL;
}
+ /* If this is an internal GPHY, power it on now, before UniMAC is
+ * brought out of reset as absolutely no UniMAC activity is allowed
+ */
+ if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
+ !strcasecmp(phy_mode_str, "internal"))
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
err = reset_umac(priv);
if (err)
goto err_clk_disable;
@@ -3395,7 +3602,8 @@ static int bcmgenet_suspend(struct device *d)
bcmgenet_netif_stop(dev);
- phy_suspend(priv->phydev);
+ if (!device_may_wakeup(d))
+ phy_suspend(priv->phydev);
netif_device_detach(dev);
@@ -3406,7 +3614,7 @@ static int bcmgenet_suspend(struct device *d)
if (ret)
return ret;
- /* Disable MAC transmit. TX DMA disabled have to done before this */
+ /* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
/* tx reclaim */
@@ -3492,7 +3700,8 @@ static int bcmgenet_resume(struct device *d)
netif_device_attach(dev);
- phy_resume(priv->phydev);
+ if (!device_may_wakeup(d))
+ phy_resume(priv->phydev);
if (priv->eee.eee_enabled)
bcmgenet_eee_enable_set(dev, true);
@@ -3502,6 +3711,8 @@ static int bcmgenet_resume(struct device *d)
return 0;
out_clk_disable:
+ if (priv->internal_phy)
+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
clk_disable_unprepare(priv->clk);
return ret;
}