author		Linus Torvalds <torvalds@linux-foundation.org>	2016-03-19 10:05:34 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-19 10:05:34 -0700
commit		1200b6809dfd9d73bc4c7db76d288c35fa4b2ebe (patch)
tree		552e03de245cdbd0780ca1215914edc4a26540f7 /drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
parent		6b5f04b6cf8ebab9a65d9c0026c650bb2538fd0f (diff)
parent		fe30937b65354c7fec244caebbdaae68e28ca797 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
"Highlights:
1) Support more Realtek wireless chips, from Jes Sorenson.
2) New BPF types for per-cpu hash and array maps, from Alexei
   Starovoitov (a minimal map-creation call is sketched below, after this list).
3) Make several TCP sysctls per-namespace, from Nikolay Borisov.
4) Allow the use of SO_REUSEPORT in order to do per-thread processing
   of incoming TCP/UDP connections. The muxing can be done using a
   BPF program which hashes the incoming packet (see the example after
   this list). From Craig Gallek.
5) Add a multiplexer for TCP streams, to provide a message-based
interface. BPF programs can be used to determine the message
boundaries. From Tom Herbert.
6) Add 802.1AE MACSEC support, from Sabrina Dubroca.
7) Avoid factorial complexity when taking down an inetdev interface
with lots of configured addresses. We were doing things like
traversing the entire address list for each address removed, and
flushing the entire netfilter conntrack table for every address as
well.
8) Add and use SKB bulk free infrastructure, from Jesper Brouer.
9) Allow offloading u32 classifiers to hardware, and implement for
ixgbe, from John Fastabend.
10) Allow configuring IRQ coalescing parameters on a per-queue basis,
from Kan Liang.
11) Extend ethtool so that larger link mode masks can be supported.
From David Decotigny.
12) Introduce devlink, which can be used to configure port link types
(ethernet vs Infiniband, etc.), port splitting, and switch device
level attributes as a whole. From Jiri Pirko.
13) Hardware offload support for flower classifiers, from Amir Vadai.
14) Add "Local Checksum Offload". Basically, for a tunneled packet
    the checksum of the outer header is 'constant' (because with the
    checksum field filled into the inner protocol header, the payload
    of the outer frame checksums to 'zero'), and we can take advantage
    of that in various ways (a short worked example follows the list).
    From Edward Cree"
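
For item 2, here is a minimal userspace sketch of creating one of the new per-cpu map types via the raw bpf(2) syscall. Only BPF_MAP_TYPE_PERCPU_ARRAY and the bpf_attr fields come from the kernel UAPI; the helper name and the sizes chosen are illustrative. A userspace lookup on such a map returns one value slot per possible CPU.

#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Create a BPF_MAP_TYPE_PERCPU_ARRAY map with the raw bpf(2) syscall.
 * There is no glibc wrapper for bpf(2), so syscall() is used directly. */
static int create_percpu_array(uint32_t value_size, uint32_t max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_PERCPU_ARRAY;
	attr.key_size    = sizeof(uint32_t);	/* array maps are indexed by a u32 */
	attr.value_size  = value_size;		/* size of one per-CPU value slot  */
	attr.max_entries = max_entries;

	return (int)syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}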
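
For item 4, the sketch below shows the intended usage pattern, assuming headers recent enough to define SO_ATTACH_REUSEPORT_CBPF: several worker sockets bind the same port with SO_REUSEPORT, and a classic BPF program attached to the group picks the receiving socket by index. The steering program simply takes skb->hash modulo the number of workers; the function name and the omitted error handling are illustrative, not part of the merge.

#include <arpa/inet.h>
#include <linux/filter.h>
#include <netinet/in.h>
#include <stdint.h>
#include <sys/socket.h>

/* Open one member of a SO_REUSEPORT group bound to 'port'. The classic
 * BPF program steers each packet to the socket whose group index equals
 * skb->hash % nworkers, so every worker thread calls this once. */
static int reuseport_worker_socket(uint16_t port, uint32_t nworkers)
{
	struct sock_filter prog[] = {
		/* A = skb->hash (ancillary load) */
		{ BPF_LD | BPF_W | BPF_ABS, 0, 0, (uint32_t)(SKF_AD_OFF + SKF_AD_RXHASH) },
		/* A = A % nworkers (nworkers must be non-zero) */
		{ BPF_ALU | BPF_MOD | BPF_K, 0, 0, nworkers },
		/* return A as the index of the receiving socket */
		{ BPF_RET | BPF_A, 0, 0, 0 },
	};
	struct sock_fprog fprog = {
		.len	= sizeof(prog) / sizeof(prog[0]),
		.filter	= prog,
	};
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(port),
		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
	};
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	/* Every member must set SO_REUSEPORT before bind() to share the port. */
	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	/* Attaching the filter to any member steers traffic for the whole group. */
	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, &fprog, sizeof(fprog));
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;
	return fd;
}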
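
Item 14 rests on ones'-complement arithmetic: once the inner checksum field is filled in, the inner packet folds to 0xffff (ones'-complement zero), so its contribution to the outer checksum is a known constant. The toy program below demonstrates just that arithmetic on a made-up buffer; it is not driver code and ignores pseudo-headers.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Fold a byte buffer into a 16-bit ones'-complement sum. */
static uint16_t csum16(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)data[i] << 8) | data[i + 1];
	if (len & 1)
		sum += (uint32_t)data[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* Toy "inner packet": two data words followed by a 16-bit checksum
	 * field, initially zero. */
	uint8_t inner[] = { 0x12, 0x34, 0xab, 0xcd, 0x00, 0x00 };
	uint16_t fill = (uint16_t)~csum16(inner, 4);	/* complement of the data sum */

	inner[4] = fill >> 8;
	inner[5] = fill & 0xff;

	/* With the checksum field filled in, the whole inner packet folds to
	 * 0xffff (ones'-complement zero), so its contribution to any outer
	 * checksum is a constant and the outer header can be checksummed
	 * without walking the payload. */
	printf("inner packet folds to 0x%04x\n", csum16(inner, sizeof(inner)));
	return 0;
}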
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1548 commits)
bonding: fix bond_get_stats()
net: bcmgenet: fix dma api length mismatch
net/mlx4_core: Fix backward compatibility on VFs
phy: mdio-thunder: Fix some Kconfig typos
lan78xx: add ndo_get_stats64
lan78xx: handle statistics counter rollover
RDS: TCP: Remove unused constant
RDS: TCP: Add sysctl tunables for sndbuf/rcvbuf on rds-tcp socket
net: smc911x: convert pxa dma to dmaengine
team: remove duplicate set of flag IFF_MULTICAST
bonding: remove duplicate set of flag IFF_MULTICAST
net: fix a comment typo
ethernet: micrel: fix some error codes
ip_tunnels, bpf: define IP_TUNNEL_OPTS_MAX and use it
bpf, dst: add and use dst_tclassid helper
bpf: make skb->tc_classid also readable
net: mvneta: bm: clarify dependencies
cls_bpf: reset class and reuse major in da
ldmvsw: Checkpatch sunvnet.c and sunvnet_common.c
ldmvsw: Add ldmvsw.c driver code
...
Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac_main.c	473
1 files changed, 276 insertions, 197 deletions
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c21015b68097..4c5ce9848ca9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -71,15 +71,8 @@ static int phyaddr = -1; module_param(phyaddr, int, S_IRUGO); MODULE_PARM_DESC(phyaddr, "Physical device address"); -#define DMA_TX_SIZE 256 -static int dma_txsize = DMA_TX_SIZE; -module_param(dma_txsize, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list"); - -#define DMA_RX_SIZE 256 -static int dma_rxsize = DMA_RX_SIZE; -module_param(dma_rxsize, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list"); +#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4) +#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4) static int flow_ctrl = FLOW_OFF; module_param(flow_ctrl, int, S_IRUGO | S_IWUSR); @@ -99,6 +92,8 @@ static int buf_sz = DEFAULT_BUFSIZE; module_param(buf_sz, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(buf_sz, "DMA buffer size"); +#define STMMAC_RX_COPYBREAK 256 + static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); @@ -134,10 +129,6 @@ static void stmmac_verify_args(void) { if (unlikely(watchdog < 0)) watchdog = TX_TIMEO; - if (unlikely(dma_rxsize < 0)) - dma_rxsize = DMA_RX_SIZE; - if (unlikely(dma_txsize < 0)) - dma_txsize = DMA_TX_SIZE; if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) buf_sz = DEFAULT_BUFSIZE; if (unlikely(flow_ctrl > 1)) @@ -197,12 +188,28 @@ static void print_pkt(unsigned char *buf, int len) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); } -/* minimum number of free TX descriptors required to wake up TX process */ -#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4) - static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) { - return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; + unsigned avail; + + if (priv->dirty_tx > priv->cur_tx) + avail = priv->dirty_tx - priv->cur_tx - 1; + else + avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1; + + return avail; +} + +static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv) +{ + unsigned dirty; + + if (priv->dirty_rx <= priv->cur_rx) + dirty = priv->cur_rx - priv->dirty_rx; + else + dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx; + + return dirty; } /** @@ -862,6 +869,12 @@ static int stmmac_init_phy(struct net_device *dev) phy_disconnect(phydev); return -ENODEV; } + + /* If attached to a switch, there is no reason to poll phy handler */ + if (priv->plat->phy_bus_name) + if (!strcmp(priv->plat->phy_bus_name, "fixed")) + phydev->irq = PHY_IGNORE_INTERRUPT; + pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" " Link = %d\n", dev->name, phydev->phy_id, phydev->link); @@ -906,19 +919,16 @@ static void stmmac_display_ring(void *head, int size, int extend_desc) static void stmmac_display_rings(struct stmmac_priv *priv) { - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; - if (priv->extend_desc) { pr_info("Extended RX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_erx, rxsize, 1); + stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1); pr_info("Extended TX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_etx, txsize, 1); + stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1); } else { pr_info("RX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_rx, 
rxsize, 0); + stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0); pr_info("TX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_tx, txsize, 0); + stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0); } } @@ -947,28 +957,26 @@ static int stmmac_set_bfsize(int mtu, int bufsize) static void stmmac_clear_descriptors(struct stmmac_priv *priv) { int i; - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; /* Clear the Rx/Tx descriptors */ - for (i = 0; i < rxsize; i++) + for (i = 0; i < DMA_RX_SIZE; i++) if (priv->extend_desc) priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic, priv->use_riwt, priv->mode, - (i == rxsize - 1)); + (i == DMA_RX_SIZE - 1)); else priv->hw->desc->init_rx_desc(&priv->dma_rx[i], priv->use_riwt, priv->mode, - (i == rxsize - 1)); - for (i = 0; i < txsize; i++) + (i == DMA_RX_SIZE - 1)); + for (i = 0; i < DMA_TX_SIZE; i++) if (priv->extend_desc) priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, priv->mode, - (i == txsize - 1)); + (i == DMA_TX_SIZE - 1)); else priv->hw->desc->init_tx_desc(&priv->dma_tx[i], priv->mode, - (i == txsize - 1)); + (i == DMA_TX_SIZE - 1)); } /** @@ -1031,8 +1039,6 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) { int i; struct stmmac_priv *priv = netdev_priv(dev); - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; unsigned int bfsize = 0; int ret = -ENOMEM; @@ -1044,10 +1050,6 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) priv->dma_buf_sz = bfsize; - if (netif_msg_probe(priv)) - pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__, - txsize, rxsize, bfsize); - if (netif_msg_probe(priv)) { pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); @@ -1055,7 +1057,7 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) /* RX INITIALIZATION */ pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n"); } - for (i = 0; i < rxsize; i++) { + for (i = 0; i < DMA_RX_SIZE; i++) { struct dma_desc *p; if (priv->extend_desc) p = &((priv->dma_erx + i)->basic); @@ -1072,26 +1074,26 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) (unsigned int)priv->rx_skbuff_dma[i]); } priv->cur_rx = 0; - priv->dirty_rx = (unsigned int)(i - rxsize); + priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); buf_sz = bfsize; /* Setup the chained descriptor addresses */ if (priv->mode == STMMAC_CHAIN_MODE) { if (priv->extend_desc) { priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy, - rxsize, 1); + DMA_RX_SIZE, 1); priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy, - txsize, 1); + DMA_TX_SIZE, 1); } else { priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy, - rxsize, 0); + DMA_RX_SIZE, 0); priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy, - txsize, 0); + DMA_TX_SIZE, 0); } } /* TX INITIALIZATION */ - for (i = 0; i < txsize; i++) { + for (i = 0; i < DMA_TX_SIZE; i++) { struct dma_desc *p; if (priv->extend_desc) p = &((priv->dma_etx + i)->basic); @@ -1100,6 +1102,8 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) p->des2 = 0; priv->tx_skbuff_dma[i].buf = 0; priv->tx_skbuff_dma[i].map_as_page = false; + priv->tx_skbuff_dma[i].len = 0; + priv->tx_skbuff_dma[i].last_segment = false; priv->tx_skbuff[i] = NULL; } @@ -1123,7 +1127,7 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv) { int i; - for (i = 0; i < priv->dma_rx_size; i++) + for (i = 0; i < DMA_RX_SIZE; i++) stmmac_free_rx_buffers(priv, i); } @@ -1131,7 
+1135,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) { int i; - for (i = 0; i < priv->dma_tx_size; i++) { + for (i = 0; i < DMA_TX_SIZE; i++) { struct dma_desc *p; if (priv->extend_desc) @@ -1143,12 +1147,12 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) if (priv->tx_skbuff_dma[i].map_as_page) dma_unmap_page(priv->device, priv->tx_skbuff_dma[i].buf, - priv->hw->desc->get_tx_len(p), + priv->tx_skbuff_dma[i].len, DMA_TO_DEVICE); else dma_unmap_single(priv->device, priv->tx_skbuff_dma[i].buf, - priv->hw->desc->get_tx_len(p), + priv->tx_skbuff_dma[i].len, DMA_TO_DEVICE); } @@ -1171,33 +1175,31 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) */ static int alloc_dma_desc_resources(struct stmmac_priv *priv) { - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; int ret = -ENOMEM; - priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), + priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t), GFP_KERNEL); if (!priv->rx_skbuff_dma) return -ENOMEM; - priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), + priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), GFP_KERNEL); if (!priv->rx_skbuff) goto err_rx_skbuff; - priv->tx_skbuff_dma = kmalloc_array(txsize, + priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, sizeof(*priv->tx_skbuff_dma), GFP_KERNEL); if (!priv->tx_skbuff_dma) goto err_tx_skbuff_dma; - priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), + priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *), GFP_KERNEL); if (!priv->tx_skbuff) goto err_tx_skbuff; if (priv->extend_desc) { - priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize * + priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_extended_desc), &priv->dma_rx_phy, @@ -1205,31 +1207,31 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) if (!priv->dma_erx) goto err_dma; - priv->dma_etx = dma_zalloc_coherent(priv->device, txsize * + priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * sizeof(struct dma_extended_desc), &priv->dma_tx_phy, GFP_KERNEL); if (!priv->dma_etx) { - dma_free_coherent(priv->device, priv->dma_rx_size * + dma_free_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_extended_desc), priv->dma_erx, priv->dma_rx_phy); goto err_dma; } } else { - priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize * + priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_desc), &priv->dma_rx_phy, GFP_KERNEL); if (!priv->dma_rx) goto err_dma; - priv->dma_tx = dma_zalloc_coherent(priv->device, txsize * + priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * sizeof(struct dma_desc), &priv->dma_tx_phy, GFP_KERNEL); if (!priv->dma_tx) { - dma_free_coherent(priv->device, priv->dma_rx_size * + dma_free_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_desc), priv->dma_rx, priv->dma_rx_phy); goto err_dma; @@ -1258,16 +1260,16 @@ static void free_dma_desc_resources(struct stmmac_priv *priv) /* Free DMA regions of consistent memory previously allocated */ if (!priv->extend_desc) { dma_free_coherent(priv->device, - priv->dma_tx_size * sizeof(struct dma_desc), + DMA_TX_SIZE * sizeof(struct dma_desc), priv->dma_tx, priv->dma_tx_phy); dma_free_coherent(priv->device, - priv->dma_rx_size * sizeof(struct dma_desc), + DMA_RX_SIZE * sizeof(struct dma_desc), priv->dma_rx, priv->dma_rx_phy); } else { - dma_free_coherent(priv->device, priv->dma_tx_size * + dma_free_coherent(priv->device, DMA_TX_SIZE * 
sizeof(struct dma_extended_desc), priv->dma_etx, priv->dma_tx_phy); - dma_free_coherent(priv->device, priv->dma_rx_size * + dma_free_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_extended_desc), priv->dma_erx, priv->dma_rx_phy); } @@ -1312,62 +1314,59 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) */ static void stmmac_tx_clean(struct stmmac_priv *priv) { - unsigned int txsize = priv->dma_tx_size; unsigned int bytes_compl = 0, pkts_compl = 0; + unsigned int entry = priv->dirty_tx; spin_lock(&priv->tx_lock); priv->xstats.tx_clean++; - while (priv->dirty_tx != priv->cur_tx) { - int last; - unsigned int entry = priv->dirty_tx % txsize; + while (entry != priv->cur_tx) { struct sk_buff *skb = priv->tx_skbuff[entry]; struct dma_desc *p; + int status; if (priv->extend_desc) p = (struct dma_desc *)(priv->dma_etx + entry); else p = priv->dma_tx + entry; - /* Check if the descriptor is owned by the DMA. */ - if (priv->hw->desc->get_tx_owner(p)) - break; - - /* Verify tx error by looking at the last segment. */ - last = priv->hw->desc->get_tx_ls(p); - if (likely(last)) { - int tx_error = - priv->hw->desc->tx_status(&priv->dev->stats, + status = priv->hw->desc->tx_status(&priv->dev->stats, &priv->xstats, p, priv->ioaddr); - if (likely(tx_error == 0)) { + /* Check if the descriptor is owned by the DMA */ + if (unlikely(status & tx_dma_own)) + break; + + /* Just consider the last segment and ...*/ + if (likely(!(status & tx_not_ls))) { + /* ... verify the status error condition */ + if (unlikely(status & tx_err)) { + priv->dev->stats.tx_errors++; + } else { priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; - } else - priv->dev->stats.tx_errors++; - + } stmmac_get_tx_hwtstamp(priv, entry, skb); } - if (netif_msg_tx_done(priv)) - pr_debug("%s: curr %d, dirty %d\n", __func__, - priv->cur_tx, priv->dirty_tx); if (likely(priv->tx_skbuff_dma[entry].buf)) { if (priv->tx_skbuff_dma[entry].map_as_page) dma_unmap_page(priv->device, priv->tx_skbuff_dma[entry].buf, - priv->hw->desc->get_tx_len(p), + priv->tx_skbuff_dma[entry].len, DMA_TO_DEVICE); else dma_unmap_single(priv->device, priv->tx_skbuff_dma[entry].buf, - priv->hw->desc->get_tx_len(p), + priv->tx_skbuff_dma[entry].len, DMA_TO_DEVICE); priv->tx_skbuff_dma[entry].buf = 0; priv->tx_skbuff_dma[entry].map_as_page = false; } priv->hw->mode->clean_desc3(priv, p); + priv->tx_skbuff_dma[entry].last_segment = false; + priv->tx_skbuff_dma[entry].is_jumbo = false; if (likely(skb != NULL)) { pkts_compl++; @@ -1378,16 +1377,17 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) priv->hw->desc->release_tx_desc(p, priv->mode); - priv->dirty_tx++; + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); } + priv->dirty_tx = entry; netdev_completed_queue(priv->dev, pkts_compl, bytes_compl); if (unlikely(netif_queue_stopped(priv->dev) && - stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { + stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) { netif_tx_lock(priv->dev); if (netif_queue_stopped(priv->dev) && - stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) { + stmmac_tx_avail(priv) > STMMAC_TX_THRESH) { if (netif_msg_tx_done(priv)) pr_debug("%s: restart transmit\n", __func__); netif_wake_queue(priv->dev); @@ -1421,20 +1421,19 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv) static void stmmac_tx_err(struct stmmac_priv *priv) { int i; - int txsize = priv->dma_tx_size; netif_stop_queue(priv->dev); priv->hw->dma->stop_tx(priv->ioaddr); dma_free_tx_skbufs(priv); - for (i = 0; i < txsize; i++) + for (i = 0; i < DMA_TX_SIZE; i++) if 
(priv->extend_desc) priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, priv->mode, - (i == txsize - 1)); + (i == DMA_TX_SIZE - 1)); else priv->hw->desc->init_tx_desc(&priv->dma_tx[i], priv->mode, - (i == txsize - 1)); + (i == DMA_TX_SIZE - 1)); priv->dirty_tx = 0; priv->cur_tx = 0; netdev_reset_queue(priv->dev); @@ -1635,23 +1634,35 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv) */ static int stmmac_init_dma_engine(struct stmmac_priv *priv) { - int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0; + int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, aal = 0; int mixed_burst = 0; int atds = 0; + int ret = 0; if (priv->plat->dma_cfg) { pbl = priv->plat->dma_cfg->pbl; fixed_burst = priv->plat->dma_cfg->fixed_burst; mixed_burst = priv->plat->dma_cfg->mixed_burst; - burst_len = priv->plat->dma_cfg->burst_len; + aal = priv->plat->dma_cfg->aal; } if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) atds = 1; - return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, - burst_len, priv->dma_tx_phy, - priv->dma_rx_phy, atds); + ret = priv->hw->dma->reset(priv->ioaddr); + if (ret) { + dev_err(priv->device, "Failed to reset the dma\n"); + return ret; + } + + priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, + aal, priv->dma_tx_phy, priv->dma_rx_phy, atds); + + if ((priv->synopsys_id >= DWMAC_CORE_3_50) && + (priv->plat->axi && priv->hw->dma->axi)) + priv->hw->dma->axi(priv->ioaddr, priv->plat->axi); + + return ret; } /** @@ -1799,10 +1810,8 @@ static int stmmac_open(struct net_device *dev) memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); priv->xstats.threshold = tc; - /* Create and initialize the TX/RX descriptors chains. */ - priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); - priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); + priv->rx_copybreak = STMMAC_RX_COPYBREAK; ret = alloc_dma_desc_resources(priv); if (ret < 0) { @@ -1943,13 +1952,12 @@ static int stmmac_release(struct net_device *dev) static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - unsigned int txsize = priv->dma_tx_size; - int entry; + unsigned int nopaged_len = skb_headlen(skb); int i, csum_insertion = 0, is_jumbo = 0; int nfrags = skb_shinfo(skb)->nr_frags; + unsigned int entry, first_entry; struct dma_desc *desc, *first; - unsigned int nopaged_len = skb_headlen(skb); - unsigned int enh_desc = priv->plat->enh_desc; + unsigned int enh_desc; spin_lock(&priv->tx_lock); @@ -1966,31 +1974,26 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (priv->tx_path_in_lpi_mode) stmmac_disable_eee_mode(priv); - entry = priv->cur_tx % txsize; + entry = priv->cur_tx; + first_entry = entry; csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); - if (priv->extend_desc) + if (likely(priv->extend_desc)) desc = (struct dma_desc *)(priv->dma_etx + entry); else desc = priv->dma_tx + entry; first = desc; + priv->tx_skbuff[first_entry] = skb; + + enh_desc = priv->plat->enh_desc; /* To program the descriptors according to the size of the frame */ if (enh_desc) is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); - if (likely(!is_jumbo)) { - desc->des2 = dma_map_single(priv->device, skb->data, - nopaged_len, DMA_TO_DEVICE); - if (dma_mapping_error(priv->device, desc->des2)) - goto dma_map_err; - priv->tx_skbuff_dma[entry].buf = desc->des2; - priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, - csum_insertion, priv->mode); - } else { - desc = first; + 
if (unlikely(is_jumbo)) { entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); if (unlikely(entry < 0)) goto dma_map_err; @@ -1999,10 +2002,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; int len = skb_frag_size(frag); + bool last_segment = (i == (nfrags - 1)); - priv->tx_skbuff[entry] = NULL; - entry = (++priv->cur_tx) % txsize; - if (priv->extend_desc) + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); + + if (likely(priv->extend_desc)) desc = (struct dma_desc *)(priv->dma_etx + entry); else desc = priv->dma_tx + entry; @@ -2012,53 +2016,37 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, desc->des2)) goto dma_map_err; /* should reuse desc w/o issues */ + priv->tx_skbuff[entry] = NULL; priv->tx_skbuff_dma[entry].buf = desc->des2; priv->tx_skbuff_dma[entry].map_as_page = true; + priv->tx_skbuff_dma[entry].len = len; + priv->tx_skbuff_dma[entry].last_segment = last_segment; + + /* Prepare the descriptor and set the own bit too */ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, - priv->mode); - wmb(); - priv->hw->desc->set_tx_owner(desc); - wmb(); + priv->mode, 1, last_segment); } - priv->tx_skbuff[entry] = skb; - - /* Finalize the latest segment. */ - priv->hw->desc->close_tx_desc(desc); + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); - wmb(); - /* According to the coalesce parameter the IC bit for the latest - * segment could be reset and the timer re-started to invoke the - * stmmac_tx function. This approach takes care about the fragments. - */ - priv->tx_count_frames += nfrags + 1; - if (priv->tx_coal_frames > priv->tx_count_frames) { - priv->hw->desc->clear_tx_ic(desc); - priv->xstats.tx_reset_ic_bit++; - mod_timer(&priv->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer)); - } else - priv->tx_count_frames = 0; - - /* To avoid raise condition */ - priv->hw->desc->set_tx_owner(first); - wmb(); - - priv->cur_tx++; + priv->cur_tx = entry; if (netif_msg_pktdata(priv)) { - pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d", - __func__, (priv->cur_tx % txsize), - (priv->dirty_tx % txsize), entry, first, nfrags); + pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", + __func__, priv->cur_tx, priv->dirty_tx, first_entry, + entry, first, nfrags); if (priv->extend_desc) - stmmac_display_ring((void *)priv->dma_etx, txsize, 1); + stmmac_display_ring((void *)priv->dma_etx, + DMA_TX_SIZE, 1); else - stmmac_display_ring((void *)priv->dma_tx, txsize, 0); + stmmac_display_ring((void *)priv->dma_tx, + DMA_TX_SIZE, 0); pr_debug(">>> frame to be transmitted: "); print_pkt(skb->data, skb->len); } + if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { if (netif_msg_hw(priv)) pr_debug("%s: stop transmitted packets\n", __func__); @@ -2067,16 +2055,59 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_bytes += skb->len; - if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - priv->hwts_tx_en)) { - /* declare that device is doing timestamping */ - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - priv->hw->desc->enable_tx_timestamp(first); + /* According to the coalesce parameter the IC bit for the latest + * segment is reset and the timer re-started to clean the tx status. + * This approach takes care about the fragments: desc is the first + * element in case of no SG. 
+ */ + priv->tx_count_frames += nfrags + 1; + if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { + mod_timer(&priv->txtimer, + STMMAC_COAL_TIMER(priv->tx_coal_timer)); + } else { + priv->tx_count_frames = 0; + priv->hw->desc->set_tx_ic(desc); + priv->xstats.tx_set_ic_bit++; } if (!priv->hwts_tx_en) skb_tx_timestamp(skb); + /* Ready to fill the first descriptor and set the OWN bit w/o any + * problems because all the descriptors are actually ready to be + * passed to the DMA engine. + */ + if (likely(!is_jumbo)) { + bool last_segment = (nfrags == 0); + + first->des2 = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, first->des2)) + goto dma_map_err; + + priv->tx_skbuff_dma[first_entry].buf = first->des2; + priv->tx_skbuff_dma[first_entry].len = nopaged_len; + priv->tx_skbuff_dma[first_entry].last_segment = last_segment; + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + priv->hw->desc->enable_tx_timestamp(first); + } + + /* Prepare the first descriptor setting the OWN bit too */ + priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, + csum_insertion, priv->mode, 1, + last_segment); + + /* The own bit must be the latest setting done when prepare the + * descriptor and then barrier is needed to make sure that + * all is coherent before granting the DMA engine. + */ + smp_wmb(); + } + netdev_sent_queue(dev, skb->len); priv->hw->dma->enable_dma_transmission(priv->ioaddr); @@ -2108,6 +2139,14 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) } +static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv) +{ + if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH) + return 0; + + return 1; +} + /** * stmmac_rx_refill - refill used skb preallocated buffers * @priv: driver private structure @@ -2116,11 +2155,11 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) */ static inline void stmmac_rx_refill(struct stmmac_priv *priv) { - unsigned int rxsize = priv->dma_rx_size; int bfsize = priv->dma_buf_sz; + unsigned int entry = priv->dirty_rx; + int dirty = stmmac_rx_dirty(priv); - for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) { - unsigned int entry = priv->dirty_rx % rxsize; + while (dirty-- > 0) { struct dma_desc *p; if (priv->extend_desc) @@ -2132,9 +2171,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); - - if (unlikely(skb == NULL)) + if (unlikely(!skb)) { + /* so for a while no zero-copy! 
*/ + priv->rx_zeroc_thresh = STMMAC_RX_THRESH; + if (unlikely(net_ratelimit())) + dev_err(priv->device, + "fail to alloc skb entry %d\n", + entry); break; + } priv->rx_skbuff[entry] = skb; priv->rx_skbuff_dma[entry] = @@ -2150,13 +2195,20 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) priv->hw->mode->refill_desc3(priv, p); + if (priv->rx_zeroc_thresh > 0) + priv->rx_zeroc_thresh--; + if (netif_msg_rx_status(priv)) pr_debug("\trefill entry #%d\n", entry); } + wmb(); priv->hw->desc->set_rx_owner(p); wmb(); + + entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); } + priv->dirty_rx = entry; } /** @@ -2168,8 +2220,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) */ static int stmmac_rx(struct stmmac_priv *priv, int limit) { - unsigned int rxsize = priv->dma_rx_size; - unsigned int entry = priv->cur_rx % rxsize; + unsigned int entry = priv->cur_rx; unsigned int next_entry; unsigned int count = 0; int coe = priv->hw->rx_csum; @@ -2177,9 +2228,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) if (netif_msg_rx_status(priv)) { pr_debug("%s: descriptor ring:\n", __func__); if (priv->extend_desc) - stmmac_display_ring((void *)priv->dma_erx, rxsize, 1); + stmmac_display_ring((void *)priv->dma_erx, + DMA_RX_SIZE, 1); else - stmmac_display_ring((void *)priv->dma_rx, rxsize, 0); + stmmac_display_ring((void *)priv->dma_rx, + DMA_RX_SIZE, 0); } while (count < limit) { int status; @@ -2190,20 +2243,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) else p = priv->dma_rx + entry; - if (priv->hw->desc->get_rx_owner(p)) + /* read the status of the incoming frame */ + status = priv->hw->desc->rx_status(&priv->dev->stats, + &priv->xstats, p); + /* check if managed by the DMA otherwise go ahead */ + if (unlikely(status & dma_own)) break; count++; - next_entry = (++priv->cur_rx) % rxsize; + priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE); + next_entry = priv->cur_rx; + if (priv->extend_desc) prefetch(priv->dma_erx + next_entry); else prefetch(priv->dma_rx + next_entry); - /* read the status of the incoming frame */ - status = priv->hw->desc->rx_status(&priv->dev->stats, - &priv->xstats, p); if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) priv->hw->desc->rx_extended_status(&priv->dev->stats, &priv->xstats, @@ -2248,23 +2304,54 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) pr_debug("\tframe size %d, COE: %d\n", frame_len, status); } - skb = priv->rx_skbuff[entry]; - if (unlikely(!skb)) { - pr_err("%s: Inconsistent Rx descriptor chain\n", - priv->dev->name); - priv->dev->stats.rx_dropped++; - break; + + if (unlikely((frame_len < priv->rx_copybreak) || + stmmac_rx_threshold_count(priv))) { + skb = netdev_alloc_skb_ip_align(priv->dev, + frame_len); + if (unlikely(!skb)) { + if (net_ratelimit()) + dev_warn(priv->device, + "packet dropped\n"); + priv->dev->stats.rx_dropped++; + break; + } + + dma_sync_single_for_cpu(priv->device, + priv->rx_skbuff_dma + [entry], frame_len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, + priv-> + rx_skbuff[entry]->data, + frame_len); + + skb_put(skb, frame_len); + dma_sync_single_for_device(priv->device, + priv->rx_skbuff_dma + [entry], frame_len, + DMA_FROM_DEVICE); + } else { + skb = priv->rx_skbuff[entry]; + if (unlikely(!skb)) { + pr_err("%s: Inconsistent Rx chain\n", + priv->dev->name); + priv->dev->stats.rx_dropped++; + break; + } + prefetch(skb->data - NET_IP_ALIGN); + priv->rx_skbuff[entry] = NULL; + priv->rx_zeroc_thresh++; + + skb_put(skb, frame_len); + 
dma_unmap_single(priv->device, + priv->rx_skbuff_dma[entry], + priv->dma_buf_sz, + DMA_FROM_DEVICE); } - prefetch(skb->data - NET_IP_ALIGN); - priv->rx_skbuff[entry] = NULL; stmmac_get_rx_hwtstamp(priv, entry, skb); - skb_put(skb, frame_len); - dma_unmap_single(priv->device, - priv->rx_skbuff_dma[entry], - priv->dma_buf_sz, DMA_FROM_DEVICE); - if (netif_msg_pktdata(priv)) { pr_debug("frame received (%dbytes)", frame_len); print_pkt(skb->data, frame_len); @@ -2555,19 +2642,17 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; if (priv->extend_desc) { seq_printf(seq, "Extended RX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq); + sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq); seq_printf(seq, "Extended TX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq); + sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq); } else { seq_printf(seq, "RX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq); + sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq); seq_printf(seq, "TX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq); + sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq); } return 0; @@ -3137,12 +3222,6 @@ static int __init stmmac_cmdline_opt(char *str) } else if (!strncmp(opt, "phyaddr:", 8)) { if (kstrtoint(opt + 8, 0, &phyaddr)) goto err; - } else if (!strncmp(opt, "dma_txsize:", 11)) { - if (kstrtoint(opt + 11, 0, &dma_txsize)) - goto err; - } else if (!strncmp(opt, "dma_rxsize:", 11)) { - if (kstrtoint(opt + 11, 0, &dma_rxsize)) - goto err; } else if (!strncmp(opt, "buf_sz:", 7)) { if (kstrtoint(opt + 7, 0, &buf_sz)) goto err; |
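
The new stmmac_tx_avail()/stmmac_rx_dirty() helpers introduced above are plain circular-ring occupancy counts over the fixed DMA_TX_SIZE/DMA_RX_SIZE rings. The standalone sketch below restates that arithmetic outside the driver; the ring sizes and the STMMAC_GET_ENTRY definition shown here are assumptions (a power-of-two wrap-around increment) that merely match how the patch uses them.

/* Placeholder ring sizes; the real values live in the driver headers and
 * must be powers of two for the wrap-around mask below to be valid. */
#define DMA_TX_SIZE	256
#define DMA_RX_SIZE	256
#define STMMAC_GET_ENTRY(x, size)	(((x) + 1) & ((size) - 1))

/* Free TX descriptors between consumer (dirty) and producer (cur); one
 * slot is kept empty so a full ring is distinguishable from an empty one. */
static unsigned int tx_avail(unsigned int dirty, unsigned int cur)
{
	if (dirty > cur)
		return dirty - cur - 1;
	return DMA_TX_SIZE - cur + dirty - 1;
}

/* RX descriptors already handed back by the hardware and waiting to be
 * refilled with a fresh buffer. */
static unsigned int rx_dirty(unsigned int dirty, unsigned int cur)
{
	if (dirty <= cur)
		return cur - dirty;
	return DMA_RX_SIZE - dirty + cur;
}

An empty TX ring (dirty == cur) therefore reports DMA_TX_SIZE - 1 free slots, and the transmit path in the patch stops the queue once fewer than MAX_SKB_FRAGS + 1 of them remain.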