-rw-r--r--  Documentation/networking/ip-sysctl.txt               |  13
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_qsys.h              |   2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c    |  82
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c  |  92
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c   |  35
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c   |  34
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c     |   7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h     |   1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c       |  20
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.c           |  34
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h           |  27
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c      |  20
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h         |   1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c    | 202
-rw-r--r--  include/linux/tcp.h                                  |   2
-rw-r--r--  include/net/netns/ipv4.h                             |   2
-rw-r--r--  include/net/tcp.h                                    |   5
-rw-r--r--  include/uapi/linux/snmp.h                            |   1
-rw-r--r--  net/ipv4/proc.c                                      |   1
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c                           |  17
-rw-r--r--  net/ipv4/tcp.c                                       |   1
-rw-r--r--  net/ipv4/tcp_input.c                                 |  38
-rw-r--r--  net/ipv4/tcp_ipv4.c                                  |   2
-rw-r--r--  net/ipv4/tcp_output.c                                |   9
-rw-r--r--  net/ipv4/tcp_timer.c                                 |  25
25 files changed, 433 insertions(+), 240 deletions(-)
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ea304a23c8d7..924bd51327b7 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -525,6 +525,19 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
tcp_sack - BOOLEAN
Enable select acknowledgments (SACKS).
+tcp_comp_sack_delay_ns - LONG INTEGER
+ TCP tries to reduce the number of SACKs sent, using a timer
+ based on 5% of SRTT, capped by this sysctl, in nanoseconds.
+ The default is 1ms, based on TSO autosizing period.
+
+ Default : 1,000,000 ns (1 ms)
+
+tcp_comp_sack_nr - INTEGER
+ Max number of SACKs that can be compressed.
+ Using 0 disables SACK compression.
+
+ Default : 44
+
tcp_slow_start_after_idle - BOOLEAN
If set, provide RFC2861 behavior and time out the congestion
window after an idle period. An idle period is defined at
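
The two sysctls documented above drive the compressed-ACK timer that the net/ipv4/tcp_input.c hunk further down arms in __tcp_ack_snd_check(): the delay is 5% of the smoothed RTT, capped by tcp_comp_sack_delay_ns. What follows is only a minimal standalone sketch of that arithmetic in plain C (not kernel code; compressed_ack_delay_ns() and srtt_x8_us are illustrative names), assuming the kernel's convention of storing srtt in microseconds scaled by 8:

#include <stdio.h>

#define NSEC_PER_USEC 1000UL

/* Mirrors "rtt * (NSEC_PER_USEC >> 3) / 20" from the patch:
 * (NSEC_PER_USEC >> 3) converts the 8x-scaled microsecond value to
 * nanoseconds while undoing the scaling, and /20 takes 5%.
 */
static unsigned long compressed_ack_delay_ns(unsigned long srtt_x8_us,
                                             unsigned long cap_ns)
{
        unsigned long delay = srtt_x8_us * (NSEC_PER_USEC >> 3) / 20;

        return delay < cap_ns ? delay : cap_ns;
}

int main(void)
{
        /* 10 ms SRTT with the default 1 ms cap -> 500000 ns (0.5 ms) */
        printf("%lu\n", compressed_ack_delay_ns(10000 * 8, 1000000UL));
        /* 100 ms SRTT hits the cap -> 1000000 ns (1 ms) */
        printf("%lu\n", compressed_ack_delay_ns(100000 * 8, 1000000UL));
        return 0;
}
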
diff --git a/drivers/net/ethernet/mscc/ocelot_qsys.h b/drivers/net/ethernet/mscc/ocelot_qsys.h
index aa7267d5ca77..d8c63aa761be 100644
--- a/drivers/net/ethernet/mscc/ocelot_qsys.h
+++ b/drivers/net/ethernet/mscc/ocelot_qsys.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Microsemi Ocelot Switch driver
*
- * License: Dual MIT/GPL
* Copyright (c) 2017 Microsemi Corporation
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 2f7f0915f071..2e6e2a96b4f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -276,17 +276,28 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr)
* Called from stmmac via stmmac_dma_ops->init
*/
static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds)
+ struct stmmac_dma_cfg *dma_cfg, int atds)
{
- /* Write TX and RX descriptors address */
- writel(dma_rx, ioaddr + EMAC_RX_DESC_LIST);
- writel(dma_tx, ioaddr + EMAC_TX_DESC_LIST);
-
writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
}
+static void sun8i_dwmac_dma_init_rx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
+{
+ /* Write RX descriptors address */
+ writel(dma_rx_phy, ioaddr + EMAC_RX_DESC_LIST);
+}
+
+static void sun8i_dwmac_dma_init_tx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ /* Write TX descriptors address */
+ writel(dma_tx_phy, ioaddr + EMAC_TX_DESC_LIST);
+}
+
/* sun8i_dwmac_dump_regs() - Dump EMAC address space
* Called from stmmac_dma_ops->dump_regs
* Used for ethtool
@@ -437,13 +448,36 @@ static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr,
return ret;
}
-static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode, int rxfifosz)
+static void sun8i_dwmac_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL1);
+ if (mode == SF_DMA_MODE) {
+ v |= EMAC_RX_MD;
+ } else {
+ v &= ~EMAC_RX_MD;
+ v &= ~EMAC_RX_TH_MASK;
+ if (mode < 32)
+ v |= EMAC_RX_TH_32;
+ else if (mode < 64)
+ v |= EMAC_RX_TH_64;
+ else if (mode < 96)
+ v |= EMAC_RX_TH_96;
+ else if (mode < 128)
+ v |= EMAC_RX_TH_128;
+ }
+ writel(v, ioaddr + EMAC_RX_CTL1);
+}
+
+static void sun8i_dwmac_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
{
u32 v;
v = readl(ioaddr + EMAC_TX_CTL1);
- if (txmode == SF_DMA_MODE) {
+ if (mode == SF_DMA_MODE) {
v |= EMAC_TX_MD;
/* Undocumented bit (called TX_NEXT_FRM in BSP), the original
* comment is
@@ -454,40 +488,26 @@ static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode,
} else {
v &= ~EMAC_TX_MD;
v &= ~EMAC_TX_TH_MASK;
- if (txmode < 64)
+ if (mode < 64)
v |= EMAC_TX_TH_64;
- else if (txmode < 128)
+ else if (mode < 128)
v |= EMAC_TX_TH_128;
- else if (txmode < 192)
+ else if (mode < 192)
v |= EMAC_TX_TH_192;
- else if (txmode < 256)
+ else if (mode < 256)
v |= EMAC_TX_TH_256;
}
writel(v, ioaddr + EMAC_TX_CTL1);
-
- v = readl(ioaddr + EMAC_RX_CTL1);
- if (rxmode == SF_DMA_MODE) {
- v |= EMAC_RX_MD;
- } else {
- v &= ~EMAC_RX_MD;
- v &= ~EMAC_RX_TH_MASK;
- if (rxmode < 32)
- v |= EMAC_RX_TH_32;
- else if (rxmode < 64)
- v |= EMAC_RX_TH_64;
- else if (rxmode < 96)
- v |= EMAC_RX_TH_96;
- else if (rxmode < 128)
- v |= EMAC_RX_TH_128;
- }
- writel(v, ioaddr + EMAC_RX_CTL1);
}
static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
.reset = sun8i_dwmac_dma_reset,
.init = sun8i_dwmac_dma_init,
+ .init_rx_chan = sun8i_dwmac_dma_init_rx,
+ .init_tx_chan = sun8i_dwmac_dma_init_tx,
.dump_regs = sun8i_dwmac_dump_regs,
- .dma_mode = sun8i_dwmac_dma_operation_mode,
+ .dma_rx_mode = sun8i_dwmac_dma_operation_mode_rx,
+ .dma_tx_mode = sun8i_dwmac_dma_operation_mode_tx,
.enable_dma_transmission = sun8i_dwmac_enable_dma_transmission,
.enable_dma_irq = sun8i_dwmac_enable_dma_irq,
.disable_dma_irq = sun8i_dwmac_disable_dma_irq,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 7ecf549c7f1c..aacc4aa80e3c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -81,8 +81,7 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
}
static void dwmac1000_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds)
+ struct stmmac_dma_cfg *dma_cfg, int atds)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
@@ -119,12 +118,22 @@ static void dwmac1000_dma_init(void __iomem *ioaddr,
/* Mask interrupts by writing to CSR7 */
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
- /* RX/TX descriptor base address lists must be written into
- * DMA CSR3 and CSR4, respectively
- */
- writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
- writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+static void dwmac1000_dma_init_rx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
+{
+ /* RX descriptor base address list must be written into DMA CSR3 */
+ writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
+}
+
+static void dwmac1000_dma_init_tx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ /* TX descriptor base address list must be written into DMA CSR4 */
+ writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR);
}
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
@@ -148,12 +157,40 @@ static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
return csr6;
}
-static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode, int rxfifosz)
+static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (mode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable RX store and forward mode\n");
+ csr6 |= DMA_CONTROL_RSF;
+ } else {
+ pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
+ csr6 &= ~DMA_CONTROL_RSF;
+ csr6 &= DMA_CONTROL_TC_RX_MASK;
+ if (mode <= 32)
+ csr6 |= DMA_CONTROL_RTC_32;
+ else if (mode <= 64)
+ csr6 |= DMA_CONTROL_RTC_64;
+ else if (mode <= 96)
+ csr6 |= DMA_CONTROL_RTC_96;
+ else
+ csr6 |= DMA_CONTROL_RTC_128;
+ }
+
+ /* Configure flow control based on rx fifo size */
+ csr6 = dwmac1000_configure_fc(csr6, fifosz);
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
- if (txmode == SF_DMA_MODE) {
+ if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable TX store and forward mode\n");
/* Transmit COE type 2 cannot be done in cut-through mode. */
csr6 |= DMA_CONTROL_TSF;
@@ -162,42 +199,22 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
*/
csr6 |= DMA_CONTROL_OSF;
} else {
- pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
+ pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
csr6 &= ~DMA_CONTROL_TSF;
csr6 &= DMA_CONTROL_TC_TX_MASK;
/* Set the transmit threshold */
- if (txmode <= 32)
+ if (mode <= 32)
csr6 |= DMA_CONTROL_TTC_32;
- else if (txmode <= 64)
+ else if (mode <= 64)
csr6 |= DMA_CONTROL_TTC_64;
- else if (txmode <= 128)
+ else if (mode <= 128)
csr6 |= DMA_CONTROL_TTC_128;
- else if (txmode <= 192)
+ else if (mode <= 192)
csr6 |= DMA_CONTROL_TTC_192;
else
csr6 |= DMA_CONTROL_TTC_256;
}
- if (rxmode == SF_DMA_MODE) {
- pr_debug("GMAC: enable RX store and forward mode\n");
- csr6 |= DMA_CONTROL_RSF;
- } else {
- pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
- csr6 &= ~DMA_CONTROL_RSF;
- csr6 &= DMA_CONTROL_TC_RX_MASK;
- if (rxmode <= 32)
- csr6 |= DMA_CONTROL_RTC_32;
- else if (rxmode <= 64)
- csr6 |= DMA_CONTROL_RTC_64;
- else if (rxmode <= 96)
- csr6 |= DMA_CONTROL_RTC_96;
- else
- csr6 |= DMA_CONTROL_RTC_128;
- }
-
- /* Configure flow control based on rx fifo size */
- csr6 = dwmac1000_configure_fc(csr6, rxfifosz);
-
writel(csr6, ioaddr + DMA_CONTROL);
}
@@ -256,9 +273,12 @@ static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
const struct stmmac_dma_ops dwmac1000_dma_ops = {
.reset = dwmac_dma_reset,
.init = dwmac1000_dma_init,
+ .init_rx_chan = dwmac1000_dma_init_rx,
+ .init_tx_chan = dwmac1000_dma_init_tx,
.axi = dwmac1000_dma_axi,
.dump_regs = dwmac1000_dump_dma_regs,
- .dma_mode = dwmac1000_dma_operation_mode,
+ .dma_rx_mode = dwmac1000_dma_operation_mode_rx,
+ .dma_tx_mode = dwmac1000_dma_operation_mode_tx,
.enable_dma_transmission = dwmac_enable_dma_transmission,
.enable_dma_irq = dwmac_enable_dma_irq,
.disable_dma_irq = dwmac_disable_dma_irq,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 6502b9aa3bf5..21dee25ee570 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -29,8 +29,7 @@
#include "dwmac_dma.h"
static void dwmac100_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds)
+ struct stmmac_dma_cfg *dma_cfg, int atds)
{
/* Enable Application Access by writing to DMA CSR0 */
writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
@@ -38,12 +37,22 @@ static void dwmac100_dma_init(void __iomem *ioaddr,
/* Mask interrupts by writing to CSR7 */
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
- /* RX/TX descriptor base addr lists must be written into
- * DMA CSR3 and CSR4, respectively
- */
- writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
- writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+static void dwmac100_dma_init_rx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
+{
+ /* RX descriptor base addr lists must be written into DMA CSR3 */
+ writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
+}
+
+static void dwmac100_dma_init_tx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ /* TX descriptor base addr lists must be written into DMA CSR4 */
+ writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR);
}
/* Store and Forward capability is not used at all.
@@ -51,14 +60,14 @@ static void dwmac100_dma_init(void __iomem *ioaddr,
* The transmit threshold can be programmed by setting the TTC bits in the DMA
* control register.
*/
-static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode, int rxfifosz)
+static void dwmac100_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
- if (txmode <= 32)
+ if (mode <= 32)
csr6 |= DMA_CONTROL_TTC_32;
- else if (txmode <= 64)
+ else if (mode <= 64)
csr6 |= DMA_CONTROL_TTC_64;
else
csr6 |= DMA_CONTROL_TTC_128;
@@ -112,8 +121,10 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
const struct stmmac_dma_ops dwmac100_dma_ops = {
.reset = dwmac_dma_reset,
.init = dwmac100_dma_init,
+ .init_rx_chan = dwmac100_dma_init_rx,
+ .init_tx_chan = dwmac100_dma_init_tx,
.dump_regs = dwmac100_dump_dma_regs,
- .dma_mode = dwmac100_dma_operation_mode,
+ .dma_tx_mode = dwmac100_dma_operation_mode_tx,
.dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
.enable_dma_transmission = dwmac_enable_dma_transmission,
.enable_dma_irq = dwmac_enable_dma_irq,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 65ed896c13cb..20299f6f65fc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -189,9 +189,12 @@ static void dwmac4_set_tx_owner(struct dma_desc *p)
p->des3 |= cpu_to_le32(TDES3_OWN);
}
-static void dwmac4_set_rx_owner(struct dma_desc *p)
+static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 |= cpu_to_le32(RDES3_OWN);
+ p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
+
+ if (!disable_rx_ic)
+ p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
}
static int dwmac4_get_tx_ls(struct dma_desc *p)
@@ -292,10 +295,7 @@ exit:
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
int mode, int end)
{
- p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
-
- if (!disable_rx_ic)
- p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
+ dwmac4_set_rx_owner(p, disable_rx_ic);
}
static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
@@ -424,6 +424,25 @@ static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
}
+static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+ *addr = le32_to_cpu(p->des0);
+}
+
+static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des0 = cpu_to_le32(addr);
+ p->des1 = 0;
+}
+
+static void dwmac4_clear(struct dma_desc *p)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
const struct stmmac_desc_ops dwmac4_desc_ops = {
.tx_status = dwmac4_wrback_get_tx_status,
.rx_status = dwmac4_wrback_get_rx_status,
@@ -445,6 +464,9 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.init_tx_desc = dwmac4_rd_init_tx_desc,
.display_ring = dwmac4_display_ring,
.set_mss = dwmac4_set_mss_ctxt,
+ .get_addr = dwmac4_get_addr,
+ .set_addr = dwmac4_set_addr,
+ .clear = dwmac4_clear,
};
const struct stmmac_mode_ops dwmac4_ring_mode_ops = { };
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 117c3a5288f0..bf8e5a16f11c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -94,6 +94,10 @@ static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+
+ /* Enable OSP to get best performance */
+ value |= DMA_CONTROL_OSP;
+
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
@@ -116,8 +120,7 @@ static void dwmac4_dma_init_channel(void __iomem *ioaddr,
}
static void dwmac4_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds)
+ struct stmmac_dma_cfg *dma_cfg, int atds)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 8474bf961dd0..c63c1fe3f26b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -184,7 +184,6 @@
#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
int dwmac4_dma_reset(void __iomem *ioaddr);
-void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 3bfb3f584be2..77914c89d749 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -292,7 +292,7 @@ static void enh_desc_set_tx_owner(struct dma_desc *p)
p->des0 |= cpu_to_le32(ETDES0_OWN);
}
-static void enh_desc_set_rx_owner(struct dma_desc *p)
+static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
p->des0 |= cpu_to_le32(RDES0_OWN);
}
@@ -437,6 +437,21 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
pr_info("\n");
}
+static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+ *addr = le32_to_cpu(p->des2);
+}
+
+static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(addr);
+}
+
+static void enh_desc_clear(struct dma_desc *p)
+{
+ p->des2 = 0;
+}
+
const struct stmmac_desc_ops enh_desc_ops = {
.tx_status = enh_desc_get_tx_status,
.rx_status = enh_desc_get_rx_status,
@@ -457,4 +472,7 @@ const struct stmmac_desc_ops enh_desc_ops = {
.get_timestamp = enh_desc_get_timestamp,
.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
.display_ring = enh_desc_display_ring,
+ .get_addr = enh_desc_get_addr,
+ .set_addr = enh_desc_set_addr,
+ .clear = enh_desc_clear,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 9acc8d2f1039..23a12649e247 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -6,6 +6,7 @@
#include "common.h"
#include "stmmac.h"
+#include "stmmac_ptp.h"
static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
{
@@ -72,6 +73,7 @@ static const struct stmmac_hwif_entry {
bool gmac;
bool gmac4;
u32 min_id;
+ const struct stmmac_regs_off regs;
const void *desc;
const void *dma;
const void *mac;
@@ -86,6 +88,10 @@ static const struct stmmac_hwif_entry {
.gmac = false,
.gmac4 = false,
.min_id = 0,
+ .regs = {
+ .ptp_off = PTP_GMAC3_X_OFFSET,
+ .mmc_off = MMC_GMAC3_X_OFFSET,
+ },
.desc = NULL,
.dma = &dwmac100_dma_ops,
.mac = &dwmac100_ops,
@@ -98,6 +104,10 @@ static const struct stmmac_hwif_entry {
.gmac = true,
.gmac4 = false,
.min_id = 0,
+ .regs = {
+ .ptp_off = PTP_GMAC3_X_OFFSET,
+ .mmc_off = MMC_GMAC3_X_OFFSET,
+ },
.desc = NULL,
.dma = &dwmac1000_dma_ops,
.mac = &dwmac1000_ops,
@@ -110,6 +120,10 @@ static const struct stmmac_hwif_entry {
.gmac = false,
.gmac4 = true,
.min_id = 0,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
.mac = &dwmac4_ops,
@@ -122,6 +136,10 @@ static const struct stmmac_hwif_entry {
.gmac = false,
.gmac4 = true,
.min_id = DWMAC_CORE_4_00,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
.mac = &dwmac410_ops,
@@ -134,6 +152,10 @@ static const struct stmmac_hwif_entry {
.gmac = false,
.gmac4 = true,
.min_id = DWMAC_CORE_4_10,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac410_ops,
@@ -146,6 +168,10 @@ static const struct stmmac_hwif_entry {
.gmac = false,
.gmac4 = true,
.min_id = DWMAC_CORE_5_10,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac510_ops,
@@ -175,6 +201,12 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
/* Save ID for later use */
priv->synopsys_id = id;
+ /* Let's assume some safe values first */
+ priv->ptpaddr = priv->ioaddr +
+ (needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET);
+ priv->mmcaddr = priv->ioaddr +
+ (needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET);
+
/* Check for HW specific setup first */
if (priv->plat->setup) {
priv->hw = priv->plat->setup(priv);
@@ -206,6 +238,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
mac->tc = entry->tc;
priv->hw = mac;
+ priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
+ priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off;
/* Entry found */
ret = entry->setup(priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index b7539a14e6ad..f499a7fad6f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -59,7 +59,7 @@ struct stmmac_desc_ops {
/* Get the buffer size from the descriptor */
int (*get_tx_len)(struct dma_desc *p);
/* Handle extra events on specific interrupts hw dependent */
- void (*set_rx_owner)(struct dma_desc *p);
+ void (*set_rx_owner)(struct dma_desc *p, int disable_rx_ic);
/* Get the receive frame size */
int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type);
/* Return the reception status looking at the RDES1 */
@@ -79,6 +79,12 @@ struct stmmac_desc_ops {
void (*display_ring)(void *head, unsigned int size, bool rx);
/* set MSS via context descriptor */
void (*set_mss)(struct dma_desc *p, unsigned int mss);
+ /* get descriptor skbuff address */
+ void (*get_addr)(struct dma_desc *p, unsigned int *addr);
+ /* set descriptor skbuff address */
+ void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
+ /* clear descriptor */
+ void (*clear)(struct dma_desc *p);
};
#define stmmac_init_rx_desc(__priv, __args...) \
@@ -123,6 +129,12 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, display_ring, __args)
#define stmmac_set_mss(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_mss, __args)
+#define stmmac_get_desc_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, get_addr, __args)
+#define stmmac_set_desc_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_addr, __args)
+#define stmmac_clear_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, clear, __args)
struct stmmac_dma_cfg;
struct dma_features;
@@ -132,7 +144,7 @@ struct stmmac_dma_ops {
/* DMA core initialization */
int (*reset)(void __iomem *ioaddr);
void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds);
+ int atds);
void (*init_chan)(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, u32 chan);
void (*init_rx_chan)(void __iomem *ioaddr,
@@ -145,10 +157,6 @@ struct stmmac_dma_ops {
void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
/* Dump DMA registers */
void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space);
- /* Set tx/rx threshold in the csr6 register
- * An invalid value enables the store-and-forward mode */
- void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
- int rxfifosz);
void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
int fifosz, u8 qmode);
void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel,
@@ -191,8 +199,6 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, axi, __args)
#define stmmac_dump_dma_regs(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, dump_regs, __args)
-#define stmmac_dma_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, dma_mode, __args)
#define stmmac_dma_rx_mode(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, dma_rx_mode, __args)
#define stmmac_dma_tx_mode(__priv, __args...) \
@@ -440,6 +446,11 @@ struct stmmac_tc_ops {
#define stmmac_tc_setup_cls_u32(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_cls_u32, __args)
+struct stmmac_regs_off {
+ u32 ptp_off;
+ u32 mmc_off;
+};
+
extern const struct stmmac_ops dwmac100_ops;
extern const struct stmmac_dma_ops dwmac100_dma_ops;
extern const struct stmmac_ops dwmac1000_ops;
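
The descriptor get_addr/set_addr/clear hooks and their stmmac_get_desc_addr()/stmmac_set_desc_addr()/stmmac_clear_desc() wrappers added above are what let the stmmac_main.c hunks further down drop the explicit synopsys_id >= DWMAC_CORE_4_00 branches: the core calls one wrapper and each descriptor flavour supplies its own layout. A rough sketch of that dispatch pattern only (plain C, not the actual stmmac_do_void_callback() macro from hwif.h):

#include <errno.h>

struct dma_desc;                    /* opaque here; layout is per-variant */

/* Illustrative subset of the per-variant descriptor ops table. */
struct desc_ops_sketch {
        void (*set_addr)(struct dma_desc *p, unsigned long addr);
        void (*clear)(struct dma_desc *p);
};

/* Core-side wrapper: invoke the hook if the variant provides it,
 * otherwise return -EINVAL so the caller can treat it as unsupported.
 */
static int set_desc_addr_sketch(const struct desc_ops_sketch *ops,
                                struct dma_desc *p, unsigned long addr)
{
        if (!ops || !ops->set_addr)
                return -EINVAL;
        ops->set_addr(p, addr);
        return 0;
}
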
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 7b1d901bf5bc..de65bb29feba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -168,7 +168,7 @@ static void ndesc_set_tx_owner(struct dma_desc *p)
p->des0 |= cpu_to_le32(TDES0_OWN);
}
-static void ndesc_set_rx_owner(struct dma_desc *p)
+static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
p->des0 |= cpu_to_le32(RDES0_OWN);
}
@@ -297,6 +297,21 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx)
pr_info("\n");
}
+static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+ *addr = le32_to_cpu(p->des2);
+}
+
+static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(addr);
+}
+
+static void ndesc_clear(struct dma_desc *p)
+{
+ p->des2 = 0;
+}
+
const struct stmmac_desc_ops ndesc_ops = {
.tx_status = ndesc_get_tx_status,
.rx_status = ndesc_get_rx_status,
@@ -316,4 +331,7 @@ const struct stmmac_desc_ops ndesc_ops = {
.get_timestamp = ndesc_get_timestamp,
.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
.display_ring = ndesc_display_ring,
+ .get_addr = ndesc_get_addr,
+ .set_addr = ndesc_set_addr,
+ .clear = ndesc_clear,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 42fc76e76bf9..4d425b1a0c59 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -105,6 +105,7 @@ struct stmmac_priv {
u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
+ bool tx_timer_armed;
int tx_coalesce;
int hwts_tx_en;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d9dbe1355896..c32de53a00d3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1156,10 +1156,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
return -EINVAL;
}
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
- p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
- else
- p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
+ stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
if (priv->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
@@ -1344,14 +1341,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
else
p = tx_q->dma_tx + i;
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- p->des0 = 0;
- p->des1 = 0;
- p->des2 = 0;
- p->des3 = 0;
- } else {
- p->des2 = 0;
- }
+ stmmac_clear_desc(priv, p);
tx_q->tx_skbuff_dma[i].buf = 0;
tx_q->tx_skbuff_dma[i].map_as_page = false;
@@ -1797,22 +1787,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
/* configure all channels */
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- for (chan = 0; chan < rx_channels_count; chan++) {
- qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
- stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
- rxfifosz, qmode);
- }
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
+ rxfifosz, qmode);
+ }
- for (chan = 0; chan < tx_channels_count; chan++) {
- qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
- stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
- txfifosz, qmode);
- }
- } else {
- stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
+ txfifosz, qmode);
}
}
@@ -1981,23 +1967,14 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
rxfifosz /= rx_channels_count;
txfifosz /= tx_channels_count;
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz,
- rxqmode);
- stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz,
- txqmode);
- } else {
- stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
- }
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}
static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
- int ret = false;
+ int ret;
- /* Safety features are only available in cores >= 5.10 */
- if (priv->synopsys_id < DWMAC_CORE_5_10)
- return ret;
ret = stmmac_safety_feat_irq_status(priv, priv->dev,
priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
if (ret && (ret != -EINVAL)) {
@@ -2108,14 +2085,6 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
- priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
- } else {
- priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
- priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
- }
-
dwmac_mmc_intr_all_mask(priv->mmcaddr);
if (priv->dma_cap.rmon) {
@@ -2169,10 +2138,9 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
u32 rx_channels_count = priv->plat->rx_queues_to_use;
u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
struct stmmac_rx_queue *rx_q;
struct stmmac_tx_queue *tx_q;
- u32 dummy_dma_rx_phy = 0;
- u32 dummy_dma_tx_phy = 0;
u32 chan = 0;
int atds = 0;
int ret = 0;
@@ -2191,48 +2159,39 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
return ret;
}
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- /* DMA Configuration */
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
- dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
-
- /* DMA RX Channel Configuration */
- for (chan = 0; chan < rx_channels_count; chan++) {
- rx_q = &priv->rx_queue[chan];
-
- stmmac_init_rx_chan(priv, priv->ioaddr,
- priv->plat->dma_cfg, rx_q->dma_rx_phy,
- chan);
-
- rx_q->rx_tail_addr = rx_q->dma_rx_phy +
- (DMA_RX_SIZE * sizeof(struct dma_desc));
- stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
- rx_q->rx_tail_addr, chan);
- }
-
- /* DMA TX Channel Configuration */
- for (chan = 0; chan < tx_channels_count; chan++) {
- tx_q = &priv->tx_queue[chan];
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ rx_q = &priv->rx_queue[chan];
- stmmac_init_chan(priv, priv->ioaddr,
- priv->plat->dma_cfg, chan);
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
- stmmac_init_tx_chan(priv, priv->ioaddr,
- priv->plat->dma_cfg, tx_q->dma_tx_phy,
- chan);
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (DMA_RX_SIZE * sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+ }
- tx_q->tx_tail_addr = tx_q->dma_tx_phy +
- (DMA_TX_SIZE * sizeof(struct dma_desc));
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
- tx_q->tx_tail_addr, chan);
- }
- } else {
- rx_q = &priv->rx_queue[chan];
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_channels_count; chan++) {
tx_q = &priv->tx_queue[chan];
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
- tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy +
+ (DMA_TX_SIZE * sizeof(struct dma_desc));
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
}
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++)
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+ /* DMA Configuration */
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
if (priv->plat->axi)
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
@@ -2515,12 +2474,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_core_init(priv, priv->hw, dev);
/* Initialize MTL*/
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
- stmmac_mtl_configuration(priv);
+ stmmac_mtl_configuration(priv);
/* Initialize Safety Features */
- if (priv->synopsys_id >= DWMAC_CORE_5_10)
- stmmac_safety_feat_configuration(priv);
+ stmmac_safety_feat_configuration(priv);
ret = stmmac_rx_ipc(priv, priv->hw);
if (!ret) {
@@ -3074,10 +3031,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (enh_desc)
is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
- if (unlikely(is_jumbo) && likely(priv->synopsys_id <
- DWMAC_CORE_4_00)) {
+ if (unlikely(is_jumbo)) {
entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
- if (unlikely(entry < 0))
+ if (unlikely(entry < 0) && (entry != -EINVAL))
goto dma_map_err;
}
@@ -3100,10 +3056,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
goto dma_map_err; /* should reuse desc w/o issues */
tx_q->tx_skbuff_dma[entry].buf = des;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- desc->des0 = cpu_to_le32(des);
- else
- desc->des2 = cpu_to_le32(des);
+
+ stmmac_set_desc_addr(priv, desc, des);
tx_q->tx_skbuff_dma[entry].map_as_page = true;
tx_q->tx_skbuff_dma[entry].len = len;
@@ -3158,13 +3112,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* element in case of no SG.
*/
priv->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
+ !priv->tx_timer_armed) {
mod_timer(&priv->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ priv->tx_timer_armed = true;
} else {
priv->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ priv->tx_timer_armed = false;
}
skb_tx_timestamp(skb);
@@ -3182,10 +3139,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
goto dma_map_err;
tx_q->tx_skbuff_dma[first_entry].buf = des;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- first->des0 = cpu_to_le32(des);
- else
- first->des2 = cpu_to_le32(des);
+
+ stmmac_set_desc_addr(priv, first, des);
tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
@@ -3211,11 +3166,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- if (priv->synopsys_id < DWMAC_CORE_4_00)
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
- else
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
- queue);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK;
@@ -3299,13 +3251,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
break;
}
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
- p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
- p->des1 = 0;
- } else {
- p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
- }
-
+ stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
stmmac_refill_desc3(priv, rx_q, p);
if (rx_q->rx_zeroc_thresh > 0)
@@ -3316,10 +3262,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
}
dma_wmb();
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- stmmac_init_rx_desc(priv, p, priv->use_riwt, 0, 0);
- else
- stmmac_set_rx_owner(priv, p);
+ stmmac_set_rx_owner(priv, p, priv->use_riwt);
dma_wmb();
@@ -3407,11 +3350,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
int frame_len;
unsigned int des;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- des = le32_to_cpu(p->des0);
- else
- des = le32_to_cpu(p->des2);
-
+ stmmac_get_desc_addr(priv, p, &des);
frame_len = stmmac_get_rx_frame_len(priv, p, coe);
/* If frame length is greater than skb buffer size
@@ -3705,6 +3644,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
/* To handle GMAC own interrupts */
if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
+ int mtl_status;
if (unlikely(status)) {
/* For LPI we need to save the tx status */
@@ -3714,20 +3654,18 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
priv->tx_path_in_lpi_mode = false;
}
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- for (queue = 0; queue < queues_count; queue++) {
- struct stmmac_rx_queue *rx_q =
- &priv->rx_queue[queue];
+ for (queue = 0; queue < queues_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- status |= stmmac_host_mtl_irq_status(priv,
- priv->hw, queue);
+ mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
+ queue);
+ if (mtl_status != -EINVAL)
+ status |= mtl_status;
- if (status & CORE_IRQ_MTL_RX_OVERFLOW)
- stmmac_set_rx_tail_ptr(priv,
- priv->ioaddr,
- rx_q->rx_tail_addr,
- queue);
- }
+ if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr,
+ queue);
}
/* PCS link status */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 807776928cb8..72705eaf4b84 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -218,6 +218,7 @@ struct tcp_sock {
reord:1; /* reordering detected */
} rack;
u16 advmss; /* Advertised MSS */
+ u8 compressed_ack;
u32 chrono_start; /* Start time in jiffies of a TCP chrono */
u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
u8 chrono_type:2, /* current chronograph type */
@@ -297,6 +298,7 @@ struct tcp_sock {
u32 sacked_out; /* SACK'd packets */
struct hrtimer pacing_timer;
+ struct hrtimer compressed_ack_timer;
/* from STCP, retrans queue hinting */
struct sk_buff* lost_skb_hint;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 8491bc9c86b1..661348f23ea5 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -160,6 +160,8 @@ struct netns_ipv4 {
int sysctl_tcp_pacing_ca_ratio;
int sysctl_tcp_wmem[3];
int sysctl_tcp_rmem[3];
+ int sysctl_tcp_comp_sack_nr;
+ unsigned long sysctl_tcp_comp_sack_delay_ns;
struct inet_timewait_death_row tcp_death_row;
int sysctl_max_syn_backlog;
int sysctl_tcp_fastopen;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6deb540297cc..952d842a604a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -559,7 +559,10 @@ void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
- sock_put(sk);
+ __sock_put(sk);
+
+ if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
+ __sock_put(sk);
inet_csk_clear_xmit_timers(sk);
}
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index d02e859301ff..750d89120335 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -278,6 +278,7 @@ enum
LINUX_MIB_TCPMTUPSUCCESS, /* TCPMTUPSuccess */
LINUX_MIB_TCPDELIVERED, /* TCPDelivered */
LINUX_MIB_TCPDELIVEREDCE, /* TCPDeliveredCE */
+ LINUX_MIB_TCPACKCOMPRESSED, /* TCPAckCompressed */
__LINUX_MIB_MAX
};
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 261b71d0ccc5..6c1ff89a60fa 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -298,6 +298,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS),
SNMP_MIB_ITEM("TCPDelivered", LINUX_MIB_TCPDELIVERED),
SNMP_MIB_ITEM("TCPDeliveredCE", LINUX_MIB_TCPDELIVEREDCE),
+ SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4b195bac8ac0..d2eed3ddcb0a 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -46,6 +46,7 @@ static int tcp_syn_retries_min = 1;
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int comp_sack_nr_max = 255;
/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;
@@ -1152,6 +1153,22 @@ static struct ctl_table ipv4_net_table[] = {
.extra1 = &one,
},
{
+ .procname = "tcp_comp_sack_delay_ns",
+ .data = &init_net.ipv4.sysctl_tcp_comp_sack_delay_ns,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {
+ .procname = "tcp_comp_sack_nr",
+ .data = &init_net.ipv4.sysctl_tcp_comp_sack_nr,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &comp_sack_nr_max,
+ },
+ {
.procname = "udp_rmem_min",
.data = &init_net.ipv4.sysctl_udp_rmem_min,
.maxlen = sizeof(init_net.ipv4.sysctl_udp_rmem_min),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 62b776f90037..0a2ea0bbf867 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2595,6 +2595,7 @@ int tcp_disconnect(struct sock *sk, int flags)
dst_release(sk->sk_rx_dst);
sk->sk_rx_dst = NULL;
tcp_saved_syn_free(tp);
+ tp->compressed_ack = 0;
/* Clean up fastopen related fields */
tcp_free_fastopen_req(tp);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0bf032839548..aebb29ab2fdf 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4249,6 +4249,8 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
* If the sack array is full, forget about the last one.
*/
if (this_sack >= TCP_NUM_SACKS) {
+ if (tp->compressed_ack)
+ tcp_send_ack(sk);
this_sack--;
tp->rx_opt.num_sacks--;
sp--;
@@ -4715,8 +4717,6 @@ drop:
if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
goto out_of_window;
- tcp_enter_quickack_mode(sk);
-
if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
/* Partial packet, seq < rcv_next < end_seq */
SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
@@ -5083,6 +5083,7 @@ static inline void tcp_data_snd_check(struct sock *sk)
static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
{
struct tcp_sock *tp = tcp_sk(sk);
+ unsigned long rtt, delay;
/* More than one full frame received... */
if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
@@ -5094,15 +5095,36 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
(tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
__tcp_select_window(sk) >= tp->rcv_wnd)) ||
/* We ACK each frame or... */
- tcp_in_quickack_mode(sk) ||
- /* We have out of order data. */
- (ofo_possible && !RB_EMPTY_ROOT(&tp->out_of_order_queue))) {
- /* Then ack it now */
+ tcp_in_quickack_mode(sk)) {
+send_now:
tcp_send_ack(sk);
- } else {
- /* Else, send delayed ack. */
+ return;
+ }
+
+ if (!ofo_possible || RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
tcp_send_delayed_ack(sk);
+ return;
}
+
+ if (!tcp_is_sack(tp) ||
+ tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
+ goto send_now;
+ tp->compressed_ack++;
+
+ if (hrtimer_is_queued(&tp->compressed_ack_timer))
+ return;
+
+ /* compress ack timer : 5 % of rtt, but no more than tcp_comp_sack_delay_ns */
+
+ rtt = tp->rcv_rtt_est.rtt_us;
+ if (tp->srtt_us && tp->srtt_us < rtt)
+ rtt = tp->srtt_us;
+
+ delay = min_t(unsigned long, sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns,
+ rtt * (NSEC_PER_USEC >> 3)/20);
+ sock_hold(sk);
+ hrtimer_start(&tp->compressed_ack_timer, ns_to_ktime(delay),
+ HRTIMER_MODE_REL_PINNED_SOFT);
}
static inline void tcp_ack_snd_check(struct sock *sk)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index caf23de88f8a..adbdb503db0c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2572,6 +2572,8 @@ static int __net_init tcp_sk_init(struct net *net)
init_net.ipv4.sysctl_tcp_wmem,
sizeof(init_net.ipv4.sysctl_tcp_wmem));
}
+ net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
+ net->ipv4.sysctl_tcp_comp_sack_nr = 44;
net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0d8f950a9006..437bb7ceba7f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -162,6 +162,15 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (unlikely(tp->compressed_ack)) {
+ NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
+ tp->compressed_ack);
+ tp->compressed_ack = 0;
+ if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
+ __sock_put(sk);
+ }
tcp_dec_quickack_mode(sk, pkts);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 92bdf64fffae..3b3611729928 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -708,6 +708,27 @@ out:
sock_put(sk);
}
+static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
+{
+ struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
+ struct sock *sk = (struct sock *)tp;
+
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk)) {
+ if (tp->compressed_ack)
+ tcp_send_ack(sk);
+ } else {
+ if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
+ &sk->sk_tsq_flags))
+ sock_hold(sk);
+ }
+ bh_unlock_sock(sk);
+
+ sock_put(sk);
+
+ return HRTIMER_NORESTART;
+}
+
void tcp_init_xmit_timers(struct sock *sk)
{
inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
@@ -715,4 +736,8 @@ void tcp_init_xmit_timers(struct sock *sk)
hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS_PINNED_SOFT);
tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;
+
+ hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED_SOFT);
+ tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}