path: root/drivers/net/ethernet/marvell
Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig                          |   1
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c                         |   2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c                |  30
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c               |  24
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c                |  86
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c        |  72
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_config.h         |   6
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c      | 276
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h      |  88
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c       | 387
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h       | 196
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c        |  12
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.c           | 184
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.h           |  18
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h   |   6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c               |   8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c              |   5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h              |  23
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs.c               | 110
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs.h               |  26
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c       |  63
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h           |   6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c        |  37
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c               |  49
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h               |   1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c           |   2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c         |  13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c       |   5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c        |  26
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h        |   4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c      | 125
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h      |  10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c     |  48
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h      |   6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c          |  14
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c          |   2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c          |   2
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c                     |   2
38 files changed, 1261 insertions, 714 deletions
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index f58a1c0144ba..884d64114bff 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -34,6 +34,7 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
depends on HAS_IOMEM
+ select MDIO_DEVRES
select PHYLIB
help
This driver supports the MDIO interface found in the network
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0e39d199ff06..2cad76d0a50e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3549,6 +3549,8 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
netdev_tx_reset_queue(nq);
+ txq->buf = NULL;
+ txq->tso_hdrs = NULL;
txq->descs = NULL;
txq->last_desc = 0;
txq->next_desc_to_proc = 0;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 41d935d1aaf6..40aeaa7bd739 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -62,35 +62,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
- MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
- MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
- MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv4 flows, fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv4 flows, Not fragmented, no vlan tag */
@@ -132,35 +135,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
- MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
- MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
- MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv4 flows, fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv6 flows, not fragmented, no vlan tag */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 3ea00bc9b91c..adc953611913 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6089,18 +6089,19 @@ static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
return true;
}
-static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
- struct fwnode_handle *fwnode,
- char **mac_from)
+static int mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
+ struct fwnode_handle *fwnode,
+ char **mac_from)
{
struct mvpp2_port *port = netdev_priv(dev);
char hw_mac_addr[ETH_ALEN] = {0};
char fw_mac_addr[ETH_ALEN];
+ int ret;
if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) {
*mac_from = "firmware node";
eth_hw_addr_set(dev, fw_mac_addr);
- return;
+ return 0;
}
if (priv->hw_version == MVPP21) {
@@ -6108,19 +6109,24 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
if (is_valid_ether_addr(hw_mac_addr)) {
*mac_from = "hardware";
eth_hw_addr_set(dev, hw_mac_addr);
- return;
+ return 0;
}
}
/* Only valid on OF enabled platforms */
- if (!of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr)) {
+ ret = of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ if (!ret) {
*mac_from = "nvmem cell";
eth_hw_addr_set(dev, fw_mac_addr);
- return;
+ return 0;
}
*mac_from = "random";
eth_hw_addr_random(dev);
+
+ return 0;
}
static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
@@ -6823,7 +6829,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mutex_init(&port->gather_stats_lock);
INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
- mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
+ err = mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
+ if (err < 0)
+ goto err_free_stats;
port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 75ba57bd1d46..9af22f497a40 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -1539,8 +1539,8 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
if (!priv->prs_double_vlans)
return -ENOMEM;
- /* Double VLAN: 0x8100, 0x88A8 */
- err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
+ /* Double VLAN: 0x88A8, 0x8100 */
+ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
MVPP2_PRS_PORT_MASK);
if (err)
return err;
@@ -1607,59 +1607,45 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
struct mvpp2_prs_entry pe;
- int tid;
-
- /* IPv4 over PPPoE with options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
- pe.index = tid;
-
- mvpp2_prs_match_etype(&pe, 0, PPP_IP);
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
- sizeof(struct iphdr) - 4,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L3 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
- mvpp2_prs_hw_write(priv, &pe);
+ int tid, ihl;
- /* IPv4 over PPPoE without options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
+ /* IPv4 over PPPoE with header length >= 5 */
+ for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
- pe.index = tid;
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ pe.index = tid;
- mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD |
- MVPP2_PRS_IPV4_IHL_MIN,
- MVPP2_PRS_IPV4_HEAD_MASK |
- MVPP2_PRS_IPV4_IHL_MASK);
+ mvpp2_prs_match_etype(&pe, 0, PPP_IP);
+ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | ihl,
+ MVPP2_PRS_IPV4_HEAD_MASK |
+ MVPP2_PRS_IPV4_IHL_MASK);
- /* Clear ri before updating */
- pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
- MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ /* Set L4 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ MVPP2_ETH_TYPE_LEN + (ihl * 4),
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
- mvpp2_prs_hw_write(priv, &pe);
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+ mvpp2_prs_hw_write(priv, &pe);
+ }
/* IPv6 over PPPoE */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
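Annotation: the mvpp2_prs.c hunk above replaces the two fixed IPv4-over-PPPoE parser entries with one TCAM entry per IPv4 header length (IHL 5 through 15) and programs an L4 offset derived from that header length. Below is a minimal user-space sketch of the offset arithmetic only, assuming MVPP2_ETH_TYPE_LEN is the 2-byte PPP protocol/ethertype field and the IHL bounds are the standard IPv4 range; it is illustrative, not driver code.

	/* Illustrative only: offsets programmed per IHL by the PPPoE loop above. */
	#include <stdio.h>
	#include <netinet/ip.h>		/* struct iphdr (20 bytes without options) */

	#define ETH_TYPE_LEN	2	/* assumed value of MVPP2_ETH_TYPE_LEN */
	#define IPV4_IHL_MIN	5
	#define IPV4_IHL_MAX	15

	int main(void)
	{
		for (int ihl = IPV4_IHL_MIN; ihl <= IPV4_IHL_MAX; ihl++) {
			/* shift to the IPv4 destination address, as in the hunk */
			unsigned int shift = ETH_TYPE_LEN + sizeof(struct iphdr) - 4;
			unsigned int l3_off = ETH_TYPE_LEN;		/* UDF_TYPE_L3 */
			unsigned int l4_off = ETH_TYPE_LEN + ihl * 4;	/* UDF_TYPE_L4 */

			printf("ihl=%2d shift=%u l3_off=%u l4_off=%u\n",
			       ihl, shift, l3_off, l4_off);
		}
		return 0;
	}

Note that the shift value is independent of IHL; only the L4 offset grows with the header length, which is why one entry per IHL is needed.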
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
index 6ad88d0fe43f..90c3a419932d 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -13,6 +13,12 @@
#include "octep_main.h"
#include "octep_regs_cn9k_pf.h"
+#define CTRL_MBOX_MAX_PF 128
+#define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF))
+
+#define FW_HB_INTERVAL_IN_SECS 1
+#define FW_HB_MISS_COUNT 10
+
/* Names of Hardware non-queue generic interrupts */
static char *cn93_non_ioq_msix_names[] = {
"epf_ire_rint",
@@ -198,7 +204,9 @@ static void octep_init_config_cn93_pf(struct octep_device *oct)
{
struct octep_config *conf = oct->conf;
struct pci_dev *pdev = oct->pdev;
+ u8 link = 0;
u64 val;
+ int pos;
/* Read ring configuration:
* PF ring count, number of VFs and rings per VF supported
@@ -234,7 +242,20 @@ static void octep_init_config_cn93_pf(struct octep_device *oct)
conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings;
conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names;
- conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + (0x400000ull * 7);
+ pos = pci_find_ext_capability(oct->pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_byte(oct->pdev,
+ pos + PCI_SRIOV_FUNC_LINK,
+ &link);
+ link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link);
+ }
+ conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr +
+ (0x400000ull * 7) +
+ (link * CTRL_MBOX_SZ);
+
+ conf->hb_interval = FW_HB_INTERVAL_IN_SECS;
+ conf->max_hb_miss_cnt = FW_HB_MISS_COUNT;
+
}
/* Setup registers for a hardware Tx Queue */
@@ -352,19 +373,30 @@ static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
}
-/* Mailbox Interrupt handler */
-static void cn93_handle_pf_mbox_intr(struct octep_device *oct)
+/* Process non-ioq interrupts required to keep pf interface running.
+ * OEI_RINT is needed for control mailbox
+ */
+static bool octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct)
{
- u64 mbox_int_val = 0ULL, val = 0ULL, qno = 0ULL;
+ bool handled = false;
+ u64 reg0;
- mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
- for (qno = 0; qno < OCTEP_MAX_VF; qno++) {
- val = readq(oct->mbox[qno]->mbox_read_reg);
- dev_dbg(&oct->pdev->dev,
- "PF MBOX READ: val:%llx from VF:%llx\n", val, qno);
+ /* Check for OEI INTR */
+ reg0 = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
+ if (reg0) {
+ dev_info(&oct->pdev->dev,
+ "Received OEI_RINT intr: 0x%llx\n",
+ reg0);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg0);
+ if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
+ queue_work(octep_wq, &oct->ctrl_mbox_task);
+ else if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
+ atomic_set(&oct->hb_miss_cnt, 0);
+
+ handled = true;
}
- writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg);
+ return handled;
}
/* Interrupts handler for all non-queue generic interrupts. */
@@ -434,24 +466,9 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
goto irq_handled;
}
- /* Check for MBOX INTR */
- reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0));
- if (reg_val) {
- dev_info(&pdev->dev,
- "Received MBOX_RINT intr: 0x%llx\n", reg_val);
- cn93_handle_pf_mbox_intr(oct);
- goto irq_handled;
- }
-
- /* Check for OEI INTR */
- reg_val = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
- if (reg_val) {
- dev_info(&pdev->dev,
- "Received OEI_EINT intr: 0x%llx\n", reg_val);
- octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg_val);
- queue_work(octep_wq, &oct->ctrl_mbox_task);
+ /* Check for MBOX INTR and OEI INTR */
+ if (octep_poll_non_ioq_interrupts_cn93_pf(oct))
goto irq_handled;
- }
/* Check for DMA INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT);
@@ -712,6 +729,7 @@ void octep_device_setup_cn93_pf(struct octep_device *oct)
oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf;
oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf;
+ oct->hw_ops.poll_non_ioq_interrupts = octep_poll_non_ioq_interrupts_cn93_pf;
oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf;
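Annotation: the octep_cn9k_pf.c changes above carve the shared control-mailbox region of BAR2 into per-PF windows — the SR-IOV function-link byte (combined with the PF slot via PCI_DEVFN) selects the slot, and each PF gets 0x400000/128 bytes inside the eighth 4 MB window — and they also introduce the firmware heartbeat parameters (1 s interval, 10 allowed misses). A rough sketch of the offset computation, using the constants from the hunk and a hypothetical helper name:

	#include <stddef.h>

	#define CTRL_MBOX_MAX_PF	128
	#define CTRL_MBOX_SZ		((size_t)(0x400000 / CTRL_MBOX_MAX_PF))	/* 32 KB per PF */

	/* Hypothetical helper: byte offset of a PF's control mailbox inside BAR2. */
	static size_t ctrl_mbox_offset(unsigned int link)
	{
		/* eighth 4 MB window of BAR2, then one 32 KB slot per function link */
		return (size_t)0x400000 * 7 + (size_t)link * CTRL_MBOX_SZ;
	}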
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
index f208f3f9a447..df7cd39d9fce 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
@@ -200,5 +200,11 @@ struct octep_config {
/* ctrl mbox config */
struct octep_ctrl_mbox_config ctrl_mbox_cfg;
+
+ /* Configured maximum heartbeat miss count */
+ u32 max_hb_miss_cnt;
+
+ /* Configured firmware heartbeat interval in secs */
+ u32 hb_interval;
};
#endif /* _OCTEP_CONFIG_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
index 39322e4dd100..035ead7935c7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
@@ -24,41 +24,49 @@
/* Time in msecs to wait for message response */
#define OCTEP_CTRL_MBOX_MSG_WAIT_MS 10
-#define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(m) (m)
-#define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(m) ((m) + 8)
-#define OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(m) ((m) + 24)
-#define OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(m) ((m) + 144)
-
-#define OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ)
-#define OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m))
-#define OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 4)
-#define OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 8)
-#define OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 12)
-
-#define OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m) ((m) + \
- OCTEP_CTRL_MBOX_INFO_SZ + \
- OCTEP_CTRL_MBOX_H2FQ_INFO_SZ)
-#define OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m))
-#define OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 4)
-#define OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 8)
-#define OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 12)
-
-#define OCTEP_CTRL_MBOX_Q_OFFSET(m, i) ((m) + \
- (sizeof(struct octep_ctrl_mbox_msg) * (i)))
-
-static u32 octep_ctrl_mbox_circq_inc(u32 index, u32 mask)
+/* Size of mbox info in bytes */
+#define OCTEP_CTRL_MBOX_INFO_SZ 256
+/* Size of mbox host to fw queue info in bytes */
+#define OCTEP_CTRL_MBOX_H2FQ_INFO_SZ 16
+/* Size of mbox fw to host queue info in bytes */
+#define OCTEP_CTRL_MBOX_F2HQ_INFO_SZ 16
+
+#define OCTEP_CTRL_MBOX_TOTAL_INFO_SZ (OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + \
+ OCTEP_CTRL_MBOX_F2HQ_INFO_SZ)
+
+#define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM(m) (m)
+#define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(m) ((m) + 8)
+#define OCTEP_CTRL_MBOX_INFO_HOST_STATUS(m) ((m) + 24)
+#define OCTEP_CTRL_MBOX_INFO_FW_STATUS(m) ((m) + 144)
+
+#define OCTEP_CTRL_MBOX_H2FQ_INFO(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ)
+#define OCTEP_CTRL_MBOX_H2FQ_PROD(m) (OCTEP_CTRL_MBOX_H2FQ_INFO(m))
+#define OCTEP_CTRL_MBOX_H2FQ_CONS(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO(m)) + 4)
+#define OCTEP_CTRL_MBOX_H2FQ_SZ(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO(m)) + 8)
+
+#define OCTEP_CTRL_MBOX_F2HQ_INFO(m) ((m) + \
+ OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ)
+#define OCTEP_CTRL_MBOX_F2HQ_PROD(m) (OCTEP_CTRL_MBOX_F2HQ_INFO(m))
+#define OCTEP_CTRL_MBOX_F2HQ_CONS(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO(m)) + 4)
+#define OCTEP_CTRL_MBOX_F2HQ_SZ(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO(m)) + 8)
+
+static const u32 mbox_hdr_sz = sizeof(union octep_ctrl_mbox_msg_hdr);
+
+static u32 octep_ctrl_mbox_circq_inc(u32 index, u32 inc, u32 sz)
{
- return (index + 1) & mask;
+ return (index + inc) % sz;
}
-static u32 octep_ctrl_mbox_circq_space(u32 pi, u32 ci, u32 mask)
+static u32 octep_ctrl_mbox_circq_space(u32 pi, u32 ci, u32 sz)
{
- return mask - ((pi - ci) & mask);
+ return sz - (abs(pi - ci) % sz);
}
-static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 mask)
+static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 sz)
{
- return ((pi - ci) & mask);
+ return (abs(pi - ci) % sz);
}
int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
@@ -73,153 +81,170 @@ int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
return -EINVAL;
}
- magic_num = readq(OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(mbox->barmem));
+ magic_num = readq(OCTEP_CTRL_MBOX_INFO_MAGIC_NUM(mbox->barmem));
if (magic_num != OCTEP_CTRL_MBOX_MAGIC_NUMBER) {
pr_info("octep_ctrl_mbox : Invalid magic number %llx\n", magic_num);
return -EINVAL;
}
- status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(mbox->barmem));
+ status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem));
if (status != OCTEP_CTRL_MBOX_STATUS_READY) {
pr_info("octep_ctrl_mbox : Firmware is not ready.\n");
return -EINVAL;
}
- mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(mbox->barmem));
+ mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(mbox->barmem));
- writeq(OCTEP_CTRL_MBOX_STATUS_INIT, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+ writeq(OCTEP_CTRL_MBOX_STATUS_INIT,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem));
- mbox->h2fq.elem_cnt = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(mbox->barmem));
- mbox->h2fq.elem_sz = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(mbox->barmem));
- mbox->h2fq.mask = (mbox->h2fq.elem_cnt - 1);
- mutex_init(&mbox->h2fq_lock);
+ mbox->h2fq.sz = readl(OCTEP_CTRL_MBOX_H2FQ_SZ(mbox->barmem));
+ mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD(mbox->barmem);
+ mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS(mbox->barmem);
+ mbox->h2fq.hw_q = mbox->barmem + OCTEP_CTRL_MBOX_TOTAL_INFO_SZ;
- mbox->f2hq.elem_cnt = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(mbox->barmem));
- mbox->f2hq.elem_sz = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(mbox->barmem));
- mbox->f2hq.mask = (mbox->f2hq.elem_cnt - 1);
- mutex_init(&mbox->f2hq_lock);
-
- mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(mbox->barmem);
- mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(mbox->barmem);
- mbox->h2fq.hw_q = mbox->barmem +
- OCTEP_CTRL_MBOX_INFO_SZ +
- OCTEP_CTRL_MBOX_H2FQ_INFO_SZ +
- OCTEP_CTRL_MBOX_F2HQ_INFO_SZ;
-
- mbox->f2hq.hw_prod = OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(mbox->barmem);
- mbox->f2hq.hw_cons = OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(mbox->barmem);
- mbox->f2hq.hw_q = mbox->h2fq.hw_q +
- ((mbox->h2fq.elem_sz + sizeof(union octep_ctrl_mbox_msg_hdr)) *
- mbox->h2fq.elem_cnt);
+ mbox->f2hq.sz = readl(OCTEP_CTRL_MBOX_F2HQ_SZ(mbox->barmem));
+ mbox->f2hq.hw_prod = OCTEP_CTRL_MBOX_F2HQ_PROD(mbox->barmem);
+ mbox->f2hq.hw_cons = OCTEP_CTRL_MBOX_F2HQ_CONS(mbox->barmem);
+ mbox->f2hq.hw_q = mbox->barmem +
+ OCTEP_CTRL_MBOX_TOTAL_INFO_SZ +
+ mbox->h2fq.sz;
/* ensure ready state is seen after everything is initialized */
wmb();
- writeq(OCTEP_CTRL_MBOX_STATUS_READY, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+ writeq(OCTEP_CTRL_MBOX_STATUS_READY,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem));
pr_info("Octep ctrl mbox : Init successful.\n");
return 0;
}
+static void
+octep_write_mbox_data(struct octep_ctrl_mbox_q *q, u32 *pi, u32 ci, void *buf, u32 w_sz)
+{
+ u8 __iomem *qbuf;
+ u32 cp_sz;
+
+ /* Assumption: Caller has ensured enough write space */
+ qbuf = (q->hw_q + *pi);
+ if (*pi < ci) {
+ /* copy entire w_sz */
+ memcpy_toio(qbuf, buf, w_sz);
+ *pi = octep_ctrl_mbox_circq_inc(*pi, w_sz, q->sz);
+ } else {
+ /* copy up to end of queue */
+ cp_sz = min((q->sz - *pi), w_sz);
+ memcpy_toio(qbuf, buf, cp_sz);
+ w_sz -= cp_sz;
+ *pi = octep_ctrl_mbox_circq_inc(*pi, cp_sz, q->sz);
+ if (w_sz) {
+ /* roll over and copy remaining w_sz */
+ buf += cp_sz;
+ qbuf = (q->hw_q + *pi);
+ memcpy_toio(qbuf, buf, w_sz);
+ *pi = octep_ctrl_mbox_circq_inc(*pi, w_sz, q->sz);
+ }
+ }
+}
+
int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
{
- unsigned long timeout = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS);
- unsigned long period = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_WAIT_MS);
+ struct octep_ctrl_mbox_msg_buf *sg;
struct octep_ctrl_mbox_q *q;
- unsigned long expire;
- u64 *mbuf, *word0;
- u8 __iomem *qidx;
- u16 pi, ci;
- int i;
+ u32 pi, ci, buf_sz, w_sz;
+ int s;
if (!mbox || !msg)
return -EINVAL;
+ if (readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem)) != OCTEP_CTRL_MBOX_STATUS_READY)
+ return -EIO;
+
+ mutex_lock(&mbox->h2fq_lock);
q = &mbox->h2fq;
pi = readl(q->hw_prod);
ci = readl(q->hw_cons);
- if (!octep_ctrl_mbox_circq_space(pi, ci, q->mask))
- return -ENOMEM;
-
- qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, pi);
- mbuf = (u64 *)msg->msg;
- word0 = &msg->hdr.word0;
-
- mutex_lock(&mbox->h2fq_lock);
- for (i = 1; i <= msg->hdr.sizew; i++)
- writeq(*mbuf++, (qidx + (i * 8)));
-
- writeq(*word0, qidx);
+ if (octep_ctrl_mbox_circq_space(pi, ci, q->sz) < (msg->hdr.s.sz + mbox_hdr_sz)) {
+ mutex_unlock(&mbox->h2fq_lock);
+ return -EAGAIN;
+ }
- pi = octep_ctrl_mbox_circq_inc(pi, q->mask);
+ octep_write_mbox_data(q, &pi, ci, (void *)&msg->hdr, mbox_hdr_sz);
+ buf_sz = msg->hdr.s.sz;
+ for (s = 0; ((s < msg->sg_num) && (buf_sz > 0)); s++) {
+ sg = &msg->sg_list[s];
+ w_sz = (sg->sz <= buf_sz) ? sg->sz : buf_sz;
+ octep_write_mbox_data(q, &pi, ci, sg->msg, w_sz);
+ buf_sz -= w_sz;
+ }
writel(pi, q->hw_prod);
mutex_unlock(&mbox->h2fq_lock);
- /* don't check for notification response */
- if (msg->hdr.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY)
- return 0;
-
- expire = jiffies + timeout;
- while (true) {
- *word0 = readq(qidx);
- if (msg->hdr.flags == OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP)
- break;
- schedule_timeout_interruptible(period);
- if (signal_pending(current) || time_after(jiffies, expire)) {
- pr_info("octep_ctrl_mbox: Timed out\n");
- return -EBUSY;
+ return 0;
+}
+
+static void
+octep_read_mbox_data(struct octep_ctrl_mbox_q *q, u32 pi, u32 *ci, void *buf, u32 r_sz)
+{
+ u8 __iomem *qbuf;
+ u32 cp_sz;
+
+ /* Assumption: Caller has ensured enough read space */
+ qbuf = (q->hw_q + *ci);
+ if (*ci < pi) {
+ /* copy entire r_sz */
+ memcpy_fromio(buf, qbuf, r_sz);
+ *ci = octep_ctrl_mbox_circq_inc(*ci, r_sz, q->sz);
+ } else {
+ /* copy up to end of queue */
+ cp_sz = min((q->sz - *ci), r_sz);
+ memcpy_fromio(buf, qbuf, cp_sz);
+ r_sz -= cp_sz;
+ *ci = octep_ctrl_mbox_circq_inc(*ci, cp_sz, q->sz);
+ if (r_sz) {
+ /* roll over and copy remaining r_sz */
+ buf += cp_sz;
+ qbuf = (q->hw_q + *ci);
+ memcpy_fromio(buf, qbuf, r_sz);
+ *ci = octep_ctrl_mbox_circq_inc(*ci, r_sz, q->sz);
}
}
- mbuf = (u64 *)msg->msg;
- for (i = 1; i <= msg->hdr.sizew; i++)
- *mbuf++ = readq(qidx + (i * 8));
-
- return 0;
}
int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
{
+ struct octep_ctrl_mbox_msg_buf *sg;
+ u32 pi, ci, r_sz, buf_sz, q_depth;
struct octep_ctrl_mbox_q *q;
- u32 count, pi, ci;
- u8 __iomem *qidx;
- u64 *mbuf;
- int i;
+ int s;
- if (!mbox || !msg)
- return -EINVAL;
+ if (readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem)) != OCTEP_CTRL_MBOX_STATUS_READY)
+ return -EIO;
+ mutex_lock(&mbox->f2hq_lock);
q = &mbox->f2hq;
pi = readl(q->hw_prod);
ci = readl(q->hw_cons);
- count = octep_ctrl_mbox_circq_depth(pi, ci, q->mask);
- if (!count)
- return -EAGAIN;
-
- qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, ci);
- mbuf = (u64 *)msg->msg;
- mutex_lock(&mbox->f2hq_lock);
-
- msg->hdr.word0 = readq(qidx);
- for (i = 1; i <= msg->hdr.sizew; i++)
- *mbuf++ = readq(qidx + (i * 8));
+ q_depth = octep_ctrl_mbox_circq_depth(pi, ci, q->sz);
+ if (q_depth < mbox_hdr_sz) {
+ mutex_unlock(&mbox->f2hq_lock);
+ return -EAGAIN;
+ }
- ci = octep_ctrl_mbox_circq_inc(ci, q->mask);
+ octep_read_mbox_data(q, pi, &ci, (void *)&msg->hdr, mbox_hdr_sz);
+ buf_sz = msg->hdr.s.sz;
+ for (s = 0; ((s < msg->sg_num) && (buf_sz > 0)); s++) {
+ sg = &msg->sg_list[s];
+ r_sz = (sg->sz <= buf_sz) ? sg->sz : buf_sz;
+ octep_read_mbox_data(q, pi, &ci, sg->msg, r_sz);
+ buf_sz -= r_sz;
+ }
writel(ci, q->hw_cons);
-
mutex_unlock(&mbox->f2hq_lock);
- if (msg->hdr.flags != OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ || !mbox->process_req)
- return 0;
-
- mbox->process_req(mbox->user_ctx, msg);
- mbuf = (u64 *)msg->msg;
- for (i = 1; i <= msg->hdr.sizew; i++)
- writeq(*mbuf++, (qidx + (i * 8)));
-
- writeq(msg->hdr.word0, qidx);
-
return 0;
}
@@ -227,18 +252,17 @@ int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox)
{
if (!mbox)
return -EINVAL;
+ if (!mbox->barmem)
+ return -EINVAL;
- writeq(OCTEP_CTRL_MBOX_STATUS_UNINIT,
- OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+ writeq(OCTEP_CTRL_MBOX_STATUS_INVALID,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem));
/* ensure uninit state is written before uninitialization */
wmb();
mutex_destroy(&mbox->h2fq_lock);
mutex_destroy(&mbox->f2hq_lock);
- writeq(OCTEP_CTRL_MBOX_STATUS_INVALID,
- OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
-
pr_info("Octep ctrl mbox : Uninit successful.\n");
return 0;
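Annotation: the octep_ctrl_mbox.c rewrite above turns the fixed-element mailbox rings into byte-oriented circular queues — indices advance modulo the queue size and copies wrap at the end of the buffer. The following is a minimal sketch of that ring arithmetic in plain memory (memcpy instead of memcpy_toio, no locking, and the caller is assumed to have checked for space); helper names mirror the ones in the hunks but this is not the driver code.

	#include <stdint.h>
	#include <string.h>

	static uint32_t circq_inc(uint32_t idx, uint32_t inc, uint32_t sz)
	{
		return (idx + inc) % sz;
	}

	/* bytes available to read, assuming pi and ci are both < sz */
	static uint32_t circq_depth(uint32_t pi, uint32_t ci, uint32_t sz)
	{
		return (pi + sz - ci) % sz;
	}

	/* bytes available to write */
	static uint32_t circq_space(uint32_t pi, uint32_t ci, uint32_t sz)
	{
		return sz - circq_depth(pi, ci, sz);
	}

	/* Copy w_sz bytes into the ring at *pi, wrapping at the end of the buffer.
	 * Caller must have verified circq_space() >= w_sz beforehand.
	 */
	static void circq_write(uint8_t *q, uint32_t sz, uint32_t *pi,
				const void *buf, uint32_t w_sz)
	{
		uint32_t cp = w_sz;

		if (*pi + w_sz > sz)
			cp = sz - *pi;			/* first chunk: up to end of queue */
		memcpy(q + *pi, buf, cp);
		*pi = circq_inc(*pi, cp, sz);
		if (w_sz - cp) {			/* roll over and copy the rest */
			memcpy(q + *pi, (const uint8_t *)buf + cp, w_sz - cp);
			*pi = circq_inc(*pi, w_sz - cp, sz);
		}
	}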
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
index 2dc5753cfec6..9c4ff0fba6a0 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
@@ -27,50 +27,39 @@
* |-------------------------------------------|
* |producer index (4 bytes) |
* |consumer index (4 bytes) |
- * |element size (4 bytes) |
- * |element count (4 bytes) |
+ * |max element size (4 bytes) |
+ * |reserved (4 bytes) |
* |===========================================|
* |Fw to Host Queue info (16 bytes) |
* |-------------------------------------------|
* |producer index (4 bytes) |
* |consumer index (4 bytes) |
- * |element size (4 bytes) |
- * |element count (4 bytes) |
+ * |max element size (4 bytes) |
+ * |reserved (4 bytes) |
* |===========================================|
- * |Host to Fw Queue |
+ * |Host to Fw Queue ((total size-288/2) bytes)|
* |-------------------------------------------|
- * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes|
+ * | |
* |===========================================|
* |===========================================|
- * |Fw to Host Queue |
+ * |Fw to Host Queue ((total size-288/2) bytes)|
* |-------------------------------------------|
- * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes|
+ * | |
* |===========================================|
*/
#define OCTEP_CTRL_MBOX_MAGIC_NUMBER 0xdeaddeadbeefbeefull
-/* Size of mbox info in bytes */
-#define OCTEP_CTRL_MBOX_INFO_SZ 256
-/* Size of mbox host to target queue info in bytes */
-#define OCTEP_CTRL_MBOX_H2FQ_INFO_SZ 16
-/* Size of mbox target to host queue info in bytes */
-#define OCTEP_CTRL_MBOX_F2HQ_INFO_SZ 16
-/* Size of mbox queue in bytes */
-#define OCTEP_CTRL_MBOX_Q_SZ(sz, cnt) (((sz) + 8) * (cnt))
-/* Size of mbox in bytes */
-#define OCTEP_CTRL_MBOX_SZ(hsz, hcnt, fsz, fcnt) (OCTEP_CTRL_MBOX_INFO_SZ + \
- OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + \
- OCTEP_CTRL_MBOX_F2HQ_INFO_SZ + \
- OCTEP_CTRL_MBOX_Q_SZ(hsz, hcnt) + \
- OCTEP_CTRL_MBOX_Q_SZ(fsz, fcnt))
-
/* Valid request message */
#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ BIT(0)
/* Valid response message */
#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP BIT(1)
/* Valid notification, no response required */
#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY BIT(2)
+/* Valid custom message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_CUSTOM BIT(3)
+
+#define OCTEP_CTRL_MBOX_MSG_DESC_MAX 4
enum octep_ctrl_mbox_status {
OCTEP_CTRL_MBOX_STATUS_INVALID = 0,
@@ -81,31 +70,48 @@ enum octep_ctrl_mbox_status {
/* mbox message */
union octep_ctrl_mbox_msg_hdr {
- u64 word0;
+ u64 words[2];
struct {
+ /* must be 0 */
+ u16 reserved1:15;
+ /* vf_idx is valid if 1 */
+ u16 is_vf:1;
+ /* sender vf index 0-(n-1), 0 if (is_vf==0) */
+ u16 vf_idx;
+ /* total size of message excluding header */
+ u32 sz;
/* OCTEP_CTRL_MBOX_MSG_HDR_FLAG_* */
u32 flags;
- /* size of message in words excluding header */
- u32 sizew;
- };
+ /* identifier to match responses */
+ u16 msg_id;
+ u16 reserved2;
+ } s;
+};
+
+/* mbox message buffer */
+struct octep_ctrl_mbox_msg_buf {
+ u32 reserved1;
+ u16 reserved2;
+ /* size of buffer */
+ u16 sz;
+ /* pointer to message buffer */
+ void *msg;
};
/* mbox message */
struct octep_ctrl_mbox_msg {
/* mbox transaction header */
union octep_ctrl_mbox_msg_hdr hdr;
- /* pointer to message buffer */
- void *msg;
+ /* number of sg buffer's */
+ int sg_num;
+ /* message buffer's */
+ struct octep_ctrl_mbox_msg_buf sg_list[OCTEP_CTRL_MBOX_MSG_DESC_MAX];
};
/* Mbox queue */
struct octep_ctrl_mbox_q {
- /* q element size, should be aligned to unsigned long */
- u16 elem_sz;
- /* q element count, should be power of 2 */
- u16 elem_cnt;
- /* q mask */
- u16 mask;
+ /* size of queue buffer */
+ u32 sz;
/* producer address in bar mem */
u8 __iomem *hw_prod;
/* consumer address in bar mem */
@@ -115,16 +121,10 @@ struct octep_ctrl_mbox_q {
};
struct octep_ctrl_mbox {
- /* host driver version */
- u64 version;
/* size of bar memory */
u32 barmem_sz;
/* pointer to BAR memory */
u8 __iomem *barmem;
- /* user context for callback, can be null */
- void *user_ctx;
- /* callback handler for processing request, called from octep_ctrl_mbox_recv */
- int (*process_req)(void *user_ctx, struct octep_ctrl_mbox_msg *msg);
/* host-to-fw queue */
struct octep_ctrl_mbox_q h2fq;
/* fw-to-host queue */
@@ -146,6 +146,8 @@ int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox);
/* Send mbox message.
*
* @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ * Caller should fill msg.sz and msg.desc.sz for each message.
*
* return value: 0 on success, -errno on failure.
*/
@@ -154,6 +156,8 @@ int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_ms
/* Retrieve mbox message.
*
* @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ * Caller should fill msg.sz and msg.desc.sz for each message.
*
* return value: 0 on success, -errno on failure.
*/
@@ -161,7 +165,7 @@ int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_ms
/* Uninitialize control mbox.
*
- * @param ep: non-null pointer to struct octep_ctrl_mbox.
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
*
* return value: 0 on success, -errno on failure.
*/
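Annotation: per the updated layout comment in octep_ctrl_mbox.h, the shared BAR region now holds a 256-byte info block, two 16-byte queue-info blocks, and then the two byte queues. A sketch of how the queue offsets fall out of a given barmem size, assuming the two queues split the remaining space evenly as the layout comment indicates (in the driver the queue sizes are actually read back from the queue-info blocks); the struct and function are illustrative only.

	#include <stddef.h>

	#define MBOX_INFO_SZ		256
	#define MBOX_H2FQ_INFO_SZ	16
	#define MBOX_F2HQ_INFO_SZ	16
	#define MBOX_TOTAL_INFO_SZ	(MBOX_INFO_SZ + MBOX_H2FQ_INFO_SZ + MBOX_F2HQ_INFO_SZ)

	struct mbox_layout {
		size_t h2fq_off, h2fq_sz;	/* host-to-firmware queue */
		size_t f2hq_off, f2hq_sz;	/* firmware-to-host queue */
	};

	static struct mbox_layout mbox_layout(size_t barmem_sz)
	{
		size_t qsz = (barmem_sz - MBOX_TOTAL_INFO_SZ) / 2;

		return (struct mbox_layout){
			.h2fq_off = MBOX_TOTAL_INFO_SZ,
			.h2fq_sz  = qsz,
			.f2hq_off = MBOX_TOTAL_INFO_SZ + qsz,
			.f2hq_sz  = qsz,
		};
	}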
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
index 7c00c896ab98..1cc6af2feb38 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -8,187 +8,328 @@
#include <linux/types.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
+#include <linux/wait.h>
#include "octep_config.h"
#include "octep_main.h"
#include "octep_ctrl_net.h"
-int octep_get_link_status(struct octep_device *oct)
+static const u32 req_hdr_sz = sizeof(union octep_ctrl_net_req_hdr);
+static const u32 mtu_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mtu);
+static const u32 mac_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mac);
+static const u32 state_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_state);
+static const u32 link_info_sz = sizeof(struct octep_ctrl_net_link_info);
+static atomic_t ctrl_net_msg_id;
+
+static void init_send_req(struct octep_ctrl_mbox_msg *msg, void *buf,
+ u16 sz, int vfid)
{
- struct octep_ctrl_net_h2f_req req = {};
- struct octep_ctrl_net_h2f_resp *resp;
- struct octep_ctrl_mbox_msg msg = {};
- int err;
+ msg->hdr.s.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg->hdr.s.msg_id = atomic_inc_return(&ctrl_net_msg_id) &
+ GENMASK(sizeof(msg->hdr.s.msg_id) * BITS_PER_BYTE, 0);
+ msg->hdr.s.sz = req_hdr_sz + sz;
+ msg->sg_num = 1;
+ msg->sg_list[0].msg = buf;
+ msg->sg_list[0].sz = msg->hdr.s.sz;
+ if (vfid != OCTEP_CTRL_NET_INVALID_VFID) {
+ msg->hdr.s.is_vf = 1;
+ msg->hdr.s.vf_idx = vfid;
+ }
+}
+
+static int octep_send_mbox_req(struct octep_device *oct,
+ struct octep_ctrl_net_wait_data *d,
+ bool wait_for_response)
+{
+ int err, ret;
+
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &d->msg);
+ if (err < 0)
+ return err;
+
+ if (!wait_for_response)
+ return 0;
+
+ d->done = 0;
+ INIT_LIST_HEAD(&d->list);
+ list_add_tail(&d->list, &oct->ctrl_req_wait_list);
+ ret = wait_event_interruptible_timeout(oct->ctrl_req_wait_q,
+ (d->done != 0),
+ jiffies + msecs_to_jiffies(500));
+ list_del(&d->list);
+ if (ret == 0 || ret == 1)
+ return -EAGAIN;
+
+ /**
+ * (ret == 0) cond = false && timeout, return 0
+ * (ret < 0) interrupted by signal, return 0
+ * (ret == 1) cond = true && timeout, return 1
+ * (ret >= 1) cond = true && !timeout, return 1
+ */
+
+ if (d->data.resp.hdr.s.reply != OCTEP_CTRL_NET_REPLY_OK)
+ return -EAGAIN;
+
+ return 0;
+}
- req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
- req.link.cmd = OCTEP_CTRL_NET_CMD_GET;
+int octep_ctrl_net_init(struct octep_device *oct)
+{
+ struct octep_ctrl_mbox *ctrl_mbox;
+ struct pci_dev *pdev = oct->pdev;
+ int ret;
+
+ init_waitqueue_head(&oct->ctrl_req_wait_q);
+ INIT_LIST_HEAD(&oct->ctrl_req_wait_list);
+
+ /* Initialize control mbox */
+ ctrl_mbox = &oct->ctrl_mbox;
+ ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf);
+ ret = octep_ctrl_mbox_init(ctrl_mbox);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize control mbox\n");
+ return ret;
+ }
+ oct->ctrl_mbox_ifstats_offset = ctrl_mbox->barmem_sz;
+
+ return 0;
+}
- msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
- msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
- msg.msg = &req;
- err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
- if (err)
+int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+ int err;
+
+ init_send_req(&d.msg, (void *)req, state_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req->link.cmd = OCTEP_CTRL_NET_CMD_GET;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
return err;
- resp = (struct octep_ctrl_net_h2f_resp *)&req;
- return resp->link.state;
+ return d.data.resp.link.state;
}
-void octep_set_link_status(struct octep_device *oct, bool up)
+int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
+ bool wait_for_response)
{
- struct octep_ctrl_net_h2f_req req = {};
- struct octep_ctrl_mbox_msg msg = {};
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
- req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
- req.link.cmd = OCTEP_CTRL_NET_CMD_SET;
- req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN;
+ init_send_req(&d.msg, req, state_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req->link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->link.state = (up) ? OCTEP_CTRL_NET_STATE_UP :
+ OCTEP_CTRL_NET_STATE_DOWN;
- msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
- msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
- msg.msg = &req;
- octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ return octep_send_mbox_req(oct, &d, wait_for_response);
}
-void octep_set_rx_state(struct octep_device *oct, bool up)
+int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
+ bool wait_for_response)
{
- struct octep_ctrl_net_h2f_req req = {};
- struct octep_ctrl_mbox_msg msg = {};
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
- req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_RX_STATE;
- req.link.cmd = OCTEP_CTRL_NET_CMD_SET;
- req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN;
+ init_send_req(&d.msg, req, state_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_RX_STATE;
+ req->link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->link.state = (up) ? OCTEP_CTRL_NET_STATE_UP :
+ OCTEP_CTRL_NET_STATE_DOWN;
- msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
- msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
- msg.msg = &req;
- octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ return octep_send_mbox_req(oct, &d, wait_for_response);
}
-int octep_get_mac_addr(struct octep_device *oct, u8 *addr)
+int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr)
{
- struct octep_ctrl_net_h2f_req req = {};
- struct octep_ctrl_net_h2f_resp *resp;
- struct octep_ctrl_mbox_msg msg = {};
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
int err;
- req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
- req.link.cmd = OCTEP_CTRL_NET_CMD_GET;
-
- msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
- msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW;
- msg.msg = &req;
- err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
- if (err)
+ init_send_req(&d.msg, req, mac_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req->link.cmd = OCTEP_CTRL_NET_CMD_GET;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
return err;
- resp = (struct octep_ctrl_net_h2f_resp *)&req;
- memcpy(addr, resp->mac.addr, ETH_ALEN);
+ memcpy(addr, d.data.resp.mac.addr, ETH_ALEN);
- return err;
+ return 0;
}
-int octep_set_mac_addr(struct octep_device *oct, u8 *addr)
+int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
+ bool wait_for_response)
{
- struct octep_ctrl_net_h2f_req req = {};
- struct octep_ctrl_mbox_msg msg = {};
-
- req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
- req.mac.cmd = OCTEP_CTRL_NET_CMD_SET;
- memcpy(&req.mac.addr, addr, ETH_ALEN);
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
- msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
- msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW;
- msg.msg = &req;
+ init_send_req(&d.msg, req, mac_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req->mac.cmd = OCTEP_CTRL_NET_CMD_SET;
+ memcpy(&req->mac.addr, addr, ETH_ALEN);
- return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ return octep_send_mbox_req(oct, &d, wait_for_response);
}
-int octep_set_mtu(struct octep_device *oct, int mtu)
+int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu,
+ bool wait_for_response)
{
- struct octep_ctrl_net_h2f_req req = {};
- struct octep_ctrl_mbox_msg msg = {};
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
- req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU;
- req.mtu.cmd = OCTEP_CTRL_NET_CMD_SET;
- req.mtu.val = mtu;
+ init_send_req(&d.msg, req, mtu_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU;
+ req->mtu.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->mtu.val = mtu;
- msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
- msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MTU_REQ_SZW;
- msg.msg = &req;
-
- return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ return octep_send_mbox_req(oct, &d, wait_for_response);
}
-int octep_get_if_stats(struct octep_device *oct)
+int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
+ struct octep_iface_rx_stats *rx_stats,
+ struct octep_iface_tx_stats *tx_stats)
{
- void __iomem *iface_rx_stats;
- void __iomem *iface_tx_stats;
- struct octep_ctrl_net_h2f_req req = {};
- struct octep_ctrl_mbox_msg msg = {};
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+ struct octep_ctrl_net_h2f_resp *resp;
int err;
- req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS;
- req.mac.cmd = OCTEP_CTRL_NET_CMD_GET;
- req.get_stats.offset = oct->ctrl_mbox_ifstats_offset;
-
- msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
- msg.hdr.sizew = OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW;
- msg.msg = &req;
- err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
- if (err)
+ init_send_req(&d.msg, req, 0, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
return err;
- iface_rx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset;
- iface_tx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset +
- sizeof(struct octep_iface_rx_stats);
- memcpy_fromio(&oct->iface_rx_stats, iface_rx_stats, sizeof(struct octep_iface_rx_stats));
- memcpy_fromio(&oct->iface_tx_stats, iface_tx_stats, sizeof(struct octep_iface_tx_stats));
-
- return err;
+ resp = &d.data.resp;
+ memcpy(rx_stats, &resp->if_stats.rx_stats, sizeof(struct octep_iface_rx_stats));
+ memcpy(tx_stats, &resp->if_stats.tx_stats, sizeof(struct octep_iface_tx_stats));
+ return 0;
}
-int octep_get_link_info(struct octep_device *oct)
+int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid,
+ struct octep_iface_link_info *link_info)
{
- struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
struct octep_ctrl_net_h2f_resp *resp;
- struct octep_ctrl_mbox_msg msg = {};
int err;
- req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
- req.mac.cmd = OCTEP_CTRL_NET_CMD_GET;
-
- msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
- msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW;
- msg.msg = &req;
- err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
- if (err)
+ init_send_req(&d.msg, req, link_info_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req->link_info.cmd = OCTEP_CTRL_NET_CMD_GET;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
return err;
- resp = (struct octep_ctrl_net_h2f_resp *)&req;
- oct->link_info.supported_modes = resp->link_info.supported_modes;
- oct->link_info.advertised_modes = resp->link_info.advertised_modes;
- oct->link_info.autoneg = resp->link_info.autoneg;
- oct->link_info.pause = resp->link_info.pause;
- oct->link_info.speed = resp->link_info.speed;
+ resp = &d.data.resp;
+ link_info->supported_modes = resp->link_info.supported_modes;
+ link_info->advertised_modes = resp->link_info.advertised_modes;
+ link_info->autoneg = resp->link_info.autoneg;
+ link_info->pause = resp->link_info.pause;
+ link_info->speed = resp->link_info.speed;
+
+ return 0;
+}
+
+int octep_ctrl_net_set_link_info(struct octep_device *oct, int vfid,
+ struct octep_iface_link_info *link_info,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+
+ init_send_req(&d.msg, req, link_info_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req->link_info.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->link_info.info.advertised_modes = link_info->advertised_modes;
+ req->link_info.info.autoneg = link_info->autoneg;
+ req->link_info.info.pause = link_info->pause;
+ req->link_info.info.speed = link_info->speed;
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
+static void process_mbox_resp(struct octep_device *oct,
+ struct octep_ctrl_mbox_msg *msg)
+{
+ struct octep_ctrl_net_wait_data *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &oct->ctrl_req_wait_list, list) {
+ if (pos->msg.hdr.s.msg_id == msg->hdr.s.msg_id) {
+ memcpy(&pos->data.resp,
+ msg->sg_list[0].msg,
+ msg->hdr.s.sz);
+ pos->done = 1;
+ wake_up_interruptible_all(&oct->ctrl_req_wait_q);
+ break;
+ }
+ }
+}
- return err;
+static int process_mbox_notify(struct octep_device *oct,
+ struct octep_ctrl_mbox_msg *msg)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_ctrl_net_f2h_req *req;
+
+ req = (struct octep_ctrl_net_f2h_req *)msg->sg_list[0].msg;
+ switch (req->hdr.s.cmd) {
+ case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
+ if (netif_running(netdev)) {
+ if (req->link.state) {
+ dev_info(&oct->pdev->dev, "netif_carrier_on\n");
+ netif_carrier_on(netdev);
+ } else {
+ dev_info(&oct->pdev->dev, "netif_carrier_off\n");
+ netif_carrier_off(netdev);
+ }
+ }
+ break;
+ default:
+ pr_info("Unknown mbox req : %u\n", req->hdr.s.cmd);
+ break;
+ }
+
+ return 0;
}
-int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info)
+void octep_ctrl_net_recv_fw_messages(struct octep_device *oct)
{
- struct octep_ctrl_net_h2f_req req = {};
- struct octep_ctrl_mbox_msg msg = {};
+ static u16 msg_sz = sizeof(union octep_ctrl_net_max_data);
+ union octep_ctrl_net_max_data data = {0};
+ struct octep_ctrl_mbox_msg msg = {0};
+ int ret;
+
+ msg.hdr.s.sz = msg_sz;
+ msg.sg_num = 1;
+ msg.sg_list[0].sz = msg_sz;
+ msg.sg_list[0].msg = &data;
+ while (true) {
+ /* mbox will overwrite msg.hdr.s.sz so initialize it */
+ msg.hdr.s.sz = msg_sz;
+ ret = octep_ctrl_mbox_recv(&oct->ctrl_mbox, (struct octep_ctrl_mbox_msg *)&msg);
+ if (ret < 0)
+ break;
+
+ if (msg.hdr.s.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP)
+ process_mbox_resp(oct, &msg);
+ else if (msg.hdr.s.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY)
+ process_mbox_notify(oct, &msg);
+ }
+}
+
+int octep_ctrl_net_uninit(struct octep_device *oct)
+{
+ struct octep_ctrl_net_wait_data *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &oct->ctrl_req_wait_list, list)
+ pos->done = 1;
- req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
- req.link_info.cmd = OCTEP_CTRL_NET_CMD_SET;
- req.link_info.info.advertised_modes = link_info->advertised_modes;
- req.link_info.info.autoneg = link_info->autoneg;
- req.link_info.info.pause = link_info->pause;
- req.link_info.info.speed = link_info->speed;
+ wake_up_interruptible_all(&oct->ctrl_req_wait_q);
- msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
- msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW;
- msg.msg = &req;
+ octep_ctrl_mbox_uninit(&oct->ctrl_mbox);
- return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ return 0;
}
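Annotation: the octep_ctrl_net.c rework above makes every control command an asynchronous mailbox request — the sender tags the request with a fresh msg_id, links a wait entry onto ctrl_req_wait_list and sleeps, while the worker draining the firmware-to-host queue matches responses by msg_id, copies the payload into the waiter's buffer and wakes it. A minimal single-threaded sketch of that matching step, with list and field names invented for illustration:

	#include <stdint.h>
	#include <string.h>
	#include <stddef.h>

	struct pending_req {
		struct pending_req *next;
		uint16_t msg_id;
		int done;
		uint8_t resp[64];	/* response payload buffer */
	};

	/* Deliver a received response to the waiter with a matching msg_id. */
	static void deliver_response(struct pending_req *wait_list, uint16_t msg_id,
				     const void *payload, size_t sz)
	{
		for (struct pending_req *p = wait_list; p; p = p->next) {
			if (p->msg_id != msg_id)
				continue;
			if (sz > sizeof(p->resp))
				sz = sizeof(p->resp);
			memcpy(p->resp, payload, sz);
			p->done = 1;	/* in the driver: wake_up_interruptible_all() */
			break;
		}
	}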
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
index f23b58381322..37880dd79116 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -7,6 +7,8 @@
#ifndef __OCTEP_CTRL_NET_H__
#define __OCTEP_CTRL_NET_H__
+#define OCTEP_CTRL_NET_INVALID_VFID (-1)
+
/* Supported commands */
enum octep_ctrl_net_cmd {
OCTEP_CTRL_NET_CMD_GET = 0,
@@ -45,15 +47,18 @@ enum octep_ctrl_net_f2h_cmd {
OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS,
};
-struct octep_ctrl_net_req_hdr {
- /* sender id */
- u16 sender;
- /* receiver id */
- u16 receiver;
- /* octep_ctrl_net_h2t_cmd */
- u16 cmd;
- /* reserved */
- u16 rsvd0;
+union octep_ctrl_net_req_hdr {
+ u64 words[1];
+ struct {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+ /* octep_ctrl_net_h2t_cmd */
+ u16 cmd;
+ /* reserved */
+ u16 rsvd0;
+ } s;
};
/* get/set mtu request */
@@ -72,12 +77,6 @@ struct octep_ctrl_net_h2f_req_cmd_mac {
u8 addr[ETH_ALEN];
};
-/* get if_stats, xstats, q_stats request */
-struct octep_ctrl_net_h2f_req_cmd_get_stats {
- /* offset into barmem where fw should copy over stats */
- u32 offset;
-};
-
/* get/set link state, rx state */
struct octep_ctrl_net_h2f_req_cmd_state {
/* enum octep_ctrl_net_cmd */
@@ -110,26 +109,28 @@ struct octep_ctrl_net_h2f_req_cmd_link_info {
/* Host to fw request data */
struct octep_ctrl_net_h2f_req {
- struct octep_ctrl_net_req_hdr hdr;
+ union octep_ctrl_net_req_hdr hdr;
union {
struct octep_ctrl_net_h2f_req_cmd_mtu mtu;
struct octep_ctrl_net_h2f_req_cmd_mac mac;
- struct octep_ctrl_net_h2f_req_cmd_get_stats get_stats;
struct octep_ctrl_net_h2f_req_cmd_state link;
struct octep_ctrl_net_h2f_req_cmd_state rx;
struct octep_ctrl_net_h2f_req_cmd_link_info link_info;
};
} __packed;
-struct octep_ctrl_net_resp_hdr {
- /* sender id */
- u16 sender;
- /* receiver id */
- u16 receiver;
- /* octep_ctrl_net_h2t_cmd */
- u16 cmd;
- /* octep_ctrl_net_reply */
- u16 reply;
+union octep_ctrl_net_resp_hdr {
+ u64 words[1];
+ struct {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+ /* octep_ctrl_net_h2t_cmd */
+ u16 cmd;
+ /* octep_ctrl_net_reply */
+ u16 reply;
+ } s;
};
/* get mtu response */
@@ -144,6 +145,12 @@ struct octep_ctrl_net_h2f_resp_cmd_mac {
u8 addr[ETH_ALEN];
};
+/* get if_stats, xstats, q_stats request */
+struct octep_ctrl_net_h2f_resp_cmd_get_stats {
+ struct octep_iface_rx_stats rx_stats;
+ struct octep_iface_tx_stats tx_stats;
+};
+
/* get link state, rx state response */
struct octep_ctrl_net_h2f_resp_cmd_state {
/* enum octep_ctrl_net_state */
@@ -152,10 +159,11 @@ struct octep_ctrl_net_h2f_resp_cmd_state {
/* Host to fw response data */
struct octep_ctrl_net_h2f_resp {
- struct octep_ctrl_net_resp_hdr hdr;
+ union octep_ctrl_net_resp_hdr hdr;
union {
struct octep_ctrl_net_h2f_resp_cmd_mtu mtu;
struct octep_ctrl_net_h2f_resp_cmd_mac mac;
+ struct octep_ctrl_net_h2f_resp_cmd_get_stats if_stats;
struct octep_ctrl_net_h2f_resp_cmd_state link;
struct octep_ctrl_net_h2f_resp_cmd_state rx;
struct octep_ctrl_net_link_info link_info;
@@ -170,7 +178,7 @@ struct octep_ctrl_net_f2h_req_cmd_state {
/* Fw to host request data */
struct octep_ctrl_net_f2h_req {
- struct octep_ctrl_net_req_hdr hdr;
+ union octep_ctrl_net_req_hdr hdr;
union {
struct octep_ctrl_net_f2h_req_cmd_state link;
};
@@ -178,122 +186,152 @@ struct octep_ctrl_net_f2h_req {
/* Fw to host response data */
struct octep_ctrl_net_f2h_resp {
- struct octep_ctrl_net_resp_hdr hdr;
+ union octep_ctrl_net_resp_hdr hdr;
};
-/* Size of host to fw octep_ctrl_mbox queue element */
-union octep_ctrl_net_h2f_data_sz {
+/* Max data size to be transferred over mbox */
+union octep_ctrl_net_max_data {
struct octep_ctrl_net_h2f_req h2f_req;
struct octep_ctrl_net_h2f_resp h2f_resp;
-};
-
-/* Size of fw to host octep_ctrl_mbox queue element */
-union octep_ctrl_net_f2h_data_sz {
struct octep_ctrl_net_f2h_req f2h_req;
struct octep_ctrl_net_f2h_resp f2h_resp;
};
-/* size of host to fw data in words */
-#define OCTEP_CTRL_NET_H2F_DATA_SZW ((sizeof(union octep_ctrl_net_h2f_data_sz)) / \
- (sizeof(unsigned long)))
-
-/* size of fw to host data in words */
-#define OCTEP_CTRL_NET_F2H_DATA_SZW ((sizeof(union octep_ctrl_net_f2h_data_sz)) / \
- (sizeof(unsigned long)))
-
-/* size in words of get/set mtu request */
-#define OCTEP_CTRL_NET_H2F_MTU_REQ_SZW 2
-/* size in words of get/set mac request */
-#define OCTEP_CTRL_NET_H2F_MAC_REQ_SZW 2
-/* size in words of get stats request */
-#define OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW 2
-/* size in words of get/set state request */
-#define OCTEP_CTRL_NET_H2F_STATE_REQ_SZW 2
-/* size in words of get/set link info request */
-#define OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW 4
-
-/* size in words of get mtu response */
-#define OCTEP_CTRL_NET_H2F_GET_MTU_RESP_SZW 2
-/* size in words of set mtu response */
-#define OCTEP_CTRL_NET_H2F_SET_MTU_RESP_SZW 1
-/* size in words of get mac response */
-#define OCTEP_CTRL_NET_H2F_GET_MAC_RESP_SZW 2
-/* size in words of set mac response */
-#define OCTEP_CTRL_NET_H2F_SET_MAC_RESP_SZW 1
-/* size in words of get state request */
-#define OCTEP_CTRL_NET_H2F_GET_STATE_RESP_SZW 2
-/* size in words of set state request */
-#define OCTEP_CTRL_NET_H2F_SET_STATE_RESP_SZW 1
-/* size in words of get link info request */
-#define OCTEP_CTRL_NET_H2F_GET_LINK_INFO_RESP_SZW 4
-/* size in words of set link info request */
-#define OCTEP_CTRL_NET_H2F_SET_LINK_INFO_RESP_SZW 1
+struct octep_ctrl_net_wait_data {
+ struct list_head list;
+ int done;
+ struct octep_ctrl_mbox_msg msg;
+ union {
+ struct octep_ctrl_net_h2f_req req;
+ struct octep_ctrl_net_h2f_resp resp;
+ } data;
+};
+
+/** Initialize data for ctrl net.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on error.
+ */
+int octep_ctrl_net_init(struct octep_device *oct);
/** Get link status from firmware.
*
* @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
*
* return value: link status 0=down, 1=up.
*/
-int octep_get_link_status(struct octep_device *oct);
+int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid);
/** Set link status in firmware.
*
* @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
* @param up: boolean status.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
*/
-void octep_set_link_status(struct octep_device *oct, bool up);
+int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
+ bool wait_for_response);
/** Set rx state in firmware.
*
* @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
* @param up: boolean status.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
*/
-void octep_set_rx_state(struct octep_device *oct, bool up);
+int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
+ bool wait_for_response);
/** Get mac address from firmware.
*
* @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
* @param addr: non-null pointer to mac address.
*
* return value: 0 on success, -errno on failure.
*/
-int octep_get_mac_addr(struct octep_device *oct, u8 *addr);
+int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr);
/** Set mac address in firmware.
*
* @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
* @param addr: non-null pointer to mac address.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
*/
-int octep_set_mac_addr(struct octep_device *oct, u8 *addr);
+int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
+ bool wait_for_response);
/** Set mtu in firmware.
*
* @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
* @param mtu: mtu.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
*/
-int octep_set_mtu(struct octep_device *oct, int mtu);
+int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu,
+ bool wait_for_response);
/** Get interface statistics from firmware.
*
* @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param rx_stats: non-null pointer to struct octep_iface_rx_stats.
+ * @param tx_stats: non-null pointer to struct octep_iface_tx_stats.
*
* return value: 0 on success, -errno on failure.
*/
-int octep_get_if_stats(struct octep_device *oct);
+int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
+ struct octep_iface_rx_stats *rx_stats,
+ struct octep_iface_tx_stats *tx_stats);
/** Get link info from firmware.
*
* @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param link_info: non-null pointer to struct octep_iface_link_info.
*
* return value: 0 on success, -errno on failure.
*/
-int octep_get_link_info(struct octep_device *oct);
+int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid,
+ struct octep_iface_link_info *link_info);
/** Set link info in firmware.
*
* @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param link_info: non-null pointer to struct octep_iface_link_info.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_set_link_info(struct octep_device *oct,
+ int vfid,
+ struct octep_iface_link_info *link_info,
+ bool wait_for_response);
+
+/** Poll for firmware messages and process them.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ */
+void octep_ctrl_net_recv_fw_messages(struct octep_device *oct);
+
+/** Uninitialize data for ctrl net.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on error.
*/
-int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info);
+int octep_ctrl_net_uninit(struct octep_device *oct);
#endif /* __OCTEP_CTRL_NET_H__ */
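
The reworked control-net interface above routes every request through the mailbox with an explicit VF index and an optional wait for the firmware response. A minimal sketch of a caller, assuming an already initialised octep_device; the helper name octep_example_apply_mtu is hypothetical, while octep_ctrl_net_set_mtu(), octep_ctrl_net_get_if_stats() and OCTEP_CTRL_NET_INVALID_VFID come from the header above:

	/* Hypothetical caller, not part of the patch: set the PF's own MTU and
	 * refresh the cached interface counters afterwards.
	 */
	static int octep_example_apply_mtu(struct octep_device *oct, int mtu)
	{
		int err;

		/* wait_for_response = true: block until firmware acks the request */
		err = octep_ctrl_net_set_mtu(oct, OCTEP_CTRL_NET_INVALID_VFID, mtu, true);
		if (err)
			return err;

		return octep_ctrl_net_get_if_stats(oct, OCTEP_CTRL_NET_INVALID_VFID,
						   &oct->iface_rx_stats,
						   &oct->iface_tx_stats);
	}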
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
index 87ef129b269a..7d0124b283da 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -150,9 +150,12 @@ octep_get_ethtool_stats(struct net_device *netdev,
rx_packets = 0;
rx_bytes = 0;
- octep_get_if_stats(oct);
iface_tx_stats = &oct->iface_tx_stats;
iface_rx_stats = &oct->iface_rx_stats;
+ octep_ctrl_net_get_if_stats(oct,
+ OCTEP_CTRL_NET_INVALID_VFID,
+ iface_rx_stats,
+ iface_tx_stats);
for (q = 0; q < oct->num_oqs; q++) {
struct octep_iq *iq = oct->iq[q];
@@ -283,11 +286,11 @@ static int octep_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_zero_link_mode(cmd, supported);
ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- octep_get_link_info(oct);
+ link_info = &oct->link_info;
+ octep_ctrl_net_get_link_info(oct, OCTEP_CTRL_NET_INVALID_VFID, link_info);
advertised_modes = oct->link_info.advertised_modes;
supported_modes = oct->link_info.supported_modes;
- link_info = &oct->link_info;
OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported);
OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising);
@@ -439,7 +442,8 @@ static int octep_set_link_ksettings(struct net_device *netdev,
link_info_new.speed = cmd->base.speed;
link_info_new.autoneg = autoneg;
- err = octep_set_link_info(oct, &link_info_new);
+ err = octep_ctrl_net_set_link_info(oct, OCTEP_CTRL_NET_INVALID_VFID,
+ &link_info_new, true);
if (err)
return err;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 5a898fb88e37..e1853da280f9 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -8,7 +8,6 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
@@ -18,6 +17,7 @@
#include "octep_main.h"
#include "octep_ctrl_net.h"
+#define OCTEP_INTR_POLL_TIME_MSECS 100
struct workqueue_struct *octep_wq;
/* Supported Devices */
@@ -507,11 +507,11 @@ static int octep_open(struct net_device *netdev)
octep_napi_enable(oct);
oct->link_info.admin_up = 1;
- octep_set_rx_state(oct, true);
-
- ret = octep_get_link_status(oct);
- if (!ret)
- octep_set_link_status(oct, true);
+ octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, true,
+ false);
+ octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, true,
+ false);
+ oct->poll_non_ioq_intr = false;
/* Enable the input and output queues for this Octeon device */
oct->hw_ops.enable_io_queues(oct);
@@ -521,7 +521,7 @@ static int octep_open(struct net_device *netdev)
octep_oq_dbell_init(oct);
- ret = octep_get_link_status(oct);
+ ret = octep_ctrl_net_get_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID);
if (ret > 0)
octep_link_up(netdev);
@@ -551,14 +551,16 @@ static int octep_stop(struct net_device *netdev)
netdev_info(netdev, "Stopping the device ...\n");
+ octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, false,
+ false);
+ octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, false,
+ false);
+
/* Stop Tx from stack */
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- octep_set_link_status(oct, false);
- octep_set_rx_state(oct, false);
-
oct->link_info.admin_up = 0;
oct->link_info.oper_up = 0;
@@ -573,6 +575,11 @@ static int octep_stop(struct net_device *netdev)
oct->hw_ops.reset_io_queues(oct);
octep_free_oqs(oct);
octep_free_iqs(oct);
+
+ oct->poll_non_ioq_intr = true;
+ queue_delayed_work(octep_wq, &oct->intr_poll_task,
+ msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
+
netdev_info(netdev, "Device stopped !!\n");
return 0;
}
@@ -755,7 +762,12 @@ static void octep_get_stats64(struct net_device *netdev,
struct octep_device *oct = netdev_priv(netdev);
int q;
- octep_get_if_stats(oct);
+ if (netif_running(netdev))
+ octep_ctrl_net_get_if_stats(oct,
+ OCTEP_CTRL_NET_INVALID_VFID,
+ &oct->iface_rx_stats,
+ &oct->iface_tx_stats);
+
tx_packets = 0;
tx_bytes = 0;
rx_packets = 0;
@@ -826,7 +838,8 @@ static int octep_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- err = octep_set_mac_addr(oct, addr->sa_data);
+ err = octep_ctrl_net_set_mac_addr(oct, OCTEP_CTRL_NET_INVALID_VFID,
+ addr->sa_data, true);
if (err)
return err;
@@ -846,7 +859,8 @@ static int octep_change_mtu(struct net_device *netdev, int new_mtu)
if (link_info->mtu == new_mtu)
return 0;
- err = octep_set_mtu(oct, new_mtu);
+ err = octep_ctrl_net_set_mtu(oct, OCTEP_CTRL_NET_INVALID_VFID, new_mtu,
+ true);
if (!err) {
oct->link_info.mtu = new_mtu;
netdev->mtu = new_mtu;
@@ -866,6 +880,59 @@ static const struct net_device_ops octep_netdev_ops = {
};
/**
+ * octep_intr_poll_task - work queue task to process non-ioq interrupts.
+ *
+ * @work: pointer to the intr_poll_task work_struct
+ *
+ * Process non-ioq interrupts such as control mailbox and PF-VF mailbox
+ * interrupts.
+ **/
+static void octep_intr_poll_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ intr_poll_task.work);
+
+ if (!oct->poll_non_ioq_intr) {
+ dev_info(&oct->pdev->dev, "Interrupt poll task stopped.\n");
+ return;
+ }
+
+ oct->hw_ops.poll_non_ioq_interrupts(oct);
+ queue_delayed_work(octep_wq, &oct->intr_poll_task,
+ msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
+}
+
+/**
+ * octep_hb_timeout_task - work queue task to check firmware heartbeat.
+ *
+ * @work: pointer to hb work_struct
+ *
+ * Check the heartbeat miss count; if it exceeds the configured maximum,
+ * stop the device.
+ **/
+static void octep_hb_timeout_task(struct work_struct *work)
+{
+	struct octep_device *oct = container_of(work, struct octep_device,
+						 hb_task.work);
+	int miss_cnt;
+
+ miss_cnt = atomic_inc_return(&oct->hb_miss_cnt);
+ if (miss_cnt < oct->conf->max_hb_miss_cnt) {
+ queue_delayed_work(octep_wq, &oct->hb_task,
+ msecs_to_jiffies(oct->conf->hb_interval * 1000));
+ return;
+ }
+
+ dev_err(&oct->pdev->dev, "Missed %u heartbeats. Uninitializing\n",
+ miss_cnt);
+ rtnl_lock();
+ if (netif_running(oct->netdev))
+ octep_stop(oct->netdev);
+ rtnl_unlock();
+}
+
+/**
* octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages.
*
* @work: pointer to ctrl mbox work_struct
@@ -876,34 +943,8 @@ static void octep_ctrl_mbox_task(struct work_struct *work)
{
struct octep_device *oct = container_of(work, struct octep_device,
ctrl_mbox_task);
- struct net_device *netdev = oct->netdev;
- struct octep_ctrl_net_f2h_req req = {};
- struct octep_ctrl_mbox_msg msg;
- int ret = 0;
-
- msg.msg = &req;
- while (true) {
- ret = octep_ctrl_mbox_recv(&oct->ctrl_mbox, &msg);
- if (ret)
- break;
-
- switch (req.hdr.cmd) {
- case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
- if (netif_running(netdev)) {
- if (req.link.state) {
- dev_info(&oct->pdev->dev, "netif_carrier_on\n");
- netif_carrier_on(netdev);
- } else {
- dev_info(&oct->pdev->dev, "netif_carrier_off\n");
- netif_carrier_off(netdev);
- }
- }
- break;
- default:
- pr_info("Unknown mbox req : %u\n", req.hdr.cmd);
- break;
- }
- }
+
+ octep_ctrl_net_recv_fw_messages(oct);
}
static const char *octep_devid_to_str(struct octep_device *oct)
@@ -927,7 +968,6 @@ static const char *octep_devid_to_str(struct octep_device *oct)
*/
int octep_device_setup(struct octep_device *oct)
{
- struct octep_ctrl_mbox *ctrl_mbox;
struct pci_dev *pdev = oct->pdev;
int i, ret;
@@ -964,19 +1004,14 @@ int octep_device_setup(struct octep_device *oct)
oct->pkind = CFG_GET_IQ_PKIND(oct->conf);
- /* Initialize control mbox */
- ctrl_mbox = &oct->ctrl_mbox;
- ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf);
- ret = octep_ctrl_mbox_init(ctrl_mbox);
- if (ret) {
- dev_err(&pdev->dev, "Failed to initialize control mbox\n");
- goto unsupported_dev;
- }
- oct->ctrl_mbox_ifstats_offset = OCTEP_CTRL_MBOX_SZ(ctrl_mbox->h2fq.elem_sz,
- ctrl_mbox->h2fq.elem_cnt,
- ctrl_mbox->f2hq.elem_sz,
- ctrl_mbox->f2hq.elem_cnt);
+ ret = octep_ctrl_net_init(oct);
+ if (ret)
+ return ret;
+ atomic_set(&oct->hb_miss_cnt, 0);
+ INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);
+ queue_delayed_work(octep_wq, &oct->hb_task,
+ msecs_to_jiffies(oct->conf->hb_interval * 1000));
return 0;
unsupported_dev:
@@ -1005,7 +1040,8 @@ static void octep_device_cleanup(struct octep_device *oct)
oct->mbox[i] = NULL;
}
- octep_ctrl_mbox_uninit(&oct->ctrl_mbox);
+ octep_ctrl_net_uninit(oct);
+ cancel_delayed_work_sync(&oct->hb_task);
oct->hw_ops.soft_reset(oct);
for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
@@ -1017,6 +1053,26 @@ static void octep_device_cleanup(struct octep_device *oct)
oct->conf = NULL;
}
+static bool get_fw_ready_status(struct pci_dev *pdev)
+{
+ u32 pos = 0;
+ u16 vsec_id;
+ u8 status;
+
+ while ((pos = pci_find_next_ext_capability(pdev, pos,
+ PCI_EXT_CAP_ID_VNDR))) {
+ pci_read_config_word(pdev, pos + 4, &vsec_id);
+#define FW_STATUS_VSEC_ID 0xA3
+ if (vsec_id != FW_STATUS_VSEC_ID)
+ continue;
+
+ pci_read_config_byte(pdev, (pos + 8), &status);
+ dev_info(&pdev->dev, "Firmware ready status = %u\n", status);
+ return status;
+ }
+ return false;
+}
+
/**
* octep_probe() - Octeon PCI device probe handler.
*
@@ -1050,9 +1106,14 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_regions;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
+ if (!get_fw_ready_status(pdev)) {
+ dev_notice(&pdev->dev, "Firmware not ready; defer probe.\n");
+ err = -EPROBE_DEFER;
+ goto err_alloc_netdev;
+ }
+
netdev = alloc_etherdev_mq(sizeof(struct octep_device),
OCTEP_MAX_QUEUES);
if (!netdev) {
@@ -1075,6 +1136,10 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
+ INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task);
+ octep_dev->poll_non_ioq_intr = true;
+ queue_delayed_work(octep_wq, &octep_dev->intr_poll_task,
+ msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
netdev->netdev_ops = &octep_netdev_ops;
octep_set_ethtool_ops(netdev);
@@ -1086,7 +1151,8 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->max_mtu = OCTEP_MAX_MTU;
netdev->mtu = OCTEP_DEFAULT_MTU;
- err = octep_get_mac_addr(octep_dev, octep_dev->mac_addr);
+ err = octep_ctrl_net_get_mac_addr(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
+ octep_dev->mac_addr);
if (err) {
dev_err(&pdev->dev, "Failed to get mac address\n");
goto register_dev_err;
@@ -1106,7 +1172,6 @@ register_dev_err:
err_octep_config:
free_netdev(netdev);
err_alloc_netdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_regions:
err_dma_mask:
@@ -1136,10 +1201,11 @@ static void octep_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ oct->poll_non_ioq_intr = false;
+ cancel_delayed_work_sync(&oct->intr_poll_task);
octep_device_cleanup(oct);
pci_release_mem_regions(pdev);
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
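
Both intr_poll_task and hb_task in the hunks above follow the same self-rearming delayed-work pattern: do the periodic work, then requeue unless a stop flag has been cleared. A stripped-down sketch of that pattern, with illustrative names and the system workqueue standing in for octep_wq:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	struct poll_ctx {
		struct delayed_work work;
		bool keep_polling;
	};

	static void poll_fn(struct work_struct *work)
	{
		struct poll_ctx *ctx = container_of(work, struct poll_ctx, work.work);

		if (!ctx->keep_polling)
			return;			/* stop: do not requeue */

		/* ... poll hardware / service mailboxes here ... */

		schedule_delayed_work(&ctx->work, msecs_to_jiffies(100));
	}

Teardown then mirrors octep_remove() above: clear keep_polling first, then cancel_delayed_work_sync() so a concurrently running instance cannot requeue itself.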
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index 123ffc13754d..e0907a719133 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -73,6 +73,7 @@ struct octep_hw_ops {
void (*enable_interrupts)(struct octep_device *oct);
void (*disable_interrupts)(struct octep_device *oct);
+ bool (*poll_non_ioq_interrupts)(struct octep_device *oct);
void (*enable_io_queues)(struct octep_device *oct);
void (*disable_io_queues)(struct octep_device *oct);
@@ -270,7 +271,22 @@ struct octep_device {
/* Work entry to handle ctrl mbox interrupt */
struct work_struct ctrl_mbox_task;
-
+ /* Wait queue for host to firmware requests */
+ wait_queue_head_t ctrl_req_wait_q;
+ /* List of objects waiting for h2f response */
+ struct list_head ctrl_req_wait_list;
+
+ /* Enable non-ioq interrupt polling */
+ bool poll_non_ioq_intr;
+ /* Work entry to poll non-ioq interrupts */
+ struct delayed_work intr_poll_task;
+
+ /* Firmware heartbeat timer */
+ struct timer_list hb_timer;
+ /* Firmware heartbeat miss count tracked by timer */
+ atomic_t hb_miss_cnt;
+ /* Task to reset device on heartbeat miss */
+ struct delayed_work hb_task;
};
static inline u16 OCTEP_MAJOR_REV(struct octep_device *oct)
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
index 3d5d39a52fe6..b25c3093dc7b 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -364,4 +364,10 @@
/* Number of non-queue interrupts in CN93xx */
#define CN93_NUM_NON_IOQ_INTR 16
+
+/* bit 0 for control mbox interrupt */
+#define CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX BIT_ULL(0)
+/* bit 1 for firmware heartbeat interrupt */
+#define CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1)
+
#endif /* _OCTEP_REGS_CN9K_PF_H_ */
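
The two OEI bits added above identify the producer behind a non-IOQ interrupt. A hedged sketch of how a handler might demultiplex the summary word; the real CN9K handler lives in octep_cn9k_pf.c and may differ, and reading the RINT register is device specific and omitted here:

	/* Illustrative only: dispatch based on the OEI RINT data bits above. */
	static void example_handle_oei(struct octep_device *oct, u64 rint_data)
	{
		if (rint_data & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
			queue_work(octep_wq, &oct->ctrl_mbox_task);

		if (rint_data & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
			atomic_set(&oct->hb_miss_cnt, 0);	/* heartbeat seen */
	}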
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 724df6398bbe..bd77152bb8d7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1231,6 +1231,14 @@ static inline void link_status_user_format(u64 lstat,
linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);
+
+ if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
+ dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
+ linfo->lmac_type_id, cgx->cgx_id, lmac_id);
+ strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1);
+ return;
+ }
+
lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 2898931d5260..9690ac01f02c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(otx2_mbox_init);
*/
int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
struct pci_dev *pdev, void *reg_base,
- int direction, int ndevs)
+ int direction, int ndevs, unsigned long *pf_bmap)
{
struct otx2_mbox_dev *mdev;
int devid, err;
@@ -169,6 +169,9 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
mbox->hwbase = hwbase[0];
for (devid = 0; devid < ndevs; devid++) {
+ if (!test_bit(devid, pf_bmap))
+ continue;
+
mdev = &mbox->dev[devid];
mdev->mbase = hwbase[devid];
mdev->hwbase = hwbase[devid];
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 5727d67e0259..6389ed83637d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -96,9 +96,10 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox);
int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
struct pci_dev *pdev, void __force *reg_base,
int direction, int ndevs);
+
int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
struct pci_dev *pdev, void __force *reg_base,
- int direction, int ndevs);
+ int direction, int ndevs, unsigned long *bmap);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
@@ -245,9 +246,9 @@ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
npc_mcam_get_stats_req, \
npc_mcam_get_stats_rsp) \
-M(NPC_GET_SECRET_KEY, 0x6013, npc_get_secret_key, \
- npc_get_secret_key_req, \
- npc_get_secret_key_rsp) \
+M(NPC_GET_FIELD_HASH_INFO, 0x6013, npc_get_field_hash_info, \
+ npc_get_field_hash_info_req, \
+ npc_get_field_hash_info_rsp) \
M(NPC_GET_FIELD_STATUS, 0x6014, npc_get_field_status, \
npc_get_field_status_req, \
npc_get_field_status_rsp) \
@@ -936,7 +937,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
- u64 prof;
+ struct nix_bandprof_s prof;
};
union {
struct nix_rq_ctx_s rq_mask;
@@ -944,7 +945,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
- u64 prof_mask;
+ struct nix_bandprof_s prof_mask;
};
};
@@ -1524,14 +1525,20 @@ struct npc_mcam_get_stats_rsp {
u8 stat_ena; /* enabled */
};
-struct npc_get_secret_key_req {
+struct npc_get_field_hash_info_req {
struct mbox_msghdr hdr;
u8 intf;
};
-struct npc_get_secret_key_rsp {
+struct npc_get_field_hash_info_rsp {
struct mbox_msghdr hdr;
u64 secret_key[3];
+#define NPC_MAX_HASH 2
+#define NPC_MAX_HASH_MASK 2
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */
+ u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */
+ u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH];
};
enum ptp_op {
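
The renamed NPC_GET_FIELD_HASH_INFO response above now carries, per interface and hash engine, a pair of 64-bit masks and one result-control word in addition to the secret key. A small illustrative consumer (the function name is hypothetical):

	/* Illustrative indexing of npc_get_field_hash_info_rsp: one mask pair
	 * and one control word per (interface, hash engine).
	 */
	static void example_dump_hash_cfg(struct npc_get_field_hash_info_rsp *rsp,
					  u8 intf, u8 hash_idx)
	{
		pr_info("intf %u hash %u: mask0=%llx mask1=%llx ctrl=%llx\n",
			intf, hash_idx,
			rsp->hash_mask[intf][hash_idx][0],
			rsp->hash_mask[intf][hash_idx][1],
			rsp->hash_ctrl[intf][hash_idx]);
	}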
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
index f68a6a0e3aa4..c43f19dfbd74 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
@@ -473,6 +473,8 @@ void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id,
for (reg_id = 0; reg_id < 4; reg_id++) {
reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
mcs_reg_write(mcs, reg, data[reg_id]);
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
mcs_reg_write(mcs, reg, mask[reg_id]);
}
@@ -480,6 +482,8 @@ void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id,
for (reg_id = 0; reg_id < 4; reg_id++) {
reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
mcs_reg_write(mcs, reg, data[reg_id]);
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
mcs_reg_write(mcs, reg, mask[reg_id]);
}
@@ -494,6 +498,9 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
/* Flow entry */
flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ __set_bit(flow_id, mcs->rx.flow_ids.bmap);
+ __set_bit(flow_id, mcs->tx.flow_ids.bmap);
+
for (reg_id = 0; reg_id < 4; reg_id++) {
reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
@@ -504,6 +511,8 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
}
/* secy */
secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
+ __set_bit(secy_id, mcs->rx.secy.bmap);
+ __set_bit(secy_id, mcs->tx.secy.bmap);
/* Set validate frames to NULL and enable control port */
plcy = 0x7ull;
@@ -528,6 +537,7 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
/* Enable Flowid entry */
mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
+
return 0;
}
@@ -926,60 +936,42 @@ static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
mcs_add_intr_wq_entry(mcs, &event);
}
-static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
{
- struct mcs_intr_event event = { 0 };
- int i;
+ u64 val, reg;
+ int lmac;
- if (!(intr & MCS_BBE_INT_MASK))
+ if (!(intr & 0x6ULL))
return;
- event.mcs_id = mcs->mcs_id;
- event.pcifunc = mcs->pf_map[0];
+ if (intr & BIT_ULL(1))
+ reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 :
+ MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0;
+ else
+ reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 :
+ MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0;
+ val = mcs_reg_read(mcs, reg);
- for (i = 0; i < MCS_MAX_BBE_INT; i++) {
- if (!(intr & BIT_ULL(i)))
+	/* policy/data overflow occurred */
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ if (!(val & BIT_ULL(lmac)))
continue;
-
- /* Lower nibble denotes data fifo overflow interrupts and
- * upper nibble indicates policy fifo overflow interrupts.
- */
- if (intr & 0xFULL)
- event.intr_mask = (dir == MCS_RX) ?
- MCS_BBE_RX_DFIFO_OVERFLOW_INT :
- MCS_BBE_TX_DFIFO_OVERFLOW_INT;
- else
- event.intr_mask = (dir == MCS_RX) ?
- MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
- MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
-
- /* Notify the lmac_id info which ran into BBE fatal error */
- event.lmac_id = i & 0x3ULL;
- mcs_add_intr_wq_entry(mcs, &event);
+		dev_warn(mcs->dev, "BBE: Policy or data overflow occurred on lmac:%d\n", lmac);
}
}
-static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
{
- struct mcs_intr_event event = { 0 };
- int i;
+ int lmac;
- if (!(intr & MCS_PAB_INT_MASK))
+ if (!(intr & 0xFFFFFULL))
return;
- event.mcs_id = mcs->mcs_id;
- event.pcifunc = mcs->pf_map[0];
-
- for (i = 0; i < MCS_MAX_PAB_INT; i++) {
- if (!(intr & BIT_ULL(i)))
- continue;
-
- event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
- MCS_PAB_TX_CHAN_OVERFLOW_INT;
-
- /* Notify the lmac_id info which ran into PAB fatal error */
- event.lmac_id = i;
- mcs_add_intr_wq_entry(mcs, &event);
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ if (intr & BIT_ULL(lmac))
+ dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac);
}
}
@@ -988,9 +980,8 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
struct mcs *mcs = (struct mcs *)mcs_irq;
u64 intr, cpm_intr, bbe_intr, pab_intr;
- /* Disable and clear the interrupt */
+ /* Disable the interrupt */
mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
- mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
/* Check which block has interrupt*/
intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
@@ -1037,7 +1028,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
/* BBE RX */
if (intr & MCS_BBE_RX_INT_ENA) {
bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
- mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
+ mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
/* Clear the interrupt */
mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
@@ -1047,7 +1038,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
/* BBE TX */
if (intr & MCS_BBE_TX_INT_ENA) {
bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
- mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
+ mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
/* Clear the interrupt */
mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
@@ -1057,7 +1048,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
/* PAB RX */
if (intr & MCS_PAB_RX_INT_ENA) {
pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
- mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
+ mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
/* Clear the interrupt */
mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
@@ -1067,14 +1058,15 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
/* PAB TX */
if (intr & MCS_PAB_TX_INT_ENA) {
pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
- mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
+ mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
/* Clear the interrupt */
mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
}
- /* Enable the interrupt */
+ /* Clear and enable the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
return IRQ_HANDLED;
@@ -1156,7 +1148,7 @@ static int mcs_register_interrupts(struct mcs *mcs)
return ret;
}
- ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
+ ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec),
mcs_ip_intr_handler, 0, "MCS_IP", mcs);
if (ret) {
dev_err(mcs->dev, "MCS IP irq registration failed\n");
@@ -1175,11 +1167,11 @@ static int mcs_register_interrupts(struct mcs *mcs)
mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
- mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
- mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL);
- mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
- mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
if (!mcs->tx_sa_active) {
@@ -1190,7 +1182,7 @@ static int mcs_register_interrupts(struct mcs *mcs)
return ret;
free_irq:
- free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
+ free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs);
exit:
pci_free_irq_vectors(mcs->pdev);
mcs->num_vec = 0;
@@ -1325,8 +1317,11 @@ void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
{
u64 reg;
+ int id = lmac_id * 2;
- reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
+ reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id);
+ mcs_reg_write(mcs, reg, (u64)mode);
+ reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG((id + 1));
mcs_reg_write(mcs, reg, (u64)mode);
}
@@ -1484,6 +1479,7 @@ void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
hw->lmac_cnt = 20; /* lmacs/ports per mcs block */
hw->mcs_x2p_intf = 5; /* x2p clabration intf */
hw->mcs_blks = 1; /* MCS blocks */
+ hw->ip_vec = MCS_CN10KB_INT_VEC_IP; /* IP vector */
}
static struct mcs_ops cn10kb_mcs_ops = {
@@ -1492,6 +1488,8 @@ static struct mcs_ops cn10kb_mcs_ops = {
.mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write,
.mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write,
.mcs_flowid_secy_map = cn10kb_mcs_flowid_secy_map,
+ .mcs_bbe_intr_handler = cn10kb_mcs_bbe_intr_handler,
+ .mcs_pab_intr_handler = cn10kb_mcs_pab_intr_handler,
};
static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -1592,7 +1590,7 @@ static void mcs_remove(struct pci_dev *pdev)
/* Set MCS to external bypass */
mcs_set_external_bypass(mcs, true);
- free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
+ free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs);
pci_free_irq_vectors(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
index 64dc2b80e15d..0f89dcb76465 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
@@ -43,24 +43,15 @@
/* Reserved resources for default bypass entry */
#define MCS_RSRC_RSVD_CNT 1
-/* MCS Interrupt Vector Enumeration */
-enum mcs_int_vec_e {
- MCS_INT_VEC_MIL_RX_GBL = 0x0,
- MCS_INT_VEC_MIL_RX_LMACX = 0x1,
- MCS_INT_VEC_MIL_TX_LMACX = 0x5,
- MCS_INT_VEC_HIL_RX_GBL = 0x9,
- MCS_INT_VEC_HIL_RX_LMACX = 0xa,
- MCS_INT_VEC_HIL_TX_GBL = 0xe,
- MCS_INT_VEC_HIL_TX_LMACX = 0xf,
- MCS_INT_VEC_IP = 0x13,
- MCS_INT_VEC_CNT = 0x14,
-};
+/* MCS Interrupt Vector */
+#define MCS_CNF10KB_INT_VEC_IP 0x13
+#define MCS_CN10KB_INT_VEC_IP 0x53
#define MCS_MAX_BBE_INT 8ULL
#define MCS_BBE_INT_MASK 0xFFULL
-#define MCS_MAX_PAB_INT 4ULL
-#define MCS_PAB_INT_MASK 0xFULL
+#define MCS_MAX_PAB_INT 8ULL
+#define MCS_PAB_INT_MASK 0xFULL
#define MCS_BBE_RX_INT_ENA BIT_ULL(0)
#define MCS_BBE_TX_INT_ENA BIT_ULL(1)
@@ -137,6 +128,7 @@ struct hwinfo {
u8 lmac_cnt;
u8 mcs_blks;
unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */
+ u16 ip_vec;
};
struct mcs {
@@ -165,6 +157,8 @@ struct mcs_ops {
void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
+ void (*mcs_bbe_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+ void (*mcs_pab_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
};
extern struct pci_driver mcs_driver;
@@ -219,6 +213,8 @@ void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *ma
void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void cn10kb_mcs_parser_cfg(struct mcs *mcs);
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
/* CNF10K-B APIs */
struct mcs_ops *cnf10kb_get_mac_ops(void);
@@ -229,6 +225,8 @@ void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *m
void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
/* Stats APIs */
void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
index 7b6205414428..9f9b904ab2cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
@@ -13,6 +13,8 @@ static struct mcs_ops cnf10kb_mcs_ops = {
.mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write,
.mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write,
.mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map,
+ .mcs_bbe_intr_handler = cnf10kb_mcs_bbe_intr_handler,
+ .mcs_pab_intr_handler = cnf10kb_mcs_pab_intr_handler,
};
struct mcs_ops *cnf10kb_get_mac_ops(void)
@@ -31,6 +33,7 @@ void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
hw->lmac_cnt = 4; /* lmacs/ports per mcs block */
hw->mcs_x2p_intf = 1; /* x2p clabration intf */
hw->mcs_blks = 7; /* MCS blocks */
+ hw->ip_vec = MCS_CNF10KB_INT_VEC_IP; /* IP vector */
}
void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
@@ -212,3 +215,63 @@ void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
mcs_add_intr_wq_entry(mcs, &event);
}
}
+
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_BBE_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_BBE_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ /* Lower nibble denotes data fifo overflow interrupts and
+ * upper nibble indicates policy fifo overflow interrupts.
+ */
+ if (intr & 0xFULL)
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_DFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_DFIFO_OVERFLOW_INT;
+ else
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
+
+ /* Notify the lmac_id info which ran into BBE fatal error */
+ event.lmac_id = i & 0x3ULL;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_PAB_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_PAB_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_PAB_RX_CHAN_OVERFLOW_INT :
+ MCS_PAB_TX_CHAN_OVERFLOW_INT;
+
+ /* Notify the lmac_id info which ran into PAB fatal error */
+ event.lmac_id = i;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
index c95a8b8f5eaf..f3ab01fc363c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
@@ -97,6 +97,7 @@
#define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a) (0x46f8ull + (a) * 0x8ull)
#define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a) (0x788ull + (a) * 0x8ull)
#define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a) (0x4738ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_PORT_CFGX(a) (0x3b98ull + (a) * 0x8ull)
#define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
u64 offset; \
\
@@ -275,7 +276,10 @@
#define MCSX_BBE_RX_SLAVE_CAL_ENTRY 0x180ull
#define MCSX_BBE_RX_SLAVE_CAL_LEN 0x188ull
#define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a) (0x290ull + (a) * 0x40ull)
-
+#define MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 0xe20
+#define MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0 0x1298
+#define MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 0xe40
+#define MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0 0x12b8
#define MCSX_BBE_RX_SLAVE_BBE_INT ({ \
u64 offset; \
\
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index eb25e458266c..dfd23580e3b8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -11,6 +11,7 @@
#include "mcs.h"
#include "rvu.h"
+#include "mcs_reg.h"
#include "lmac_common.h"
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
@@ -32,6 +33,42 @@ static struct _req_type __maybe_unused \
MBOX_UP_MCS_MESSAGES
#undef M
+void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena)
+{
+ struct mcs *mcs;
+ u64 cfg;
+ u8 port;
+
+ if (!rvu->mcs_blk_cnt)
+ return;
+
+	/* When PTP is enabled, RPM appends an 8B header to all
+	 * RX packets. The MCS PEX must be configured to skip
+	 * these 8 bytes during packet parsing.
+	 */
+
+ /* CNF10K-B */
+ if (rvu->mcs_blk_cnt > 1) {
+ mcs = mcs_get_pdata(rpm_id);
+ cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+ if (ena)
+ cfg |= BIT_ULL(lmac_id);
+ else
+ cfg &= ~BIT_ULL(lmac_id);
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, cfg);
+ return;
+ }
+ /* CN10KB */
+ mcs = mcs_get_pdata(0);
+ port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id;
+ cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port));
+ if (ena)
+ cfg |= BIT_ULL(0);
+ else
+ cfg &= ~BIT_ULL(0);
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port), cfg);
+}
+
int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
struct mcs_set_lmac_mode *req,
struct msg_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 8683ce57ed3f..9f673bda9dbd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2282,7 +2282,7 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
}
static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
- int num, int type)
+ int num, int type, unsigned long *pf_bmap)
{
struct rvu_hwinfo *hw = rvu->hw;
int region;
@@ -2294,6 +2294,9 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
*/
if (type == TYPE_AFVF) {
for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
if (hw->cap.per_pf_mbox_regs) {
bar4 = rvu_read64(rvu, BLKADDR_RVUM,
RVU_AF_PFX_BAR4_ADDR(0)) +
@@ -2315,6 +2318,9 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
* RVU_AF_PF_BAR4_ADDR register.
*/
for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
if (hw->cap.per_pf_mbox_regs) {
bar4 = rvu_read64(rvu, BLKADDR_RVUM,
RVU_AF_PFX_BAR4_ADDR(region));
@@ -2343,20 +2349,41 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int err = -EINVAL, i, dir, dir_up;
void __iomem *reg_base;
struct rvu_work *mwork;
+ unsigned long *pf_bmap;
void **mbox_regions;
const char *name;
+ u64 cfg;
- mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
- if (!mbox_regions)
+ pf_bmap = bitmap_zalloc(num, GFP_KERNEL);
+ if (!pf_bmap)
return -ENOMEM;
+ /* RVU VFs */
+ if (type == TYPE_AFVF)
+ bitmap_set(pf_bmap, 0, num);
+
+ if (type == TYPE_AFPF) {
+ /* Mark enabled PFs in bitmap */
+ for (i = 0; i < num; i++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
+ if (cfg & BIT_ULL(20))
+ set_bit(i, pf_bmap);
+ }
+ }
+
+ mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ if (!mbox_regions) {
+ err = -ENOMEM;
+ goto free_bitmap;
+ }
+
switch (type) {
case TYPE_AFPF:
name = "rvu_afpf_mailbox";
dir = MBOX_DIR_AFPF;
dir_up = MBOX_DIR_AFPF_UP;
reg_base = rvu->afreg_base;
- err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap);
if (err)
goto free_regions;
break;
@@ -2365,7 +2392,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
dir = MBOX_DIR_PFVF;
dir_up = MBOX_DIR_PFVF_UP;
reg_base = rvu->pfreg_base;
- err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap);
if (err)
goto free_regions;
break;
@@ -2396,16 +2423,19 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
- reg_base, dir, num);
+ reg_base, dir, num, pf_bmap);
if (err)
goto exit;
err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
- reg_base, dir_up, num);
+ reg_base, dir_up, num, pf_bmap);
if (err)
goto exit;
for (i = 0; i < num; i++) {
+ if (!test_bit(i, pf_bmap))
+ continue;
+
mwork = &mw->mbox_wrk[i];
mwork->rvu = rvu;
INIT_WORK(&mwork->work, mbox_handler);
@@ -2414,8 +2444,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
mwork->rvu = rvu;
INIT_WORK(&mwork->work, mbox_up_handler);
}
- kfree(mbox_regions);
- return 0;
+ goto free_regions;
exit:
destroy_workqueue(mw->mbox_wq);
@@ -2424,6 +2453,8 @@ unmap_regions:
iounmap((void __iomem *)mbox_regions[num]);
free_regions:
kfree(mbox_regions);
+free_bitmap:
+ bitmap_free(pf_bmap);
return err;
}
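
The rvu_mbox_init() change above allocates mailbox state only for PFs that firmware has actually enabled, discovered by testing bit 20 of RVU_PRIV_PFX_CFG. A minimal sketch of that discovery step in isolation (hypothetical helper name):

	/* Illustrative: count PFs marked enabled in RVU_PRIV_PFX_CFG (bit 20). */
	static int example_count_enabled_pfs(struct rvu *rvu, int num)
	{
		int i, cnt = 0;
		u64 cfg;

		for (i = 0; i < num; i++) {
			cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
			if (cfg & BIT_ULL(20))
				cnt++;
		}
		return cnt;
	}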
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index ef721caeac49..d655bf04a483 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -920,6 +920,7 @@ int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
/* CN10K MCS */
int rvu_mcs_init(struct rvu *rvu);
int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
+void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
void rvu_mcs_exit(struct rvu *rvu);
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 438b212fb54a..83b342fa8d75 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -773,6 +773,8 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
/* This flag is required to clean up CGX conf if app gets killed */
pfvf->hw_rx_tstamp_en = enable;
+ /* Inform MCS about 8B RX header */
+ rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 4ad9ff025c96..0e74c5a2231e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -60,13 +60,14 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
u64 iova, u64 *lmt_addr)
{
u64 pa, val, pf;
- int err;
+ int err = 0;
if (!iova) {
dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);
return -EINVAL;
}
+ mutex_lock(&rvu->rsrc_lock);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
pf = rvu_get_pf(pcifunc) & 0x1F;
val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
@@ -76,12 +77,13 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
if (err) {
dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__);
- return err;
+ goto exit;
}
val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
if (val & ~0x1ULL) {
dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
- return -EIO;
+ err = -EIO;
+ goto exit;
}
/* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
* PA[11:0] = IOVA[11:0]
@@ -89,8 +91,9 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
pa &= GENMASK_ULL(39, 0);
*lmt_addr = (pa << 12) | (iova & 0xFFF);
-
- return 0;
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return err;
}
static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
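
The rvu_get_lmtaddr() fix above takes rsrc_lock around the whole SMMU translation and converts the early returns into jumps to a single unlock point, so no error path can leave the mutex held. The shape of that pattern, reduced to its essentials (illustrative function, not driver code):

	/* Illustrative: every exit path funnels through one mutex_unlock(). */
	static int example_locked_op(struct mutex *lock, bool hw_error)
	{
		int err = 0;

		mutex_lock(lock);
		if (hw_error) {
			err = -EIO;
			goto exit;
		}
		/* ... hardware access serialized by the lock ... */
	exit:
		mutex_unlock(lock);
		return err;
	}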
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 26cfa501f1a1..9533b1d92960 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -497,8 +497,9 @@ static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused
stats.octet_validated_cnt);
seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
stats.pkt_port_disabled_cnt);
- seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_badtag_cnt);
- seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_nosa_cnt);
+ seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
+ seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
+ stats.pkt_nosa_cnt);
seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
stats.pkt_nosaerror_cnt);
seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 006beb5cf98d..952319453701 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -13,11 +13,6 @@
#include "rvu_npc_fs.h"
#include "rvu_npc_hash.h"
-#define NPC_BYTESM GENMASK_ULL(19, 16)
-#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
-#define NPC_KEY_OFFSET GENMASK_ULL(5, 0)
-#define NPC_LDATA_EN BIT_ULL(7)
-
static const char * const npc_flow_names[] = {
[NPC_DMAC] = "dmac",
[NPC_SMAC] = "smac",
@@ -442,6 +437,7 @@ done:
static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
u8 lt, u64 cfg, u8 intf)
{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
struct npc_mcam *mcam = &rvu->hw->mcam;
u8 hdr, key, nr_bytes, bit_offset;
u8 la_ltype, la_start;
@@ -490,8 +486,21 @@ do { \
NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
NPC_SCAN_HDR(NPC_IPFRAG_IPV6, NPC_LID_LC, NPC_LT_LC_IP6_EXT, 6, 1);
- NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
- NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ if (rvu->hw->cap.npc_hash_extract) {
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][0])
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 4);
+ else
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][1])
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 4);
+ else
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ } else {
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ }
+
NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
@@ -594,8 +603,7 @@ static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
*/
masked_cfg = cfg & NPC_EXACT_NIBBLE;
bitnr = NPC_EXACT_NIBBLE_START;
- for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg,
- NPC_EXACT_NIBBLE_START) {
+ for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg, NPC_EXACT_NIBBLE_END + 1) {
npc_scan_exact_result(mcam, bitnr, key_nibble, intf);
key_nibble++;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
index bdd65ce56a32..3f5c9042d10e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
@@ -9,6 +9,10 @@
#define __RVU_NPC_FS_H
#define IPV6_WORDS 4
+#define NPC_BYTESM GENMASK_ULL(19, 16)
+#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
+#define NPC_KEY_OFFSET GENMASK_ULL(5, 0)
+#define NPC_LDATA_EN BIT_ULL(7)
void npc_update_entry(struct rvu *rvu, enum key_fields type,
struct mcam_entry *entry, u64 val_lo,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index 20ebb9c95c73..51209119f0f2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -78,42 +78,43 @@ static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
return hash_out;
}
-u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
- u64 *secret_key, u8 intf, u8 hash_idx)
+u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
+ u8 intf, u8 hash_idx)
{
u64 hash_key[3];
u64 data_padded[2];
u32 field_hash;
- hash_key[0] = secret_key[1] << 31;
- hash_key[0] |= secret_key[2];
- hash_key[1] = secret_key[1] >> 33;
- hash_key[1] |= secret_key[0] << 31;
- hash_key[2] = secret_key[0] >> 33;
+ hash_key[0] = rsp.secret_key[1] << 31;
+ hash_key[0] |= rsp.secret_key[2];
+ hash_key[1] = rsp.secret_key[1] >> 33;
+ hash_key[1] |= rsp.secret_key[0] << 31;
+ hash_key[2] = rsp.secret_key[0] >> 33;
- data_padded[0] = mkex_hash->hash_mask[intf][hash_idx][0] & ldata[0];
- data_padded[1] = mkex_hash->hash_mask[intf][hash_idx][1] & ldata[1];
+ data_padded[0] = rsp.hash_mask[intf][hash_idx][0] & ldata[0];
+ data_padded[1] = rsp.hash_mask[intf][hash_idx][1] & ldata[1];
field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);
- field_hash &= mkex_hash->hash_ctrl[intf][hash_idx] >> 32;
- field_hash |= mkex_hash->hash_ctrl[intf][hash_idx];
+ field_hash &= FIELD_GET(GENMASK(63, 32), rsp.hash_ctrl[intf][hash_idx]);
+ field_hash += FIELD_GET(GENMASK(31, 0), rsp.hash_ctrl[intf][hash_idx]);
return field_hash;
}
-static u64 npc_update_use_hash(int lt, int ld)
+static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr,
+ u8 intf, int lid, int lt, int ld)
{
- u64 cfg = 0;
-
- switch (lt) {
- case NPC_LT_LC_IP6:
- /* Update use_hash(bit-20) and bytesm1 (bit-16:19)
- * in KEX_LD_CFG
- */
- cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
- ld ? 0x8 : 0x18,
- 0x1, 0x0, 0x10);
- break;
- }
+ u8 hdr, key;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld));
+ hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+
+ /* Update use_hash(bit-20) to 'true' and
+ * bytesm1(bit-16:19) to '0x3' in KEX_LD_CFG
+ */
+ cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
+ hdr, 0x1, 0x0, key);
return cfg;
}
@@ -132,12 +133,13 @@ static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
for (lt = 0; lt < NPC_MAX_LT; lt++) {
for (ld = 0; ld < NPC_MAX_LD; ld++) {
if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
- u64 cfg = npc_update_use_hash(lt, ld);
+ u64 cfg;
- hash_cnt++;
if (hash_cnt == NPC_MAX_HASH)
return;
+ cfg = npc_update_use_hash(rvu, blkaddr,
+ intf, lid, lt, ld);
/* Set updated KEX configuration */
SET_KEX_LD(intf, lid, lt, ld, cfg);
/* Set HASH configuration */
@@ -149,6 +151,8 @@ static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
mkex_hash->hash_mask[intf][ld][1]);
SET_KEX_LD_HASH_CTRL(intf, ld,
mkex_hash->hash_ctrl[intf][ld]);
+
+ hash_cnt++;
}
}
}
@@ -169,12 +173,13 @@ static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
for (lt = 0; lt < NPC_MAX_LT; lt++) {
for (ld = 0; ld < NPC_MAX_LD; ld++)
if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
- u64 cfg = npc_update_use_hash(lt, ld);
+ u64 cfg;
- hash_cnt++;
if (hash_cnt == NPC_MAX_HASH)
return;
+ cfg = npc_update_use_hash(rvu, blkaddr,
+ intf, lid, lt, ld);
/* Set updated KEX configuration */
SET_KEX_LD(intf, lid, lt, ld, cfg);
/* Set HASH configuration */
@@ -187,8 +192,6 @@ static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
SET_KEX_LD_HASH_CTRL(intf, ld,
mkex_hash->hash_ctrl[intf][ld]);
hash_cnt++;
- if (hash_cnt == NPC_MAX_HASH)
- return;
}
}
}
@@ -238,8 +241,8 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
struct flow_msg *omask)
{
struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
- struct npc_get_secret_key_req req;
- struct npc_get_secret_key_rsp rsp;
+ struct npc_get_field_hash_info_req req;
+ struct npc_get_field_hash_info_rsp rsp;
u64 ldata[2], cfg;
u32 field_hash;
u8 hash_idx;
@@ -250,7 +253,7 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
}
req.intf = intf;
- rvu_mbox_handler_npc_get_secret_key(rvu, &req, &rsp);
+ rvu_mbox_handler_npc_get_field_hash_info(rvu, &req, &rsp);
for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
@@ -266,44 +269,45 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
* is hashed to 32 bit value.
*/
case NPC_LT_LC_IP6:
- if (features & BIT_ULL(NPC_SIP_IPV6)) {
+ /* ld[0] == hash_idx[0] == Source IPv6
+ * ld[1] == hash_idx[1] == Destination IPv6
+ */
+ if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) {
u32 src_ip[IPV6_WORDS];
be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
- ldata[0] = (u64)src_ip[0] << 32 | src_ip[1];
- ldata[1] = (u64)src_ip[2] << 32 | src_ip[3];
+ ldata[1] = (u64)src_ip[0] << 32 | src_ip[1];
+ ldata[0] = (u64)src_ip[2] << 32 | src_ip[3];
field_hash = npc_field_hash_calc(ldata,
- mkex_hash,
- rsp.secret_key,
+ rsp,
intf,
hash_idx);
npc_update_entry(rvu, NPC_SIP_IPV6, entry,
- field_hash, 0, 32, 0, intf);
+ field_hash, 0,
+ GENMASK(31, 0), 0, intf);
memcpy(&opkt->ip6src, &pkt->ip6src,
sizeof(pkt->ip6src));
memcpy(&omask->ip6src, &mask->ip6src,
sizeof(mask->ip6src));
- break;
- }
-
- if (features & BIT_ULL(NPC_DIP_IPV6)) {
+ } else if ((features & BIT_ULL(NPC_DIP_IPV6)) && hash_idx) {
u32 dst_ip[IPV6_WORDS];
be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
- ldata[0] = (u64)dst_ip[0] << 32 | dst_ip[1];
- ldata[1] = (u64)dst_ip[2] << 32 | dst_ip[3];
+ ldata[1] = (u64)dst_ip[0] << 32 | dst_ip[1];
+ ldata[0] = (u64)dst_ip[2] << 32 | dst_ip[3];
field_hash = npc_field_hash_calc(ldata,
- mkex_hash,
- rsp.secret_key,
+ rsp,
intf,
hash_idx);
npc_update_entry(rvu, NPC_DIP_IPV6, entry,
- field_hash, 0, 32, 0, intf);
+ field_hash, 0,
+ GENMASK(31, 0), 0, intf);
memcpy(&opkt->ip6dst, &pkt->ip6dst,
sizeof(pkt->ip6dst));
memcpy(&omask->ip6dst, &mask->ip6dst,
sizeof(mask->ip6dst));
}
+
break;
}
}
@@ -311,13 +315,13 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
}
}
-int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu,
- struct npc_get_secret_key_req *req,
- struct npc_get_secret_key_rsp *rsp)
+int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
+ struct npc_get_field_hash_info_req *req,
+ struct npc_get_field_hash_info_rsp *rsp)
{
u64 *secret_key = rsp->secret_key;
u8 intf = req->intf;
- int blkaddr;
+ int i, j, blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0) {
@@ -329,6 +333,19 @@ int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu,
secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));
+ for (i = 0; i < NPC_MAX_HASH; i++) {
+ for (j = 0; j < NPC_MAX_HASH_MASK; j++) {
+ rsp->hash_mask[NIX_INTF_RX][i][j] =
+ GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j);
+ rsp->hash_mask[NIX_INTF_TX][i][j] =
+ GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j);
+ }
+ }
+
+ for (i = 0; i < NPC_MAX_INTF; i++)
+ for (j = 0; j < NPC_MAX_HASH; j++)
+ rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j);
+
return 0;
}
@@ -1868,9 +1885,9 @@ int rvu_npc_exact_init(struct rvu *rvu)
rvu->hw->table = table;
/* Read table size, ways and depth */
- table->mem_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
- table->cam_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
+ table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
+ table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
__func__, table->mem_table.ways, table->cam_table.depth);
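
npc_field_hash_calc() above stitches the three 64-bit secret-key words into the wider key layout expected by the Toeplitz helper before masking the input with the per-engine hash masks. The repacking step shown on its own, taken from the function and wrapped in a hypothetical helper:

	/* Illustrative: pack NPC_AF_INTFX_SECRET_KEY0..2 into the Toeplitz key. */
	static void example_pack_hash_key(const u64 *secret_key, u64 *hash_key)
	{
		hash_key[0] = secret_key[1] << 31;
		hash_key[0] |= secret_key[2];
		hash_key[1] = secret_key[1] >> 33;
		hash_key[1] |= secret_key[0] << 31;
		hash_key[2] = secret_key[0] >> 33;
	}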
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
index 3efeb09c58de..a1c3d987b804 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
@@ -31,6 +31,12 @@
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx), cfg)
+#define GET_KEX_LD_HASH_CTRL(intf, ld) \
+ rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld))
+
+#define GET_KEX_LD_HASH_MASK(intf, ld, mask_idx) \
+ rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx))
+
#define SET_KEX_LD_HASH_CTRL(intf, ld, cfg) \
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld), cfg)
@@ -56,8 +62,8 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
struct flow_msg *omask);
void npc_config_secret_key(struct rvu *rvu, int blkaddr);
void npc_program_mkex_hash(struct rvu *rvu, int blkaddr);
-u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
- u64 *secret_key, u8 intf, u8 hash_idx);
+u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
+ u8 intf, u8 hash_idx);
static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
.lid_lt_ld_hash_en = {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 9ec5f38d38a8..a487a98eac88 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -9,6 +9,7 @@
#include <net/macsec.h>
#include "otx2_common.h"
+#define MCS_TCAM0_MAC_DA_MASK GENMASK_ULL(47, 0)
#define MCS_TCAM0_MAC_SA_MASK GENMASK_ULL(63, 48)
#define MCS_TCAM1_MAC_SA_MASK GENMASK_ULL(31, 0)
#define MCS_TCAM1_ETYPE_MASK GENMASK_ULL(47, 32)
@@ -149,11 +150,20 @@ static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
enum mcs_rsrc_type type, u16 hw_rsrc_id,
bool all)
{
+ struct mcs_clear_stats *clear_req;
struct mbox *mbox = &pfvf->mbox;
struct mcs_free_rsrc_req *req;
mutex_lock(&mbox->lock);
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req)
+ goto fail;
+
+ clear_req->id = hw_rsrc_id;
+ clear_req->type = type;
+ clear_req->dir = dir;
+
req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
if (!req)
goto fail;
@@ -237,8 +247,10 @@ static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct macsec_secy *secy = rxsc->sw_secy;
struct mcs_flowid_entry_write_req *req;
struct mbox *mbox = &pfvf->mbox;
+ u64 mac_da;
int ret;
mutex_lock(&mbox->lock);
@@ -249,11 +261,16 @@ static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
goto fail;
}
+ mac_da = ether_addr_to_u64(secy->netdev->dev_addr);
+
+ req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
+ req->mask[0] = ~0ULL;
+ req->mask[0] &= ~MCS_TCAM0_MAC_DA_MASK;
+
req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
req->mask[1] = ~0ULL;
req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;
- req->mask[0] = ~0ULL;
req->mask[2] = ~0ULL;
req->mask[3] = ~0ULL;
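The TCAM data/mask convention used above, spelled out as a sketch (the "set mask bit means don't care" reading is an editor inference from this hunk, not an explicit statement in the patch): start from an all-ones mask and clear only the bits of the fields that must match.

	u64 mac_da = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);   /* DMAC value     */
	req->mask[0] = ~0ULL;                                       /* ignore all...  */
	req->mask[0] &= ~MCS_TCAM0_MAC_DA_MASK;                     /* ...except DMAC */

	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;                      /* and ethertype  */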
@@ -997,7 +1014,7 @@ static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy
/* Check if sync is really needed */
if (secy->validate_frames == txsc->last_validate_frames &&
- secy->protect_frames == txsc->last_protect_frames)
+ secy->replay_protect == txsc->last_replay_protect)
return;
cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
@@ -1019,19 +1036,19 @@ static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy
rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;
- if (txsc->last_protect_frames)
+ if (txsc->last_replay_protect)
rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
else
rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;
- if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK)
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
else
rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
}
txsc->last_validate_frames = secy->validate_frames;
- txsc->last_protect_frames = secy->protect_frames;
+ txsc->last_replay_protect = secy->replay_protect;
}
static int cn10k_mdo_open(struct macsec_context *ctx)
@@ -1100,7 +1117,7 @@ static int cn10k_mdo_add_secy(struct macsec_context *ctx)
txsc->sw_secy = secy;
txsc->encoding_sa = secy->tx_sc.encoding_sa;
txsc->last_validate_frames = secy->validate_frames;
- txsc->last_protect_frames = secy->protect_frames;
+ txsc->last_replay_protect = secy->replay_protect;
list_add(&txsc->entry, &cfg->txsc_list);
@@ -1117,6 +1134,7 @@ static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
struct macsec_secy *secy = ctx->secy;
struct macsec_tx_sa *sw_tx_sa;
struct cn10k_mcs_txsc *txsc;
+ bool active;
u8 sa_num;
int err;
@@ -1124,15 +1142,19 @@ static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
if (!txsc)
return -ENOENT;
- txsc->encoding_sa = secy->tx_sc.encoding_sa;
-
- sa_num = txsc->encoding_sa;
- sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+ /* Encoding SA got changed */
+ if (txsc->encoding_sa != secy->tx_sc.encoding_sa) {
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+ active = sw_tx_sa ? sw_tx_sa->active : false;
+ cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active);
+ }
if (netif_running(secy->netdev)) {
cn10k_mcs_sync_stats(pfvf, secy, txsc);
- err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
if (err)
return err;
}
@@ -1521,12 +1543,12 @@ static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;
- if (secy->protect_frames)
+ if (secy->replay_protect)
rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
else
rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;
- if (secy->validate_frames == MACSEC_VALIDATE_CHECK)
+ if (secy->validate_frames == MACSEC_VALIDATE_DISABLED)
rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
else
rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 3d22cc6a2804..0c8fc66ade82 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -335,11 +335,11 @@ struct otx2_flow_config {
#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
- u16 max_flows;
- u8 dmacflt_max_flows;
u32 *bmap_to_dmacindex;
unsigned long *dmacflt_bmap;
struct list_head flow_list;
+ u32 dmacflt_max_flows;
+ u16 max_flows;
};
struct otx2_tc_info {
@@ -389,7 +389,7 @@ struct cn10k_mcs_txsc {
struct cn10k_txsc_stats stats;
struct list_head entry;
enum macsec_validation_type last_validate_frames;
- bool last_protect_frames;
+ bool last_replay_protect;
u16 hw_secy_id_tx;
u16 hw_secy_id_rx;
u16 hw_flow_id;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 179433d0a54a..18284ad75157 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1835,13 +1835,22 @@ int otx2_open(struct net_device *netdev)
otx2_dmacflt_reinstall_flows(pf);
err = otx2_rxtx_enable(pf, true);
- if (err)
+ /* If an mbox communication error happens at this point, the interface
+ * will end up down while the hardware MCAM entries are still enabled
+ * to receive packets. Hence disable packet I/O to avoid leaving the
+ * device in that inconsistent state.
+ */
+ if (err == EIO)
+ goto err_disable_rxtx;
+ else if (err)
goto err_tx_stop_queues;
otx2_do_set_rx_mode(pf);
return 0;
+err_disable_rxtx:
+ otx2_rxtx_enable(pf, false);
err_tx_stop_queues:
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
@@ -3073,8 +3082,6 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_config_pause_frm(pf);
}
- cn10k_mcs_free(pf);
-
#ifdef CONFIG_DCB
/* Disable PFC config */
if (pf->pfc_en) {
@@ -3088,6 +3095,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_unregister_dl(pf);
unregister_netdev(netdev);
+ cn10k_mcs_free(pf);
otx2_sriov_disable(pf->pdev);
otx2_sriov_vfcfg_cleanup(pf);
if (pf->otx2_wq)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 044cc211424e..8392f63e433f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -544,7 +544,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
if (ntohs(flow_spec->etype) == ETH_P_IP) {
flow_spec->ip_flag = IPV4_FLAG_MORE;
- flow_mask->ip_flag = 0xff;
+ flow_mask->ip_flag = IPV4_FLAG_MORE;
req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
flow_spec->next_header = IPPROTO_FRAGMENT;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index ab126f8706c7..53366dbfbf27 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -621,7 +621,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = otx2vf_realloc_msix_vectors(vf);
if (err)
- goto err_mbox_destroy;
+ goto err_detach_rsrc;
err = otx2_set_real_num_queues(netdev, qcount, qcount);
if (err)
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 87fff539d39d..d5691b6a2bc5 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1586,7 +1586,7 @@ static struct platform_driver pxa168_eth_driver = {
.suspend = pxa168_eth_suspend,
.driver = {
.name = DRIVER_NAME,
- .of_match_table = of_match_ptr(pxa168_eth_of_match),
+ .of_match_table = pxa168_eth_of_match,
},
};