Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile          |    2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c             |  294
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h             |   10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h          |    5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h     |   12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h            |  112
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h             |  110
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h     | 3915
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c             |   73
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h             |  104
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c         |  114
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c       |  200
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c     |  261
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c     |   48
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c         |  979
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c         |  675
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c      |   62
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h         |   22
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h      |   88
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c      |  259
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h       |   12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile         |    2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c          |  410
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h          |   14
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c    |   17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h    |   57
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c  |  173
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c   |   17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c     |  419
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c        |  174
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c        |  303
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h      |    1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c        |   70
33 files changed, 7649 insertions(+), 1365 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 1a3455620b38..cc8ac36cf687 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o
rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
- rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o
+ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index fac6474ad694..544c96c8fe1d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -86,6 +86,22 @@ bool is_lmac_valid(struct cgx *cgx, int lmac_id)
return test_bit(lmac_id, &cgx->lmac_bmap);
}
+/* Helper function to get the sequential index of an LMAC
+ * among the enabled LMACs of a CGX
+ */
+static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
+{
+ int tmp, id = 0;
+
+ for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+ if (tmp == lmac_id)
+ break;
+ id++;
+ }
+
+ return id;
+}
+
struct mac_ops *get_mac_ops(void *cgxd)
{
if (!cgxd)
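
The helper above maps a physical LMAC id to its position among the *enabled* LMACs; later code multiplies that position by the per-LMAC DMAC CAM quota to find the LMAC's slice of the shared CAM. A minimal standalone sketch of the same arithmetic (the 32-entry/4-LMAC split is illustrative; the real quota is mac_to_index_bmap.max, set in cgx_lmac_init()):

#include <stdio.h>

/* Illustrative re-implementation of the sequential-id lookup plus the
 * CAM index arithmetic used by cgx_lmac_addr_add(): 32 CAM entries
 * split across a 4-LMAC CGX gives each LMAC 8 slots.
 */
static int seq_id(unsigned long lmac_bmap, int lmac_id)
{
	int bit, id = 0;

	for (bit = 0; bit < 64; bit++) {
		if (!(lmac_bmap & (1UL << bit)))
			continue;
		if (bit == lmac_id)
			return id;
		id++;
	}
	return -1;
}

int main(void)
{
	unsigned long lmac_bmap = 0xb;	/* LMACs 0, 1 and 3 enabled */
	int max = 32 / 4;		/* per-LMAC quota (hypothetical) */
	int id = seq_id(lmac_bmap, 3);	/* LMAC3 is the third enabled one: id 2 */

	/* filter slot 5 of LMAC3 lands at CAM entry 2 * 8 + 5 = 21 */
	printf("CAM index = %d\n", id * max + 5);
	return 0;
}
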
@@ -211,37 +227,257 @@ static u64 mac2u64 (u8 *mac_addr)
return mac;
}
+static void cfg2mac(u64 cfg, u8 *mac_addr)
+{
+ int i, index = 0;
+
+ for (i = ETH_ALEN - 1; i >= 0; i--, index++)
+ mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
+}
+
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
+ int index, id;
u64 cfg;
+ /* access mac_ops to know csr_offset */
mac_ops = cgx_dev->mac_ops;
+
/* copy 6 bytes from macaddr */
/* memcpy(&cfg, mac_addr, 6); */
cfg = mac2u64 (mac_addr);
- cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+ cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
+ CGX_DMAC_MCAST_MODE);
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
return 0;
}
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx = cgxd;
+
+ if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
+ return 0;
+
+ cgx = cgxd;
+ /* Get mac_ops to know csr offset */
+ mac_ops = cgx->mac_ops;
+
+ return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+}
+
+u64 cgx_read_dmac_entry(void *cgxd, int index)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx;
+
+ if (!cgxd)
+ return 0;
+
+ cgx = cgxd;
+ mac_ops = cgx->mac_ops;
+ return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
+}
+
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index, idx;
+ u64 cfg = 0;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Get available index where entry is to be installed */
+ idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
+ if (idx < 0)
+ return idx;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + idx;
+
+ cfg = mac2u64 (mac_addr);
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cfg |= ((u64)lmac_id << 49);
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
+
+ if (is_multicast_ether_addr(mac_addr)) {
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE_CAM;
+ lmac->mcast_filters_count++;
+ } else if (!lmac->mcast_filters_count) {
+ cfg |= CGX_DMAC_MCAST_MODE;
+ }
+
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return idx;
+}
+
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 index = 0, id;
+ u64 cfg;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Restore index 0 to its default init value as done during
+ * cgx_lmac_init
+ */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+
+/* Allows caller to change the MAC address associated with an index
+ * in the DMAC filter table, including index 0, which is reserved for
+ * the interface MAC address
+ */
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
+ struct lmac *lmac;
+ u64 cfg;
+ int id;
+
+ lmac = lmac_pdata(lmac_id, cgx_dev);
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* ensure index is already set */
+ if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
+ return -EINVAL;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+ cfg &= ~CGX_RX_DMAC_ADR_MASK;
+ cfg |= mac2u64 (mac_addr);
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+ return 0;
+}
+
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 mac[ETH_ALEN];
+ u64 cfg;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* Skip deletion for reserved index i.e. index 0 */
+ if (index == 0)
+ return 0;
+
+ rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ /* Read MAC address to check whether it is ucast or mcast */
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+
+ cfg2mac(cfg, mac);
+ if (is_multicast_ether_addr(mac))
+ lmac->mcast_filters_count--;
+
+ if (!lmac->mcast_filters_count) {
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ }
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ return 0;
+}
+
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+
+ if (lmac)
+ return lmac->mac_to_index_bmap.max;
+
+ return 0;
+}
+
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
+ int index;
u64 cfg;
+ int id;
mac_ops = cgx_dev->mac_ops;
- cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
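
Together, cgx_lmac_addr_add()/cgx_lmac_addr_update()/cgx_lmac_addr_del() form a small allocator-backed API over the shared DMAC CAM. A hedged sketch of the expected call flow (cgx_id, lmac_id and both MAC values are placeholders, not from the patch):

/* Install a filter, repoint it at a new address, then free the slot. */
static int dmac_filter_demo(u8 cgx_id, u8 lmac_id)
{
	u8 mac[ETH_ALEN]     = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	u8 new_mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
	int idx, err;

	idx = cgx_lmac_addr_add(cgx_id, lmac_id, mac);
	if (idx < 0)
		return idx;	/* -ENODEV or no free CAM slot */

	err = cgx_lmac_addr_update(cgx_id, lmac_id, new_mac, idx);
	if (err)
		return err;

	return cgx_lmac_addr_del(cgx_id, lmac_id, idx);
}
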
@@ -297,35 +533,51 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx);
+ u16 max_dmac = lmac->mac_to_index_bmap.max;
struct mac_ops *mac_ops;
+ int index, i;
u64 cfg = 0;
+ int id;
if (!cgx)
return;
+ id = get_sequence_id_of_lmac(cgx, lmac_id);
+
mac_ops = cgx->mac_ops;
if (enable) {
/* Enable promiscuous mode on LMAC */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
- cfg |= CGX_DMAC_BCAST_MODE;
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
+ }
} else {
/* Disable promiscuous mode */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 +
+ index * 0x8),
+ cfg);
+ }
+ }
}
}
@@ -1234,6 +1486,15 @@ static int cgx_lmac_init(struct cgx *cgx)
}
lmac->cgx = cgx;
+ lmac->mac_to_index_bmap.max =
+ MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
+ err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
+ if (err)
+ return err;
+
+ /* Reserve first entry for default MAC address */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
@@ -1243,8 +1504,8 @@ static int cgx_lmac_init(struct cgx *cgx)
/* Add reference */
cgx->lmac_idmap[lmac->lmac_id] = lmac;
- cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
set_bit(lmac->lmac_id, &cgx->lmac_bmap);
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
}
return cgx_lmac_verify_fwi_version(cgx);
@@ -1274,6 +1535,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
continue;
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
+ kfree(lmac->mac_to_index_bmap.bmap);
kfree(lmac->name);
kfree(lmac);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 12521262164a..237ba2b56210 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -23,6 +23,7 @@
#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
+#define MAX_DMAC_ENTRIES_PER_CGX 32
#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
@@ -46,10 +47,12 @@
#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2)
#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
#define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset)
#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
#define CGXX_CMRX_TX_STAT0 0x700
@@ -139,7 +142,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index);
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
@@ -165,4 +172,7 @@ u8 cgx_get_lmacid(void *cgxd, u8 lmac_index);
unsigned long cgx_get_lmac_bmap(void *cgxd);
void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val);
u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
+u64 cgx_read_dmac_entry(void *cgxd, int index);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index e66109367487..47f5ed006a93 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -197,6 +197,11 @@ enum nix_scheduler {
#define SDP_CHANNELS 256
+/* The mask is to extract lower 10-bits of channel number
+ * which CPT will pass to X2P.
+ */
+#define NIX_CHAN_CPT_X2P_MASK (0x3ffull)
+
/* NIX LSO format indices.
* As of now TSO is the only one using, so statically assigning indices.
*/
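
The new mask documents that CPT forwards only the low 10 bits of a channel number to X2P. A one-line sketch with a hypothetical channel value:

/* Hypothetical channel 0x812: only bits [9:0] survive the mask */
static inline u64 cpt_x2p_chan(u64 chan)
{
	return chan & NIX_CHAN_CPT_X2P_MASK;	/* 0x812 -> 0x012 */
}
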
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index 45706fd87120..a8b7b1c7a1d5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -10,17 +10,19 @@
#include "rvu.h"
#include "cgx.h"
/**
- * struct lmac
+ * struct lmac - per lmac locks and properties
* @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
* @cmd_lock: Lock to serialize the command interface
* @resp: command response
* @link_info: link related information
+ * @mac_to_index_bmap: Mac address to CGX table index mapping
* @event_cb: callback for linkchange events
* @event_cb_lock: lock for serializing callback with unregister
- * @cmd_pend: flag set before new command is started
- * flag cleared after command response is received
* @cgx: parent cgx port
+ * @mcast_filters_count: Number of multicast filters installed
* @lmac_id: lmac port id
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
* @name: lmac port name
*/
struct lmac {
@@ -29,12 +31,14 @@ struct lmac {
struct mutex cmd_lock;
u64 resp;
struct cgx_link_user_info link_info;
+ struct rsrc_bmap mac_to_index_bmap;
struct cgx_event_cb event_cb;
/* lock for serializing callback with unregister */
spinlock_t event_cb_lock;
- bool cmd_pend;
struct cgx *cgx;
+ u8 mcast_filters_count;
u8 lmac_id;
+ bool cmd_pend;
char *name;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index cedb2616c509..f5ec39de026a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -134,6 +134,9 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
+ msg_rsp) \
+M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -162,7 +165,15 @@ M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\
M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \
- /* NPA mbox IDs (range 0x400 - 0x5FF) */ \
+M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+ msg_rsp) \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \
+ cgx_max_dmac_entries_get_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+ msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -259,7 +270,11 @@ M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
M(NIX_CN10K_AQ_ENQ, 0x8019, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
nix_cn10k_aq_enq_rsp) \
-M(NIX_GET_HW_INFO, 0x801a, nix_get_hw_info, msg_req, nix_hw_info)
+M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \
+M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
+ nix_bandprof_alloc_rsp) \
+M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
+ msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
@@ -396,6 +411,38 @@ struct cgx_mac_addr_set_or_get {
u8 mac_addr[ETH_ALEN];
};
+/* Structure for requesting the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Structure for response against the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_rsp {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for requesting the operation to
+ * delete DMAC filter entry from CGX interface
+ */
+struct cgx_mac_addr_del_req {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for response against the operation to
+ * get maximum supported DMAC filter entries
+ */
+struct cgx_max_dmac_entries_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 max_dmac_filters;
+};
+
struct cgx_link_user_info {
uint64_t link_up:1;
uint64_t full_duplex:1;
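
On the PF side, requests for the new CGX_MAC_ADDR_* message IDs are built through the otx2_mbox_alloc_msg_<name>() helpers that the M() macro table generates in the nic driver. A hedged sketch (helper names assumed from that convention, not shown in this patch) of adding one DMAC filter and reading back the allocated CAM index:

/* Sketch: send CGX_MAC_ADDR_ADD and return the CAM index the AF
 * picked, for use with later DEL/UPDATE requests.
 */
static int otx2_dmac_filter_add(struct otx2_nic *pf, const u8 *mac, u8 *cam_idx)
{
	struct cgx_mac_addr_add_req *req;
	struct cgx_mac_addr_add_rsp *rsp;
	int err;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	ether_addr_copy(req->mac_addr, mac);
	err = otx2_sync_mbox_msg(&pf->mbox);
	if (!err) {
		rsp = (struct cgx_mac_addr_add_rsp *)
			otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			err = PTR_ERR(rsp);
		else
			*cam_idx = rsp->index;
	}
	mutex_unlock(&pf->mbox.lock);

	return err;
}
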
@@ -494,6 +541,12 @@ struct cgx_set_link_mode_rsp {
int status;
};
+struct cgx_mac_addr_update_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+ u8 index;
+};
+
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */
#define RVU_MAC_VERSION BIT_ULL(2)
@@ -611,7 +664,12 @@ enum nix_af_status {
NIX_AF_INVAL_SSO_PF_FUNC = -420,
NIX_AF_ERR_TX_VTAG_NOSPC = -421,
NIX_AF_ERR_RX_VTAG_INUSE = -422,
- NIX_AF_ERR_NPC_KEY_NOT_SUPP = -423,
+ NIX_AF_ERR_PTP_CONFIG_FAIL = -423,
+ NIX_AF_ERR_NPC_KEY_NOT_SUPP = -424,
+ NIX_AF_ERR_INVALID_NIXBLK = -425,
+ NIX_AF_ERR_INVALID_BANDPROF = -426,
+ NIX_AF_ERR_IPOLICER_NOTSUPP = -427,
+ NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
};
/* For NIX RX vtag action */
@@ -680,6 +738,7 @@ struct nix_cn10k_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
};
union {
struct nix_cn10k_rq_ctx_s rq_mask;
@@ -687,6 +746,7 @@ struct nix_cn10k_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
+ struct nix_bandprof_s prof_mask;
};
};
@@ -698,6 +758,7 @@ struct nix_cn10k_aq_enq_rsp {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
};
};
@@ -713,6 +774,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ u64 prof;
};
union {
struct nix_rq_ctx_s rq_mask;
@@ -720,6 +782,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
+ u64 prof_mask;
};
};
@@ -731,6 +794,7 @@ struct nix_aq_enq_rsp {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
};
};
@@ -913,6 +977,7 @@ struct nix_rx_mode {
#define NIX_RX_MODE_UCAST BIT(0)
#define NIX_RX_MODE_PROMISC BIT(1)
#define NIX_RX_MODE_ALLMULTI BIT(2)
+#define NIX_RX_MODE_USE_MCE BIT(3)
u16 mode;
};
@@ -971,6 +1036,31 @@ struct nix_hw_info {
u16 min_mtu;
};
+struct nix_bandprof_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Count of profiles needed per layer */
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+};
+
+struct nix_bandprof_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+
+ /* There is no need to allocate more than 1 bandwidth profile
+ * per RQ of a PF_FUNC's NIXLF. So limit the maximum
+ * profiles to 64 per PF_FUNC.
+ */
+#define MAX_BANDPROF_PER_PFFUNC 64
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
+struct nix_bandprof_free_req {
+ struct mbox_msghdr hdr;
+ u8 free_all;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
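
A PF asks for ingress policer (bandwidth) profiles per layer, and the response echoes the granted count plus the hardware indices. A hedged sketch of allocating a single leaf-layer profile (BAND_PROF_LEAF_LAYER and the generated mbox helper name are assumptions based on the AF headers):

static int otx2_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *prof_idx)
{
	struct nix_bandprof_alloc_req *req;
	struct nix_bandprof_alloc_rsp *rsp;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->prof_count[BAND_PROF_LEAF_LAYER] = 1;	/* one leaf profile */

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto out;

	rsp = (struct nix_bandprof_alloc_rsp *)
		otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
		err = -EIO;	/* AF granted nothing */
		goto out;
	}
	*prof_idx = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0];
out:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
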
@@ -1228,6 +1318,22 @@ struct ptp_rsp {
u64 clk;
};
+struct set_vf_perm {
+ struct mbox_msghdr hdr;
+ u16 vf;
+#define RESET_VF_PERM BIT_ULL(0)
+#define VF_TRUSTED BIT_ULL(1)
+ u64 flags;
+};
+
+struct lmtst_tbl_setup_req {
+ struct mbox_msghdr hdr;
+ u16 base_pcifunc;
+ u8 use_local_lmt_region;
+ u64 lmt_iova;
+ u64 rsvd[4];
+};
+
/* CPT mailbox error codes
* Range 901 - 1000.
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 1e012e787260..243cf8070e77 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -33,6 +33,10 @@ enum npc_kpu_la_ltype {
NPC_LT_LA_IH_2_ETHER,
NPC_LT_LA_HIGIG2_ETHER,
NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_LT_LA_CH_LEN_90B_ETHER,
+ NPC_LT_LA_CPT_HDR,
+ NPC_LT_LA_CUSTOM_L2_24B_ETHER,
NPC_LT_LA_CUSTOM0 = 0xE,
NPC_LT_LA_CUSTOM1 = 0xF,
};
@@ -42,7 +46,7 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_CTAG,
NPC_LT_LB_STAG_QINQ,
NPC_LT_LB_BTAG,
- NPC_LT_LB_ITAG,
+ NPC_LT_LB_PPPOE,
NPC_LT_LB_DSA,
NPC_LT_LB_DSA_VLAN,
NPC_LT_LB_EDSA,
@@ -50,6 +54,7 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_EXDSA,
NPC_LT_LB_EXDSA_VLAN,
NPC_LT_LB_FDSA,
+ NPC_LT_LB_VLAN_EXDSA,
NPC_LT_LB_CUSTOM0 = 0xE,
NPC_LT_LB_CUSTOM1 = 0xF,
};
@@ -65,6 +70,7 @@ enum npc_kpu_lc_ltype {
NPC_LT_LC_NSH,
NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
+ NPC_LT_LC_NGIO,
NPC_LT_LC_CUSTOM0 = 0xE,
NPC_LT_LC_CUSTOM1 = 0xF,
};
@@ -145,8 +151,18 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND
+
enum npc_pkind_type {
- NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */
+ NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
+ NPC_RX_CHLEN24B_PKIND = 57ULL,
+ NPC_RX_CPT_HDR_PKIND,
+ NPC_RX_CHLEN90B_PKIND,
+ NPC_TX_HIGIG_PKIND,
+ NPC_RX_HIGIG_PKIND,
+ NPC_RX_EDSA_PKIND,
+ NPC_TX_DEF_PKIND, /* NIX-TX PKIND */
};
/* list of known and supported fields in packet header and
@@ -213,7 +229,7 @@ struct npc_kpu_profile_cam {
u16 dp1_mask;
u16 dp2;
u16 dp2_mask;
-};
+} __packed;
struct npc_kpu_profile_action {
u8 errlev;
@@ -233,13 +249,13 @@ struct npc_kpu_profile_action {
u8 mask;
u8 right;
u8 shift;
-};
+} __packed;
struct npc_kpu_profile {
int cam_entries;
int action_entries;
- const struct npc_kpu_profile_cam *cam;
- const struct npc_kpu_profile_action *action;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_profile_action *action;
};
/* NPC KPU register formats */
@@ -425,7 +441,19 @@ struct nix_tx_action {
/* NPC MCAM reserved entry index per nixlf */
#define NIXLF_UCAST_ENTRY 0
#define NIXLF_BCAST_ENTRY 1
-#define NIXLF_PROMISC_ENTRY 2
+#define NIXLF_ALLMULTI_ENTRY 2
+#define NIXLF_PROMISC_ENTRY 3
+
+struct npc_coalesced_kpu_prfl {
+#define NPC_SIGN 0x00666f727063706e
+#define NPC_PRFL_NAME "npc_prfls_array"
+#define NPC_NAME_LEN 32
+ __le64 signature; /* "npcprof\0" (8 bytes/ASCII characters) */
+ u8 name[NPC_NAME_LEN]; /* KPU Profile name */
+ u64 version; /* KPU firmware/profile version */
+ u8 num_prfl; /* No of NPC profiles. */
+ u16 prfl_sz[0];
+};
struct npc_mcam_kex {
/* MKEX Profile Header */
@@ -445,6 +473,15 @@ struct npc_mcam_kex {
u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
} __packed;
+struct npc_kpu_fwdata {
+ int entries;
+ /* What follows is:
+ * struct npc_kpu_profile_cam[entries];
+ * struct npc_kpu_profile_action[entries];
+ */
+ u8 data[0];
+} __packed;
+
struct npc_lt_def {
u8 ltype_mask;
u8 ltype_match;
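
npc_kpu_fwdata is a variable-length record: `entries` CAM rows followed immediately by the same number of action rows, which is why npc_kpu_profile_cam and npc_kpu_profile_action above became __packed. A sketch of how a loader could split one record (pointer arithmetic per the struct's own comment; bounds checking omitted):

/* Locate the CAM and ACTION arrays inside one npc_kpu_fwdata record. */
static void npc_fwdata_split(struct npc_kpu_fwdata *fw,
			     struct npc_kpu_profile_cam **cam,
			     struct npc_kpu_profile_action **action)
{
	*cam = (struct npc_kpu_profile_cam *)fw->data;
	*action = (struct npc_kpu_profile_action *)(*cam + fw->entries);
}
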
@@ -459,6 +496,29 @@ struct npc_lt_def_ipsec {
u8 spi_nz;
};
+struct npc_lt_def_apad {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+} __packed;
+
+struct npc_lt_def_color {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 noffset;
+ u8 offset;
+} __packed;
+
+struct npc_lt_def_et {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+ u8 offset;
+} __packed;
+
struct npc_lt_def_cfg {
struct npc_lt_def rx_ol2;
struct npc_lt_def rx_oip4;
@@ -476,7 +536,41 @@ struct npc_lt_def_cfg {
struct npc_lt_def pck_oip4;
struct npc_lt_def pck_oip6;
struct npc_lt_def pck_iip4;
-};
+ struct npc_lt_def_apad rx_apad0;
+ struct npc_lt_def_apad rx_apad1;
+ struct npc_lt_def_color ovlan;
+ struct npc_lt_def_color ivlan;
+ struct npc_lt_def_color rx_gen0_color;
+ struct npc_lt_def_color rx_gen1_color;
+ struct npc_lt_def_et rx_et[2];
+} __packed;
+
+/* Loadable KPU profile firmware data */
+struct npc_kpu_profile_fwdata {
+#define KPU_SIGN 0x00666f727075706b
+#define KPU_NAME_LEN 32
+/** Maximum number of custom KPU entries supported by the built-in profile. */
+#define KPU_MAX_CST_ENT 2
+ /* KPU Profile Header */
+ __le64 signature; /* "kpuprof\0" (8 bytes/ASCII characters) */
+ u8 name[KPU_NAME_LEN]; /* KPU Profile name */
+ __le64 version; /* KPU profile version */
+ u8 kpus;
+ u8 reserved[7];
+
+ /* Default MKEX profile to be used with this KPU profile. May be
+ * overridden with mkex_profile module parameter. Format is same as for
+ * the MKEX profile to streamline processing.
+ */
+ struct npc_mcam_kex mkex;
+ /* LTYPE values for specific HW offloaded protocols. */
+ struct npc_lt_def_cfg lt_def;
+ /* Dynamically sized data:
+ * Custom KPU CAM and ACTION configuration entries.
+ * struct npc_kpu_fwdata kpu[kpus];
+ */
+ u8 data[0];
+} __packed;
struct rvu_npc_mcam_rule {
struct flow_msg packet;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 5c372d2c24a1..fee655cc7523 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -11,7 +11,10 @@
#ifndef NPC_PROFILE_H
#define NPC_PROFILE_H
-#define NPC_KPU_PROFILE_VER 0x0000000100050000
+#define NPC_KPU_PROFILE_VER 0x0000000100060000
+#define NPC_KPU_VER_MAJ(ver) ((u16)(((ver) >> 32) & 0xFFFF))
+#define NPC_KPU_VER_MIN(ver) ((u16)(((ver) >> 16) & 0xFFFF))
+#define NPC_KPU_VER_PATCH(ver) ((u16)((ver) & 0xFFFF))
#define NPC_IH_W 0x8000
#define NPC_IH_UTAG 0x2000
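
The version word packs major, minor and patch into adjacent 16-bit fields, so the bumped NPC_KPU_PROFILE_VER of 0x0000000100060000 decodes as 1.6.0:

/* Worked example: decode the new profile version */
u64 ver = NPC_KPU_PROFILE_VER;		/* 0x0000000100060000 */
u16 maj = NPC_KPU_VER_MAJ(ver);		/* (ver >> 32) & 0xFFFF = 1 */
u16 min = NPC_KPU_VER_MIN(ver);		/* (ver >> 16) & 0xFFFF = 6 */
u16 patch = NPC_KPU_VER_PATCH(ver);	/* ver & 0xFFFF = 0 */
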
@@ -20,6 +23,7 @@
#define NPC_ETYPE_IP6 0x86dd
#define NPC_ETYPE_ARP 0x0806
#define NPC_ETYPE_RARP 0x8035
+#define NPC_ETYPE_NGIO 0x8842
#define NPC_ETYPE_MPLSU 0x8847
#define NPC_ETYPE_MPLSM 0x8848
#define NPC_ETYPE_ETAG 0x893f
@@ -33,6 +37,10 @@
#define NPC_ETYPE_PPP 0x880b
#define NPC_ETYPE_NSH 0x894f
#define NPC_ETYPE_DSA 0xdada
+#define NPC_ETYPE_PPPOE 0x8864
+
+#define NPC_PPP_IP 0x0021
+#define NPC_PPP_IP6 0x0057
#define NPC_IPNH_HOP 0
#define NPC_IPNH_ICMP 1
@@ -142,14 +150,15 @@
#define NPC_DSA_EDSA 0x8000
#define NPC_DSA_FDSA 0xc000
-#define NPC_KEXOF_DMAC 8
-#define MKEX_SIGN 0x19bbfdbd15f /* strtoull of "mkexprof" with base:36 */
+#define NPC_KEXOF_DMAC 9
+#define MKEX_SIGN 0x19bbfdbd15f
#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
((flags_ena) << 6) | ((key_ofs) & 0x3F))
/* Rx parse key extract nibble enable */
#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
+ NPC_PARSE_NIBBLE_ERRCODE | \
NPC_PARSE_NIBBLE_LA_LTYPE | \
NPC_PARSE_NIBBLE_LB_LTYPE | \
NPC_PARSE_NIBBLE_LC_LTYPE | \
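
KEX_LD_CFG packs one extractor's configuration into a register value. For example, an enabled 6-byte extractor (bytesm1 = 5) at header offset 0, with no flags, writing to the key offset this hunk shifts to 9:

/* Worked example: DMAC extractor at the new NPC_KEXOF_DMAC = 9 */
u64 dmac_ld = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC);
/* = (5 << 16) | (1 << 7) | 9 = 0x50089 */
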
@@ -170,25 +179,31 @@ enum npc_kpu_parser_state {
NPC_S_KPU1_EXDSA,
NPC_S_KPU1_HIGIG2,
NPC_S_KPU1_IH_NIX_HIGIG2,
+ NPC_S_KPU1_CUSTOM_L2_90B,
+ NPC_S_KPU1_CPT_HDR,
+ NPC_S_KPU1_CUSTOM_L2_24B,
+ NPC_S_KPU1_VLAN_EXDSA,
NPC_S_KPU2_CTAG,
NPC_S_KPU2_CTAG2,
NPC_S_KPU2_SBTAG,
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
- NPC_S_KPU2_ITAG,
NPC_S_KPU2_PREHEADER,
NPC_S_KPU2_EXDSA,
+ NPC_S_KPU2_NGIO,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
- NPC_S_KPU3_ITAG,
NPC_S_KPU3_CTAG_C,
NPC_S_KPU3_STAG_C,
NPC_S_KPU3_QINQ_C,
NPC_S_KPU3_DSA,
+ NPC_S_KPU3_VLAN_EXDSA,
NPC_S_KPU4_MPLS,
NPC_S_KPU4_NSH,
NPC_S_KPU4_FDSA,
+ NPC_S_KPU4_VLAN_EXDSA,
+ NPC_S_KPU4_PPPOE,
NPC_S_KPU5_IP,
NPC_S_KPU5_IP6,
NPC_S_KPU5_ARP,
@@ -198,13 +213,19 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_MPLS,
NPC_S_KPU5_MPLS_PL,
NPC_S_KPU5_NSH,
+ NPC_S_KPU5_CPT_IP,
+ NPC_S_KPU5_CPT_IP6,
NPC_S_KPU6_IP6_EXT,
NPC_S_KPU6_IP6_HOP_DEST,
NPC_S_KPU6_IP6_ROUT,
NPC_S_KPU6_IP6_FRAG,
+ NPC_S_KPU6_IP6_CPT_FRAG,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST,
+ NPC_S_KPU6_IP6_CPT_ROUT,
NPC_S_KPU7_IP6_EXT,
NPC_S_KPU7_IP6_ROUT,
NPC_S_KPU7_IP6_FRAG,
+ NPC_S_KPU7_CPT_IP6_FRAG,
NPC_S_KPU8_TCP,
NPC_S_KPU8_UDP,
NPC_S_KPU8_SCTP,
@@ -265,7 +286,6 @@ enum npc_kpu_la_lflag {
NPC_F_LA_L_UNK_ETYPE = 1,
NPC_F_LA_L_WITH_VLAN,
NPC_F_LA_L_WITH_ETAG,
- NPC_F_LA_L_WITH_ITAG,
NPC_F_LA_L_WITH_MPLS,
NPC_F_LA_L_WITH_NSH,
};
@@ -442,7 +462,28 @@ enum NPC_ERRLEV_E {
NPC_ERRLEV_ENUM_LAST = 16,
};
-static const struct npc_kpu_profile_action ikpu_action_entries[] = {
+#define NPC_KPU_NOP_CAM \
+ { \
+ NPC_S_NA, 0xff, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ }
+
+#define NPC_KPU_NOP_ACTION \
+ { \
+ NPC_ERRLEV_RE, NPC_EC_NOERR, \
+ 0, 0, 0, 0, 0, \
+ NPC_S_NA, 0, 0, \
+ NPC_LID_LA, NPC_LT_NA, \
+ 0, \
+ 0, 0, 0, 0, \
+ }
+
+static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
@@ -950,7 +991,7 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ NPC_S_KPU1_VLAN_EXDSA, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -958,8 +999,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ 36, 40, 44, 0, 0,
+ NPC_S_KPU1_CUSTOM_L2_24B, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -967,8 +1008,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ 40, 54, 58, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -976,8 +1017,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ 102, 106, 110, 0, 0,
+ NPC_S_KPU1_CUSTOM_L2_90B, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -1021,7 +1062,9 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -1080,6 +1123,15 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_CTAG,
0xffff,
+ NPC_ETYPE_NGIO,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1123,7 +1175,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_ITAG,
+ NPC_ETYPE_MPLSU,
0xffff,
0x0000,
0x0000,
@@ -1132,7 +1184,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_MPLSU,
+ NPC_ETYPE_MPLSM,
0xffff,
0x0000,
0x0000,
@@ -1141,7 +1193,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_MPLSM,
+ NPC_ETYPE_NSH,
0xffff,
0x0000,
0x0000,
@@ -1150,7 +1202,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_NSH,
+ NPC_ETYPE_DSA,
0xffff,
0x0000,
0x0000,
@@ -1159,7 +1211,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_DSA,
+ NPC_ETYPE_PPPOE,
0xffff,
0x0000,
0x0000,
@@ -1294,15 +1346,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX, 0xff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH_NIX, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1339,8 +1382,8 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH, 0xff,
- NPC_IH_W|NPC_IH_UTAG,
- NPC_IH_W|NPC_IH_UTAG,
+ NPC_IH_W | NPC_IH_UTAG,
+ NPC_IH_W | NPC_IH_UTAG,
0x0000,
0x0000,
0x0000,
@@ -1349,7 +1392,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
{
NPC_S_KPU1_IH, 0xff,
NPC_IH_W,
- NPC_IH_W|NPC_IH_UTAG,
+ NPC_IH_W | NPC_IH_UTAG,
0x0000,
0x0000,
0x0000,
@@ -1358,7 +1401,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
{
NPC_S_KPU1_IH, 0xff,
0x0000,
- NPC_IH_W|NPC_IH_UTAG,
+ NPC_IH_W | NPC_IH_UTAG,
0x0000,
0x0000,
0x0000,
@@ -1501,15 +1544,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_HIGIG2, 0xff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_HIGIG2, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1645,7 +1679,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_ITAG,
+ NPC_ETYPE_MPLSU,
0xffff,
0x0000,
0x0000,
@@ -1654,7 +1688,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_MPLSU,
+ NPC_ETYPE_MPLSM,
0xffff,
0x0000,
0x0000,
@@ -1663,7 +1697,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_MPLSM,
+ NPC_ETYPE_NSH,
0xffff,
0x0000,
0x0000,
@@ -1672,6 +1706,132 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
NPC_ETYPE_NSH,
0xffff,
0x0000,
@@ -1680,7 +1840,88 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
0x0000,
0x0000,
0x0000,
@@ -1689,6 +1930,150 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -1699,7 +2084,9 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -1783,6 +2170,24 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
{
NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -2226,15 +2631,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
NPC_S_KPU2_ETAG, 0xff,
NPC_ETYPE_SBTAG,
0xffff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ETAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
0x0000,
0x0000,
0x0000,
@@ -2313,159 +2709,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_CTAG2, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -2817,6 +3060,15 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_NGIO, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -2827,7 +3079,9 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU3_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -3243,159 +3497,6 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU3_CTAG_C, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -3936,6 +4037,15 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU3_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -3946,7 +4056,9 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU4_MPLS, 0xff,
NPC_MPLS_S,
@@ -4084,6 +4196,78 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
},
{
NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
0x0000,
NPC_DSA_FDSA,
0x0000,
@@ -4092,6 +4276,87 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4102,7 +4367,9 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU5_IP, 0xff,
0x0000,
@@ -4125,116 +4392,116 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_TCP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_UDP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_SCTP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_ICMP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_IGMP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_ESP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_AH,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_GRE,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_IP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_IP6,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_MPLS,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
0x0000,
0x0000,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
0x0000,
0x0000,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -4245,7 +4512,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4254,7 +4521,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4263,7 +4530,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4272,7 +4539,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4281,7 +4548,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4290,7 +4557,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4299,7 +4566,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4308,7 +4575,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4317,7 +4584,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4326,7 +4593,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4335,7 +4602,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4344,7 +4611,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4662,6 +4929,429 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ NPC_IP_TTL_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ NPC_IP6_HOP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_DEST << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MOBILITY << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOSTID << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SHIM6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4672,7 +5362,9 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
};
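
Each entry in these CAM tables is a ternary match: a parser state plus three 16-bit data/mask word pairs compared against what the KPU extracted from the packet. As a minimal sketch of those semantics (not driver code; the struct is reproduced from the npc_kpu_profile_cam layout in npc.h only so the snippet stands alone):

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the table layout above: a state byte plus three 16-bit
 * data/mask pairs (dp0..dp2). */
struct npc_kpu_profile_cam {
	uint8_t  state, state_mask;
	uint16_t dp0, dp0_mask;
	uint16_t dp1, dp1_mask;
	uint16_t dp2, dp2_mask;
};

/* An entry hits when the current state and each extracted word equal
 * the entry's data under the entry's mask (ternary-CAM semantics);
 * all-zero data/mask pairs act as don't-cares. */
static bool npc_cam_entry_matches(const struct npc_kpu_profile_cam *cam,
				  uint8_t state, const uint16_t w[3])
{
	return (state & cam->state_mask) == cam->state &&
	       (w[0] & cam->dp0_mask) == cam->dp0 &&
	       (w[1] & cam->dp1_mask) == cam->dp1 &&
	       (w[2] & cam->dp2_mask) == cam->dp2;
}
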
-static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU6_IP6_EXT, 0xff,
0x0000,
@@ -5007,6 +5699,330 @@ static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -5017,7 +6033,9 @@ static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
};
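
Note the pattern repeated for every table from here on: the array loses its const qualifier and gains two NPC_KPU_NOP_CAM placeholders at indices 0 and 1. A plausible reading (an inference, not stated in the patch) is that the AF wants writable tables with two reserved leading slots it can repurpose at runtime, for example when a custom parser profile is loaded. A hypothetical sketch of such patching, with npc_patch_reserved_slots and repl being illustrative names only:

/* Hypothetical: overwrite the two reserved NOP slots of a (now
 * non-const) KPU CAM table with runtime-supplied entries. */
static void npc_patch_reserved_slots(struct npc_kpu_profile_cam *tbl,
				     const struct npc_kpu_profile_cam *repl)
{
	tbl[0] = repl[0];	/* first NPC_KPU_NOP_CAM placeholder */
	tbl[1] = repl[1];	/* second placeholder */
}
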
-static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU7_IP6_EXT, 0xff,
0x0000,
@@ -5226,6 +6244,105 @@ static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -5236,7 +6353,9 @@ static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU8_TCP, 0xff,
0x0000,
@@ -5259,8 +6378,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
0x0000,
0x0000,
},
@@ -5268,8 +6387,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
- NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
0x0000,
0x0000,
},
@@ -5277,8 +6396,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
0x0000,
0x0000,
},
@@ -5286,8 +6405,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
- NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
0x0000,
0x0000,
},
@@ -5565,7 +6684,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5574,7 +6693,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5583,7 +6702,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5592,7 +6711,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5637,7 +6756,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5646,7 +6765,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5655,7 +6774,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5664,7 +6783,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5709,7 +6828,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_NSH,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5718,7 +6837,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_NSH,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5727,7 +6846,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_NSH,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5736,7 +6855,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_NSH,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5781,7 +6900,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5790,7 +6909,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5799,7 +6918,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5808,7 +6927,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5853,7 +6972,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP6,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5862,7 +6981,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP6,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5871,7 +6990,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP6,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5880,7 +6999,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP6,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5916,7 +7035,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_PPP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_VER_1,
+ NPC_GRE_F_KEY | NPC_GRE_VER_1,
0xffff,
0x0000,
0x0000,
@@ -5925,7 +7044,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_PPP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_VER_1,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1,
0xffff,
0x0000,
0x0000,
@@ -5934,7 +7053,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_PPP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_ACK|NPC_GRE_VER_1,
+ NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1,
0xffff,
0x0000,
0x0000,
@@ -5943,7 +7062,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_PPP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_F_ACK|NPC_GRE_VER_1,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1,
0xffff,
0x0000,
0x0000,
@@ -5977,7 +7096,9 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
NPC_MPLS_S,
@@ -6387,8 +7508,8 @@ static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_S_KPU9_GTPU, 0xff,
0x0000,
0x0000,
- 0x0000,
- 0x0000,
+ NPC_GTP_PT_GTP | NPC_GTP_VER1,
+ NPC_GTP_PT_MASK | NPC_GTP_VER_MASK,
0x0000,
0x0000,
},
@@ -6448,7 +7569,9 @@ static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
},
};
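
One behavioral change hides among the mechanical edits above: the GTPU catch-all entry no longer matches everything. Its third data/mask pair changes from all-zero (don't-care) to NPC_GTP_PT_GTP | NPC_GTP_VER1 under NPC_GTP_PT_MASK | NPC_GTP_VER_MASK, so only headers whose version and protocol-type bits identify a GTPv1 GTP header are classified as GTPU; anything else falls through to the later entries. A sketch of the narrowed check (the constants are the profile's own, defined in npc.h):

/* The narrowed default: hit only when the GTP word carries the
 * version-1 and PT=1 (GTP, not GTP') bits. */
static bool npc_gtp_word_is_gtpu_v1(uint16_t word)
{
	const uint16_t data = NPC_GTP_PT_GTP | NPC_GTP_VER1;
	const uint16_t mask = NPC_GTP_PT_MASK | NPC_GTP_VER_MASK;

	return (word & mask) == data;
}
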
-static const struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU10_TU_MPLS, 0xff,
NPC_MPLS_S,
@@ -6613,7 +7736,9 @@ static const struct npc_kpu_profile_cam kpu10_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU11_TU_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -6922,13 +8047,15 @@ static const struct npc_kpu_profile_cam kpu11_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_TCP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6936,8 +8063,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_UDP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6945,8 +8072,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_SCTP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6954,8 +8081,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_ICMP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6963,8 +8090,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_IGMP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6972,8 +8099,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_ESP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6981,8 +8108,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_AH,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6990,8 +8117,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
0x0000,
0x0000,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -7177,7 +8304,9 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU13_TU_IP6_EXT, 0xff,
0x0000,
@@ -7189,7 +8318,9 @@ static const struct npc_kpu_profile_cam kpu13_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU14_TU_IP6_EXT, 0xff,
0x0000,
@@ -7201,7 +8332,9 @@ static const struct npc_kpu_profile_cam kpu14_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
@@ -7224,8 +8357,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
0x0000,
0x0000,
},
@@ -7233,8 +8366,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
- NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
0x0000,
0x0000,
},
@@ -7242,8 +8375,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
0x0000,
0x0000,
},
@@ -7251,8 +8384,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
- NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
0x0000,
0x0000,
},
@@ -7402,7 +8535,9 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU16_TCP_DATA, 0xff,
0x0000,
@@ -7459,7 +8594,9 @@ static const struct npc_kpu_profile_cam kpu16_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu1_action_entries[] = {
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
@@ -7511,6 +8648,14 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 0, 0, 0,
+ NPC_S_KPU2_NGIO, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
NPC_S_KPU2_CTAG2, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -7518,7 +8663,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
+ 4, 8, 12, 0, 0,
NPC_S_KPU2_CTAG, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -7550,14 +8695,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 12, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 14, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
@@ -7590,6 +8727,14 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 2, 0,
+ NPC_S_KPU4_PPPOE, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LA, NPC_LT_LA_8023,
@@ -7707,15 +8852,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 20, 1,
- NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
@@ -7788,7 +8924,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 16, 2, 0,
+ 4, 8, 12, 2, 0,
NPC_S_KPU4_FDSA, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
0,
@@ -7897,15 +9033,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 28, 1,
- NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 30, 1,
NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
@@ -8031,15 +9158,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
- | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 38, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
@@ -8075,6 +9193,326 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_CPT_IP, 56, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_CPT_IP6, 56, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 54, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 54, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_CPT_IP, 60, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_CPT_IP6, 60, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 58, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 58, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 0, 0, 1, 0,
+ NPC_S_KPU3_VLAN_EXDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LA, NPC_EC_L2_K1,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -8084,7 +9522,9 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
};
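
The action entries are positional initializers, so the new CPT rows are easiest to read against the npc_kpu_profile_action field order in npc.h (assumed here: errlev, errcode, three dp offsets, bypass count, parse-done, next state, pointer advance, capture enable, lid, ltype, flags, then offset/mask/right/shift for variable advance). Annotated under that assumption, one of the CPT actions added above reads:

/* Assumed field-by-field reading of:
 *   NPC_ERRLEV_RE, NPC_EC_NOERR,
 *   8, 0, 6, 3, 0,
 *   NPC_S_KPU5_CPT_IP, 56, 1,
 *   NPC_LID_LA, NPC_LT_LA_CPT_HDR,
 *   0,
 *   0, 0, 0, 0,
 *
 * errlev/errcode:             no parse error raised
 * dp offsets 8/0/6, bypass 3: latch words for the target KPU and
 *                             skip KPU2..KPU4 entirely
 * next state/advance/capture: resume at NPC_S_KPU5_CPT_IP, 56 bytes
 *                             past the CPT result header
 * lid/ltype:                  layer A recorded as NPC_LT_LA_CPT_HDR
 * trailing zeros:             no flags, no variable advance
 */
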
-static const struct npc_kpu_profile_action kpu2_action_entries[] = {
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
@@ -8159,6 +9599,22 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -8170,7 +9626,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8178,7 +9634,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
6, 0, 0, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8186,7 +9642,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_ARP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8194,7 +9650,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_RARP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8202,7 +9658,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_PTP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8210,7 +9666,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_FCOE, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8218,7 +9674,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 10, 1, 0,
NPC_S_KPU4_MPLS, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8226,7 +9682,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 10, 1, 0,
NPC_S_KPU4_MPLS, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8234,7 +9690,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 1, 0,
NPC_S_KPU4_NSH, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8242,7 +9698,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG_UNK,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG_UNK,
0, 0, 0, 0,
},
{
@@ -8250,7 +9706,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 0, 0, 0,
NPC_S_KPU3_CTAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG_CTAG,
0, 0, 0, 0,
},
{
@@ -8258,7 +9714,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 0, 0, 0,
NPC_S_KPU3_STAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_STAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG_STAG,
0, 0, 0, 0,
},
{
@@ -8266,7 +9722,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8274,7 +9730,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
6, 0, 0, 2, 0,
NPC_S_KPU5_IP6, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8282,7 +9738,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_ARP, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8290,7 +9746,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_RARP, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8298,7 +9754,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_PTP, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8306,7 +9762,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_FCOE, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8314,7 +9770,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 10, 1, 0,
NPC_S_KPU4_MPLS, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8322,7 +9778,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 10, 1, 0,
NPC_S_KPU4_MPLS, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8330,7 +9786,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 1, 0,
NPC_S_KPU4_NSH, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8338,7 +9794,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_STAG, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_STAG,
0, 0, 0, 0,
},
{
@@ -8346,7 +9802,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_CTAG, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_CTAG,
0, 0, 0, 0,
},
{
@@ -8354,7 +9810,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_UNK,
0, 0, 0, 0,
},
{
@@ -8546,15 +10002,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_CTAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 16, 20, 24, 0, 0,
- NPC_S_KPU3_ITAG, 14, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_BTAG_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8562,7 +10010,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 0, 0, 0,
NPC_S_KPU3_STAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG,
0, 0, 0, 0,
},
{
@@ -8570,7 +10018,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 0, 0, 0,
NPC_S_KPU3_QINQ, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ,
0, 0, 0, 0,
},
{
@@ -8578,7 +10026,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8586,7 +10034,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
6, 0, 0, 2, 0,
NPC_S_KPU5_IP6, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8594,7 +10042,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_ARP, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8602,7 +10050,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_STAG, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_STAG,
0, 0, 0, 0,
},
{
@@ -8610,7 +10058,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_CTAG, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_CTAG,
0, 0, 0, 0,
},
{
@@ -8618,7 +10066,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_UNK,
0, 0, 0, 0,
},
{
@@ -8632,142 +10080,6 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -9078,6 +10390,14 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NGIO,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -9087,11 +10407,13 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
},
};
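
kpu2 also learns PPPoE above: two new actions classify the frame as NPC_LT_LB_PPPOE and branch straight to the KPU5 IPv4/IPv6 states. For reference, the header they step over is the RFC 2516 session header plus the 2-byte PPP protocol field; a self-contained sketch (on-wire fields are big-endian, so a real parser would byte-swap before comparing):

#include <stdbool.h>
#include <stdint.h>

/* PPPoE session header (RFC 2516) followed by the first PPP word. */
struct pppoe_hdr {
	uint8_t  ver_type;	/* 0x11: version 1, type 1 */
	uint8_t  code;		/* 0x00 for session data */
	uint16_t session_id;
	uint16_t length;
	uint16_t ppp_proto;	/* 0x0021 = IPv4, 0x0057 = IPv6 */
};

/* Mirrors the two new branches: IPv4 -> NPC_S_KPU5_IP,
 * IPv6 -> NPC_S_KPU5_IP6; anything else is left unclassified. */
static bool pppoe_carries_ip(const struct pppoe_hdr *h, bool *is_v6)
{
	if (h->ppp_proto == 0x0021) { *is_v6 = false; return true; }
	if (h->ppp_proto == 0x0057) { *is_v6 = true;  return true; }
	return false;
}
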
-static const struct npc_kpu_profile_action kpu3_action_entries[] = {
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 4, 0,
+ NPC_S_KPU5_IP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9099,7 +10421,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 4, 0,
+ NPC_S_KPU5_IP6, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9107,7 +10429,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 4, 0,
+ NPC_S_KPU5_ARP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9115,7 +10437,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_RARP, 4, 0,
+ NPC_S_KPU5_RARP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9123,7 +10445,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_PTP, 4, 0,
+ NPC_S_KPU5_PTP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9131,7 +10453,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_FCOE, 4, 0,
+ NPC_S_KPU5_FCOE, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9139,7 +10461,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9147,7 +10469,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9155,7 +10477,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 0, 0,
- NPC_S_KPU4_NSH, 4, 0,
+ NPC_S_KPU4_NSH, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9458,142 +10780,6 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
NPC_S_KPU5_IP, 4, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -10073,6 +11259,14 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU4_VLAN_EXDSA, 12, 1,
+ NPC_LID_LB, NPC_LT_LB_VLAN_EXDSA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10082,7 +11276,9 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu4_action_entries[] = {
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -10205,6 +11401,70 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
@@ -10212,6 +11472,78 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K4,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10221,7 +11553,9 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
},
};
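
In the kpu5 actions that follow, the IP-with-options entries end in "0, 0xf, 0, 2", which (under the field order assumed earlier) masks the IHL nibble of the first IP byte and shifts it left by two to produce the variable pointer advance. A worked example of that arithmetic:

/* IHL counts 32-bit words; masking the low nibble of the first IPv4
 * byte and shifting left by 2 yields the header length in bytes
 * (20..60), which becomes the pointer advance for option-bearing
 * headers. */
static unsigned int ipv4_hdr_len(uint8_t first_byte)
{
	return (unsigned int)(first_byte & 0xf) << 2;
}
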
-static const struct npc_kpu_profile_action kpu5_action_entries[] = {
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
0, 0, 0, 0, 1,
@@ -10719,6 +12053,382 @@ static const struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_6TO4,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 2, 0,
+ NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_6TO4,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_DEST,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_ROUT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_FRAG, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_U_IP6_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOSTID,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_SHIM6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10728,7 +12438,9 @@ static const struct npc_kpu_profile_action kpu5_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu6_action_entries[] = {
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -11026,6 +12738,294 @@ static const struct npc_kpu_profile_action kpu6_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU7_IP6_ROUT, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -11035,7 +13035,9 @@ static const struct npc_kpu_profile_action kpu6_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu7_action_entries[] = {
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -11221,6 +13223,94 @@ static const struct npc_kpu_profile_action kpu7_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -11230,7 +13320,9 @@ static const struct npc_kpu_profile_action kpu7_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu8_action_entries[] = {
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -11889,7 +13981,9 @@ static const struct npc_kpu_profile_action kpu8_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu9_action_entries[] = {
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -12252,10 +14346,10 @@ static const struct npc_kpu_profile_action kpu9_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
NPC_LID_LE, NPC_LT_LE_GTPU,
- NPC_F_LE_L_GTPU_UNK,
+ 0,
0, 0, 0, 0,
},
{
@@ -12308,7 +14402,9 @@ static const struct npc_kpu_profile_action kpu9_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu10_action_entries[] = {
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -12455,7 +14551,9 @@ static const struct npc_kpu_profile_action kpu10_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu11_action_entries[] = {
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
@@ -12730,7 +14828,9 @@ static const struct npc_kpu_profile_action kpu11_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu12_action_entries[] = {
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 12, 0, 2, 0,
@@ -12957,7 +15057,9 @@ static const struct npc_kpu_profile_action kpu12_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu13_action_entries[] = {
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12968,7 +15070,9 @@ static const struct npc_kpu_profile_action kpu13_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu14_action_entries[] = {
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12979,7 +15083,9 @@ static const struct npc_kpu_profile_action kpu14_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu15_action_entries[] = {
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -13158,7 +15264,9 @@ static const struct npc_kpu_profile_action kpu15_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu16_action_entries[] = {
+static struct npc_kpu_profile_action kpu16_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -13209,7 +15317,7 @@ static const struct npc_kpu_profile_action kpu16_action_entries[] = {
},
};
-static const struct npc_kpu_profile npc_kpu_profiles[] = {
+static struct npc_kpu_profile npc_kpu_profiles[] = {
{
ARRAY_SIZE(kpu1_cam_entries),
ARRAY_SIZE(kpu1_action_entries),
@@ -13308,12 +15416,22 @@ static const struct npc_kpu_profile npc_kpu_profiles[] = {
},
};
-static const struct npc_lt_def_cfg npc_lt_defaults = {
+static struct npc_lt_def_cfg npc_lt_defaults = {
.rx_ol2 = {
.lid = NPC_LID_LA,
.ltype_match = NPC_LT_LA_ETHER,
.ltype_mask = 0x0F,
},
+ .ovlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_CTAG,
+ .ltype_mask = 0x0F,
+ },
+ .ivlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_STAG_QINQ,
+ .ltype_mask = 0x0F,
+ },
.rx_oip4 = {
.lid = NPC_LID_LC,
.ltype_match = NPC_LT_LC_IP,
@@ -13392,6 +15510,30 @@ static const struct npc_lt_def_cfg npc_lt_defaults = {
.ltype_match = NPC_LT_LG_TU_IP,
.ltype_mask = 0x0F,
},
+ .rx_apad0 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_apad1 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_et = {
+ {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ },
};
static struct npc_mcam_kex npc_mkex_default = {
@@ -13399,7 +15541,7 @@ static struct npc_mcam_kex npc_mkex_default = {
.name = "default",
.kpu_version = NPC_KPU_PROFILE_VER,
.keyx_cfg = {
- /* nibble: LA..LE (ltype only) + channel */
+ /* nibble: LA..LE (ltype only) + Error code + Channel */
[NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX,
/* nibble: LA..LE (ltype only) */
[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
@@ -13410,30 +15552,40 @@ static struct npc_mcam_kex npc_mkex_default = {
[NPC_LID_LA] = {
/* Layer A: Ethernet: */
[NPC_LT_LA_ETHER] = {
- /* DMAC: 6 bytes, KW1[47:0] */
+ /* DMAC: 6 bytes, KW1[55:8] */
KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
+ },
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_HIGIG2_ETHER] = {
+ /* Classification: 2 bytes, KW1[23:8] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* VID: 2 bytes, KW1[39:24] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0,
+ NPC_KEXOF_DMAC + 2),
},
},
[NPC_LID_LB] = {
/* Layer B: Single VLAN (CTAG) */
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
[NPC_LT_LB_CTAG] = {
- KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4),
+ /* CTAG VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
},
/* Layer B: Stacked VLAN (STAG|QinQ) */
[NPC_LT_LB_STAG_QINQ] = {
- /* Outer VLAN: 2 bytes, KW0[63:48] */
- KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4),
+ /* Outer VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x5),
},
[NPC_LT_LB_FDSA] = {
- /* SWITCH PORT: 1 byte, KW0[63:48] */
- KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x6),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x4),
+ /* SWITCH PORT: 1 byte, KW0[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
},
},
[NPC_LID_LC] = {
@@ -13477,6 +15629,13 @@ static struct npc_mcam_kex npc_mkex_default = {
/* DMAC: 6 bytes, KW1[63:16] */
KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
},
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_IH_NIX_HIGIG2_ETHER] = {
+ /* PF_FUNC: 2B , KW0 [47:32] */
+ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
+ /* VID: 2 bytes, KW1[31:16] */
+ KEX_LD_CFG(0x01, 0x10, 0x1, 0x0, 0xa),
+ },
},
[NPC_LID_LB] = {
/* Layer B: Single VLAN (CTAG) */
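
The KW0/KW1 comments in these entries can be decoded straight from the
extraction descriptors. A minimal sketch of the packing, assuming the
KEX_LD_CFG(bytesm1, hdr_ofs, ena, flits_ena, key_ofs) macro from npc.h:

    /* Assumed descriptor encoding, per the npc.h macro: extract
     * (bytesm1 + 1) bytes starting at header offset hdr_ofs into
     * the MCAM search key at byte offset key_ofs.
     */
    #define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flits_ena, key_ofs) \
        (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
         ((flits_ena) << 6) | ((key_ofs) & 0x3F))

    /* Example: KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7) in the CTAG entry
     * extracts 2 bytes from header offset 2 (the VLAN TCI) into key
     * byte offset 7, i.e. key bits 56..71, which is exactly
     * KW0[63:56] plus KW1[7:0] as the comment on that entry states.
     */
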
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index ab24a5e8ee8a..5fe277e354f7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -57,6 +57,10 @@ static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+static char *kpu_profile; /* KPU profile name */
+module_param(kpu_profile, charp, 0000);
+MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
+
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -180,6 +184,14 @@ int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
return (rsrc->max - used);
}
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return false;
+
+ return !test_bit(id, rsrc->bmap);
+}
+
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
@@ -379,8 +391,10 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
/* Get numVFs attached to this PF and first HWVF */
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
- *numvfs = (cfg >> 12) & 0xFF;
- *hwvf = cfg & 0xFFF;
+ if (numvfs)
+ *numvfs = (cfg >> 12) & 0xFF;
+ if (hwvf)
+ *hwvf = cfg & 0xFFF;
}
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
@@ -1302,7 +1316,7 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu,
return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
-static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int blkaddr = BLKADDR_NIX0, vf;
@@ -1754,6 +1768,48 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, nixlf;
+ u16 target;
+
+ /* Only PF can add VF permissions */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
+ return -EOPNOTSUPP;
+
+ target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ if (req->flags & RESET_VF_PERM) {
+ pfvf->flags &= RVU_CLEAR_VF_PERM;
+ } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
+ (req->flags & VF_TRUSTED)) {
+ change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
+ /* disable multicast and promisc entries */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
+ if (blkaddr < 0)
+ return 0;
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+ target, 0);
+ if (nixlf < 0)
+ return 0;
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_ALLMULTI_ENTRY,
+ false);
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_PROMISC_ENTRY,
+ false);
+ }
+ }
+
+ return 0;
+}
+
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
@@ -2279,6 +2335,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+ rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
mutex_unlock(&rvu->flr_lock);
}
@@ -2804,6 +2861,12 @@ static int rvu_enable_sriov(struct rvu *rvu)
if (!vfs)
return 0;
+ /* LBK channel number 63 is used for switching packets between
+ * CGX mapped VFs. Hence limit LBK pairs to 62.
+ */
+ if (vfs > 62)
+ vfs = 62;
+
/* Save VFs number for reference in VF interrupts handlers.
* Since interrupts might start arriving during SRIOV enablement
* ordinary API cannot be used to get number of enabled VFs.
@@ -2842,6 +2905,8 @@ static void rvu_update_module_params(struct rvu *rvu)
strscpy(rvu->mkex_pfl_name,
mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
+ strscpy(rvu->kpu_pfl_name,
+ kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -2944,6 +3009,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Initialize debugfs */
rvu_dbg_init(rvu);
+ mutex_init(&rvu->rswitch.switch_lock);
+
return 0;
err_dl:
rvu_unregister_dl(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index c2cc4806d13c..91503fb2762c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -223,13 +223,17 @@ struct rvu_pfvf {
u16 maxlen;
u16 minlen;
- u8 pf_set_vf_cfg;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
- /* Broadcast pkt replication info */
+ /* Broadcast/Multicast/Promisc pkt replication info */
u16 bcast_mce_idx;
+ u16 mcast_mce_idx;
+ u16 promisc_mce_idx;
struct nix_mce_list bcast_mce_list;
+ struct nix_mce_list mcast_mce_list;
+ struct nix_mce_list promisc_mce_list;
+ bool use_mce_list;
struct rvu_npc_mcam_rule *def_ucast_rule;
@@ -239,8 +243,19 @@ struct rvu_pfvf {
u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
+ u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */
+ unsigned long flags;
};
+enum rvu_pfvf_flags {
+ NIXLF_INITIALIZED = 0,
+ PF_SET_VF_MAC,
+ PF_SET_VF_CFG,
+ PF_SET_VF_TRUSTED,
+};
+
+#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
+
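
Since the permission flags occupy consecutive bit positions in the enum
above, one GENMASK() complement clears them all at once. A small
illustration (the values follow directly from the enum):

    unsigned long flags;

    /* PF_SET_VF_MAC == 1 ... PF_SET_VF_TRUSTED == 3, so
     * GENMASK(3, 1) == 0xE and RVU_CLEAR_VF_PERM == ~0xE.
     */
    flags = BIT(NIXLF_INITIALIZED) | BIT(PF_SET_VF_TRUSTED);
    flags &= RVU_CLEAR_VF_PERM; /* only NIXLF_INITIALIZED survives */
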
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
@@ -282,6 +297,13 @@ struct nix_txvlan {
struct mutex rsrc_lock; /* Serialize resource alloc/free */
};
+struct nix_ipolicer {
+ struct rsrc_bmap band_prof;
+ u16 *pfvf_map;
+ u16 *match_id;
+ u16 *ref_count;
+};
+
struct nix_hw {
int blkaddr;
struct rvu *rvu;
@@ -291,6 +313,7 @@ struct nix_hw {
struct nix_mark_format mark_format;
struct nix_lso lso;
struct nix_txvlan txvlan;
+ struct nix_ipolicer *ipolicer;
};
/* RVU block's capabilities or functionality,
@@ -308,6 +331,7 @@ struct hw_cap {
bool nix_rx_multicast; /* Rx packet replication support */
bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
bool programmable_chans; /* Channels programmable ? */
+ bool ipolicer;
};
struct rvu_hwinfo {
@@ -386,10 +410,21 @@ struct npc_kpu_profile_adapter {
const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
const struct npc_kpu_profile *kpu; /* array[kpus] */
struct npc_mcam_kex *mkex;
+ bool custom;
size_t pkinds;
size_t kpus;
};
+#define RVU_SWITCH_LBK_CHAN 63
+
+struct rvu_switch {
+ struct mutex switch_lock; /* Serialize flow installation */
+ u32 used_entries;
+ u16 *entry2pcifunc;
+ u16 mode;
+ u16 start_entry;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -420,6 +455,7 @@ struct rvu {
/* CGX */
#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
+ u16 cgx_mapped_vfs; /* maximum CGX mapped VFs */
u8 cgx_mapped_pfs;
u8 cgx_cnt_max; /* CGX port count max */
u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
@@ -435,9 +471,13 @@ struct rvu {
struct mutex cgx_cfg_lock; /* serialize cgx configuration */
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+ char kpu_pfl_name[KPU_NAME_LEN]; /* Configured KPU profile name */
/* Firmware data */
struct rvu_fwdata *fwdata;
+ void *kpu_fwdata;
+ size_t kpu_fwdata_sz;
+ void __iomem *kpu_prfl_addr;
/* NPC KPU data */
struct npc_kpu_profile_adapter kpu;
@@ -448,6 +488,9 @@ struct rvu {
struct rvu_debugfs rvu_dbg;
#endif
struct rvu_devlink *rvu_dl;
+
+ /* RVU switch implementation over NPC with DMAC rules */
+ struct rvu_switch rswitch;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -543,11 +586,16 @@ static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
/* Function Prototypes
* RVU
*/
-static inline int is_afvf(u16 pcifunc)
+static inline bool is_afvf(u16 pcifunc)
{
return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}
+static inline bool is_vf(u16 pcifunc)
+{
+ return !!(pcifunc & RVU_PFVF_FUNC_MASK);
+}
+
/* check if PF_FUNC is AF */
static inline bool is_pffunc_af(u16 pcifunc)
{
@@ -563,6 +611,7 @@ static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
@@ -603,6 +652,12 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
*lmac_id = (map & 0xF);
}
+static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc)
+{
+ return ((pcifunc & RVU_PFVF_FUNC_MASK) &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)));
+}
+
#define M(_name, _id, fn_name, req, rsp) \
int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
MBOX_MESSAGES
@@ -616,6 +671,8 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
int rxtxflag, u64 *stat);
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
+
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
@@ -632,10 +689,23 @@ void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
-int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add);
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx);
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr);
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr);
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id);
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx);
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -646,13 +716,19 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, u64 chan, u8 chan_cnt,
- bool allmulti);
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
-void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+ int nixlf, u64 chan, u8 chan_cnt);
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable);
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan);
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
@@ -683,6 +759,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
@@ -696,6 +773,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);
+/* CN10K RVU - LMT */
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
+
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
void rvu_dbg_exit(struct rvu *rvu);
@@ -703,4 +783,10 @@ void rvu_dbg_exit(struct rvu *rvu);
static inline void rvu_dbg_init(struct rvu *rvu) {}
static inline void rvu_dbg_exit(struct rvu *rvu) {}
#endif
+
+/* RVU Switch */
+void rvu_switch_enable(struct rvu *rvu);
+void rvu_switch_disable(struct rvu *rvu);
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 6e2bf4fcd29c..fe99ac4a4dd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -63,7 +63,7 @@ static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
-static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
unsigned long pfmap;
@@ -126,6 +126,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
unsigned long lmac_bmap;
int size, free_pkind;
int cgx, lmac, iter;
+ int numvfs, hwvfs;
if (!cgx_cnt_max)
return 0;
@@ -166,6 +167,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
rvu->cgx_mapped_pfs++;
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
+ rvu->cgx_mapped_vfs += numvfs;
pf++;
}
}
@@ -454,6 +457,31 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return 0;
}
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int i = 0, lmac_count = 0;
+ u8 max_dmac_filters;
+ u8 cgx_id, lmac_id;
+ void *cgx_dev;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgx_dev = cgx_get_pdata(cgx_id);
+ lmac_count = cgx_get_lmac_cnt(cgx_dev);
+ max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+ for (i = 0; i < max_dmac_filters; i++)
+ cgx_lmac_addr_del(cgx_id, lmac_id, i);
+
+ /* cgx_lmac_addr_del() does not clear the entry at index 0,
+ * so it needs to be done explicitly.
+ */
+ cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
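
The per-LMAC filter budget here is just the CGX-wide DMAC CAM split
evenly across the active LMACs: assuming a 32-entry CAM (the debugfs
dump later in this patch also walks 32 entries), a CGX with 4 LMACs
yields max_dmac_filters == 32 / 4 == 8 per LMAC.
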
+
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -557,6 +585,63 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
+ if (rc >= 0) {
+ rsp->index = rc;
+ return 0;
+ }
+
+ return rc;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
+}
+
+int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_max_dmac_entries_get_rsp
+ *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* If the msg is received from a PF which is not mapped to a CGX LMAC,
+ * or from a VF, then no entries are allocated for DMAC filters at the
+ * CGX level. So return zero.
+ */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
+ rsp->max_dmac_filters = 0;
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
+ return 0;
+}
+
int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
@@ -953,3 +1038,30 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
return 0;
}
+
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
+int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7d9e71c6965f..8d48b64485c6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -10,6 +10,206 @@
#include "cgx.h"
#include "rvu_reg.h"
+/* RVU LMTST */
+#define LMT_TBL_OP_READ 0
+#define LMT_TBL_OP_WRITE 1
+#define LMT_MAP_TABLE_SIZE (128 * 1024)
+#define LMT_MAPTBL_ENTRY_SIZE 16
+
+/* Function to perform operations (read/write) on lmtst map table */
+static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
+ int lmt_tbl_op)
+{
+ void __iomem *lmt_map_base;
+ u64 tbl_base;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ return -ENOMEM;
+ }
+
+ if (lmt_tbl_op == LMT_TBL_OP_READ) {
+ *val = readq(lmt_map_base + index);
+ } else {
+ writeq((*val), (lmt_map_base + index));
+ /* Flush the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
+ * changes effective. Writing 1 triggers the flush; the read acts
+ * as a barrier and sets up a data dependency. Writing 0 after the
+ * write of 1 completes the flush.
+ */
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
+ rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
+ }
+
+ iounmap(lmt_map_base);
+ return 0;
+}
+
+static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
+{
+ return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
+}
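
Each PF/VF owns one LMT_MAPTBL_ENTRY_SIZE-byte slot, laid out PF-major
with hw->total_vfs slots per PF. A worked example with an illustrative
VF count (total_vfs is discovered from hardware):

    /* Assuming hw->total_vfs == 256: PF2/VF0 carries FUNC field 1,
     * so its APR_LMT_MAP_ENTRY_S slot starts at byte offset
     * (2 * 256 + 1) * 16 == 8208 inside the 128KB map table.
     */
    u32 idx = (2 * 256 + 1) * LMT_MAPTBL_ENTRY_SIZE;
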
+
+static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ u64 iova, u64 *lmt_addr)
+{
+ u64 pa, val, pf;
+ int err;
+
+ if (!iova) {
+ dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);
+ return -EINVAL;
+ }
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+ pf = rvu_get_pf(pcifunc) & 0x1F;
+ val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+ ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
+
+ err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
+ if (err) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__);
+ return err;
+ }
+ val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+ if (val & ~0x1ULL) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
+ return -EIO;
+ }
+ /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21]
+ * PA[11:0] = IOVA[11:0]
+ */
+ pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21;
+ pa &= GENMASK_ULL(39, 0);
+ *lmt_addr = (pa << 12) | (iova & 0xFFF);
+
+ return 0;
+}
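
The final LMT address stitches the translated page frame onto the
IOVA's page offset. A minimal sketch with an assumed FLIT1 readout:

    /* Illustration only: if RVU_AF_SMMU_TLN_FLIT1[60:21] held
     * 0x123456, the 40-bit PFN is 0x123456, and an IOVA page offset
     * of 0xabc yields 0x123456abc: PA[51:12] from the flit,
     * PA[11:0] copied from the IOVA.
     */
    u64 flit1 = 0x123456ULL << 21;                /* assumed readout */
    u64 pfn = (flit1 >> 21) & GENMASK_ULL(39, 0); /* 0x123456 */
    u64 lmt = (pfn << 12) | (0xabcULL & 0xFFF);   /* 0x123456abc */
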
+
+static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err = 0;
+ u64 val;
+
+ /* Read the current lmt addr of pcifunc */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+
+ /* Storing the secondary's lmt base address as this needs to be
+ * reverted in FLR. Also making sure this default value doesn't
+ * get overwritten on multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_base_addr)
+ pfvf->lmt_base_addr = val;
+
+ /* Update the LMT table with new addr */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
+ struct lmtst_tbl_setup_req *req,
+ struct msg_rsp *rsp)
+{
+ u64 lmt_addr, val;
+ u32 pri_tbl_idx;
+ int err = 0;
+
+ /* Check if the PF_FUNC wants to use its own local memory as the
+ * LMTLINE region; if so, convert that IOVA to a physical address
+ * and populate the LMT table with that address.
+ */
+ if (req->use_local_lmt_region) {
+ err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
+ req->lmt_iova, &lmt_addr);
+ if (err < 0)
+ return err;
+
+ /* Update the lmt addr for this PFFUNC in the LMT table */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
+ if (err)
+ return err;
+ }
+
+ /* Reconfigure the lmtst map table for LMT region shared mode,
+ * i.e. make multiple PF_FUNCs share an LMTLINE region. The
+ * primary/base pcifunc (passed as an argument in the mailbox
+ * request) is the one whose lmt base address will be shared with
+ * the secondary pcifunc (the one calling this mailbox).
+ */
+ if (req->base_pcifunc) {
+ /* Calculate the LMT table index corresponding to the primary
+ * pcifunc.
+ */
+ pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);
+
+ /* Read the base lmt addr of the primary pcifunc */
+ err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
+ LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ pri_tbl_idx, err);
+ return err;
+ }
+
+ /* Update the base lmt addr of secondary with primary's base
+ * lmt addr.
+ */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Resetting the lmtst map table to original base addresses */
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ if (pfvf->lmt_base_addr) {
+ /* This corresponds to lmt map table index */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ /* Revert to the original lmt base addr for the respective
+ * pcifunc.
+ */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ pfvf->lmt_base_addr = 0;
+ }
+}
+
int rvu_set_channels_base(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 9bf8eaabf9ab..9b2dfbf90e51 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1632,6 +1632,165 @@ static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
+static void print_band_prof_ctx(struct seq_file *m,
+ struct nix_bandprof_s *prof)
+{
+ char *str;
+
+ switch (prof->pc_mode) {
+ case NIX_RX_PC_MODE_VLAN:
+ str = "VLAN";
+ break;
+ case NIX_RX_PC_MODE_DSCP:
+ str = "DSCP";
+ break;
+ case NIX_RX_PC_MODE_GEN:
+ str = "Generic";
+ break;
+ case NIX_RX_PC_MODE_RSVD:
+ str = "Reserved";
+ break;
+ }
+ seq_printf(m, "W0: pc_mode\t\t%s\n", str);
+ str = (prof->icolor == 3) ? "Color blind" :
+ (prof->icolor == 0) ? "Green" :
+ (prof->icolor == 1) ? "Yellow" : "Red";
+ seq_printf(m, "W0: icolor\t\t%s\n", str);
+ seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
+ seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
+ seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
+ seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
+ seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
+ seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
+ seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
+ seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
+
+ seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
+ str = (prof->lmode == 0) ? "byte" : "packet";
+ seq_printf(m, "W1: lmode\t\t%s\n", str);
+ seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
+ seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
+ seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
+ seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
+ str = (prof->gc_action == 0) ? "PASS" :
+ (prof->gc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: gc_action\t\t%s\n", str);
+ str = (prof->yc_action == 0) ? "PASS" :
+ (prof->yc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: yc_action\t\t%s\n", str);
+ str = (prof->rc_action == 0) ? "PASS" :
+ (prof->rc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: rc_action\t\t%s\n", str);
+ seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
+ seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
+ seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
+
+ seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
+ seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
+ seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
+ seq_printf(m, "W4: green_pkt_pass\t%lld\n",
+ (u64)prof->green_pkt_pass);
+ seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
+ (u64)prof->yellow_pkt_pass);
+ seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
+ seq_printf(m, "W7: green_octs_pass\t%lld\n",
+ (u64)prof->green_octs_pass);
+ seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
+ (u64)prof->yellow_octs_pass);
+ seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
+ seq_printf(m, "W10: green_pkt_drop\t%lld\n",
+ (u64)prof->green_pkt_drop);
+ seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
+ (u64)prof->yellow_pkt_drop);
+ seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
+ seq_printf(m, "W13: green_octs_drop\t%lld\n",
+ (u64)prof->green_octs_drop);
+ seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
+ (u64)prof->yellow_octs_drop);
+ seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
+ seq_puts(m, "==============================\n");
+}
+
+static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_ipolicer *ipolicer;
+ int layer, prof_idx, idx, rc;
+ u16 pcifunc;
+ char *str;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
+
+ seq_printf(m, "\n%s bandwidth profiles\n", str);
+ seq_puts(m, "=======================\n");
+
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (is_rsrc_free(&ipolicer->band_prof, idx))
+ continue;
+
+ prof_idx = (idx & 0x3FFF) | (layer << 14);
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ 0x00, NIX_AQ_CTYPE_BANDPROF,
+ prof_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of %s profile %d, err %d\n",
+ __func__, str, idx, rc);
+ return 0;
+ }
+ seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
+ pcifunc = ipolicer->pfvf_map[idx];
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(m, "Allocated to :: PF %d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(m, "Allocated to :: PF %d VF %d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ print_band_prof_ctx(m, &aq_rsp.prof);
+ }
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
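
The prof_idx computed above packs the layer into bits [15:14] on top of
the 14-bit per-layer index, which is how the AQ addresses a bandwidth
profile. With illustrative values:

    /* layer 2, per-layer profile 5: */
    int prof_idx = (5 & 0x3FFF) | (2 << 14); /* == 0x8005 */
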
+
+static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_ipolicer *ipolicer;
+ int layer;
+ char *str;
+
+ seq_puts(m, "\nBandwidth profile resource free count\n");
+ seq_puts(m, "=====================================\n");
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
+ ipolicer->band_prof.max,
+ rvu_rsrc_free_count(&ipolicer->band_prof));
+ }
+ seq_puts(m, "=====================================\n");
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
+
static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
struct nix_hw *nix_hw;
@@ -1664,6 +1823,10 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
&rvu_dbg_nix_ndc_rx_hits_miss_fops);
debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
&rvu_dbg_nix_qsize_fops);
+ debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_ctx_fops);
+ debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_rsrc_fops);
}
static void rvu_dbg_npa_init(struct rvu *rvu)
@@ -1808,10 +1971,9 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
{
struct dentry *current_dir;
- int err, lmac_id;
char *buf;
current_dir = filp->file->f_path.dentry->d_parent;
@@ -1819,17 +1981,87 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
if (!buf)
return -EINVAL;
- err = kstrtoint(buf + 1, 10, &lmac_id);
- if (!err) {
- err = cgx_print_stats(filp, lmac_id);
- if (err)
- return err;
- }
+ return kstrtoint(buf + 1, 10, lmac_id);
+}
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+ int lmac_id, err;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_stats(filp, lmac_id);
+
return err;
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
+static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
+{
+ struct pci_dev *pdev = NULL;
+ void *cgxd = s->private;
+ char *bcast, *mcast;
+ u16 index, domain;
+ u8 dmac[ETH_ALEN];
+ struct rvu *rvu;
+ u64 cfg, mac;
+ int pf;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ domain = 2;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ return 0;
+
+ cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
+ bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
+ mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
+
+ seq_puts(s,
+ "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
+ seq_printf(s, "%s PF%d %9s %9s",
+ dev_name(&pdev->dev), pf, bcast, mcast);
+ if (cfg & CGX_DMAC_CAM_ACCEPT)
+ seq_printf(s, "%12s\n\n", "UNICAST");
+ else
+ seq_printf(s, "%16s\n\n", "PROMISCUOUS");
+
+ seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
+
+ for (index = 0 ; index < 32 ; index++) {
+ cfg = cgx_read_dmac_entry(cgxd, index);
+ /* Display enabled dmac entries associated with current lmac */
+ if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
+ FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
+ mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
+ u64_to_ether_addr(mac, dmac);
+ seq_printf(s, "%7d %pM\n", index, dmac);
+ }
+ }
+
+ return 0;
+}
+
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+{
+ int err, lmac_id;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_dmac_flt(filp, lmac_id);
+
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
struct mac_ops *mac_ops;
@@ -1866,6 +2098,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
cgx, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_dmac_flt_fops);
}
}
}
@@ -1878,9 +2113,6 @@ static void rvu_print_npc_mcam_info(struct seq_file *s,
int entry_acnt, entry_ecnt;
int cntr_acnt, cntr_ecnt;
- /* Skip PF0 */
- if (!pcifunc)
- return;
rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
&entry_acnt, &entry_ecnt);
rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
@@ -2063,7 +2295,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
struct rvu_npc_mcam_rule *rule)
{
- if (rule->intf == NIX_INTF_TX) {
+ if (is_npc_intf_tx(rule->intf)) {
switch (rule->tx_action.op) {
case NIX_TX_ACTIONOP_DROP:
seq_puts(s, "\taction: Drop\n");
@@ -2132,6 +2364,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
struct rvu *rvu = s->private;
struct npc_mcam *mcam;
int pf, vf = -1;
+ bool enabled;
int blkaddr;
u16 target;
u64 hits;
@@ -2173,7 +2406,9 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
}
rvu_dbg_npc_mcam_show_action(s, iter);
- seq_printf(s, "\tenabled: %s\n", iter->enable ? "yes" : "no");
+
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
+ seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
if (!iter->has_cntr)
continue;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 10a98bcb7c54..2688186066d9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1364,6 +1364,44 @@ static void rvu_health_reporters_destroy(struct rvu *rvu)
rvu_nix_health_reporters_destroy(rvu_dl);
}
+static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ *mode = rswitch->mode;
+
+ return 0;
+}
+
+static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ if (rswitch->mode == mode)
+ return 0;
+ rswitch->mode = mode;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ rvu_switch_enable(rvu);
+ else
+ rvu_switch_disable(rvu);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
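
For context, these two callbacks plug into the generic devlink eswitch
API, so from userspace the mode is inspected and toggled with the
standard tool, e.g. "devlink dev eswitch show pci/0002:01:00.0" and
"devlink dev eswitch set pci/0002:01:00.0 mode switchdev" (the PCI
address here is illustrative).
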
+
static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
@@ -1372,6 +1410,8 @@ static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req
static const struct devlink_ops rvu_devlink_ops = {
.info_get = rvu_devlink_info_get,
+ .eswitch_mode_get = rvu_devlink_eswitch_mode_get,
+ .eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
int rvu_register_dl(struct rvu *rvu)
@@ -1380,14 +1420,9 @@ int rvu_register_dl(struct rvu *rvu)
struct devlink *dl;
int err;
- rvu_dl = kzalloc(sizeof(*rvu_dl), GFP_KERNEL);
- if (!rvu_dl)
- return -ENOMEM;
-
dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink));
if (!dl) {
dev_warn(rvu->dev, "devlink_alloc failed\n");
- kfree(rvu_dl);
return -ENOMEM;
}
@@ -1395,10 +1430,10 @@ int rvu_register_dl(struct rvu *rvu)
if (err) {
dev_err(rvu->dev, "devlink register failed with error %d\n", err);
devlink_free(dl);
- kfree(rvu_dl);
return err;
}
+ rvu_dl = devlink_priv(dl);
rvu_dl->dl = dl;
rvu_dl->rvu = rvu;
rvu->rvu_dl = rvu_dl;
@@ -1417,5 +1452,4 @@ void rvu_unregister_dl(struct rvu *rvu)
rvu_health_reporters_destroy(rvu);
devlink_unregister(dl);
devlink_free(dl);
- kfree(rvu_dl);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0a8bd667cb11..4bfbbdf38770 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -21,6 +21,16 @@
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id);
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add);
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr);
+static void nix_ipolicer_freemem(struct nix_hw *nix_hw);
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc);
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -132,6 +142,22 @@ int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
return 0;
}
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr)
+{
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || *blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
+ if (!*nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+ return 0;
+}
+
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
INIT_HLIST_HEAD(&list->head);
@@ -170,11 +196,22 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
int err;
- /*Sync all in flight RX packets to LLC/DRAM */
+ /* Sync all in flight RX packets to LLC/DRAM */
rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
if (err)
- dev_err(rvu->dev, "NIX RX software sync failed\n");
+ dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
+
+ /* SW_SYNC ensures all existing transactions are finished and pkts
+ * are written to LLC/DRAM; queues should be torn down only after a
+ * successful SW_SYNC. Due to a HW erratum, in some rare scenarios an
+ * in-flight transaction may complete after the SW_SYNC operation. To
+ * ensure the sync is fully done, do the SW_SYNC twice.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+ if (err)
+ dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
@@ -272,9 +309,10 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
+ rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
- pfvf->rx_chan_cnt, false);
+ pfvf->rx_chan_cnt);
break;
}
@@ -285,16 +323,17 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
pfvf->rx_chan_base, pfvf->mac_addr);
/* Add this PF_FUNC to bcast pkt replication list */
- err = nix_update_bcast_mce_list(rvu, pcifunc, true);
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
if (err) {
dev_err(rvu->dev,
"Bcast list, failed to enable PF_FUNC 0x%x\n",
pcifunc);
return err;
}
-
+ /* Install MCAM rule matching the Ethernet broadcast MAC address */
rvu_npc_install_bcast_match_entry(rvu, pcifunc,
nixlf, pfvf->rx_chan_base);
+
pfvf->maxlen = NIC_HW_MIN_FRS;
pfvf->minlen = NIC_HW_MIN_FRS;
@@ -310,7 +349,7 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
pfvf->minlen = 0;
/* Remove this PF_FUNC from bcast pkt replication list */
- err = nix_update_bcast_mce_list(rvu, pcifunc, false);
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
if (err) {
dev_err(rvu->dev,
"Bcast list, failed to disable PF_FUNC 0x%x\n",
@@ -319,6 +358,9 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
/* Free and disable any MCAM entries used by this NIX LF */
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Disable DMAC filters used */
+ rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
@@ -680,8 +722,11 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
pfvf = rvu_get_pfvf(rvu, pcifunc);
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
- /* Skip NIXLF check for broadcast MCE entry init */
- if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
+ /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
+ * operations done by AF itself.
+ */
+ if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
+ (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
if (!pfvf->nixlf || nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
}
@@ -721,6 +766,11 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
if (rsp)
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
+ case NIX_AQ_CTYPE_BANDPROF:
+ if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
+ nix_hw, pcifunc))
+ rc = NIX_AF_ERR_INVALID_BANDPROF;
+ break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
}
@@ -777,6 +827,9 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(mask, &req->mce_mask,
sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(mask, &req->prof_mask,
+ sizeof(struct nix_bandprof_s));
fallthrough;
case NIX_AQ_INSTOP_INIT:
if (req->ctype == NIX_AQ_CTYPE_RQ)
@@ -789,6 +842,8 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
break;
case NIX_AQ_INSTOP_NOP:
case NIX_AQ_INSTOP_READ:
@@ -866,6 +921,9 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(&rsp->mce, ctx,
sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(&rsp->prof, ctx,
+ sizeof(struct nix_bandprof_s));
}
}
@@ -1906,6 +1964,35 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
+static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
+ u16 pcifunc, struct nix_txsch *txsch)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lbk_link_start, lbk_links;
+ u8 pf = rvu_get_pf(pcifunc);
+ int schq;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ lbk_link_start = hw->cgx_links;
+
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ /* Enable all LBK links with channel 63 by default so that
+ * packets can be sent to LBK with an NPC TX MCAM rule
+ */
+ lbk_links = hw->lbk_links;
+ while (lbk_links--)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+ lbk_link_start +
+ lbk_links),
+ BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
+ }
+}
+
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp)
@@ -1994,6 +2081,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
rvu_write64(rvu, blkaddr, reg, regval);
}
+ rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
+ &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
+
return 0;
}
@@ -2203,8 +2293,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
aq_req.op = op;
aq_req.qidx = mce;
- /* Forward bcast pkts to RQ0, RSS not needed */
- aq_req.mce.op = 0;
+ /* Use RSS with RSS index 0 */
+ aq_req.mce.op = 1;
aq_req.mce.index = 0;
aq_req.mce.eol = eol;
aq_req.mce.pf_func = pcifunc;
@@ -2222,8 +2312,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
-static int nix_update_mce_list(struct nix_mce_list *mce_list,
- u16 pcifunc, bool add)
+static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
+ u16 pcifunc, bool add)
{
struct mce *mce, *tail = NULL;
bool delete = false;
@@ -2234,6 +2324,9 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
if (mce->pcifunc == pcifunc && !add) {
delete = true;
break;
+ } else if (mce->pcifunc == pcifunc && add) {
+ /* entry already exists */
+ return 0;
}
tail = mce;
}
@@ -2261,36 +2354,23 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
return 0;
}
-int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add)
{
- int err = 0, idx, next_idx, last_idx;
- struct nix_mce_list *mce_list;
+ int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
struct nix_mcast *mcast;
struct nix_hw *nix_hw;
- struct rvu_pfvf *pfvf;
struct mce *mce;
- int blkaddr;
- /* Broadcast pkt replication is not needed for AF's VFs, hence skip */
- if (is_afvf(pcifunc))
- return 0;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return 0;
-
- nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
- return 0;
-
- mcast = &nix_hw->mcast;
+ if (!mce_list)
+ return -EINVAL;
/* Get this PF/VF func's MCE index */
- pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
- idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
+ idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
- mce_list = &pfvf->bcast_mce_list;
- if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
+ if (idx > (mce_idx + mce_list->max)) {
dev_err(rvu->dev,
"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
__func__, idx, mce_list->max,
@@ -2298,20 +2378,26 @@ int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
return -EINVAL;
}
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast = &nix_hw->mcast;
mutex_lock(&mcast->mce_lock);
- err = nix_update_mce_list(mce_list, pcifunc, add);
+ err = nix_update_mce_list_entry(mce_list, pcifunc, add);
if (err)
goto end;
/* Disable MCAM entry in NPC */
if (!mce_list->count) {
- rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
+ npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
goto end;
}
/* Dump the updated list to HW */
- idx = pfvf->bcast_mce_idx;
+ idx = mce_idx;
last_idx = idx + mce_list->count - 1;
hlist_for_each_entry(mce, &mce_list->head, node) {
if (idx > last_idx)
@@ -2332,7 +2418,71 @@ end:
return err;
}
-static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_pfvf *pfvf;
+
+ if (!hw->cap.nix_rx_multicast ||
+ !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ return;
+ }
+
+ /* Get this PF/VF func's MCE index */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+
+ if (type == NIXLF_BCAST_ENTRY) {
+ *mce_list = &pfvf->bcast_mce_list;
+ *mce_idx = pfvf->bcast_mce_idx;
+ } else if (type == NIXLF_ALLMULTI_ENTRY) {
+ *mce_list = &pfvf->mcast_mce_list;
+ *mce_idx = pfvf->mcast_mce_idx;
+ } else if (type == NIXLF_PROMISC_ENTRY) {
+ *mce_list = &pfvf->promisc_mce_list;
+ *mce_idx = pfvf->promisc_mce_idx;
+ } else {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ }
+}
+
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add)
+{
+ int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+
+ /* skip multicast pkt replication for AF's VFs */
+ if (is_afvf(pcifunc))
+ return 0;
+
+ if (!hw->cap.nix_rx_multicast)
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return -EINVAL;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ mcam_index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
+ err = nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, mcam_index, add);
+ return err;
+}
+
+static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
{
struct nix_mcast *mcast = &nix_hw->mcast;
int err, pf, numvfs, idx;
@@ -2355,11 +2505,18 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
if (pfvf->nix_blkaddr != nix_hw->blkaddr)
continue;
- /* Save the start MCE */
+ /* save start idx of broadcast mce list */
pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
-
nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
+ /* save start idx of multicast mce list */
+ pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
+
+ /* save the start idx of promisc mce list */
+ pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
+
for (idx = 0; idx < (numvfs + 1); idx++) {
/* idx-0 is for PF, followed by VFs */
pcifunc = (pf << RVU_PFVF_PF_SHIFT);
@@ -2375,6 +2532,22 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
pcifunc, 0, true);
if (err)
return err;
+
+ /* add dummy entries to multicast mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->mcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+
+ /* add dummy entries to promisc mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->promisc_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
}
}
return 0;
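(As a worked example of the indexing above: for a PF with numvfs = 2, each nix_alloc_mce_list() call reserves three contiguous entries, so slot 0 of bcast_mce_idx, and likewise of the mcast and promisc lists, serves the PF while slots 1 and 2 serve the two VFs, matching their func numbers; nix_update_mce_list() later recovers a function's slot as idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK).)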
@@ -2421,7 +2594,7 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
mutex_init(&mcast->mce_lock);
- return nix_setup_bcast_tables(rvu, nix_hw);
+ return nix_setup_mce_tables(rvu, nix_hw);
}
static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
@@ -3035,15 +3208,24 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
pfvf = rvu_get_pfvf(rvu, pcifunc);
- /* VF can't overwrite admin(PF) changes */
- if (from_vf && pfvf->pf_set_vf_cfg)
+ /* untrusted VF can't overwrite admin(PF) changes */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
+ dev_warn(rvu->dev,
+ "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
return -EPERM;
+ }
ether_addr_copy(pfvf->mac_addr, req->mac_addr);
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
+ if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
+ ether_addr_copy(pfvf->default_mac, req->mac_addr);
+
+ rvu_switch_update_rules(rvu, pcifunc);
+
return 0;
}
@@ -3067,30 +3249,75 @@ int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
- bool allmulti = false, disable_promisc = false;
+ bool allmulti, promisc, nix_rx_multicast;
u16 pcifunc = req->hdr.pcifunc;
- int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
+ int nixlf, err;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
+ allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
+ pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
+
+ nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
+
+ if (is_vf(pcifunc) && !nix_rx_multicast &&
+ (promisc || allmulti)) {
+ dev_warn_ratelimited(rvu->dev,
+ "VF promisc/multicast not supported\n");
+ return 0;
+ }
+
+ /* untrusted VF can't configure promisc/allmulti */
+ if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (promisc || allmulti))
+ return 0;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
return err;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (nix_rx_multicast) {
+ /* add/del this PF_FUNC to/from mcast pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
+ allmulti);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to multicast list\n",
+ pcifunc);
+ return err;
+ }
- if (req->mode & NIX_RX_MODE_PROMISC)
- allmulti = false;
- else if (req->mode & NIX_RX_MODE_ALLMULTI)
- allmulti = true;
- else
- disable_promisc = true;
+ /* add/del this PF_FUNC to/from promisc pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
+ promisc);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to promisc list\n",
+ pcifunc);
+ return err;
+ }
+ }
- if (disable_promisc)
- rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
- else
+ /* install/uninstall allmulti entry */
+ if (allmulti) {
+ rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base);
+ } else {
+ if (!nix_rx_multicast)
+ rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
+ }
+
+ /* install/uninstall promisc entry */
+ if (promisc) {
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
- pfvf->rx_chan_cnt, allmulti);
+ pfvf->rx_chan_cnt);
+ } else {
+ if (!nix_rx_multicast)
+ rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
+ }
+
return 0;
}
@@ -3470,6 +3697,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err)
return err;
+ err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
if (err)
return err;
@@ -3523,6 +3754,40 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
(ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
ltdefs->rx_isctp.ltype_mask);
+ if (!is_rvu_otx2(rvu)) {
+ /* Enable APAD calculation for protocols matching
+ * the APAD0 and APAD1 lt-def registers.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
+ (ltdefs->rx_apad0.valid << 11) |
+ (ltdefs->rx_apad0.lid << 8) |
+ (ltdefs->rx_apad0.ltype_match << 4) |
+ ltdefs->rx_apad0.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
+ (ltdefs->rx_apad1.valid << 11) |
+ (ltdefs->rx_apad1.lid << 8) |
+ (ltdefs->rx_apad1.ltype_match << 4) |
+ ltdefs->rx_apad1.ltype_mask);
+
+ /* The receive ethertype definition register encodes layer
+ * information in NPC_RESULT_S to identify the Ethertype
+ * location in the L2 header. Used for Ethertype overwriting
+ * in the inline IPsec flow.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
+ (ltdefs->rx_et[0].offset << 12) |
+ (ltdefs->rx_et[0].valid << 11) |
+ (ltdefs->rx_et[0].lid << 8) |
+ (ltdefs->rx_et[0].ltype_match << 4) |
+ ltdefs->rx_et[0].ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
+ (ltdefs->rx_et[1].offset << 12) |
+ (ltdefs->rx_et[1].valid << 11) |
+ (ltdefs->rx_et[1].lid << 8) |
+ (ltdefs->rx_et[1].ltype_match << 4) |
+ ltdefs->rx_et[1].ltype_mask);
+ }
+
err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
if (err)
return err;
@@ -3584,10 +3849,11 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
kfree(txsch->schq.bmap);
}
+ nix_ipolicer_freemem(nix_hw);
+
vlan = &nix_hw->txvlan;
kfree(vlan->rsrc.bmap);
mutex_destroy(&vlan->rsrc_lock);
- devm_kfree(rvu->dev, vlan->entry2pfvf_map);
mcast = &nix_hw->mcast;
qmem_free(rvu->dev, mcast->mce_ctx);
@@ -3614,6 +3880,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
int nixlf, err;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
@@ -3624,6 +3891,11 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
npc_mcam_enable_flows(rvu, pcifunc);
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
+ rvu_switch_update_rules(rvu, pcifunc);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
@@ -3631,6 +3903,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
int nixlf, err;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
@@ -3639,6 +3912,9 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
@@ -3657,6 +3933,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
nix_rx_sync(rvu, blkaddr);
nix_txschq_free(rvu, pcifunc);
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
rvu_cgx_start_stop_io(rvu, pcifunc, false);
if (pfvf->sq_ctx) {
@@ -3681,6 +3959,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
}
nix_ctx_free(rvu, pfvf);
+
+ nix_free_all_bandprof(rvu, pcifunc);
}
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
@@ -3789,3 +4069,586 @@ void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
if (from_vf)
ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
}
+
+/* NIX ingress policers or bandwidth profiles APIs */
+static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
+{
+ struct npc_lt_def_cfg defs, *ltdefs;
+
+ ltdefs = &defs;
+ memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
+
+ /* Extract the PCP and DEI fields of the outer VLAN at byte
+ * offset 2 from the start of LB_PTR (i.e. the TAG).
+ * VLAN0 is the outer VLAN and VLAN1 the inner VLAN. Inner VLAN
+ * fields are considered only when 'Tunnel enable' is set in the profile.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
+ (2UL << 12) | (ltdefs->ovlan.lid << 8) |
+ (ltdefs->ovlan.ltype_match << 4) |
+ ltdefs->ovlan.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
+ (2UL << 12) | (ltdefs->ivlan.lid << 8) |
+ (ltdefs->ivlan.ltype_match << 4) |
+ ltdefs->ivlan.ltype_mask);
+
+ /* DSCP field in outer and tunneled IPv4 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
+ (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
+ (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
+
+ /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
+ (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
+ (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
+}
+
+static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
+ int layer, int prof_idx)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ int rc;
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+
+ aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_INIT;
+
+ /* Context is all zeros, submit to AQ */
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc)
+ dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
+ layer, prof_idx);
+ return rc;
+}
+
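(Throughout the bandwidth-profile code the AQ qidx doubles as a (layer, profile) tuple: bits [13:0] carry the profile index and bits [15:14] the layer, as the encode above and the decode in nix_verify_bandprof() further down show. A minimal sketch of the packing; the helper names are illustrative, not part of the patch:

	/* Pack a 2-bit BAND_PROF_*_LAYER value and a profile index
	 * into the AQ queue index used for NIX_AQ_CTYPE_BANDPROF ops.
	 */
	static inline u16 nix_bandprof_qidx(int layer, int prof_idx)
	{
		return (prof_idx & 0x3FFF) | (layer << 14);
	}

	static inline int nix_bandprof_layer(u16 qidx)
	{
		return (qidx >> 14) & 0x03;	/* bits [15:14] */
	}

	static inline int nix_bandprof_idx(u16 qidx)
	{
		return qidx & 0x3FFF;		/* bits [13:0] */
	})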
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_ipolicer *ipolicer;
+ int err, layer, prof_idx;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ if (!(cfg & BIT_ULL(61))) {
+ hw->cap.ipolicer = false;
+ return 0;
+ }
+
+ hw->cap.ipolicer = true;
+ nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
+ sizeof(*ipolicer), GFP_KERNEL);
+ if (!nix_hw->ipolicer)
+ return -ENOMEM;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+ switch (layer) {
+ case BAND_PROF_LEAF_LAYER:
+ ipolicer->band_prof.max = cfg & 0XFFFF;
+ break;
+ case BAND_PROF_MID_LAYER:
+ ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
+ break;
+ case BAND_PROF_TOP_LAYER:
+ ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
+ break;
+ }
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ err = rvu_alloc_bitmap(&ipolicer->band_prof);
+ if (err)
+ return err;
+
+ ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->pfvf_map)
+ return -ENOMEM;
+
+ ipolicer->match_id = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->match_id)
+ return -ENOMEM;
+
+ for (prof_idx = 0;
+ prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ /* Set AF as current owner for INIT ops to succeed */
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+
+ /* The profile context has no enable bit, hence no context
+ * disable either. INIT all profiles here so that a PF/VF
+ * later only needs a WRITE to set up policer rates
+ * and config.
+ */
+ err = nix_init_policer_context(rvu, nix_hw,
+ layer, prof_idx);
+ if (err)
+ return err;
+ }
+
+ /* Allocate memory for maintaining ref_counts for MID level
+ * profiles; this will be needed for aggregating leaf layer
+ * profiles.
+ */
+ if (layer != BAND_PROF_MID_LAYER)
+ continue;
+
+ ipolicer->ref_count = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ }
+
+ /* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
+ rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
+
+ nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
+
+ return 0;
+}
+
+static void nix_ipolicer_freemem(struct nix_hw *nix_hw)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ kfree(ipolicer->band_prof.bmap);
+ }
+}
+
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer, hi_layer, prof_idx;
+
+ /* Bits [15:14] of the profile index encode the layer */
+ layer = (req->qidx >> 14) & 0x03;
+ prof_idx = req->qidx & 0x3FFF;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ if (prof_idx >= ipolicer->band_prof.max)
+ return -EINVAL;
+
+ /* Check if the profile is allocated to the requesting PCIFUNC or not
+ * with the exception of AF. AF is allowed to read and update contexts.
+ */
+ if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ /* If this profile is linked to higher layer profile then check
+ * if that profile is also allocated to the requesting PCIFUNC
+ * or not.
+ */
+ if (!req->prof.hl_en)
+ return 0;
+
+ /* Leaf layer profile can link only to mid layer and
+ * mid layer to top layer.
+ */
+ if (layer == BAND_PROF_LEAF_LAYER)
+ hi_layer = BAND_PROF_MID_LAYER;
+ else if (layer == BAND_PROF_MID_LAYER)
+ hi_layer = BAND_PROF_TOP_LAYER;
+ else
+ return -EINVAL;
+
+ ipolicer = &nix_hw->ipolicer[hi_layer];
+ prof_idx = req->prof.band_prof_id;
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
+ struct nix_bandprof_alloc_req *req,
+ struct nix_bandprof_alloc_rsp *rsp)
+{
+ int blkaddr, layer, prof, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+
+ prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (prof < 0)
+ break;
+ rsp->prof_count[layer]++;
+ rsp->prof_idx[layer][idx] = prof;
+ ipolicer->pfvf_map[prof] = pcifunc;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
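(On the NIC driver side this handler is reached over the AF mbox, roughly as the nic/cn10k.c part of this series does it. A sketch of requesting one leaf profile, assuming the M()-macro-generated otx2_mbox_alloc_msg_nix_bandprof_alloc() helper and the usual otx2 mbox locking:

	static int otx2_alloc_leaf_profile_sketch(struct otx2_nic *pfvf, u16 *leaf)
	{
		struct nix_bandprof_alloc_req *req;
		struct nix_bandprof_alloc_rsp *rsp;
		int rc = -ENOMEM;

		mutex_lock(&pfvf->mbox.lock);
		req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox);
		if (!req)
			goto out;
		/* Ask the AF for a single leaf-layer profile */
		req->prof_count[BAND_PROF_LEAF_LAYER] = 1;

		rc = otx2_sync_mbox_msg(&pfvf->mbox);
		if (rc)
			goto out;

		rsp = (struct nix_bandprof_alloc_rsp *)
		      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp) || !rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
			rc = -EIO;
			goto out;
		}
		*leaf = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0];
	out:
		mutex_unlock(&pfvf->mbox.lock);
		return rc;
	})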
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, layer, prof_idx, err;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the profiles allocated to the PCIFUNC */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ if (ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+ struct nix_bandprof_free_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr, layer, prof_idx, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (req->free_all)
+ return nix_free_all_bandprof(rvu, pcifunc);
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free the requested profile indices */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ prof_idx = req->prof_idx[layer][idx];
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = pcifunc;
+ aq_req->ctype = ctype;
+ aq_req->op = NIX_AQ_INSTOP_READ;
+ aq_req->qidx = qidx;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u32 leaf_prof, u16 mid_prof)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = 0x00;
+ aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req->op = NIX_AQ_INSTOP_WRITE;
+ aq_req->qidx = leaf_prof;
+
+ aq_req->prof.band_prof_id = mid_prof;
+ aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
+ aq_req->prof.hl_en = 1;
+ aq_req->prof_mask.hl_en = 1;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id)
+{
+ int leaf_prof, mid_prof, leaf_match;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+ int blkaddr, idx, rc;
+
+ if (!rvu->hw->cap.ipolicer)
+ return 0;
+
+ rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (rc)
+ return rc;
+
+ /* Fetch the RQ's context to see if policing is enabled */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
+ NIX_AQ_CTYPE_RQ, rq_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
+ __func__, rq_idx, pcifunc);
+ return rc;
+ }
+
+ if (!aq_rsp.rq.policer_ena)
+ return 0;
+
+ /* Get the bandwidth profile ID mapped to this RQ */
+ leaf_prof = aq_rsp.rq.band_prof_id;
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
+ ipolicer->match_id[leaf_prof] = match_id;
+
+ /* Check if any other leaf profile is marked with same match_id */
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (idx == leaf_prof)
+ continue;
+ if (ipolicer->match_id[idx] != match_id)
+ continue;
+
+ leaf_match = idx;
+ break;
+ }
+
+ if (idx == ipolicer->band_prof.max)
+ return 0;
+
+ /* Fetch the matching profile's context to check if it's already
+ * mapped to a mid level profile.
+ */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_match);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_match);
+ return rc;
+ }
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ if (aq_rsp.prof.hl_en) {
+ /* Get the mid layer profile index and map leaf_prof to it
+ * as well, so that flows steered to different RQs but
+ * marked with the same match_id are rate limited
+ * in an aggregate fashion.
+ */
+ mid_prof = aq_rsp.prof.band_prof_id;
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+
+ /* Allocate a mid layer profile and
+ * map both 'leaf_prof' and 'leaf_match' profiles to it.
+ */
+ mutex_lock(&rvu->rsrc_lock);
+ mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (mid_prof < 0) {
+ dev_err(rvu->dev,
+ "%s: Unable to allocate mid layer profile\n", __func__);
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ ipolicer->ref_count[mid_prof] = 0;
+
+ /* Initialize mid layer profile same as 'leaf_prof' */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ goto exit;
+ }
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req.hdr.pcifunc = 0x00;
+ aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_WRITE;
+ memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+ /* Clear higher layer enable bit in the mid profile, just in case */
+ aq_req.prof.hl_en = 0;
+ aq_req.prof_mask.hl_en = 1;
+
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to INIT context of mid layer profile %d\n",
+ __func__, mid_prof);
+ goto exit;
+ }
+
+ /* Map both leaf profiles to this mid layer profile */
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_match, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_match, mid_prof);
+ ipolicer->ref_count[mid_prof]--;
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+exit:
+ return rc;
+}
+
+/* Called with mutex rsrc_lock */
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ u16 mid_prof;
+ int rc;
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ return;
+ }
+
+ if (!aq_rsp.prof.hl_en)
+ return;
+
+ mid_prof = aq_rsp.prof.band_prof_id;
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ ipolicer->ref_count[mid_prof]--;
+ /* If ref_count is zero, free mid layer profile */
+ if (!ipolicer->ref_count[mid_prof]) {
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
+ }
+}
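(Note the deliberate unlock/relock in this function: the caller holds rsrc_lock, per the comment above, but the function drops it around nix_aq_context_read(), presumably to avoid holding the resource mutex across the polled admin-queue operation, and re-acquires it before touching ref_count so that every return path leaves the lock held as the caller expects.)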
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 0bc4529691ec..52b255426c22 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -19,7 +19,7 @@
#include "cgx.h"
#include "npc_profile.h"
-#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */
+#define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */
#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
@@ -27,6 +27,8 @@
#define NPC_KEX_CHAN_MASK 0xFFFULL
#define NPC_KEX_PF_FUNC_MASK 0xFFFFULL
+#define ALIGN_8B_CEIL(__a) (((__a) + 7) & (-8))
+
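(ALIGN_8B_CEIL() rounds its argument up to the next multiple of 8; (-8) is just the two's-complement spelling of ~7, so for example ALIGN_8B_CEIL(13) = (13 + 7) & ~7 = 16, while ALIGN_8B_CEIL(16) stays 16.)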
static const char def_pfl_name[] = "default";
static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
@@ -212,8 +214,10 @@ int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
*/
if (type == NIXLF_BCAST_ENTRY)
return index;
- else if (type == NIXLF_PROMISC_ENTRY)
+ else if (type == NIXLF_ALLMULTI_ENTRY)
return index + 1;
+ else if (type == NIXLF_PROMISC_ENTRY)
+ return index + 2;
}
return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf);
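(With the third reserved slot, each CGX-mapped PF's fixed MCAM region is laid out as index + 0 for the broadcast match, index + 1 for the all-multicast match and index + 2 for the promiscuous match, exactly as the offsets above encode; every other case falls through to the per-NIXLF unicast index.)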
@@ -411,37 +415,50 @@ static void npc_fill_entryword(struct mcam_entry *entry, int idx,
}
}
-static void npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index,
- struct mcam_entry *entry)
+static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pf_func)
+{
+ int bank, nixlf, index;
+
+ /* get ucast entry rule entry index */
+ nix_get_nixlf(rvu, pf_func, &nixlf, NULL);
+ index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
+ NIXLF_UCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ return rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+}
+
+static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, struct mcam_entry *entry,
+ bool *enable)
{
u16 owner, target_func;
struct rvu_pfvf *pfvf;
- int bank, nixlf;
u64 rx_action;
owner = mcam->entry2pfvf_map[index];
target_func = (entry->action >> 4) & 0xffff;
- /* return incase target is PF or LBK or rule owner is not PF */
- if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) ||
+ /* do nothing when target is LBK/PF or owner is not PF */
+ if (is_pffunc_af(owner) || is_afvf(target_func) ||
+ (owner & RVU_PFVF_FUNC_MASK) ||
!(target_func & RVU_PFVF_FUNC_MASK))
return;
+ /* save entry2target_pffunc */
pfvf = rvu_get_pfvf(rvu, target_func);
mcam->entry2target_pffunc[index] = target_func;
- /* return if nixlf is not attached or initialized */
- if (!is_nixlf_attached(rvu, target_func) || !pfvf->def_ucast_rule)
- return;
- /* get VF ucast entry rule */
- nix_get_nixlf(rvu, target_func, &nixlf, NULL);
- index = npc_get_nixlf_mcam_index(mcam, target_func,
- nixlf, NIXLF_UCAST_ENTRY);
- bank = npc_get_bank(mcam, index);
- index &= (mcam->banksize - 1);
+ /* don't enable rule when nixlf not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target_func) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
+ *enable = false;
- rx_action = rvu_read64(rvu, blkaddr,
- NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ /* copy VF default entry action to the VF mcam entry */
+ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
+ target_func);
if (rx_action)
entry->action = rx_action;
}
@@ -452,6 +469,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
{
int bank = npc_get_bank(mcam, index);
int kw = 0, actbank, actindex;
+ u8 tx_intf_mask = ~intf & 0x3;
+ u8 tx_intf = intf;
u64 cam0, cam1;
actbank = bank; /* Save bank id, to set action later on */
@@ -472,12 +491,21 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
*/
for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
/* Interface should be set in all banks */
+ if (is_npc_intf_tx(intf)) {
+ /* TX interface numbers always have the LSB set;
+ * match only that bit and leave the rest don't care
+ */
+ tx_intf_mask = 0x1;
+ tx_intf = intf & tx_intf_mask;
+ tx_intf_mask = ~tx_intf & tx_intf_mask;
+ }
+
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
- intf);
+ tx_intf);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
- ~intf & 0x3);
+ tx_intf_mask);
/* Set the match key */
npc_get_keyword(entry, kw, &cam0, &cam1);
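(The paired CAMX_INTF writes implement the MCAM's ternary per-bit encoding: a bit must read as 1 when set in the CAM1 word, must read as 0 when set in the CAM0 word, and is don't-care when set in neither. A small sketch of that rule; the helper is illustrative, not part of the patch:

	static inline void npc_ternary_encode(u64 value, u64 care_mask,
					      u64 *cam1, u64 *cam0)
	{
		*cam1 = value & care_mask;	/* bits that must be 1 */
		*cam0 = ~value & care_mask;	/* bits that must be 0 */
	}

For RX interfaces care_mask is 0x3, both interface bits compared; for TX it shrinks to 0x1, leaving the upper interface bit don't-care exactly as the loop above arranges.)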
@@ -493,10 +521,9 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
}
- /* copy VF default entry action to the VF mcam entry */
+ /* PF installing VF rule */
if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries)
- npc_get_default_entry_action(rvu, mcam, blkaddr, actindex,
- entry);
+ npc_fixup_vf_rule(rvu, mcam, blkaddr, index, entry, &enable);
/* Set 'action' */
rvu_write64(rvu, blkaddr,
@@ -635,6 +662,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
eth_broadcast_addr((u8 *)&req.mask.dmac);
req.features = BIT_ULL(NPC_DMAC);
req.channel = chan;
+ req.chan_mask = 0xFFFU;
req.intf = pfvf->nix_rx_intf;
req.op = action.op;
req.hdr.pcifunc = 0; /* AF is requester */
@@ -647,30 +675,32 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
}
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, u64 chan, u8 chan_cnt,
- bool allmulti)
+ int nixlf, u64 chan, u8 chan_cnt)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_install_flow_req req = { 0 };
struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
int blkaddr, ucast_idx, index;
- u8 mac_addr[ETH_ALEN] = { 0 };
struct nix_rx_action action;
u64 relaxed_mask;
- /* Only PF or AF VF can add a promiscuous entry */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
+ if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
- *(u64 *)&action = 0x00;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
+ if (is_cgx_vf(rvu, pcifunc))
+ index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
/* If the corresponding PF's ucast action is RSS,
* use the same action for promisc also
*/
@@ -678,19 +708,20 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
- blkaddr, ucast_idx);
+ blkaddr, ucast_idx);
if (action.op != NIX_RX_ACTIONOP_RSS) {
*(u64 *)&action = 0x00;
action.op = NIX_RX_ACTIONOP_UCAST;
- action.pf_func = pcifunc;
}
- if (allmulti) {
- mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */
- ether_addr_copy(req.packet.dmac, mac_addr);
- ether_addr_copy(req.mask.dmac, mac_addr);
- req.features = BIT_ULL(NPC_DMAC);
+ /* RX_ACTION set to MCAST for CGX PFs */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ action.index = pfvf->promisc_mce_idx;
}
req.chan_mask = 0xFFFU;
@@ -718,8 +749,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
-static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, bool enable)
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
@@ -728,25 +759,14 @@ static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
if (blkaddr < 0)
return;
- /* Only PF's have a promiscuous entry */
- if (pcifunc & RVU_PFVF_FUNC_MASK)
- return;
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
-{
- npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false);
-}
-
-void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
-{
- npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true);
-}
-
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan)
{
@@ -756,8 +776,6 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
int blkaddr, index;
- u32 req_index = 0;
- u8 op;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -770,7 +788,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
/* If pkt replication is not supported,
* then only PF is allowed to add a bcast match entry.
*/
- if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK)
+ if (!hw->cap.nix_rx_multicast && is_vf(pcifunc))
return;
/* Get 'pcifunc' of PF device */
@@ -784,27 +802,27 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
* so install entry with UCAST action, so that PF
* receives all broadcast packets.
*/
- op = NIX_RX_ACTIONOP_UCAST;
+ req.op = NIX_RX_ACTIONOP_UCAST;
} else {
- op = NIX_RX_ACTIONOP_MCAST;
- req_index = pfvf->bcast_mce_idx;
+ req.op = NIX_RX_ACTIONOP_MCAST;
+ req.index = pfvf->bcast_mce_idx;
}
eth_broadcast_addr((u8 *)&req.packet.dmac);
eth_broadcast_addr((u8 *)&req.mask.dmac);
req.features = BIT_ULL(NPC_DMAC);
req.channel = chan;
+ req.chan_mask = 0xFFFU;
req.intf = pfvf->nix_rx_intf;
req.entry = index;
- req.op = op;
req.hdr.pcifunc = 0; /* AF is requester */
req.vf = pcifunc;
- req.index = req_index;
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
@@ -816,7 +834,104 @@ void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
- index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY);
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, ucast_idx, index;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ struct nix_rx_action action;
+ struct rvu_pfvf *pfvf;
+ u16 vf_func;
+
+ /* Only CGX PF/VF can add allmulticast entry */
+ if (is_afvf(pcifunc))
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ vf_func = pcifunc & RVU_PFVF_FUNC_MASK;
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_ALLMULTI_ENTRY);
+
+ /* If the corresponding PF's ucast action is RSS,
+ * use the same action for multicast entry also
+ */
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, ucast_idx);
+
+ if (action.op != NIX_RX_ACTIONOP_RSS) {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ }
+
+ /* RX_ACTION set to MCAST for CGX PFs */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ action.index = pfvf->mcast_mce_idx;
+ }
+
+ mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ ether_addr_copy(req.mask.dmac, mac_addr);
+ req.features = BIT_ULL(NPC_DMAC);
+
+ /* On CN10K the upper two bits of the channel number carry the
+ * CPT channel number. Masking these bits out in the MCAM entry
+ * lets the same entry used for NIX also match packets
+ * received from CPT for parsing.
+ */
+ if (!is_rvu_otx2(rvu))
+ req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
+ else
+ req.chan_mask = 0xFFFU;
+
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc | vf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
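(The DMAC match installed here keys on a single bit: with both the packet value and the mask set to 01:00:00:00:00:00, only the group/multicast bit of the first octet is compared, so one entry matches every multicast destination MAC. Broadcast frames carry that bit too, but they hit the broadcast entry reserved one slot earlier, which takes priority by virtue of its lower MCAM index.)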
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
@@ -858,6 +973,7 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
struct nix_rx_action action;
int blkaddr, index, bank;
struct rvu_pfvf *pfvf;
@@ -913,7 +1029,8 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
/* If PF's promiscuous entry is enabled,
* Set RSS action for that entry as well
*/
- if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+ if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) &&
+ is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
bank = npc_get_bank(mcam, index);
index &= (mcam->banksize - 1);
@@ -923,12 +1040,47 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
}
}
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+ int index, blkaddr, mce_idx;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
+
+ /* ena/dis the MCAM entry directly when packet replication is not supported by hw */
+ if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ return;
+ }
+
+ /* return in case the mce list is not enabled */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ if (hw->cap.nix_rx_multicast && is_vf(pcifunc) &&
+ type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list)
+ return;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, index, enable);
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct nix_rx_action action;
- int index, bank, blkaddr;
+ int index, blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -939,48 +1091,33 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
- /* For PF, ena/dis promisc and bcast MCAM match entries.
- * For VFs add/delete from bcast list when RX multicast
- * feature is present.
+ /* Nothing to do for VFs on platforms where pkt replication
+ * is not supported
*/
- if (pcifunc & RVU_PFVF_FUNC_MASK && !rvu->hw->cap.nix_rx_multicast)
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast)
return;
- /* For bcast, enable/disable only if it's action is not
- * packet replication, incase if action is replication
- * then this PF/VF's nixlf is removed from bcast replication
- * list.
- */
- index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
- nixlf, NIXLF_BCAST_ENTRY);
- bank = npc_get_bank(mcam, index);
- *(u64 *)&action = rvu_read64(rvu, blkaddr,
- NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank));
-
- /* VFs will not have BCAST entry */
- if (action.op != NIX_RX_ACTIONOP_MCAST &&
- !(pcifunc & RVU_PFVF_FUNC_MASK)) {
- npc_enable_mcam_entry(rvu, mcam,
- blkaddr, index, enable);
- } else {
- nix_update_bcast_mce_list(rvu, pcifunc, enable);
- /* Enable PF's BCAST entry for packet replication */
- rvu_npc_enable_bcast_entry(rvu, pcifunc, enable);
- }
-
- if (enable)
- rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
- else
- rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
+ /* add/delete pf_func to broadcast MCE list */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY, enable);
}
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
+
+ /* Delete multicast and promisc MCAM entries */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY, false);
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_PROMISC_ENTRY, false);
}
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ /* Enables only the broadcast match entry. Promisc/Allmulti are
+ * enabled in the set_rx_mode mbox handler.
+ */
npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
}
@@ -1000,7 +1137,8 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
/* Disable MCAM entries directing traffic to this 'pcifunc' */
list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
if (is_npc_intf_rx(rule->intf) &&
- rule->rx_action.pf_func == pcifunc) {
+ rule->rx_action.pf_func == pcifunc &&
+ rule->rx_action.op != NIX_RX_ACTIONOP_MCAST) {
npc_enable_mcam_entry(rvu, mcam, blkaddr,
rule->entry, false);
rule->enable = false;
@@ -1134,6 +1272,30 @@ static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
}
}
+static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
+ u64 *size)
+{
+ u64 prfl_addr, prfl_sz;
+
+ if (!rvu->fwdata)
+ return -EINVAL;
+
+ prfl_addr = rvu->fwdata->mcam_addr;
+ prfl_sz = rvu->fwdata->mcam_sz;
+
+ if (!prfl_addr || !prfl_sz)
+ return -EINVAL;
+
+ *prfl_img_addr = ioremap_wc(prfl_addr, prfl_sz);
+ if (!(*prfl_img_addr))
+ return -ENOMEM;
+
+ *size = prfl_sz;
+
+ return 0;
+}
+
+/* A valid profile's mkex_sign is strtoull of "mkexprof" with base 36;
+ * the profile list in the firmware image is terminated by MKEX_END_SIGN.
+ */
#define MKEX_END_SIGN 0xdeadbeef
static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
@@ -1141,26 +1303,21 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
{
struct device *dev = &rvu->pdev->dev;
struct npc_mcam_kex *mcam_kex;
- void *mkex_prfl_addr = NULL;
- u64 prfl_addr, prfl_sz;
+ void __iomem *mkex_prfl_addr = NULL;
+ u64 prfl_sz;
+ int ret;
/* If user not selected mkex profile */
- if (!strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
+ if (rvu->kpu_fwdata_sz ||
+ !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
goto program_mkex;
- if (!rvu->fwdata)
+ /* Setting up the mapping for mkex profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz);
+ if (ret < 0)
goto program_mkex;
- prfl_addr = rvu->fwdata->mcam_addr;
- prfl_sz = rvu->fwdata->mcam_sz;
- if (!prfl_addr || !prfl_sz)
- goto program_mkex;
-
- mkex_prfl_addr = memremap(prfl_addr, prfl_sz, MEMREMAP_WC);
- if (!mkex_prfl_addr)
- goto program_mkex;
-
- mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;
+ mcam_kex = (struct npc_mcam_kex __force *)mkex_prfl_addr;
while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
/* Compare with mkex mod_param name string */
@@ -1186,7 +1343,7 @@ program_mkex:
/* Program selected mkex profile */
npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex);
if (mkex_prfl_addr)
- memunmap(mkex_prfl_addr);
+ iounmap(mkex_prfl_addr);
}
static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
@@ -1263,6 +1420,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
const struct npc_kpu_profile *profile)
{
int entry, num_entries, max_entries;
+ u64 entry_mask;
if (profile->cam_entries != profile->action_entries) {
dev_err(rvu->dev,
@@ -1286,8 +1444,12 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
/* Enable all programmed entries */
num_entries = min_t(int, profile->action_entries, profile->cam_entries);
+ entry_mask = enable_mask(num_entries);
+ /* Disable first KPU_MAX_CST_ENT entries for built-in profile */
+ if (!rvu->kpu.custom)
+ entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0);
rvu_write64(rvu, blkaddr,
- NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries));
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask);
if (num_entries > 64) {
rvu_write64(rvu, blkaddr,
NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
@@ -1300,6 +1462,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
{
+ profile->custom = 0;
profile->name = def_pfl_name;
profile->version = NPC_KPU_PROFILE_VER;
profile->ikpu = ikpu_action_entries;
@@ -1312,10 +1475,245 @@ static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
return 0;
}
+static int npc_apply_custom_kpu(struct rvu *rvu,
+ struct npc_kpu_profile_adapter *profile)
+{
+ size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0;
+ struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata;
+ struct npc_kpu_profile_action *action;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_fwdata *fw_kpu;
+ int entries;
+ u16 kpu, entry;
+
+ if (rvu->kpu_fwdata_sz < hdr_sz) {
+ dev_warn(rvu->dev, "Invalid KPU profile size\n");
+ return -EINVAL;
+ }
+ if (le64_to_cpu(fw->signature) != KPU_SIGN) {
+ dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n",
+ fw->signature);
+ return -EINVAL;
+ }
+ /* Verify that the profile uses a known structure (major version) */
+ if (NPC_KPU_VER_MAJ(profile->version) >
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev, "Not supported Major version: %d > %d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile is aligned with the required kernel changes */
+ if (NPC_KPU_VER_MIN(profile->version) <
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev,
+ "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile fits the HW */
+ if (fw->kpus > profile->kpus) {
+ dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus,
+ profile->kpus);
+ return -EINVAL;
+ }
+
+ profile->custom = 1;
+ profile->name = fw->name;
+ profile->version = le64_to_cpu(fw->version);
+ profile->mkex = &fw->mkex;
+ profile->lt_def = &fw->lt_def;
+
+ for (kpu = 0; kpu < fw->kpus; kpu++) {
+ fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset);
+ if (fw_kpu->entries > KPU_MAX_CST_ENT)
+ dev_warn(rvu->dev,
+ "Too many custom entries on KPU%d: %d > %d\n",
+ kpu, fw_kpu->entries, KPU_MAX_CST_ENT);
+ entries = min(fw_kpu->entries, KPU_MAX_CST_ENT);
+ cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
+ offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
+ action = (struct npc_kpu_profile_action *)(fw->data + offset);
+ offset += fw_kpu->entries * sizeof(*action);
+ if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
+ dev_warn(rvu->dev,
+ "Profile size mismatch on KPU%i parsing.\n",
+ kpu + 1);
+ return -EINVAL;
+ }
+ for (entry = 0; entry < entries; entry++) {
+ profile->kpu[kpu].cam[entry] = cam[entry];
+ profile->kpu[kpu].action[entry] = action[entry];
+ }
+ }
+
+ return 0;
+}
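
For reference, a sketch mirroring the offset arithmetic that npc_apply_custom_kpu() walks. The flat blob layout is inferred from that arithmetic, not from a published spec, and the helper below is hypothetical:

/* Inferred layout of the KPU firmware blob:
 *   struct npc_kpu_profile_fwdata hdr;   (signature, name, version,
 *                                          mkex, lt_def, kpus, data[])
 *   repeated 'kpus' times inside data[]:
 *     struct npc_kpu_fwdata kpu_hdr;     (entries, data[])
 *     struct npc_kpu_profile_cam cam[entries];
 *     struct npc_kpu_profile_action action[entries];
 */
static size_t kpu_blob_size(const struct npc_kpu_profile_fwdata *fw)
{
	size_t off = 0;
	u16 i;

	for (i = 0; i < fw->kpus; i++) {
		const struct npc_kpu_fwdata *k =
			(const struct npc_kpu_fwdata *)(fw->data + off);

		off += sizeof(*k) +
		       k->entries * (sizeof(struct npc_kpu_profile_cam) +
				     sizeof(struct npc_kpu_profile_action));
	}
	return sizeof(*fw) + off;
}
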
+
+static int npc_load_kpu_prfl_img(struct rvu *rvu, void __iomem *prfl_addr,
+ u64 prfl_sz, const char *kpu_profile)
+{
+ struct npc_kpu_profile_fwdata *kpu_data = NULL;
+ int rc = -EINVAL;
+
+ kpu_data = (struct npc_kpu_profile_fwdata __force *)prfl_addr;
+ if (le64_to_cpu(kpu_data->signature) == KPU_SIGN &&
+ !strncmp(kpu_data->name, kpu_profile, KPU_NAME_LEN)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware db: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kpu_data;
+ rvu->kpu_fwdata_sz = prfl_sz;
+ rvu->kpu_prfl_addr = prfl_addr;
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
+ const char *kpu_profile)
+{
+ struct npc_coalesced_kpu_prfl *img_data = NULL;
+ int i = 0, rc = -EINVAL;
+ void __iomem *kpu_prfl_addr;
+ u16 offset;
+
+ img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
+ if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
+ !strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) {
+ /* Loaded profile is a single KPU profile. */
+ rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr,
+ prfl_sz, kpu_profile);
+ goto done;
+ }
+
+ /* Loaded image is a coalesced image; compute offset of the first KPU profile. */
+ offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) +
+ (img_data->num_prfl * sizeof(uint16_t));
+ /* Walk the coalesced image, probing each profile. */
+ while (i < img_data->num_prfl) {
+ /* Profile image offsets are rounded up to the next multiple of 8. */
+ offset = ALIGN_8B_CEIL(offset);
+ kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr +
+ offset);
+ rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr,
+ img_data->prfl_sz[i], kpu_profile);
+ if (!rc)
+ break;
+ /* Advance offset to the next profile image based on this profile's size. */
+ offset += img_data->prfl_sz[i];
+ i++;
+ }
+done:
+ return rc;
+}
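
To make the coalesced-image walk concrete, a worked example of the same offset arithmetic. ALIGN_8B_CEIL is assumed to round up to the next multiple of 8, e.g. ((x) + 7) & ~7ULL; the numbers are illustrative only:

/* Assume num_prfl = 2 and prfl_sz[] = { 1000, 2000 }:
 *
 *   hdr_end = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz)
 *             + 2 * sizeof(u16);
 *   img0    = ALIGN_8B_CEIL(hdr_end);       first profile image
 *   img1    = ALIGN_8B_CEIL(img0 + 1000);   1000 rounded up to 8B
 *
 * Each candidate image is probed with npc_load_kpu_prfl_img() until
 * one matches the requested profile name and signature.
 */
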
+
+static int npc_load_kpu_profile_fwdb(struct rvu *rvu, const char *kpu_profile)
+{
+ int ret = -EINVAL;
+ u64 prfl_sz;
+
+ /* Setting up the mapping for NPC profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz);
+ if (ret < 0)
+ goto done;
+
+ /* Detect if profile is coalesced or single KPU profile and load */
+ ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile);
+ if (ret == 0)
+ goto done;
+
+ /* Cleaning up if KPU profile image from fwdata is not valid. */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ rvu->kpu_fwdata = NULL;
+ }
+
+done:
+ return ret;
+}
+
static void npc_load_kpu_profile(struct rvu *rvu)
{
struct npc_kpu_profile_adapter *profile = &rvu->kpu;
+ const char *kpu_profile = rvu->kpu_pfl_name;
+ const struct firmware *fw = NULL;
+ bool retry_fwdb = false;
+
+ /* If the user didn't specify a custom profile */
+ if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN))
+ goto revert_to_default;
+ /* First prepare default KPU, then we'll customize top entries. */
+ npc_prepare_default_kpu(profile);
+
+ /* Order of precedence for loading the NPC profile (high to low):
+ * Firmware binary in filesystem.
+ * Firmware database method.
+ * Default KPU profile.
+ */
+ if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
+ if (rvu->kpu_fwdata) {
+ memcpy(rvu->kpu_fwdata, fw->data, fw->size);
+ rvu->kpu_fwdata_sz = fw->size;
+ }
+ release_firmware(fw);
+ retry_fwdb = true;
+ goto program_kpu;
+ }
+
+load_image_fwdb:
+ /* Loading the KPU profile using firmware database */
+ if (npc_load_kpu_profile_fwdb(rvu, kpu_profile))
+ goto revert_to_default;
+
+program_kpu:
+ /* Apply profile customization if firmware was loaded. */
+ if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) {
+ /* If the image from the firmware filesystem failed to load or is
+ * invalid, retry with the firmware database method.
+ */
+ if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) {
+ /* Loading image from firmware database failed. */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ } else {
+ kfree(rvu->kpu_fwdata);
+ }
+ rvu->kpu_fwdata = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ if (retry_fwdb) {
+ retry_fwdb = false;
+ goto load_image_fwdb;
+ }
+ }
+
+ dev_warn(rvu->dev,
+ "Can't load KPU profile %s. Using default.\n",
+ kpu_profile);
+ kfree(rvu->kpu_fwdata);
+ rvu->kpu_fwdata = NULL;
+ goto revert_to_default;
+ }
+ dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n",
+ profile->name, NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version));
+
+ return;
+
+revert_to_default:
npc_prepare_default_kpu(profile);
}
@@ -1323,7 +1721,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
int num_pkinds, num_kpus, idx;
- struct npc_pkind *pkind;
/* Disable all KPUs and their entries */
for (idx = 0; idx < hw->npc_kpus; idx++) {
@@ -1341,9 +1738,8 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
* Check HW max count to avoid configuring junk or
* writing to unsupported CSR addresses.
*/
- pkind = &hw->pkind;
num_pkinds = rvu->kpu.pkinds;
- num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
+ num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);
for (idx = 0; idx < num_pkinds; idx++)
npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
@@ -1361,6 +1757,8 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
int nixlf_count = rvu_get_nixlf_count(rvu);
struct npc_mcam *mcam = &rvu->hw->mcam;
int rsvd, err;
+ u16 index;
+ int cntr;
u64 cfg;
/* Actual number of MCAM entries vary by entry size */
@@ -1461,6 +1859,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
if (!mcam->entry2target_pffunc)
goto free_mem;
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+ mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+ }
+
+ for (cntr = 0; cntr < mcam->counters.max; cntr++)
+ mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+
mutex_init(&mcam->lock);
return 0;
@@ -1483,7 +1889,8 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
if (npc_const1 & BIT_ULL(63))
npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
- pkind->rsrc.max = (npc_const1 >> 12) & 0xFFULL;
+ pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT;
+ hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL;
hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
hw->npc_intfs = npc_const & 0xFULL;
@@ -1594,6 +2001,10 @@ int rvu_npc_init(struct rvu *rvu)
err = rvu_alloc_bitmap(&pkind->rsrc);
if (err)
return err;
+ /* Reserve PKIND#0 for LBKs. The power-on reset value of LBK_CH_PKIND
+ * is '0', so there is no need to configure PKIND for each LBK separately.
+ */
+ rvu_alloc_rsrc(&pkind->rsrc);
/* Allocate mem for pkind to PF and channel mapping info */
pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
@@ -1654,6 +2065,10 @@ void rvu_npc_freemem(struct rvu *rvu)
kfree(pkind->rsrc.bmap);
kfree(mcam->counters.bmap);
+ if (rvu->kpu_prfl_addr)
+ iounmap(rvu->kpu_prfl_addr);
+ else
+ kfree(rvu->kpu_fwdata);
mutex_destroy(&mcam->lock);
}
@@ -2149,8 +2564,11 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
rsp->free_count = 0;
/* Check if ref_entry is within range */
- if (req->priority && req->ref_entry >= mcam->bmap_entries)
+ if (req->priority && req->ref_entry >= mcam->bmap_entries) {
+ dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
+ __func__, req->ref_entry);
return NPC_MCAM_INVALID_REQ;
+ }
/* ref_entry can't be '0' if requested priority is high.
* Can't be last entry if requested priority is low.
@@ -2163,11 +2581,15 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
/* Since list of allocated indices needs to be sent to requester,
* max number of non-contiguous entries per mbox msg is limited.
*/
- if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) {
+ dev_err(rvu->dev,
+ "%s: %d Non-contiguous MCAM entries requested is more than max (%d) allowed\n",
+ __func__, req->count, NPC_MAX_NONCONTIG_ENTRIES);
return NPC_MCAM_INVALID_REQ;
+ }
/* Alloc request from PFFUNC with no NIXLF attached should be denied */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_ALLOC_DENIED;
return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
@@ -2187,7 +2609,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
return NPC_MCAM_INVALID_REQ;
/* Free request from PFFUNC with no NIXLF attached, ignore */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_INVALID_REQ;
mutex_lock(&mcam->lock);
@@ -2199,7 +2621,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
if (rc)
goto exit;
- mcam->entry2pfvf_map[req->entry] = 0;
+ mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP;
mcam->entry2target_pffunc[req->entry] = 0x0;
npc_mcam_clear_bit(mcam, req->entry);
npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
@@ -2284,13 +2706,14 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
else
nix_intf = pfvf->nix_rx_intf;
- if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
+ if (!is_pffunc_af(pcifunc) &&
+ npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
rc = NPC_MCAM_INVALID_REQ;
goto exit;
}
- if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
- pcifunc)) {
+ if (!is_pffunc_af(pcifunc) &&
+ npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
rc = NPC_MCAM_INVALID_REQ;
goto exit;
}
@@ -2441,7 +2864,7 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
return NPC_MCAM_INVALID_REQ;
/* If the request is from a PFFUNC with no NIXLF attached, ignore */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_INVALID_REQ;
/* Since list of allocated counter IDs needs to be sent to requester,
@@ -2686,7 +3109,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (rc) {
/* Free allocated MCAM entry */
mutex_lock(&mcam->lock);
- mcam->entry2pfvf_map[entry] = 0;
+ mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP;
npc_mcam_clear_bit(mcam, entry);
mutex_unlock(&mcam->lock);
return rc;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 7f35b62eea13..5c01cf4a9c5b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -123,11 +123,8 @@ static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
static bool npc_is_same(struct npc_key_field *input,
struct npc_key_field *field)
{
- int ret;
-
- ret = memcmp(&input->layer_mdata, &field->layer_mdata,
- sizeof(struct npc_layer_mdata));
- return ret == 0;
+ return memcmp(&input->layer_mdata, &field->layer_mdata,
+ sizeof(struct npc_layer_mdata)) == 0;
}
static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
@@ -913,14 +910,17 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct mcam_entry *entry,
- struct npc_install_flow_req *req, u16 target)
+ struct npc_install_flow_req *req,
+ u16 target, bool pf_set_vfs_mac)
{
+ struct rvu_switch *rswitch = &rvu->rswitch;
struct nix_rx_action action;
- u64 chan_mask;
- chan_mask = req->chan_mask ? req->chan_mask : ~0ULL;
- npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, chan_mask, 0,
- NIX_INTF_RX);
+ if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
+ req->chan_mask = 0x0; /* Don't care about the channel */
+
+ npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask,
+ 0, NIX_INTF_RX);
*(u64 *)&action = 0x00;
action.pf_func = target;
@@ -952,9 +952,16 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct npc_install_flow_req *req, u16 target)
{
struct nix_tx_action action;
+ u64 mask = ~0ULL;
+
+ /* If AF is installing then don't match on the
+ * PF_FUNC in the send descriptor
+ */
+ if (is_pffunc_af(req->hdr.pcifunc))
+ mask = 0;
npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
- 0, ~0ULL, 0, NIX_INTF_TX);
+ 0, mask, 0, NIX_INTF_TX);
*(u64 *)&action = 0x00;
action.op = req->op;
@@ -1005,7 +1012,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
req->intf);
if (is_npc_intf_rx(req->intf))
- npc_update_rx_entry(rvu, pfvf, entry, req, target);
+ npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
else
npc_update_tx_entry(rvu, pfvf, entry, req, target);
@@ -1103,11 +1110,18 @@ find_rule:
if (pf_set_vfs_mac) {
ether_addr_copy(pfvf->default_mac, req->packet.dmac);
ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
}
- if (pfvf->pf_set_vf_cfg && req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ if (test_bit(PF_SET_VF_CFG, &pfvf->flags) &&
+ req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
rule->vfvlan_cfg = true;
+ if (is_npc_intf_rx(req->intf) && req->match_id &&
+ (req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS))
+ return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
+ req->index, req->match_id);
+
return 0;
}
@@ -1160,14 +1174,16 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
if (err)
return err;
- if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
+ /* Skip channel validation if AF is installing */
+ if (!is_pffunc_af(req->hdr.pcifunc) &&
+ npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, target);
/* PF installing for its VF */
if (req->hdr.pcifunc && !from_vf && req->vf)
- pfvf->pf_set_vf_cfg = 1;
+ set_bit(PF_SET_VF_CFG, &pfvf->flags);
/* update req destination mac addr */
if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
@@ -1176,10 +1192,14 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
eth_broadcast_addr((u8 *)&req->mask.dmac);
}
+ /* For TX rules, proceed whether or not a NIXLF is attached */
err = nix_get_nixlf(rvu, target, &nixlf, NULL);
+ if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
+ return -EINVAL;
- /* If interface is uninitialized then do not enable entry */
- if (err || (!req->default_rule && !pfvf->def_ucast_rule))
+ /* Don't enable the rule when the NIXLF is not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
enable = false;
/* Packets reaching NPC in Tx path implies that a
@@ -1193,6 +1213,14 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
if (from_vf && !enable)
return -EINVAL;
+ /* PF set VF's MAC but VF's NIXLF is not attached; just update the MAC address */
+ if (pf_set_vfs_mac && !enable) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
+ return 0;
+ }
+
/* If message is from VF then its flow should not overlap with
* reserved unicast flow.
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index ac71c0f2f960..8b01ef6e2c99 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -49,6 +49,11 @@
#define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4)
#define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4)
#define RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4)
+#define RVU_AF_SMMU_ADDR_REQ (0x6000)
+#define RVU_AF_SMMU_TXN_REQ (0x6008)
+#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010)
+#define RVU_AF_SMMU_ADDR_TLN (0x6018)
+#define RVU_AF_SMMU_TLN_FLIT1 (0x6030)
/* Admin function's privileged PF/VF registers */
#define RVU_PRIV_CONST (0x8000000)
@@ -171,6 +176,7 @@
#define NIX_AF_SQ_CONST (0x0040)
#define NIX_AF_CQ_CONST (0x0048)
#define NIX_AF_RQ_CONST (0x0050)
+#define NIX_AF_PL_CONST (0x0058)
#define NIX_AF_PSE_CONST (0x0060)
#define NIX_AF_TL1_CONST (0x0070)
#define NIX_AF_TL2_CONST (0x0078)
@@ -181,6 +187,7 @@
#define NIX_AF_LSO_CFG (0x00A8)
#define NIX_AF_BLK_RST (0x00B0)
#define NIX_AF_TX_TSTMP_CFG (0x00C0)
+#define NIX_AF_PL_TS (0x00C8)
#define NIX_AF_RX_CFG (0x00D0)
#define NIX_AF_AVG_DELAY (0x00E0)
#define NIX_AF_CINT_DELAY (0x00F0)
@@ -208,19 +215,27 @@
#define NIX_AF_RVU_INT_ENA_W1S (0x01D0)
#define NIX_AF_RVU_INT_ENA_W1C (0x01D8)
#define NIX_AF_TCP_TIMER (0x01E0)
-#define NIX_AF_RX_WQE_TAG_CTL (0x01F0)
+#define NIX_AF_RX_DEF_ET(a) (0x01F0ull | (uint64_t)(a) << 3)
#define NIX_AF_RX_DEF_OL2 (0x0200)
#define NIX_AF_RX_DEF_OIP4 (0x0210)
#define NIX_AF_RX_DEF_IIP4 (0x0220)
+#define NIX_AF_RX_DEF_VLAN0_PCP_DEI (0x0228)
#define NIX_AF_RX_DEF_OIP6 (0x0230)
+#define NIX_AF_RX_DEF_VLAN1_PCP_DEI (0x0238)
#define NIX_AF_RX_DEF_IIP6 (0x0240)
#define NIX_AF_RX_DEF_OTCP (0x0250)
#define NIX_AF_RX_DEF_ITCP (0x0260)
#define NIX_AF_RX_DEF_OUDP (0x0270)
#define NIX_AF_RX_DEF_IUDP (0x0280)
#define NIX_AF_RX_DEF_OSCTP (0x0290)
+#define NIX_AF_RX_DEF_CST_APAD0 (0x0298)
#define NIX_AF_RX_DEF_ISCTP (0x02A0)
#define NIX_AF_RX_DEF_IPSECX (0x02B0)
+#define NIX_AF_RX_DEF_CST_APAD1 (0x02A8)
+#define NIX_AF_RX_DEF_IIP4_DSCP (0x02E0)
+#define NIX_AF_RX_DEF_OIP4_DSCP (0x02E8)
+#define NIX_AF_RX_DEF_IIP6_DSCP (0x02F0)
+#define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8)
#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
@@ -682,4 +697,9 @@
#define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6)
#define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0)
+/* APR */
+#define APR_AF_LMT_CFG (0x000ull)
+#define APR_AF_LMT_MAP_BASE (0x008ull)
+#define APR_AF_LMT_CTL (0x010ull)
+
#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 5e5f45c7eab0..5bbe6727d11d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -35,7 +35,8 @@ enum rvu_block_addr_e {
BLKADDR_NDC_NPA0 = 0xeULL,
BLKADDR_NDC_NIX1_RX = 0x10ULL,
BLKADDR_NDC_NIX1_TX = 0x11ULL,
- BLK_COUNT = 0x12ULL,
+ BLKADDR_APR = 0x16ULL,
+ BLK_COUNT = 0x17ULL,
};
/* RVU Block Type Enumeration */
@@ -286,7 +287,7 @@ enum nix_aq_ctype {
NIX_AQ_CTYPE_MCE = 0x3,
NIX_AQ_CTYPE_RSS = 0x4,
NIX_AQ_CTYPE_DYNO = 0x5,
- NIX_AQ_CTYPE_BAND_PROF = 0x6,
+ NIX_AQ_CTYPE_BANDPROF = 0x6,
};
/* NIX admin queue instruction opcodes */
@@ -665,6 +666,89 @@ struct nix_rx_mce_s {
uint64_t next : 16;
};
+enum nix_band_prof_layers {
+ BAND_PROF_LEAF_LAYER = 0,
+ BAND_PROF_INVAL_LAYER = 1,
+ BAND_PROF_MID_LAYER = 2,
+ BAND_PROF_TOP_LAYER = 3,
+ BAND_PROF_NUM_LAYERS = 4,
+};
+
+enum NIX_RX_BAND_PROF_ACTIONRESULT_E {
+ NIX_RX_BAND_PROF_ACTIONRESULT_PASS = 0x0,
+ NIX_RX_BAND_PROF_ACTIONRESULT_DROP = 0x1,
+ NIX_RX_BAND_PROF_ACTIONRESULT_RED = 0x2,
+};
+
+enum nix_band_prof_pc_mode {
+ NIX_RX_PC_MODE_VLAN = 0,
+ NIX_RX_PC_MODE_DSCP = 1,
+ NIX_RX_PC_MODE_GEN = 2,
+ NIX_RX_PC_MODE_RSVD = 3,
+};
+
+/* NIX ingress policer bandwidth profile structure */
+struct nix_bandprof_s {
+ uint64_t pc_mode : 2; /* W0 */
+ uint64_t icolor : 2;
+ uint64_t tnl_ena : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t peir_exponent : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pebs_exponent : 5;
+ uint64_t reserved_21_23 : 3;
+ uint64_t cir_exponent : 5;
+ uint64_t reserved_29_31 : 3;
+ uint64_t cbs_exponent : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t peir_mantissa : 8;
+ uint64_t pebs_mantissa : 8;
+ uint64_t cir_mantissa : 8;
+ uint64_t cbs_mantissa : 8; /* W1 */
+ uint64_t lmode : 1;
+ uint64_t l_sellect : 3;
+ uint64_t rdiv : 4;
+ uint64_t adjust_exponent : 5;
+ uint64_t reserved_85_86 : 2;
+ uint64_t adjust_mantissa : 9;
+ uint64_t gc_action : 2;
+ uint64_t yc_action : 2;
+ uint64_t rc_action : 2;
+ uint64_t meter_algo : 2;
+ uint64_t band_prof_id : 7;
+ uint64_t reserved_111_118 : 8;
+ uint64_t hl_en : 1;
+ uint64_t reserved_120_127 : 8;
+ uint64_t ts : 48; /* W2 */
+ uint64_t reserved_176_191 : 16;
+ uint64_t pe_accum : 32; /* W3 */
+ uint64_t c_accum : 32;
+ uint64_t green_pkt_pass : 48; /* W4 */
+ uint64_t reserved_304_319 : 16;
+ uint64_t yellow_pkt_pass : 48; /* W5 */
+ uint64_t reserved_368_383 : 16;
+ uint64_t red_pkt_pass : 48; /* W6 */
+ uint64_t reserved_432_447 : 16;
+ uint64_t green_octs_pass : 48; /* W7 */
+ uint64_t reserved_496_511 : 16;
+ uint64_t yellow_octs_pass : 48; /* W8 */
+ uint64_t reserved_560_575 : 16;
+ uint64_t red_octs_pass : 48; /* W9 */
+ uint64_t reserved_624_639 : 16;
+ uint64_t green_pkt_drop : 48; /* W10 */
+ uint64_t reserved_688_703 : 16;
+ uint64_t yellow_pkt_drop : 48; /* W11 */
+ uint64_t reserved_752_767 : 16;
+ uint64_t red_pkt_drop : 48; /* W12 */
+ uint64_t reserved_816_831 : 16;
+ uint64_t green_octs_drop : 48; /* W13 */
+ uint64_t reserved_880_895 : 16;
+ uint64_t yellow_octs_drop : 48; /* W14 */
+ uint64_t reserved_944_959 : 16;
+ uint64_t red_octs_drop : 48; /* W15 */
+ uint64_t reserved_1008_1023 : 16;
+};
+
enum nix_lsoalg {
NIX_LSOALG_NOP,
NIX_LSOALG_ADD_SEGNUM,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
new file mode 100644
index 000000000000..820adf390b8e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include "rvu.h"
+
+static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
+ u16 chan_mask)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* If the pcifunc is not initialized then nothing to do.
+ * This same function will be called again via rvu_switch_update_rules
+ * after pcifunc is initialized.
+ */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = pfvf->rx_chan_base;
+ req.chan_mask = chan_mask;
+ req.intf = pfvf->nix_rx_intf;
+ req.op = NIX_RX_ACTION_DEFAULT;
+ req.default_rule = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+ u8 lbkid;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* If the pcifunc is not initialized then nothing to do.
+ * This same function will be called again via rvu_switch_update_rules
+ * after pcifunc is initialized.
+ */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
+ lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.entry = entry;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.intf = pfvf->nix_tx_intf;
+ req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
+ req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+ req.set_cntr = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
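
The req.index value above packs the destination LBK channel for the UCAST_CHAN action. A hypothetical helper mirroring that packing (field widths are inferred from the shift in the code, not from a hardware spec):

/* bits [7:0]: LBK channel; bit 8: LBK block (NIX0 -> LBK0, NIX1 -> LBK1) */
static inline u16 rvu_switch_tx_index(u8 lbkid, u8 chan)
{
	return ((u16)lbkid << 8) | chan;
}
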
+
+static int rvu_switch_install_rules(struct rvu *rvu)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u16 start = rswitch->start_entry;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc, entry = 0;
+ int pf, vf, numvfs;
+ int err;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+ /* rvu_get_nix_blkaddr sets up the corresponding NIX block
+ * address and NIX RX and TX interfaces for a pcifunc.
+ * Generally it is called during a pcifunc's attach, but it is
+ * called here since we are pre-installing rules before any
+ * NIXLFs are attached.
+ */
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ /* An MCAM RX rule for each PF/VF already exists as the default
+ * unicast rule installed by AF. Hence change the channel match in
+ * those rules to "don't care" so that packets with the required
+ * DMAC are accepted whether they arrive from LBK (from other
+ * PF/VFs in the system) or from the external world (from the wire).
+ */
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry);
+ if (err) {
+ dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev,
+ "RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc,
+ start + entry);
+ if (err) {
+ dev_err(rvu->dev,
+ "TX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ }
+ }
+
+ return 0;
+}
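
The loops above build pcifunc values by hand. A small sketch of the assumed encoding (10-bit function number in the low bits, PF number from bit 10; VF n maps to function n + 1, and 0 denotes the PF itself):

static inline u16 mk_pcifunc(int pf, int func)
{
	return (u16)(pf << 10) | (func & 0x3FF);
}

/* PF2 itself:  mk_pcifunc(2, 0) == 0x800
 * PF2's VF0:   mk_pcifunc(2, 1) == 0x801
 */
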
+
+void rvu_switch_enable(struct rvu *rvu)
+{
+ struct npc_mcam_alloc_entry_req alloc_req = { 0 };
+ struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct msg_rsp rsp;
+ int ret;
+
+ alloc_req.contig = true;
+ alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+ ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+ &alloc_rsp);
+ if (ret) {
+ dev_err(rvu->dev,
+ "Unable to allocate MCAM entries\n");
+ goto exit;
+ }
+
+ if (alloc_rsp.count != alloc_req.count) {
+ dev_err(rvu->dev,
+ "Unable to allocate %d MCAM entries, got %d\n",
+ alloc_req.count, alloc_rsp.count);
+ goto free_entries;
+ }
+
+ rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16),
+ GFP_KERNEL);
+ if (!rswitch->entry2pcifunc)
+ goto free_entries;
+
+ rswitch->used_entries = alloc_rsp.count;
+ rswitch->start_entry = alloc_rsp.entry;
+
+ ret = rvu_switch_install_rules(rvu);
+ if (ret)
+ goto uninstall_rules;
+
+ return;
+
+uninstall_rules:
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ kfree(rswitch->entry2pcifunc);
+free_entries:
+ free_req.all = 1;
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+exit:
+ return;
+}
+
+void rvu_switch_disable(struct rvu *rvu)
+{
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs;
+ struct msg_rsp rsp;
+ u16 pcifunc;
+ int err;
+
+ if (!rswitch->used_entries)
+ return;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%d failed(%d)\n",
+ pf, err);
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ }
+ }
+
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ free_req.all = 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+ rswitch->used_entries = 0;
+ kfree(rswitch->entry2pcifunc);
+}
+
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u32 max = rswitch->used_entries;
+ u16 entry;
+
+ if (!rswitch->used_entries)
+ return;
+
+ for (entry = 0; entry < max; entry++) {
+ if (rswitch->entry2pcifunc[entry] == pcifunc)
+ break;
+ }
+
+ if (entry >= max)
+ return;
+
+ rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry);
+ rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
index e6609068e81b..64aa7d350df1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -21,7 +21,7 @@ TRACE_EVENT(otx2_msg_alloc,
__field(u16, id)
__field(u64, size)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
__entry->id = id;
__entry->size = size;
),
@@ -36,7 +36,7 @@ TRACE_EVENT(otx2_msg_send,
__field(u16, num_msgs)
__field(u64, msg_size)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
__entry->num_msgs = num_msgs;
__entry->msg_size = msg_size;
),
@@ -52,7 +52,7 @@ TRACE_EVENT(otx2_msg_check,
__field(u16, rspid)
__field(int, rc)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
__entry->reqid = reqid;
__entry->rspid = rspid;
__entry->rc = rc;
@@ -69,8 +69,8 @@ TRACE_EVENT(otx2_msg_interrupt,
__string(str, msg)
__field(u64, intr)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev))
- __assign_str(str, msg)
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ __assign_str(str, msg);
__entry->intr = intr;
),
TP_printk("[%s] mbox interrupt %s (0x%llx)\n", __get_str(dev),
@@ -84,7 +84,7 @@ TRACE_EVENT(otx2_msg_process,
__field(u16, id)
__field(int, err)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
__entry->id = id;
__entry->err = err;
),
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 457c94793e63..3254b02205ca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o
+ otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o
rvu_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 9ec0313f13fc..184de9466286 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -22,69 +22,52 @@ static struct dev_hw_ops cn10k_hw_ops = {
.refill_pool_ptrs = cn10k_refill_pool_ptrs,
};
-int cn10k_pf_lmtst_init(struct otx2_nic *pf)
+int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
- int size, num_lines;
- u64 base;
- if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
- pf->hw_ops = &otx2_hw_ops;
+ struct lmtst_tbl_setup_req *req;
+ int qcount, err;
+
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ pfvf->hw_ops = &otx2_hw_ops;
return 0;
}
- pf->hw_ops = &cn10k_hw_ops;
- base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
- (MBOX_SIZE * (pf->total_vfs + 1));
-
- size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) -
- (MBOX_SIZE * (pf->total_vfs + 1));
-
- pf->hw.lmt_base = ioremap(base, size);
+ pfvf->hw_ops = &cn10k_hw_ops;
+ qcount = pfvf->hw.max_queues;
+ /* LMTST lines allocation:
+ * qcount = max queues (one per online CPU).
+ * NPA lines = qcount * 3 (TX + RX + XDP per queue).
+ * NIX lines = qcount * 32 (for burst SQE flush).
+ */
+ pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32);
+ pfvf->npa_lmt_lines = qcount * 3;
+ pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE;
- if (!pf->hw.lmt_base) {
- dev_err(pf->dev, "Unable to map PF LMTST region\n");
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
- /* FIXME: Get the num of LMTST lines from LMT table */
- pf->tot_lmt_lines = size / LMT_LINE_SIZE;
- num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) /
- pf->hw.tx_queues;
- /* Number of LMT lines per SQ queues */
- pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
-
- pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE;
- return 0;
-}
-
-int cn10k_vf_lmtst_init(struct otx2_nic *vf)
-{
- int size, num_lines;
+ req->use_local_lmt_region = true;
- if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) {
- vf->hw_ops = &otx2_hw_ops;
- return 0;
+ err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
+ LMT_LINE_SIZE);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
}
+ pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
+ req->lmt_iova = (u64)pfvf->dync_lmt->iova;
- vf->hw_ops = &cn10k_hw_ops;
- size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM);
- vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev,
- PCI_MBOX_BAR_NUM),
- size);
- if (!vf->hw.lmt_base) {
- dev_err(vf->dev, "Unable to map VF LMTST region\n");
- return -ENOMEM;
- }
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
- vf->tot_lmt_lines = size / LMT_LINE_SIZE;
- /* LMTST lines per SQ */
- num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) /
- vf->hw.tx_queues;
- vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
- vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE;
return 0;
}
-EXPORT_SYMBOL(cn10k_vf_lmtst_init);
+EXPORT_SYMBOL(cn10k_lmtst_init);
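
To make the line accounting above concrete, a worked example assuming hw.max_queues = 8 (illustrative numbers only):

/*   npa_lmt_lines = 8 * 3    =  24   (TX + RX + XDP per queue)
 *   NIX lines     = 8 * 32   = 256   (LMT_BURST_SIZE per SQ)
 *   tot_lmt_lines = 24 + 256 = 280
 *
 * qmem_alloc() therefore maps 280 * LMT_LINE_SIZE (128 B) = 35840
 * bytes for the dynamically allocated LMTST region.
 */
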
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
@@ -93,9 +76,11 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
struct otx2_snd_queue *sq;
sq = &pfvf->qset.sq[qidx];
- sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base +
+ sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base +
(qidx * pfvf->nix_lmt_size));
+ sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE);
+
/* Get memory to put this msg */
aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
if (!aq)
@@ -158,15 +143,13 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
{
- struct otx2_nic *pfvf = dev;
- int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines);
u64 val = 0, tar_addr = 0;
/* FIXME: val[0:10] LMT_ID.
* [12:15] no of LMTST - 1 in the burst.
* [19:63] data size of each LMTST in the burst except first.
*/
- val = (lmt_id & 0x7FF);
+ val = (sq->lmt_id & 0x7FF);
/* Target address for LMTST flush tells HW how many 128bit
* words are present.
* tar_addr[6:4] size of first LMTST - 1 in units of 128b.
@@ -179,3 +162,326 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
sq->head++;
sq->head &= (sq->sqe_cnt - 1);
}
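
Per the FIXME comment above, the LMTST dispatch words are bit-packed. A hedged sketch of that packing for a multi-LMTST burst; the field positions are taken from the comment, and the helpers themselves are hypothetical, not part of the patch:

static u64 mk_lmtst_val(u16 lmt_id, int num_lmtst)
{
	/* val[10:0] = LMT_ID, val[15:12] = number of LMTSTs - 1 */
	return (lmt_id & 0x7FF) | (((u64)(num_lmtst - 1) & 0xF) << 12);
}

static u64 mk_lmtst_tar(u64 io_addr, int first_size_128b)
{
	/* tar_addr[6:4] = size of first LMTST - 1, in 128-bit words */
	return io_addr | (((u64)(first_size_128b - 1) & 0x7) << 4);
}
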
+
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf)
+{
+ struct nix_bandprof_free_req *req;
+ int rc;
+
+ if (is_dev_otx2(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Free all bandwidth profiles allocated */
+ req->free_all = true;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
+{
+ struct nix_bandprof_alloc_req *req;
+ struct nix_bandprof_alloc_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ goto out;
+
+ rsp = (struct nix_bandprof_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
+ rc = -EIO;
+ goto out;
+ }
+
+ *leaf = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0];
+out:
+ if (rc) {
+ dev_warn(pfvf->dev,
+ "Failed to allocate ingress bandwidth policer\n");
+ }
+
+ return rc;
+}
+
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int ret;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ ret = cn10k_alloc_leaf_profile(pfvf, &hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return ret;
+}
+
+#define POLICER_TIMESTAMP 1 /* 1 second */
+#define MAX_RATE_EXP 22 /* Valid rate exponent range: 0 - 22 */
+
+static void cn10k_get_ingress_burst_cfg(u32 burst, u32 *burst_exp,
+ u32 *burst_mantissa)
+{
+ int tmp;
+
+ /* Burst is calculated as
+ * (1 + [BURST_MANTISSA]/256) * 2^[BURST_EXPONENT]
+ * This is the upper limit on the number of tokens (bytes) that
+ * can be accumulated in the bucket.
+ */
+ *burst_exp = ilog2(burst);
+ if (burst < 256) {
+ /* No float: can't express mantissa in this case */
+ *burst_mantissa = 0;
+ return;
+ }
+
+ if (*burst_exp > MAX_RATE_EXP)
+ *burst_exp = MAX_RATE_EXP;
+
+ /* Calculate mantissa
+ * Find remaining bytes 'burst - 2^burst_exp'
+ * mantissa = (remaining bytes) / 2^ (burst_exp - 8)
+ */
+ tmp = burst - rounddown_pow_of_two(burst);
+ *burst_mantissa = tmp / (1UL << (*burst_exp - 8));
+}
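
A worked example of the burst conversion above, for burst = 3000 bytes:

/*   burst_exp      = ilog2(3000)      = 11   (2^11 = 2048)
 *   remainder      = 3000 - 2048      = 952
 *   burst_mantissa = 952 / 2^(11 - 8) = 119
 *
 * The bucket limit becomes (1 + 119/256) * 2^11 = 3000 bytes,
 * exactly the requested burst.
 */
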
+
+static void cn10k_get_ingress_rate_cfg(u64 rate, u32 *rate_exp,
+ u32 *rate_mantissa, u32 *rdiv)
+{
+ u32 div = 0;
+ u32 exp = 0;
+ u64 tmp;
+
+ /* Figure out the mantissa, exponent and divider from the given
+ * max packet rate.
+ *
+ * To achieve the desired rate, HW adds
+ * (1 + [RATE_MANTISSA]/256) * 2^[RATE_EXPONENT] tokens (bytes) to
+ * the token bucket every policer timeunit * 2^rdiv, i.e. every
+ * 2 * 2^rdiv usecs. Here the policer timeunit is 2 usecs and the
+ * rate is in bits per sec.
+ * Since floating point cannot be used, the algorithm below uses a
+ * scale factor of 1000000 to support rates up to 100Gbps.
+ */
+ tmp = rate * 32 * 2;
+ if (tmp < 256000000) {
+ while (tmp < 256000000) {
+ tmp = tmp * 2;
+ div++;
+ }
+ } else {
+ for (exp = 0; tmp >= 512000000 && exp <= MAX_RATE_EXP; exp++)
+ tmp = tmp / 2;
+
+ if (exp > MAX_RATE_EXP)
+ exp = MAX_RATE_EXP;
+ }
+
+ *rate_mantissa = (tmp - 256000000) / 1000000;
+ *rate_exp = exp;
+ *rdiv = div;
+}
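
And a worked example of the rate conversion, for rate = 1 Gbps (1000000000 bits/sec):

/*   tmp = 1000000000 * 32 * 2 = 64000000000  (>= 256000000, so rdiv = 0)
 *   halving while tmp >= 512000000:
 *     64000000000 / 2^7 = 500000000  ->  rate_exp = 7
 *   rate_mantissa = (500000000 - 256000000) / 1000000 = 244
 *
 * HW then adds (1 + 244/256) * 2^7 = 250 bytes per 2 usec window,
 * i.e. 125000000 bytes/sec = 1 Gbps.
 */
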
+
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Enable policing and set the bandwidth profile (policer) index */
+ if (map)
+ aq->rq.policer_ena = 1;
+ else
+ aq->rq.policer_ena = 0;
+ aq->rq_mask.policer_ena = 1;
+
+ aq->rq.band_prof_id = policer;
+ aq->rq_mask.band_prof_id = GENMASK(9, 0);
+
+ /* Fill AQ info */
+ aq->qidx = rq_idx;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf)
+{
+ struct nix_bandprof_free_req *req;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+ req->prof_idx[BAND_PROF_LEAF_LAYER][0] = leaf;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* Remove RQ's policer mapping */
+ for (qidx = 0; qidx < hw->rx_queues; qidx++)
+ cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, false);
+
+ rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+ u32 burst_exp, burst_mantissa;
+ u32 rate_exp, rate_mantissa;
+ u32 rdiv;
+
+ /* Get exponent and mantissa values for the desired rate */
+ cn10k_get_ingress_burst_cfg(burst, &burst_exp, &burst_mantissa);
+ cn10k_get_ingress_rate_cfg(rate, &rate_exp, &rate_mantissa, &rdiv);
+
+ /* Init bandwidth profile */
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Set initial color mode to blind */
+ aq->prof.icolor = 0x03;
+ aq->prof_mask.icolor = 0x03;
+
+ /* Set rate and burst values */
+ aq->prof.cir_exponent = rate_exp;
+ aq->prof_mask.cir_exponent = 0x1F;
+
+ aq->prof.cir_mantissa = rate_mantissa;
+ aq->prof_mask.cir_mantissa = 0xFF;
+
+ aq->prof.cbs_exponent = burst_exp;
+ aq->prof_mask.cbs_exponent = 0x1F;
+
+ aq->prof.cbs_mantissa = burst_mantissa;
+ aq->prof_mask.cbs_mantissa = 0xFF;
+
+ aq->prof.rdiv = rdiv;
+ aq->prof_mask.rdiv = 0xF;
+
+ if (pps) {
+ /* The number of tokens decremented is calculated according to
+ * the following equation:
+ * max([ LMODE ? 0 : (packet_length - LXPTR)] +
+ *     ([ADJUST_MANTISSA]/256 - 1) * 2^[ADJUST_EXPONENT],
+ *     1/256)
+ * If LMODE is 1 then rate limiting is based on PPS,
+ * otherwise on bps.
+ * The ADJUST value specifies a token cost per packet, in
+ * contrast to the packet length which specifies a cost per
+ * byte. To rate limit based on PPS the adjust mantissa is
+ * set to 384 and the exponent to 1 so that the number of
+ * tokens decremented becomes 1, i.e. 1 token per packet.
+ */
+ aq->prof.adjust_exponent = 1;
+ aq->prof_mask.adjust_exponent = 0x1F;
+
+ aq->prof.adjust_mantissa = 384;
+ aq->prof_mask.adjust_mantissa = 0x1FF;
+
+ aq->prof.lmode = 0x1;
+ aq->prof_mask.lmode = 0x1;
+ }
+
+ /* Two rate three color marker
+ * With PEIR/EIR set to zero, color will be either green or red
+ */
+ aq->prof.meter_algo = 2;
+ aq->prof_mask.meter_algo = 0x3;
+
+ aq->prof.rc_action = NIX_RX_BAND_PROF_ACTIONRESULT_DROP;
+ aq->prof_mask.rc_action = 0x3;
+
+ aq->prof.yc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.yc_action = 0x3;
+
+ aq->prof.gc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.gc_action = 0x3;
+
+ /* Setting the exponent to 24 and the mantissa to 0 configures
+ * the bucket with zero values, leaving it unused. The peak
+ * information rate and excess information rate buckets are
+ * unused here.
+ */
+ aq->prof.peir_exponent = 24;
+ aq->prof_mask.peir_exponent = 0x1F;
+
+ aq->prof.peir_mantissa = 0;
+ aq->prof_mask.peir_mantissa = 0xFF;
+
+ aq->prof.pebs_exponent = 24;
+ aq->prof_mask.pebs_exponent = 0x1F;
+
+ aq->prof.pebs_mantissa = 0;
+ aq->prof_mask.pebs_mantissa = 0xFF;
+
+ /* Fill AQ info */
+ aq->qidx = profile;
+ aq->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
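
Plugging the PPS values chosen above into the token equation confirms one token per packet:

/*   ([ADJUST_MANTISSA]/256 - 1) * 2^[ADJUST_EXPONENT]
 *     = (384/256 - 1) * 2^1
 *     = 0.5 * 2
 *     = 1
 *
 * With LMODE = 1 the packet-length term is dropped, so each packet
 * costs exactly one token and the profile meters on packets/sec.
 */
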
+
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ rc = cn10k_set_ipolicer_rate(pfvf, hw->matchall_ipolicer, burst,
+ rate, false);
+ if (rc)
+ goto out;
+
+ for (qidx = 0; qidx < hw->rx_queues; qidx++) {
+ rc = cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, true);
+ if (rc)
+ break;
+ }
+
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index e0bc595cbb78..1a1ae334477d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -12,6 +12,16 @@
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
-int cn10k_pf_lmtst_init(struct otx2_nic *pf);
-int cn10k_vf_lmtst_init(struct otx2_nic *vf);
+int cn10k_lmtst_init(struct otx2_nic *pfvf);
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate);
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map);
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf);
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps);
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf);
#endif /* CN10K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index cf7875d51d87..70fcc1fd962f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -210,6 +210,9 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
/* update dmac field in vlan offload rule */
if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
otx2_install_rxvlan_offload_flow(pfvf);
+ /* update dmac address in ntuple and DMAC filter list */
+ if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_update_pfmac_flow(pfvf);
} else {
return -EPERM;
}
@@ -921,12 +924,14 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
aq->cq.drop_ena = 1;
- /* Enable receive CQ backpressure */
- aq->cq.bp_ena = 1;
- aq->cq.bpid = pfvf->bpid[0];
+ if (!is_otx2_lbkvf(pfvf->pdev)) {
+ /* Enable receive CQ backpressure */
+ aq->cq.bp_ena = 1;
+ aq->cq.bpid = pfvf->bpid[0];
- /* Set backpressure level is same as cq pass level */
- aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ /* Set the backpressure level the same as the CQ pass level */
+ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ }
}
/* Fill AQ info */
@@ -1183,7 +1188,7 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
/* Enable backpressure for RQ aura */
- if (aura_id < pfvf->hw.rqpool_cnt) {
+ if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
aq->aura.bp_ena = 0;
aq->aura.nix0_bpid = pfvf->bpid[0];
/* Set backpressure level for RQ's Aura */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 45730d0d92f2..8fd58cd07f50 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -180,6 +180,7 @@ struct otx2_hw {
/* NIX */
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 matchall_ipolicer;
/* HW settings, coalescing etc */
u16 rx_chan_base;
@@ -217,12 +218,17 @@ struct otx2_hw {
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
-#define NIX_LMTID_BASE 72 /* RX + TX + XDP */
- void __iomem *lmt_base;
+#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
+ u64 *lmt_base;
u64 *npa_lmt_base;
u64 *nix_lmt_base;
};
+enum vfperm {
+ OTX2_RESET_VF_PERM,
+ OTX2_TRUSTED_VF,
+};
+
struct otx2_vf_config {
struct otx2_nic *pf;
struct delayed_work link_event_work;
@@ -230,6 +236,7 @@ struct otx2_vf_config {
u8 mac[ETH_ALEN];
u16 vlan;
int tx_vtag_idx;
+ bool trusted;
};
struct flr_work {
@@ -261,24 +268,29 @@ struct otx2_mac_table {
struct otx2_flow_config {
u16 entry[NPC_MAX_NONCONTIG_ENTRIES];
- u32 nr_flows;
-#define OTX2_MAX_NTUPLE_FLOWS 32
-#define OTX2_MAX_UNICAST_FLOWS 8
-#define OTX2_MAX_VLAN_FLOWS 1
-#define OTX2_MAX_TC_FLOWS OTX2_MAX_NTUPLE_FLOWS
-#define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \
+ u16 *flow_ent;
+ u16 *def_ent;
+ u16 nr_flows;
+#define OTX2_DEFAULT_FLOWCOUNT 16
+#define OTX2_MAX_UNICAST_FLOWS 8
+#define OTX2_MAX_VLAN_FLOWS 1
+#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT
+#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
OTX2_MAX_UNICAST_FLOWS + \
OTX2_MAX_VLAN_FLOWS)
- u32 ntuple_offset;
- u32 unicast_offset;
- u32 rx_vlan_offset;
- u32 vf_vlan_offset;
-#define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */
+ u16 ntuple_offset;
+ u16 unicast_offset;
+ u16 rx_vlan_offset;
+ u16 vf_vlan_offset;
+#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
- u32 tc_flower_offset;
- u32 ntuple_max_flows;
- u32 tc_max_flows;
+ u16 tc_flower_offset;
+ u16 ntuple_max_flows;
+ u16 tc_max_flows;
+ u8 dmacflt_max_flows;
+ u8 *bmap_to_dmacindex;
+ unsigned long dmacflt_bmap;
struct list_head flow_list;
};
@@ -319,6 +331,8 @@ struct otx2_nic {
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
+#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
+#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
u64 flags;
struct otx2_qset qset;
@@ -353,8 +367,9 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
/* LMTST Lines info */
+ struct qmem *dync_lmt;
u16 tot_lmt_lines;
- u16 nix_lmt_lines;
+ u16 npa_lmt_lines;
u32 nix_lmt_size;
struct otx2_ptp *ptp;
@@ -362,6 +377,7 @@ struct otx2_nic {
struct otx2_flow_config *flow_cfg;
struct otx2_tc_info tc_info;
+ unsigned long rq_bmap;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -822,4 +838,11 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
+/* CGX/RPM DMAC filters support */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
new file mode 100644
index 000000000000..383a6b5cb698
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "otx2_common.h"
+
+static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
+ u8 *dmac_index)
+{
+ struct cgx_mac_addr_add_req *req;
+ struct cgx_mac_addr_add_rsp *rsp;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ if (!err) {
+ rsp = (struct cgx_mac_addr_add_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ *dmac_index = rsp->index;
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
+{
+ struct cgx_mac_addr_set_or_get *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
+{
+ u8 *dmacindex;
+
+ /* Store the dmac index returned by the CGX/RPM driver; it will
+ * be used for MAC address update/remove
+ */
+ dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_add_pfmac(pf);
+ else
+ return otx2_dmacflt_do_add(pf, mac, dmacindex);
+}
+
+static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
+ u8 dmac_index)
+{
+ struct cgx_mac_addr_del_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->index = dmac_index;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return err;
+}
+
+static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
+{
+ struct msg_req *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
+ u8 bit_pos)
+{
+ u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_remove_pfmac(pf);
+ else
+ return otx2_dmacflt_do_remove(pf, mac, dmacindex);
+}
+
+/* CGX/RPM blocks support a maximum of 32 unicast DMAC entries.
+ * In a typical configuration the MAC block is associated with
+ * 4 LMACs, so each LMAC gets 8 DMAC entries.
+ */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
+{
+ struct cgx_max_dmac_entries_get_rsp *rsp;
+ struct msg_req *msg;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox);
+
+ if (!msg) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ rsp = (struct cgx_max_dmac_entries_get_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
+ pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
+
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
+{
+ struct cgx_mac_addr_update_req *req;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox);
+
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index f4962a97a075..b906a0eb6e0d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -286,21 +286,26 @@ static int otx2_set_channels(struct net_device *dev,
if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
+ if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
+ netdev_err(dev,
+ "Receive queues are in use by TC police action\n");
+ return -EINVAL;
+ }
+
if (if_up)
dev->netdev_ops->ndo_stop(dev);
err = otx2_set_real_num_queues(dev, channel->tx_count,
channel->rx_count);
if (err)
- goto fail;
+ return err;
pfvf->hw.rx_queues = channel->rx_count;
pfvf->hw.tx_queues = channel->tx_count;
pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
-fail:
if (if_up)
- dev->netdev_ops->ndo_open(dev);
+ err = dev->netdev_ops->ndo_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
pfvf->hw.tx_queues, pfvf->hw.rx_queues);
@@ -404,7 +409,7 @@ static int otx2_set_ringparam(struct net_device *netdev,
qs->rqe_cnt = rx_count;
if (if_up)
- netdev->netdev_ops->ndo_open(netdev);
+ return netdev->netdev_ops->ndo_open(netdev);
return 0;
}
@@ -786,6 +791,10 @@ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
+ *rss_context >= MAX_RSS_GROUPS)
+ return -EINVAL;
+
rss = &pfvf->hw.rss_info;
if (!rss->enable) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 0b4fa92ba821..4d9de525802d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -18,15 +18,133 @@ struct otx2_flow {
bool is_vf;
u8 rss_ctx_id;
int vf;
+ bool dmac_filter;
};
+enum dmac_req {
+ DMAC_ADDR_UPDATE,
+ DMAC_ADDR_DEL
+};
+
+static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
+{
+ devm_kfree(pfvf->dev, flow_cfg->flow_ent);
+ flow_cfg->flow_ent = NULL;
+ flow_cfg->ntuple_max_flows = 0;
+ flow_cfg->tc_max_flows = 0;
+}
+
+static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_free_entry_req *req;
+ int ent, err;
+
+ if (!flow_cfg->ntuple_max_flows)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) {
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
+ if (!req)
+ break;
+
+ req->entry = flow_cfg->flow_ent[ent];
+
+ /* Send message to AF to free MCAM entries */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ break;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+ otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
+ return 0;
+}
+
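+/* (Re)size the ntuple MCAM pool: previously allocated entries are
+ * freed first, then up to 'count' non-contiguous entries are requested
+ * from the AF at higher priority relative to the first default entry.
+ */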
+static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ int ent, allocated = 0;
+
+ /* Free current ones and allocate new ones with requested count */
+ otx2_free_ntuple_mcam_entries(pfvf);
+
+ if (!count)
+ return 0;
+
+ flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
+ sizeof(u16), GFP_KERNEL);
+ if (!flow_cfg->flow_ent)
+ return -ENOMEM;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+	/* A single request can allocate at most NPC_MAX_NONCONTIG_ENTRIES
+	 * MCAM entries, so loop until the requested count is reached.
+	 */
+ while (allocated < count) {
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
+ if (!req)
+ goto exit;
+
+ req->contig = false;
+ req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
+ NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
+ req->priority = NPC_MCAM_HIGHER_PRIO;
+ req->ref_entry = flow_cfg->def_ent[0];
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&pfvf->mbox))
+ goto exit;
+
+		rsp = (struct npc_mcam_alloc_entry_rsp *)
+		      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+
+ for (ent = 0; ent < rsp->count; ent++)
+ flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
+
+ allocated += rsp->count;
+
+		/* If this request was not fully satisfied, the AF has no
+		 * more free entries, so stop sending further requests.
+		 */
+ if (rsp->count != req->count)
+ break;
+ }
+
+exit:
+ mutex_unlock(&pfvf->mbox.lock);
+
+ flow_cfg->ntuple_offset = 0;
+ flow_cfg->ntuple_max_flows = allocated;
+ flow_cfg->tc_max_flows = allocated;
+
+ if (allocated != count)
+ netdev_info(pfvf->netdev,
+ "Unable to allocate %d MCAM entries for ntuple, got %d\n",
+ count, allocated);
+
+ return allocated;
+}
+
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct npc_mcam_alloc_entry_req *req;
struct npc_mcam_alloc_entry_rsp *rsp;
int vf_vlan_max_flows;
- int i;
+ int ent, count;
+
+ vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
+ count = OTX2_MAX_UNICAST_FLOWS +
+ OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;
+
+ flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
+ sizeof(u16), GFP_KERNEL);
+ if (!flow_cfg->def_ent)
+ return -ENOMEM;
mutex_lock(&pfvf->mbox.lock);
@@ -36,9 +154,8 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
return -ENOMEM;
}
- vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
req->contig = false;
- req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;
+ req->count = count;
/* Send message to AF */
if (otx2_sync_mbox_msg(&pfvf->mbox)) {
@@ -51,37 +168,36 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
if (rsp->count != req->count) {
netdev_info(pfvf->netdev,
- "Unable to allocate %d MCAM entries, got %d\n",
- req->count, rsp->count);
- /* support only ntuples here */
- flow_cfg->ntuple_max_flows = rsp->count;
- flow_cfg->ntuple_offset = 0;
- pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
- flow_cfg->tc_max_flows = flow_cfg->ntuple_max_flows;
- pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
- } else {
- flow_cfg->vf_vlan_offset = 0;
- flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
- vf_vlan_max_flows;
- flow_cfg->tc_flower_offset = flow_cfg->ntuple_offset;
- flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
- OTX2_MAX_NTUPLE_FLOWS;
- flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
- OTX2_MAX_UNICAST_FLOWS;
- pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
- pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
- pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
- pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
- pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
- }
-
- for (i = 0; i < rsp->count; i++)
- flow_cfg->entry[i] = rsp->entry_list[i];
+ "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
+ mutex_unlock(&pfvf->mbox.lock);
+ devm_kfree(pfvf->dev, flow_cfg->def_ent);
+ return 0;
+ }
- pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ for (ent = 0; ent < rsp->count; ent++)
+ flow_cfg->def_ent[ent] = rsp->entry_list[ent];
+
+ flow_cfg->vf_vlan_offset = 0;
+ flow_cfg->unicast_offset = vf_vlan_max_flows;
+ flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
+ OTX2_MAX_UNICAST_FLOWS;
+ pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
mutex_unlock(&pfvf->mbox.lock);
+ /* Allocate entries for Ntuple filters */
+ count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ if (count <= 0) {
+ otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
+ return 0;
+ }
+
+ pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
+
return 0;
}
@@ -96,18 +212,35 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
- pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
- pf->flow_cfg->tc_max_flows = pf->flow_cfg->ntuple_max_flows;
-
err = otx2_alloc_mcam_entries(pf);
if (err)
return err;
+	/* Check whether the default MCAM entries were allocated */
+ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
+ return 0;
+
pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
if (!pf->mac_table)
return -ENOMEM;
+ otx2_dmacflt_get_max_cnt(pf);
+
+	/* No DMAC filters are available */
+ if (!pf->flow_cfg->dmacflt_max_flows)
+ return 0;
+
+ pf->flow_cfg->bmap_to_dmacindex =
+ devm_kzalloc(pf->dev, sizeof(u8) *
+ pf->flow_cfg->dmacflt_max_flows,
+ GFP_KERNEL);
+
+ if (!pf->flow_cfg->bmap_to_dmacindex)
+ return -ENOMEM;
+
+ pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
+
return 0;
}
@@ -146,7 +279,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
ether_addr_copy(pf->mac_table[i].addr, mac);
pf->mac_table[i].inuse = true;
pf->mac_table[i].mcam_entry =
- flow_cfg->entry[i + flow_cfg->unicast_offset];
+ flow_cfg->def_ent[i + flow_cfg->unicast_offset];
req->entry = pf->mac_table[i].mcam_entry;
break;
}
@@ -169,6 +302,12 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
struct otx2_nic *pf = netdev_priv(netdev);
+ if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(netdev,
+			    "Add %pM to the CGX/RPM DMAC filter list as well\n",
+ mac);
+
return otx2_do_add_macfilter(pf, mac);
}
@@ -240,12 +379,22 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
list_add(&flow->list, head);
}
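+/* ethtool flow IDs 0..ntuple_max_flows-1 map to NPC MCAM rules, while
+ * IDs from ntuple_max_flows onwards map to CGX/RPM DMAC filter slots
+ * (see how fsp->location is rewritten in otx2_add_flow()).
+ */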
+static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
+{
+ if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
+ bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows))
+ return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
+ else
+ return flow_cfg->ntuple_max_flows;
+}
+
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
u32 location)
{
struct otx2_flow *iter;
- if (location >= pfvf->flow_cfg->ntuple_max_flows)
+ if (location >= otx2_get_maxflows(pfvf->flow_cfg))
return -EINVAL;
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
@@ -267,7 +416,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
int idx = 0;
int err = 0;
- nfc->data = pfvf->flow_cfg->ntuple_max_flows;
+ nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
while ((!err || err == -ENOENT) && idx < rule_cnt) {
err = otx2_get_flow(pfvf, nfc, location);
if (!err)
@@ -551,6 +700,7 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
req->features |= BIT_ULL(NPC_IPPROTO_AH);
else
req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
default:
break;
}
@@ -648,6 +798,32 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
return 0;
}
+static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ u64 ring_cookie = fsp->ring_cookie;
+ u32 flow_type;
+
+ if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
+ return false;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+
+	/* CGX/RPM block DMAC filtering is configured for allow-listing,
+	 * hence check for an action other than DROP.
+	 */
+ if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
+ !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
+ if (is_zero_ether_addr(eth_mask->h_dest) &&
+ is_valid_ether_addr(eth_hdr->h_dest))
+ return true;
+ }
+
+ return false;
+}
+
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
u64 ring_cookie = flow->flow_spec.ring_cookie;
@@ -706,14 +882,46 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
return err;
}
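+/* Bit 0 of dmacflt_bmap is reserved for the PF's own MAC address; the
+ * corresponding filter is installed implicitly before the first user
+ * DMAC filter is added and removed only when the last one goes away.
+ */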
+static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
+ struct otx2_flow *flow)
+{
+ struct otx2_flow *pf_mac;
+ struct ethhdr *eth_hdr;
+
+ pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
+ if (!pf_mac)
+ return -ENOMEM;
+
+ pf_mac->entry = 0;
+ pf_mac->dmac_filter = true;
+ pf_mac->location = pfvf->flow_cfg->ntuple_max_flows;
+ memcpy(&pf_mac->flow_spec, &flow->flow_spec,
+ sizeof(struct ethtool_rx_flow_spec));
+ pf_mac->flow_spec.location = pf_mac->location;
+
+ /* Copy PF mac address */
+ eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
+ ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
+
+ /* Install DMAC filter with PF mac address */
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
+
+ otx2_add_flow_to_list(pfvf, pf_mac);
+ pfvf->flow_cfg->nr_flows++;
+ set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+
+ return 0;
+}
+
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct ethtool_rx_flow_spec *fsp = &nfc->fs;
struct otx2_flow *flow;
+ struct ethhdr *eth_hdr;
bool new = false;
+ int err = 0;
u32 ring;
- int err;
ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
@@ -722,17 +930,15 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
- if (fsp->location >= flow_cfg->ntuple_max_flows)
+ if (fsp->location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, fsp->location);
if (!flow) {
- flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
if (!flow)
return -ENOMEM;
flow->location = fsp->location;
- flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
- flow->location];
new = true;
}
/* struct copy */
@@ -741,7 +947,54 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (fsp->flow_type & FLOW_RSS)
flow->rss_ctx_id = nfc->rss_context;
- err = otx2_add_flow_msg(pfvf, flow);
+ if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
+ eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* Sync dmac filter table with updated fields */
+ if (flow->dmac_filter)
+ return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
+ flow->entry);
+
+ if (bitmap_full(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows)) {
+ netdev_warn(pfvf->netdev,
+				    "Can't insert rule %d as the max allowed DMAC filters (%d) are in use\n",
+ flow->location +
+ flow_cfg->dmacflt_max_flows,
+ flow_cfg->dmacflt_max_flows);
+ err = -EINVAL;
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+		/* Install the PF MAC address into the DMAC filter list */
+ if (!test_bit(0, &flow_cfg->dmacflt_bmap))
+ otx2_add_flow_with_pfmac(pfvf, flow);
+
+ flow->dmac_filter = true;
+ flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows);
+ fsp->location = flow_cfg->ntuple_max_flows + flow->entry;
+ flow->flow_spec.location = fsp->location;
+ flow->location = fsp->location;
+
+ set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
+
+ } else {
+ if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) {
+ netdev_warn(pfvf->netdev,
+				    "Can't insert non dmac ntuple rule at %d, allowed range is 0-%d\n",
+ flow->location,
+ flow_cfg->ntuple_max_flows - 1);
+ err = -EINVAL;
+ } else {
+ flow->entry = flow_cfg->flow_ent[flow->location];
+ err = otx2_add_flow_msg(pfvf, flow);
+ }
+ }
+
if (err) {
if (new)
kfree(flow);
@@ -779,20 +1032,70 @@ static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
return err;
}
+static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+ bool found = false;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->dmac_filter && iter->entry == 0) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ if (req == DMAC_ADDR_DEL) {
+ otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ 0);
+ clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ found = true;
+ } else {
+ ether_addr_copy(eth_hdr->h_dest,
+ pfvf->netdev->dev_addr);
+ otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
+ }
+ break;
+ }
+ }
+
+ if (found) {
+ list_del(&iter->list);
+ kfree(iter);
+ pfvf->flow_cfg->nr_flows--;
+ }
+}
+
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct otx2_flow *flow;
int err;
- if (location >= flow_cfg->ntuple_max_flows)
+ if (location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, location);
if (!flow)
return -ENOENT;
- err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ if (flow->dmac_filter) {
+ struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+		/* Removing the DMAC filter for the interface's own MAC is not allowed */
+ if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
+ return -EPERM;
+
+ err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ flow->entry);
+ clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+		/* If all user DMAC filters are removed, also delete the
+		 * filter carrying the interface MAC address so the CGX/RPM
+		 * block falls back to promiscuous mode.
+		 */
+ if (bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows) == 1)
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
+ } else {
+ err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ }
+
if (err)
return err;
@@ -836,9 +1139,8 @@ int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
return -ENOMEM;
}
- req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
- req->end = flow_cfg->entry[flow_cfg->ntuple_offset +
- flow_cfg->ntuple_max_flows - 1];
+ req->start = flow_cfg->flow_ent[0];
+ req->end = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1];
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
@@ -905,7 +1207,7 @@ int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
return -ENOMEM;
}
- req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
+ req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
req->intf = NIX_INTF_RX;
ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
eth_broadcast_addr((u8 *)&req->mask.dmac);
@@ -934,7 +1236,7 @@ static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
return -ENOMEM;
}
- req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
+ req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
/* Send message to AF */
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
@@ -990,3 +1292,22 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
mutex_unlock(&pf->mbox.lock);
return rsp_hdr->rc;
}
+
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+
+ list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
+ if (iter->dmac_filter) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ otx2_dmacflt_add(pf, eth_hdr->h_dest,
+ iter->entry);
+ }
+ }
+}
+
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
+{
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 03004fdac0c6..2c24944a4dba 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -39,6 +39,8 @@ MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
+static void otx2_vf_link_event_task(struct work_struct *work);
+
enum {
TYPE_PFAF,
TYPE_PFVF,
@@ -1108,6 +1110,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
+ if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(pf->netdev,
+ "CGX/RPM internal loopback might not work as DMAC filters are active\n");
+
mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
@@ -1459,6 +1466,9 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_free_cq_res(pf);
+	/* Free all allocated ingress bandwidth profiles */
+ cn10k_free_all_ipolicers(pf);
+
mutex_lock(&mbox->lock);
/* Reset NIX LF */
free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
@@ -1528,10 +1538,10 @@ int otx2_open(struct net_device *netdev)
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
/* Reserve LMT lines for NPA AURA batch free */
- pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base;
+ pf->hw.npa_lmt_base = pf->hw.lmt_base;
/* Reserve LMT lines for NIX TX */
- pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base +
- (NIX_LMTID_BASE * LMT_LINE_SIZE));
+ pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base +
+ (pf->npa_lmt_lines * LMT_LINE_SIZE));
}
err = otx2_init_hw_resources(pf);
@@ -1639,6 +1649,10 @@ int otx2_open(struct net_device *netdev)
/* Restore pause frame settings */
otx2_config_pause_frm(pf);
+ /* Install DMAC Filters */
+ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_reinstall_flows(pf);
+
err = otx2_rxtx_enable(pf, true);
if (err)
goto err_tx_stop_queues;
@@ -1648,6 +1662,7 @@ int otx2_open(struct net_device *netdev)
err_tx_stop_queues:
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
err_free_cints:
otx2_free_cints(pf, qidx);
vec = pci_irq_vector(pf->pdev,
@@ -1675,6 +1690,10 @@ int otx2_stop(struct net_device *netdev)
struct otx2_rss_info *rss;
int qidx, vec, wrk;
+	/* If the DOWN flag is set, resources are already freed */
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return 0;
+
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
@@ -1820,9 +1839,11 @@ static void otx2_do_set_rx_mode(struct work_struct *work)
if (promisc)
req->mode |= NIX_RX_MODE_PROMISC;
- else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
req->mode |= NIX_RX_MODE_ALLMULTI;
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
otx2_sync_mbox_msg(&pf->mbox);
mutex_unlock(&pf->mbox.lock);
}
@@ -2044,7 +2065,7 @@ static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!netif_running(netdev))
return -EAGAIN;
- if (vf >= pci_num_vf(pdev))
+ if (vf >= pf->total_vfs)
return -EINVAL;
if (!is_valid_ether_addr(mac))
@@ -2055,7 +2076,8 @@ static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
ret = otx2_do_set_vf_mac(pf, vf, mac);
if (ret == 0)
- dev_info(&pdev->dev, "Reload VF driver to apply the changes\n");
+ dev_info(&pdev->dev,
+ "Load/Reload VF driver\n");
return ret;
}
@@ -2104,7 +2126,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
}
idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
del_req->entry =
- flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
err = otx2_sync_mbox_msg(&pf->mbox);
if (err)
goto out;
@@ -2117,7 +2139,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
}
idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
del_req->entry =
- flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
err = otx2_sync_mbox_msg(&pf->mbox);
goto out;
@@ -2131,7 +2153,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
}
idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
- req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
req->packet.vlan_tci = htons(vlan);
req->mask.vlan_tci = htons(VLAN_VID_MASK);
/* af fills the destination mac addr */
@@ -2182,7 +2204,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
eth_zero_addr((u8 *)&req->mask.dmac);
idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
- req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
req->features = BIT_ULL(NPC_DMAC);
req->channel = pf->hw.tx_chan_base;
req->intf = NIX_INTF_TX;
@@ -2241,10 +2263,63 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
ivi->vf = vf;
ether_addr_copy(ivi->mac, config->mac);
ivi->vlan = config->vlan;
+ ivi->trusted = config->trusted;
return 0;
}
+static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
+ int req_perm)
+{
+ struct set_vf_perm *req;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+	/* Let AF reset VF permissions, since SR-IOV is being disabled */
+ if (req_perm == OTX2_RESET_VF_PERM) {
+ req->flags |= RESET_VF_PERM;
+ } else if (req_perm == OTX2_TRUSTED_VF) {
+ if (pf->vf_configs[vf].trusted)
+ req->flags |= VF_TRUSTED;
+ }
+
+ req->vf = vf;
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
+
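+/* Invoked via .ndo_set_vf_trust, e.g. "ip link set <pf> vf <n> trust on" */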
+static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
+ bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ int rc;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ if (pf->vf_configs[vf].trusted == enable)
+ return 0;
+
+ pf->vf_configs[vf].trusted = enable;
+ rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
+
+ if (rc)
+ pf->vf_configs[vf].trusted = !enable;
+ else
+ netdev_info(pf->netdev, "VF %d is %strusted\n",
+ vf, enable ? "" : "not ");
+ return rc;
+}
+
static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
@@ -2261,6 +2336,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
.ndo_setup_tc = otx2_setup_tc,
+ .ndo_set_vf_trust = otx2_ndo_set_vf_trust,
};
static int otx2_wq_init(struct otx2_nic *pf)
@@ -2315,6 +2391,40 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
return otx2_register_mbox_intr(pf, false);
}
+static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
+{
+ int i;
+
+ pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
+ sizeof(struct otx2_vf_config),
+ GFP_KERNEL);
+ if (!pf->vf_configs)
+ return -ENOMEM;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ pf->vf_configs[i].pf = pf;
+ pf->vf_configs[i].intf_down = true;
+ pf->vf_configs[i].trusted = false;
+ INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
+ otx2_vf_link_event_task);
+ }
+
+ return 0;
+}
+
+static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
+{
+ int i;
+
+ if (!pf->vf_configs)
+ return;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
+ otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
+ }
+}
+
static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -2430,7 +2540,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- err = cn10k_pf_lmtst_init(pf);
+ err = cn10k_lmtst_init(pf);
if (err)
goto err_detach_rsrc;
@@ -2509,6 +2619,11 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_mcam_flow_del;
+ /* Initialize SR-IOV resources */
+ err = otx2_sriov_vfcfg_init(pf);
+ if (err)
+ goto err_pf_sriov_init;
+
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
@@ -2518,6 +2633,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+err_pf_sriov_init:
+ otx2_shutdown_tc(pf);
err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
@@ -2527,8 +2644,8 @@ err_del_mcam_entries:
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
- if (hw->lmt_base)
- iounmap(hw->lmt_base);
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
otx2_disable_mbox_intr(pf);
@@ -2576,7 +2693,7 @@ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct otx2_nic *pf = netdev_priv(netdev);
- int ret, i;
+ int ret;
/* Init PF <=> VF mailbox stuff */
ret = otx2_pfvf_mbox_init(pf, numvfs);
@@ -2587,23 +2704,9 @@ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
if (ret)
goto free_mbox;
- pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config),
- GFP_KERNEL);
- if (!pf->vf_configs) {
- ret = -ENOMEM;
- goto free_intr;
- }
-
- for (i = 0; i < numvfs; i++) {
- pf->vf_configs[i].pf = pf;
- pf->vf_configs[i].intf_down = true;
- INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
- otx2_vf_link_event_task);
- }
-
ret = otx2_pf_flr_init(pf, numvfs);
if (ret)
- goto free_configs;
+ goto free_intr;
ret = otx2_register_flr_me_intr(pf, numvfs);
if (ret)
@@ -2618,8 +2721,6 @@ free_flr_intr:
otx2_disable_flr_me_intr(pf);
free_flr:
otx2_flr_wq_destroy(pf);
-free_configs:
- kfree(pf->vf_configs);
free_intr:
otx2_disable_pfvf_mbox_intr(pf, numvfs);
free_mbox:
@@ -2632,17 +2733,12 @@ static int otx2_sriov_disable(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct otx2_nic *pf = netdev_priv(netdev);
int numvfs = pci_num_vf(pdev);
- int i;
if (!numvfs)
return 0;
pci_disable_sriov(pdev);
- for (i = 0; i < pci_num_vf(pdev); i++)
- cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
- kfree(pf->vf_configs);
-
otx2_disable_flr_me_intr(pf);
otx2_flr_wq_destroy(pf);
otx2_disable_pfvf_mbox_intr(pf, numvfs);
@@ -2682,6 +2778,7 @@ static void otx2_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
otx2_sriov_disable(pf->pdev);
+ otx2_sriov_vfcfg_cleanup(pf);
if (pf->otx2_wq)
destroy_workqueue(pf->otx2_wq);
@@ -2689,9 +2786,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(pf);
otx2_shutdown_tc(pf);
otx2_detach_resources(&pf->mbox);
- if (pf->hw.lmt_base)
- iounmap(pf->hw.lmt_base);
-
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 51157b283f6f..972b202b9884 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -15,6 +15,7 @@
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>
+#include "cn10k.h"
#include "otx2_common.h"
/* Egress rate limiting definitions */
@@ -41,11 +42,14 @@ struct otx2_tc_flow_stats {
struct otx2_tc_flow {
struct rhash_head node;
unsigned long cookie;
- u16 entry;
unsigned int bitpos;
struct rcu_head rcu;
struct otx2_tc_flow_stats stats;
spinlock_t lock; /* lock for stats */
+ u16 rq;
+ u16 entry;
+ u16 leaf_profile;
+ bool is_act_police;
};
static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
@@ -220,17 +224,76 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
return err;
}
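+/* Ingress policing (CN10K): matched packets are steered to a dedicated
+ * RQ via NIX_RX_ACTIONOP_UCAST, and that RQ is mapped to a leaf
+ * bandwidth profile programmed with the requested rate and burst.
+ */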
+static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ struct otx2_tc_flow *node,
+ struct flow_cls_offload *f,
+ u64 rate, u32 burst, u32 mark,
+ struct npc_install_flow_req *req, bool pps)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct otx2_hw *hw = &nic->hw;
+ int rq_idx, rc;
+
+ rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
+ if (rq_idx >= hw->rx_queues) {
+ NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
+ return -EINVAL;
+ }
+
+ mutex_lock(&nic->mbox.lock);
+
+ rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
+ if (rc) {
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+ }
+
+ rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
+ if (rc)
+ goto free_leaf;
+
+ rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
+ if (rc)
+ goto free_leaf;
+
+ mutex_unlock(&nic->mbox.lock);
+
+ req->match_id = mark & 0xFFFFULL;
+ req->index = rq_idx;
+ req->op = NIX_RX_ACTIONOP_UCAST;
+ set_bit(rq_idx, &nic->rq_bmap);
+ node->is_act_police = true;
+ node->rq = rq_idx;
+
+ return 0;
+
+free_leaf:
+ if (cn10k_free_leaf_profile(nic, node->leaf_profile))
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ node->leaf_profile);
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+}
+
static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action *flow_action,
- struct npc_install_flow_req *req)
+ struct npc_install_flow_req *req,
+ struct flow_cls_offload *f,
+ struct otx2_tc_flow *node)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct flow_action_entry *act;
struct net_device *target;
struct otx2_nic *priv;
+ u32 burst, mark = 0;
+ u8 nr_police = 0;
+ bool pps = false;
+ u64 rate;
int i;
if (!flow_action_has_entries(flow_action)) {
- netdev_info(nic->netdev, "no tc actions specified");
+ NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
return -EINVAL;
}
@@ -247,8 +310,8 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
priv = netdev_priv(target);
/* npc_install_flow_req doesn't support passing a target pcifunc */
if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
- netdev_info(nic->netdev,
- "can't redirect to other pf/vf\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't redirect to other pf/vf");
return -EOPNOTSUPP;
}
req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
@@ -259,18 +322,55 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
break;
+ case FLOW_ACTION_POLICE:
+ /* Ingress ratelimiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps > 0) {
+ rate = act->police.rate_bytes_ps * 8;
+ burst = act->police.burst;
+ } else if (act->police.rate_pkt_ps > 0) {
+				/* The algorithm that derives the rate
+				 * mantissa and exponent for a given token
+				 * rate (a token can be a byte or a packet)
+				 * requires the token rate to be multiplied
+				 * by 8.
+				 */
+ rate = act->police.rate_pkt_ps * 8;
+ burst = act->police.burst_pkt;
+ pps = true;
+ }
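+			/* e.g. a 1000 pkt/s policer is passed down as
+			 * rate = 8000, so byte and packet rates share the
+			 * same mantissa/exponent computation.
+			 */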
+ nr_police++;
+ break;
+ case FLOW_ACTION_MARK:
+ mark = act->mark;
+ break;
default:
return -EOPNOTSUPP;
}
}
+ if (nr_police > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "rate limit police offload requires a single action");
+ return -EOPNOTSUPP;
+ }
+
+ if (nr_police)
+ return otx2_tc_act_set_police(nic, node, f, rate, burst,
+ mark, req, pps);
+
return 0;
}
-static int otx2_tc_prepare_flow(struct otx2_nic *nic,
+static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
struct flow_cls_offload *f,
struct npc_install_flow_req *req)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct flow_msg *flow_spec = &req->packet;
struct flow_msg *flow_mask = &req->mask;
struct flow_dissector *dissector;
@@ -335,7 +435,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
flow_rule_match_eth_addrs(rule, &match);
if (!is_zero_ether_addr(match.mask->src)) {
- netdev_err(nic->netdev, "src mac match not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
return -EOPNOTSUPP;
}
@@ -353,11 +453,11 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
flow_rule_match_ip(rule, &match);
if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
match.mask->tos) {
- netdev_err(nic->netdev, "tos not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "tos not supported");
return -EOPNOTSUPP;
}
if (match.mask->ttl) {
- netdev_err(nic->netdev, "ttl not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
return -EOPNOTSUPP;
}
flow_spec->tos = match.key->tos;
@@ -413,8 +513,8 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
if (ipv6_addr_loopback(&match.key->dst) ||
ipv6_addr_loopback(&match.key->src)) {
- netdev_err(nic->netdev,
- "Flow matching on IPv6 loopback addr is not supported\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Flow matching IPv6 loopback addr not supported");
return -EOPNOTSUPP;
}
@@ -463,7 +563,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
req->features |= BIT_ULL(NPC_SPORT_SCTP);
}
- return otx2_tc_parse_actions(nic, &rule->action, req);
+ return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
@@ -498,6 +598,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
{
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *flow_node;
+ int err;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
@@ -508,6 +609,27 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
return -EINVAL;
}
+ if (flow_node->is_act_police) {
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
+ flow_node->leaf_profile, false);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unmapping RQ %d & profile %d failed\n",
+ flow_node->rq, flow_node->leaf_profile);
+
+ err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ flow_node->leaf_profile);
+
+ __clear_bit(flow_node->rq, &nic->rq_bmap);
+
+ mutex_unlock(&nic->mbox.lock);
+ }
+
otx2_del_mcam_flow_entry(nic, flow_node->entry);
WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
@@ -524,14 +646,21 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
static int otx2_tc_add_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *new_node, *old_node;
- struct npc_install_flow_req *req;
- int rc;
+ struct npc_install_flow_req *req, dummy;
+ int rc, err;
if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
return -ENOMEM;
+ if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Not enough MCAM space to add the flow");
+ return -ENOMEM;
+ }
+
/* allocate memory for the new flow and it's node */
new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
if (!new_node)
@@ -539,17 +668,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
spin_lock_init(&new_node->lock);
new_node->cookie = tc_flow_cmd->cookie;
- mutex_lock(&nic->mbox.lock);
- req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
- if (!req) {
- mutex_unlock(&nic->mbox.lock);
- return -ENOMEM;
- }
+ memset(&dummy, 0, sizeof(struct npc_install_flow_req));
- rc = otx2_tc_prepare_flow(nic, tc_flow_cmd, req);
+ rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
if (rc) {
- otx2_mbox_reset(&nic->mbox.mbox, 0);
- mutex_unlock(&nic->mbox.lock);
+ kfree_rcu(new_node, rcu);
return rc;
}
@@ -560,18 +683,22 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (old_node)
otx2_tc_del_flow(nic, tc_flow_cmd);
- if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
- netdev_err(nic->netdev, "Not enough MCAM space to add the flow\n");
- otx2_mbox_reset(&nic->mbox.mbox, 0);
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!req) {
mutex_unlock(&nic->mbox.lock);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto free_leaf;
}
+ memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
+ memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
+
new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
nic->flow_cfg->tc_max_flows);
req->channel = nic->hw.rx_chan_base;
- req->entry = nic->flow_cfg->entry[nic->flow_cfg->tc_flower_offset +
- nic->flow_cfg->tc_max_flows - new_node->bitpos];
+ req->entry = nic->flow_cfg->flow_ent[nic->flow_cfg->tc_flower_offset +
+ nic->flow_cfg->tc_max_flows - new_node->bitpos];
req->intf = NIX_INTF_RX;
req->set_cntr = 1;
new_node->entry = req->entry;
@@ -579,9 +706,10 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
/* Send message to AF */
rc = otx2_sync_mbox_msg(&nic->mbox);
if (rc) {
- netdev_err(nic->netdev, "Failed to install MCAM flow entry\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
mutex_unlock(&nic->mbox.lock);
- goto out;
+ kfree_rcu(new_node, rcu);
+ goto free_leaf;
}
mutex_unlock(&nic->mbox.lock);
@@ -591,12 +719,35 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (rc) {
otx2_del_mcam_flow_entry(nic, req->entry);
kfree_rcu(new_node, rcu);
- goto out;
+ goto free_leaf;
}
set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
tc_info->num_entries++;
-out:
+
+ return 0;
+
+free_leaf:
+ if (new_node->is_act_police) {
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
+ new_node->leaf_profile, false);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unmapping RQ %d & profile %d failed\n",
+ new_node->rq, new_node->leaf_profile);
+ err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ new_node->leaf_profile);
+
+ __clear_bit(new_node->rq, &nic->rq_bmap);
+
+ mutex_unlock(&nic->mbox.lock);
+ }
+
return rc;
}
@@ -675,6 +826,87 @@ static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
}
}
+static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct flow_action_entry *entry;
+ u64 rate;
+ int err;
+
+ err = otx2_tc_validate_flow(nic, actions, extack);
+ if (err)
+ return err;
+
+ if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+				   "Only one ingress MATCHALL ratelimiter can be offloaded");
+ return -ENOMEM;
+ }
+
+ entry = &cls->rule->action.entries[0];
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ /* Ingress ratelimiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ err = cn10k_alloc_matchall_ipolicer(nic);
+ if (err)
+ return err;
+
+ /* Convert to bits per second */
+ rate = entry->police.rate_bytes_ps * 8;
+ err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
+ if (err)
+ return err;
+ nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only police action supported with Ingress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ int err;
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ err = cn10k_free_matchall_ipolicer(nic);
+ nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ return err;
+}
+
+static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return otx2_tc_ingress_matchall_install(nic, cls_matchall);
+ case TC_CLSMATCHALL_DESTROY:
+ return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
+ case TC_CLSMATCHALL_STATS:
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
@@ -686,6 +918,8 @@ static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
switch (type) {
case TC_SETUP_CLSFLOWER:
return otx2_setup_tc_cls_flower(nic, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return otx2_setup_tc_ingress_matchall(nic, type_data);
default:
break;
}
@@ -775,6 +1009,9 @@ int otx2_init_tc(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
+	/* Exclude receive queue 0 from being used for the police action */
+ set_bit(0, &nic->rq_bmap);
+
tc->flow_ht_params = tc_flow_ht_params;
return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 52486c1f0973..2f144e2cf436 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -83,6 +83,7 @@ struct otx2_snd_queue {
u16 num_sqbs;
u16 sqe_thresh;
u8 sqe_per_sqb;
+ u32 lmt_id;
u64 io_addr;
u64 *aura_fc_addr;
u64 *lmt_addr;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 085be90a03eb..a8bee5aefec1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -395,6 +395,42 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static void otx2vf_set_rx_mode(struct net_device *netdev)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+
+ queue_work(vf->otx2_wq, &vf->rx_mode_work);
+}
+
+static void otx2vf_do_set_rx_mode(struct work_struct *work)
+{
+ struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work);
+ struct net_device *netdev = vf->netdev;
+ unsigned int flags = netdev->flags;
+ struct nix_rx_mode *req;
+
+ mutex_lock(&vf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox);
+ if (!req) {
+ mutex_unlock(&vf->mbox.lock);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ if (flags & IFF_PROMISC)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ if (flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
+ otx2_sync_mbox_msg(&vf->mbox);
+
+ mutex_unlock(&vf->mbox.lock);
+}
+
static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
bool if_up = netif_running(netdev);
@@ -432,12 +468,24 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_open = otx2vf_open,
.ndo_stop = otx2vf_stop,
.ndo_start_xmit = otx2vf_xmit,
+ .ndo_set_rx_mode = otx2vf_set_rx_mode,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2vf_change_mtu,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
};
+static int otx2_wq_init(struct otx2_nic *vf)
+{
+ vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
+ if (!vf->otx2_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode);
+ INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+ return 0;
+}
+
static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
{
struct otx2_hw *hw = &vf->hw;
@@ -561,7 +609,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- err = cn10k_vf_lmtst_init(vf);
+ err = cn10k_lmtst_init(vf);
if (err)
goto err_detach_rsrc;
@@ -588,8 +636,6 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(vf);
- INIT_WORK(&vf->reset_task, otx2vf_reset_task);
-
/* To distinguish, for LBK VFs set netdev name explicitly */
if (is_otx2_lbkvf(vf->pdev)) {
int n;
@@ -606,6 +652,10 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_detach_rsrc;
}
+ err = otx2_wq_init(vf);
+ if (err)
+ goto err_unreg_netdev;
+
otx2vf_set_ethtool_ops(netdev);
/* Enable pause frames by default */
@@ -614,9 +664,11 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+err_unreg_netdev:
+ unregister_netdev(netdev);
err_detach_rsrc:
- if (hw->lmt_base)
- iounmap(hw->lmt_base);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
otx2vf_disable_mbox_intr(vf);
@@ -644,12 +696,12 @@ static void otx2vf_remove(struct pci_dev *pdev)
cancel_work_sync(&vf->reset_task);
unregister_netdev(netdev);
+ if (vf->otx2_wq)
+ destroy_workqueue(vf->otx2_wq);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
-
- if (vf->hw.lmt_base)
- iounmap(vf->hw.lmt_base);
-
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);