Diffstat (limited to 'drivers/net/ethernet/marvell')
63 files changed, 10957 insertions, 1626 deletions
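The mvmdio change that opens this series converts the driver to fwnode-based probing: match data now comes from device_get_match_data(), which resolves through either the OF or the ACPI match table, and bus registration dispatches on the fwnode type. A minimal sketch of that dispatch follows; the helper name example_mdio_register is ours for illustration, not part of the patch:

#include <linux/acpi.h>
#include <linux/acpi_mdio.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

static int example_mdio_register(struct platform_device *pdev,
				 struct mii_bus *bus)
{
	/* ACPI-described platforms take acpi_mdiobus_register(); all
	 * others keep the OF path, which itself falls back to a plain
	 * mdiobus_register() when no of_node is present.
	 */
	if (is_acpi_node(pdev->dev.fwnode))
		return acpi_mdiobus_register(bus, pdev->dev.fwnode);

	return of_mdiobus_register(bus, pdev->dev.of_node);
}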
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index d14762d93640..62a97c46fba0 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -17,6 +17,8 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/acpi.h>
+#include <linux/acpi_mdio.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -281,7 +283,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
 	struct orion_mdio_dev *dev;
 	int i, ret;
 
-	type = (enum orion_mdio_bus_type)of_device_get_match_data(&pdev->dev);
+	type = (enum orion_mdio_bus_type)device_get_match_data(&pdev->dev);
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!r) {
@@ -369,7 +371,13 @@ static int orion_mdio_probe(struct platform_device *pdev)
 		goto out_mdio;
 	}
 
-	ret = of_mdiobus_register(bus, pdev->dev.of_node);
+	/* For the platforms not supporting DT/ACPI fall-back
+	 * to mdiobus_register via of_mdiobus_register.
+	 */
+	if (is_acpi_node(pdev->dev.fwnode))
+		ret = acpi_mdiobus_register(bus, pdev->dev.fwnode);
+	else
+		ret = of_mdiobus_register(bus, pdev->dev.of_node);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
 		goto out_mdio;
 	}
@@ -421,12 +429,20 @@ static const struct of_device_id orion_mdio_match[] = {
 };
 MODULE_DEVICE_TABLE(of, orion_mdio_match);
 
+static const struct acpi_device_id orion_mdio_acpi_match[] = {
+	{ "MRVL0100", BUS_TYPE_SMI },
+	{ "MRVL0101", BUS_TYPE_XSMI },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match);
+
 static struct platform_driver orion_mdio_driver = {
 	.probe = orion_mdio_probe,
 	.remove = orion_mdio_remove,
 	.driver = {
 		.name = "orion-mdio",
 		.of_match_table = orion_mdio_match,
+		.acpi_match_table = ACPI_PTR(orion_mdio_acpi_match),
 	},
 };
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 7d5cd9bc6c99..76a7777c746d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1805,18 +1805,14 @@ static void mvneta_rx_error(struct mvneta_port *pp,
 }
 
 /* Handle RX checksum offload based on the descriptor's status */
-static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
-			   struct sk_buff *skb)
+static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
 {
 	if ((pp->dev->features & NETIF_F_RXCSUM) &&
 	    (status & MVNETA_RXD_L3_IP4) &&
-	    (status & MVNETA_RXD_L4_CSUM_OK)) {
-		skb->csum = 0;
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		return;
-	}
+	    (status & MVNETA_RXD_L4_CSUM_OK))
+		return CHECKSUM_UNNECESSARY;
 
-	skb->ip_summed = CHECKSUM_NONE;
+	return CHECKSUM_NONE;
 }
 
 /* Return tx queue pointer (find last set bit) according to <cause> returned
@@ -2303,24 +2299,24 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 		skb_frag_off_set(frag, pp->rx_offset_correction);
 		skb_frag_size_set(frag, data_len);
 		__skb_frag_set_page(frag, page);
-
-		/* last fragment */
-		if (len == *size) {
-			struct skb_shared_info *sinfo;
-
-			sinfo = xdp_get_shared_info_from_buff(xdp);
-			sinfo->nr_frags = xdp_sinfo->nr_frags;
-			memcpy(sinfo->frags, xdp_sinfo->frags,
-			       sinfo->nr_frags * sizeof(skb_frag_t));
-		}
 	} else {
 		page_pool_put_full_page(rxq->page_pool, page, true);
 	}
+
+	/* last fragment */
+	if (len == *size) {
+		struct skb_shared_info *sinfo;
+
+		sinfo = xdp_get_shared_info_from_buff(xdp);
+		sinfo->nr_frags = xdp_sinfo->nr_frags;
+		memcpy(sinfo->frags, xdp_sinfo->frags,
+		       sinfo->nr_frags * sizeof(skb_frag_t));
+	}
 	*size -= len;
 }
 
 static struct sk_buff *
-mvneta_swbm_build_skb(struct
mvneta_port *pp, struct mvneta_rx_queue *rxq, +mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, struct xdp_buff *xdp, u32 desc_status) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); @@ -2331,11 +2327,11 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, if (!skb) return ERR_PTR(-ENOMEM); - page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data)); + skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool); skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); - mvneta_rx_csum(pp, desc_status, skb); + skb->ip_summed = mvneta_rx_csum(pp, desc_status); for (i = 0; i < num_frags; i++) { skb_frag_t *frag = &sinfo->frags[i]; @@ -2343,7 +2339,10 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_frag_page(frag), skb_frag_off(frag), skb_frag_size(frag), PAGE_SIZE); - page_pool_release_page(rxq->page_pool, skb_frag_page(frag)); + /* We don't need to reset pp_recycle here. It's already set, so + * just mark fragments for recycling. + */ + page_pool_store_mem_info(skb_frag_page(frag), pool); } return skb; @@ -2370,7 +2369,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi, /* Get number of received packets */ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); - rcu_read_lock(); xdp_prog = READ_ONCE(pp->xdp_prog); /* Fairness NAPI loop */ @@ -2425,7 +2423,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) goto next; - skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status); + skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status); if (IS_ERR(skb)) { struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); @@ -2448,7 +2446,6 @@ next: xdp_buf.data_hard_start = NULL; sinfo.nr_frags = 0; } - rcu_read_unlock(); if (xdp_buf.data_hard_start) mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); @@ -2532,7 +2529,7 @@ err_drop_frame: rx_bytes); skb->protocol = eth_type_trans(skb, dev); - mvneta_rx_csum(pp, rx_status, skb); + skb->ip_summed = mvneta_rx_csum(pp, rx_status); napi_gro_receive(napi, skb); rcvd_pkts++; @@ -2581,8 +2578,7 @@ err_drop_frame: skb_put(skb, rx_bytes); skb->protocol = eth_type_trans(skb, dev); - - mvneta_rx_csum(pp, rx_status, skb); + skb->ip_summed = mvneta_rx_csum(pp, rx_status); napi_gro_receive(napi, skb); } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 8edba5ea90f0..cf8acabb90ac 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -938,7 +938,7 @@ enum mvpp22_ptp_packet_format { #define MVPP2_BM_COOKIE_POOL_OFFS 8 #define MVPP2_BM_COOKIE_CPU_OFFS 24 -#define MVPP2_BM_SHORT_FRAME_SIZE 704 /* frame size 128 */ +#define MVPP2_BM_SHORT_FRAME_SIZE 736 /* frame size 128 */ #define MVPP2_BM_LONG_FRAME_SIZE 2240 /* frame size 1664 */ #define MVPP2_BM_JUMBO_FRAME_SIZE 10432 /* frame size 9856 */ /* BM short pool packet size @@ -993,6 +993,14 @@ enum mvpp22_ptp_packet_format { #define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) +/* Buffer header info bits */ +#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff +#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK) +#define MVPP2_B_HDR_INFO_LAST_OFFS 12 +#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12) +#define MVPP2_B_HDR_INFO_IS_LAST(info) \ + (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS) + struct mvpp2_tai; /* Definitions */ 
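The MVPP2_B_HDR_INFO_* bits above, together with struct mvpp2_buff_hdr in the next hunk, describe how the hardware chains oversized frames across several BM buffers: each buffer starts with a header pointing at the next one, and the LAST bit in the info field terminates the chain. A condensed sketch of the walk, modeled on the mvpp2_buff_hdr_pool_put() hunk further down (the actual pool put is elided, and a 64-bit phys_addr_t is assumed):

static void example_buff_hdr_walk(phys_addr_t phys_addr, bool addr40)
{
	struct mvpp2_buff_hdr *buff_hdr;
	u16 info;

	do {
		buff_hdr = phys_to_virt(phys_addr);
		info = le16_to_cpu(buff_hdr->info);

		phys_addr = le32_to_cpu(buff_hdr->next_phys_addr);
		if (addr40)	/* PPv2.2+ keeps bits 39:32 in a separate byte */
			phys_addr |= (u64)buff_hdr->next_phys_addr_high << 32;

		/* ... return the just-visited buffer to its BM pool ... */
	} while (!MVPP2_B_HDR_INFO_IS_LAST(info));
}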
@@ -1002,6 +1010,20 @@ struct mvpp2_rss_table { u32 indir[MVPP22_RSS_TABLE_ENTRIES]; }; +struct mvpp2_buff_hdr { + __le32 next_phys_addr; + __le32 next_dma_addr; + __le16 byte_count; + __le16 info; + __le16 reserved1; /* bm_qset (for future use, BM) */ + u8 next_phys_addr_high; + u8 next_dma_addr_high; + __le16 reserved2; + __le16 reserved3; + __le16 reserved4; + __le16 reserved5; +}; + /* Shared Packet Processor resources */ struct mvpp2 { /* Shared registers' base addresses */ @@ -1175,9 +1197,6 @@ struct mvpp2_port { /* Firmware node associated to the port */ struct fwnode_handle *fwnode; - /* Is a PHY always connected to the port */ - bool has_phy; - /* Per-port registers' base address */ void __iomem *base; void __iomem *stats_base; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index ec706d614cac..3229bafa2a2c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3543,21 +3543,17 @@ static void mvpp2_rx_error(struct mvpp2_port *port, } /* Handle RX checksum offload */ -static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, - struct sk_buff *skb) +static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status) { if (((status & MVPP2_RXD_L3_IP4) && !(status & MVPP2_RXD_IP4_HEADER_ERR)) || (status & MVPP2_RXD_L3_IP6)) if (((status & MVPP2_RXD_L4_UDP) || (status & MVPP2_RXD_L4_TCP)) && - (status & MVPP2_RXD_L4_CSUM_OK)) { - skb->csum = 0; - skb->ip_summed = CHECKSUM_UNNECESSARY; - return; - } + (status & MVPP2_RXD_L4_CSUM_OK)) + return CHECKSUM_UNNECESSARY; - skb->ip_summed = CHECKSUM_NONE; + return CHECKSUM_NONE; } /* Allocate a new skb and add it to BM pool */ @@ -3784,9 +3780,9 @@ mvpp2_xdp_xmit(struct net_device *dev, int num_frame, } static int -mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq, - struct bpf_prog *prog, struct xdp_buff *xdp, - struct page_pool *pp, struct mvpp2_pcpu_stats *stats) +mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog, + struct xdp_buff *xdp, struct page_pool *pp, + struct mvpp2_pcpu_stats *stats) { unsigned int len, sync, err; struct page *page; @@ -3839,6 +3835,35 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq, return ret; } +static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc, + int pool, u32 rx_status) +{ + phys_addr_t phys_addr, phys_addr_next; + dma_addr_t dma_addr, dma_addr_next; + struct mvpp2_buff_hdr *buff_hdr; + + phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); + dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); + + do { + buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr); + + phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr); + dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr); + + if (port->priv->hw_version >= MVPP22) { + phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32); + dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32); + } + + mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); + + phys_addr = phys_addr_next; + dma_addr = dma_addr_next; + + } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info))); +} + /* Main rx processing */ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, int rx_todo, struct mvpp2_rx_queue *rxq) @@ -3852,8 +3877,6 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, int rx_done = 0; u32 xdp_ret = 0; - rcu_read_lock(); - xdp_prog = READ_ONCE(port->xdp_prog); /* Get number of received packets and clamp the 
to-do */ @@ -3871,28 +3894,24 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, phys_addr_t phys_addr; u32 rx_status, timestamp; int pool, rx_bytes, err, ret; + struct page *page; void *data; + phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); + data = (void *)phys_to_virt(phys_addr); + page = virt_to_page(data); + prefetch(page); + rx_done++; rx_status = mvpp2_rxdesc_status_get(port, rx_desc); rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); rx_bytes -= MVPP2_MH_SIZE; dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); - phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); - data = (void *)phys_to_virt(phys_addr); pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> MVPP2_RXD_BM_POOL_ID_OFFS; bm_pool = &port->priv->bm_pools[pool]; - /* In case of an error, release the requested buffer pointer - * to the Buffer Manager. This request process is controlled - * by the hardware, and the information about the buffer is - * comprised by the RX descriptor. - */ - if (rx_status & MVPP2_RXD_ERR_SUMMARY) - goto err_drop_frame; - if (port->priv->percpu_pools) { pp = port->priv->page_pool[pool]; dma_dir = page_pool_get_dma_dir(pp); @@ -3904,8 +3923,20 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, rx_bytes + MVPP2_MH_SIZE, dma_dir); + /* Buffer header not supported */ + if (rx_status & MVPP2_RXD_BUF_HDR) + goto err_drop_frame; + + /* In case of an error, release the requested buffer pointer + * to the Buffer Manager. This request process is controlled + * by the hardware, and the information about the buffer is + * comprised by the RX descriptor. + */ + if (rx_status & MVPP2_RXD_ERR_SUMMARY) + goto err_drop_frame; + /* Prefetch header */ - prefetch(data); + prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); if (bm_pool->frag_size > PAGE_SIZE) frag_size = 0; @@ -3925,7 +3956,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM, rx_bytes, false); - ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps); + ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps); if (ret) { xdp_ret |= ret; @@ -3964,7 +3995,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, } if (pp) - page_pool_release_page(pp, virt_to_page(data)); + skb_mark_for_recycle(skb, page, pp); else dma_unmap_single_attrs(dev->dev.parent, dma_addr, bm_pool->buf_size, DMA_FROM_DEVICE, @@ -3975,8 +4006,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); skb_put(skb, rx_bytes); + skb->ip_summed = mvpp2_rx_csum(port, rx_status); skb->protocol = eth_type_trans(skb, dev); - mvpp2_rx_csum(port, rx_status, skb); napi_gro_receive(napi, skb); continue; @@ -3985,11 +4016,12 @@ err_drop_frame: dev->stats.rx_errors++; mvpp2_rx_error(port, rx_desc); /* Return the buffer to the pool */ - mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); + if (rx_status & MVPP2_RXD_BUF_HDR) + mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status); + else + mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); } - rcu_read_unlock(); - if (xdp_ret & MVPP2_XDP_REDIR) xdp_do_flush_map(); @@ -4753,9 +4785,8 @@ static int mvpp2_open(struct net_device *dev) goto err_cleanup_txqs; } - /* Phylink isn't supported yet in ACPI mode */ - if (port->of_node) { - err = phylink_of_phy_connect(port->phylink, port->of_node, 0); + if (port->phylink) { + err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0); if (err) { netdev_err(port->dev, "could not attach PHY (%d)\n", err); @@ 
-6663,6 +6694,19 @@ static void mvpp2_acpi_start(struct mvpp2_port *port) SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false); } +/* In order to ensure backward compatibility for ACPI, check if the port + * firmware node comprises the necessary description allowing to use phylink. + */ +static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode) +{ + if (!is_acpi_node(port_fwnode)) + return false; + + return (!fwnode_property_present(port_fwnode, "phy-handle") && + !fwnode_property_present(port_fwnode, "managed") && + !fwnode_get_named_child_node(port_fwnode, "fixed-link")); +} + /* Ports initialization */ static int mvpp2_port_probe(struct platform_device *pdev, struct fwnode_handle *port_fwnode, @@ -6738,7 +6782,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, port = netdev_priv(dev); port->dev = dev; port->fwnode = port_fwnode; - port->has_phy = !!of_find_property(port_node, "phy", NULL); port->ntxqs = ntxqs; port->nrxqs = nrxqs; port->priv = priv; @@ -6881,8 +6924,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; dev->dev.of_node = port_node; - /* Phylink isn't used w/ ACPI as of now */ - if (port_node) { + if (!mvpp2_use_acpi_compat_mode(port_fwnode)) { port->phylink_config.dev = &dev->dev; port->phylink_config.type = PHYLINK_NETDEV; @@ -6894,6 +6936,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, } port->phylink = phylink; } else { + dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id); port->phylink = NULL; } @@ -7311,7 +7354,6 @@ static int mvpp2_get_sram(struct platform_device *pdev, static int mvpp2_probe(struct platform_device *pdev) { - const struct acpi_device_id *acpi_id; struct fwnode_handle *fwnode = pdev->dev.fwnode; struct fwnode_handle *port_fwnode; struct mvpp2 *priv; @@ -7324,16 +7366,7 @@ static int mvpp2_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - if (has_acpi_companion(&pdev->dev)) { - acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table, - &pdev->dev); - if (!acpi_id) - return -EINVAL; - priv->hw_version = (unsigned long)acpi_id->driver_data; - } else { - priv->hw_version = - (unsigned long)of_device_get_match_data(&pdev->dev); - } + priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev); /* multi queue mode isn't supported on PPV2.1, fallback to single * mode @@ -7351,6 +7384,10 @@ static int mvpp2_probe(struct platform_device *pdev) return PTR_ERR(priv->lms_base); } else { res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(&pdev->dev, "Invalid resource\n"); + return -EINVAL; + } if (has_acpi_companion(&pdev->dev)) { /* In case the MDIO memory region is declared in * the ACPI, it can already appear as 'in-use' @@ -7445,34 +7482,35 @@ static int mvpp2_probe(struct platform_device *pdev) if (err < 0) goto err_gop_clk; - priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk"); + priv->mg_core_clk = devm_clk_get_optional(&pdev->dev, "mg_core_clk"); if (IS_ERR(priv->mg_core_clk)) { - priv->mg_core_clk = NULL; - } else { - err = clk_prepare_enable(priv->mg_core_clk); - if (err < 0) - goto err_mg_clk; + err = PTR_ERR(priv->mg_core_clk); + goto err_mg_clk; } + + err = clk_prepare_enable(priv->mg_core_clk); + if (err < 0) + goto err_mg_clk; } - priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); + priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk"); if (IS_ERR(priv->axi_clk)) { err = PTR_ERR(priv->axi_clk); - if (err == -EPROBE_DEFER) - goto err_mg_core_clk; - priv->axi_clk 
= NULL; - } else { - err = clk_prepare_enable(priv->axi_clk); - if (err < 0) - goto err_mg_core_clk; + goto err_mg_core_clk; } + err = clk_prepare_enable(priv->axi_clk); + if (err < 0) + goto err_mg_core_clk; + /* Get system's tclk rate */ priv->tclk = clk_get_rate(priv->pp_clk); - } else if (device_property_read_u32(&pdev->dev, "clock-frequency", - &priv->tclk)) { - dev_err(&pdev->dev, "missing clock-frequency value\n"); - return -EINVAL; + } else { + err = device_property_read_u32(&pdev->dev, "clock-frequency", &priv->tclk); + if (err) { + dev_err(&pdev->dev, "missing clock-frequency value\n"); + return err; + } } if (priv->hw_version >= MVPP22) { @@ -7552,6 +7590,8 @@ static int mvpp2_probe(struct platform_device *pdev) return 0; err_port_probe: + fwnode_handle_put(port_fwnode); + i = 0; fwnode_for_each_available_child_node(fwnode, port_fwnode) { if (priv->port_list[i]) @@ -7560,13 +7600,10 @@ err_port_probe: } err_axi_clk: clk_disable_unprepare(priv->axi_clk); - err_mg_core_clk: - if (priv->hw_version >= MVPP22) - clk_disable_unprepare(priv->mg_core_clk); + clk_disable_unprepare(priv->mg_core_clk); err_mg_clk: - if (priv->hw_version >= MVPP22) - clk_disable_unprepare(priv->mg_clk); + clk_disable_unprepare(priv->mg_clk); err_gop_clk: clk_disable_unprepare(priv->gop_clk); err_pp_clk: diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 7cc7d72d761e..93575800ca92 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -394,9 +394,6 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, if (start > end) swap(start, end); - if (end >= MVPP2_PRS_TCAM_SRAM_SIZE) - end = MVPP2_PRS_TCAM_SRAM_SIZE - 1; - for (tid = start; tid <= end; tid++) { if (!priv->prs_shadow[tid].valid) return tid; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index 1a3455620b38..cc8ac36cf687 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -10,4 +10,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o rvu_mbox-y := mbox.o rvu_trace.o rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ - rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o + rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index fac6474ad694..544c96c8fe1d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -86,6 +86,22 @@ bool is_lmac_valid(struct cgx *cgx, int lmac_id) return test_bit(lmac_id, &cgx->lmac_bmap); } +/* Helper function to get sequential index + * given the enabled LMAC of a CGX + */ +static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id) +{ + int tmp, id = 0; + + for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) { + if (tmp == lmac_id) + break; + id++; + } + + return id; +} + struct mac_ops *get_mac_ops(void *cgxd) { if (!cgxd) @@ -211,37 +227,257 @@ static u64 mac2u64 (u8 *mac_addr) return mac; } +static void cfg2mac(u64 cfg, u8 *mac_addr) +{ + int i, index = 0; + + for (i = ETH_ALEN - 1; i >= 0; i--, index++) + mac_addr[i] = (cfg >> (8 * index)) & 0xFF; +} + int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); 
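	/* Worked example of the DMAC CAM indexing introduced in this file,
	 * assuming all four LMACs of a CGX are enabled:
	 *
	 *   max   = MAX_DMAC_ENTRIES_PER_CGX / lmac_count  = 32 / 4 = 8
	 *   id    = get_sequence_id_of_lmac(cgx, lmac_id)    e.g.     2
	 *   index = id * max + idx                         = 2 * 8 + 0 = 16
	 *
	 * Each LMAC therefore owns a contiguous window of CAM slots; slot 0
	 * of the window is reserved for the interface MAC address set here,
	 * and cgx_lmac_addr_add() hands out the remaining slots.
	 */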
struct mac_ops *mac_ops; + int index, id; u64 cfg; + /* access mac_ops to know csr_offset */ mac_ops = cgx_dev->mac_ops; + /* copy 6bytes from macaddr */ /* memcpy(&cfg, mac_addr, 6); */ cfg = mac2u64 (mac_addr); - cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)), + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max; + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49)); cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); - cfg |= CGX_DMAC_CTL0_CAM_ENABLE; + cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE | + CGX_DMAC_MCAST_MODE); cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); return 0; } +u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id) +{ + struct mac_ops *mac_ops; + struct cgx *cgx = cgxd; + + if (!cgxd || !is_lmac_valid(cgxd, lmac_id)) + return 0; + + cgx = cgxd; + /* Get mac_ops to know csr offset */ + mac_ops = cgx->mac_ops; + + return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); +} + +u64 cgx_read_dmac_entry(void *cgxd, int index) +{ + struct mac_ops *mac_ops; + struct cgx *cgx; + + if (!cgxd) + return 0; + + cgx = cgxd; + mac_ops = cgx->mac_ops; + return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8))); +} + +int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + int index, idx; + u64 cfg = 0; + int id; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Get available index where entry is to be installed */ + idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap); + if (idx < 0) + return idx; + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + idx; + + cfg = mac2u64 (mac_addr); + cfg |= CGX_DMAC_CAM_ADDR_ENABLE; + cfg |= ((u64)lmac_id << 49); + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); + + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT); + + if (is_multicast_ether_addr(mac_addr)) { + cfg &= ~GENMASK_ULL(2, 1); + cfg |= CGX_DMAC_MCAST_MODE_CAM; + lmac->mcast_filters_count++; + } else if (!lmac->mcast_filters_count) { + cfg |= CGX_DMAC_MCAST_MODE; + } + + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + return idx; +} + +int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + u8 index = 0, id; + u64 cfg; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Restore index 0 to its default init value as done during + * cgx_lmac_init + */ + set_bit(0, lmac->mac_to_index_bmap.bmap); + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0); + + /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */ + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg &= ~CGX_DMAC_CAM_ACCEPT; + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + return 0; +} + +/* Allows caller to change macaddress associated with index + * in dmac filter table including index 0 reserved for + * interface mac address + */ +int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index) +{ + struct cgx *cgx_dev = 
cgx_get_pdata(cgx_id); + struct mac_ops *mac_ops; + struct lmac *lmac; + u64 cfg; + int id; + + lmac = lmac_pdata(lmac_id, cgx_dev); + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Validate the index */ + if (index >= lmac->mac_to_index_bmap.max) + return -EINVAL; + + /* ensure index is already set */ + if (!test_bit(index, lmac->mac_to_index_bmap.bmap)) + return -EINVAL; + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + + cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); + cfg &= ~CGX_RX_DMAC_ADR_MASK; + cfg |= mac2u64 (mac_addr); + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); + return 0; +} + +int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + u8 mac[ETH_ALEN]; + u64 cfg; + int id; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Validate the index */ + if (index >= lmac->mac_to_index_bmap.max) + return -EINVAL; + + /* Skip deletion for reserved index i.e. index 0 */ + if (index == 0) + return 0; + + rvu_free_rsrc(&lmac->mac_to_index_bmap, index); + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + + /* Read MAC address to check whether it is ucast or mcast */ + cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); + + cfg2mac(cfg, mac); + if (is_multicast_ether_addr(mac)) + lmac->mcast_filters_count--; + + if (!lmac->mcast_filters_count) { + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg &= ~GENMASK_ULL(2, 1); + cfg |= CGX_DMAC_MCAST_MODE; + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + } + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0); + + return 0; +} + +int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + + if (lmac) + return lmac->mac_to_index_bmap.max; + + return 0; +} + u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; + int index; u64 cfg; + int id; mac_ops = cgx_dev->mac_ops; - cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8); + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max; + + cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8); return cfg & CGX_RX_DMAC_ADR_MASK; } @@ -297,35 +533,51 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable) void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable) { struct cgx *cgx = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx); + u16 max_dmac = lmac->mac_to_index_bmap.max; struct mac_ops *mac_ops; + int index, i; u64 cfg = 0; + int id; if (!cgx) return; + id = get_sequence_id_of_lmac(cgx, lmac_id); + mac_ops = cgx->mac_ops; if (enable) { /* Enable promiscuous mode on LMAC */ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); - cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE); - cfg |= CGX_DMAC_BCAST_MODE; + cfg &= ~CGX_DMAC_CAM_ACCEPT; + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); - cfg = cgx_read(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); - cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; - cgx_write(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 
+ lmac_id * 0x8), cfg); + for (i = 0; i < max_dmac; i++) { + index = id * max_dmac + i; + cfg = cgx_read(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); + cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg); + } } else { /* Disable promiscuous mode */ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE; cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); - cfg = cgx_read(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); - cfg |= CGX_DMAC_CAM_ADDR_ENABLE; - cgx_write(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg); + for (i = 0; i < max_dmac; i++) { + index = id * max_dmac + i; + cfg = cgx_read(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); + if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) { + cfg |= CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + + index * 0x8), + cfg); + } + } } } @@ -1234,6 +1486,15 @@ static int cgx_lmac_init(struct cgx *cgx) } lmac->cgx = cgx; + lmac->mac_to_index_bmap.max = + MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count; + err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap); + if (err) + return err; + + /* Reserve first entry for default MAC address */ + set_bit(0, lmac->mac_to_index_bmap.bmap); + init_waitqueue_head(&lmac->wq_cmd_cmplt); mutex_init(&lmac->cmd_lock); spin_lock_init(&lmac->event_cb_lock); @@ -1243,8 +1504,8 @@ static int cgx_lmac_init(struct cgx *cgx) /* Add reference */ cgx->lmac_idmap[lmac->lmac_id] = lmac; - cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true); set_bit(lmac->lmac_id, &cgx->lmac_bmap); + cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true); } return cgx_lmac_verify_fwi_version(cgx); @@ -1274,6 +1535,7 @@ static int cgx_lmac_exit(struct cgx *cgx) continue; cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false); cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true); + kfree(lmac->mac_to_index_bmap.bmap); kfree(lmac->name); kfree(lmac); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 12521262164a..237ba2b56210 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -23,6 +23,7 @@ #define CGX_ID_MASK 0x7 #define MAX_LMAC_PER_CGX 4 +#define MAX_DMAC_ENTRIES_PER_CGX 32 #define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */ #define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX) @@ -46,10 +47,12 @@ #define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset) #define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3) #define CGX_DMAC_CAM_ACCEPT BIT_ULL(3) +#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2) #define CGX_DMAC_MCAST_MODE BIT_ULL(1) #define CGX_DMAC_BCAST_MODE BIT_ULL(0) #define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset) #define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48) +#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49) #define CGXX_CMRX_RX_DMAC_CAM1 0x400 #define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0) #define CGXX_CMRX_TX_STAT0 0x700 @@ -139,7 +142,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat); int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr); +int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id); u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id); +int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr); +int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index); +int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 
lmac_id); void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable); void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable); int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable); @@ -165,4 +172,7 @@ u8 cgx_get_lmacid(void *cgxd, u8 lmac_index); unsigned long cgx_get_lmac_bmap(void *cgxd); void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val); u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset); +int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index); +u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id); +u64 cgx_read_dmac_entry(void *cgxd, int index); #endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index e66109367487..47f5ed006a93 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -197,6 +197,11 @@ enum nix_scheduler { #define SDP_CHANNELS 256 +/* The mask is to extract lower 10-bits of channel number + * which CPT will pass to X2P. + */ +#define NIX_CHAN_CPT_X2P_MASK (0x3ffull) + /* NIX LSO format indices. * As of now TSO is the only one using, so statically assigning indices. */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index 45706fd87120..a8b7b1c7a1d5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -10,17 +10,19 @@ #include "rvu.h" #include "cgx.h" /** - * struct lmac + * struct lmac - per lmac locks and properties * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion * @cmd_lock: Lock to serialize the command interface * @resp: command response * @link_info: link related information + * @mac_to_index_bmap: Mac address to CGX table index mapping * @event_cb: callback for linkchange events * @event_cb_lock: lock for serializing callback with unregister - * @cmd_pend: flag set before new command is started - * flag cleared after command response is received * @cgx: parent cgx port + * @mcast_filters_count: Number of multicast filters installed * @lmac_id: lmac port id + * @cmd_pend: flag set before new command is started + * flag cleared after command response is received * @name: lmac port name */ struct lmac { @@ -29,12 +31,14 @@ struct lmac { struct mutex cmd_lock; u64 resp; struct cgx_link_user_info link_info; + struct rsrc_bmap mac_to_index_bmap; struct cgx_event_cb event_cb; /* lock for serializing callback with unregister */ spinlock_t event_cb_lock; - bool cmd_pend; struct cgx *cgx; + u8 mcast_filters_count; u8 lmac_id; + bool cmd_pend; char *name; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index cedb2616c509..f5ec39de026a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -134,6 +134,9 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \ M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \ M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \ +M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \ + msg_rsp) \ +M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \ /* CGX mbox IDs (range 0x200 - 0x3FF) */ \ M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \ M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \ @@ -162,7 +165,15 @@ 
M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\ M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \ cgx_features_info_msg) \ M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \ - /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ +M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \ + cgx_mac_addr_add_rsp) \ +M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \ + msg_rsp) \ +M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \ + cgx_max_dmac_entries_get_rsp) \ +M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \ +M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \ + msg_rsp) \ /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ npa_lf_alloc_req, npa_lf_alloc_rsp) \ @@ -259,7 +270,11 @@ M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \ M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \ M(NIX_CN10K_AQ_ENQ, 0x8019, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \ nix_cn10k_aq_enq_rsp) \ -M(NIX_GET_HW_INFO, 0x801a, nix_get_hw_info, msg_req, nix_hw_info) +M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \ +M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \ + nix_bandprof_alloc_rsp) \ +M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \ + msg_rsp) /* Messages initiated by AF (range 0xC00 - 0xDFF) */ #define MBOX_UP_CGX_MESSAGES \ @@ -396,6 +411,38 @@ struct cgx_mac_addr_set_or_get { u8 mac_addr[ETH_ALEN]; }; +/* Structure for requesting the operation to + * add DMAC filter entry into CGX interface + */ +struct cgx_mac_addr_add_req { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; +}; + +/* Structure for response against the operation to + * add DMAC filter entry into CGX interface + */ +struct cgx_mac_addr_add_rsp { + struct mbox_msghdr hdr; + u8 index; +}; + +/* Structure for requesting the operation to + * delete DMAC filter entry from CGX interface + */ +struct cgx_mac_addr_del_req { + struct mbox_msghdr hdr; + u8 index; +}; + +/* Structure for response against the operation to + * get maximum supported DMAC filter entries + */ +struct cgx_max_dmac_entries_get_rsp { + struct mbox_msghdr hdr; + u8 max_dmac_filters; +}; + struct cgx_link_user_info { uint64_t link_up:1; uint64_t full_duplex:1; @@ -494,6 +541,12 @@ struct cgx_set_link_mode_rsp { int status; }; +struct cgx_mac_addr_update_req { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; + u8 index; +}; + #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ #define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */ #define RVU_MAC_VERSION BIT_ULL(2) @@ -611,7 +664,12 @@ enum nix_af_status { NIX_AF_INVAL_SSO_PF_FUNC = -420, NIX_AF_ERR_TX_VTAG_NOSPC = -421, NIX_AF_ERR_RX_VTAG_INUSE = -422, - NIX_AF_ERR_NPC_KEY_NOT_SUPP = -423, + NIX_AF_ERR_PTP_CONFIG_FAIL = -423, + NIX_AF_ERR_NPC_KEY_NOT_SUPP = -424, + NIX_AF_ERR_INVALID_NIXBLK = -425, + NIX_AF_ERR_INVALID_BANDPROF = -426, + NIX_AF_ERR_IPOLICER_NOTSUPP = -427, + NIX_AF_ERR_BANDPROF_INVAL_REQ = -428, }; /* For NIX RX vtag action */ @@ -680,6 +738,7 @@ struct nix_cn10k_aq_enq_req { struct nix_cq_ctx_s cq; struct nix_rsse_s rss; struct nix_rx_mce_s mce; + struct nix_bandprof_s prof; }; union { struct nix_cn10k_rq_ctx_s rq_mask; @@ -687,6 +746,7 @@ struct nix_cn10k_aq_enq_req { struct nix_cq_ctx_s cq_mask; struct nix_rsse_s rss_mask; struct nix_rx_mce_s mce_mask; + struct nix_bandprof_s prof_mask; }; }; @@ -698,6 +758,7 @@ struct 
nix_cn10k_aq_enq_rsp { struct nix_cq_ctx_s cq; struct nix_rsse_s rss; struct nix_rx_mce_s mce; + struct nix_bandprof_s prof; }; }; @@ -713,6 +774,7 @@ struct nix_aq_enq_req { struct nix_cq_ctx_s cq; struct nix_rsse_s rss; struct nix_rx_mce_s mce; + u64 prof; }; union { struct nix_rq_ctx_s rq_mask; @@ -720,6 +782,7 @@ struct nix_aq_enq_req { struct nix_cq_ctx_s cq_mask; struct nix_rsse_s rss_mask; struct nix_rx_mce_s mce_mask; + u64 prof_mask; }; }; @@ -731,6 +794,7 @@ struct nix_aq_enq_rsp { struct nix_cq_ctx_s cq; struct nix_rsse_s rss; struct nix_rx_mce_s mce; + struct nix_bandprof_s prof; }; }; @@ -913,6 +977,7 @@ struct nix_rx_mode { #define NIX_RX_MODE_UCAST BIT(0) #define NIX_RX_MODE_PROMISC BIT(1) #define NIX_RX_MODE_ALLMULTI BIT(2) +#define NIX_RX_MODE_USE_MCE BIT(3) u16 mode; }; @@ -971,6 +1036,31 @@ struct nix_hw_info { u16 min_mtu; }; +struct nix_bandprof_alloc_req { + struct mbox_msghdr hdr; + /* Count of profiles needed per layer */ + u16 prof_count[BAND_PROF_NUM_LAYERS]; +}; + +struct nix_bandprof_alloc_rsp { + struct mbox_msghdr hdr; + u16 prof_count[BAND_PROF_NUM_LAYERS]; + + /* There is no need to allocate morethan 1 bandwidth profile + * per RQ of a PF_FUNC's NIXLF. So limit the maximum + * profiles to 64 per PF_FUNC. + */ +#define MAX_BANDPROF_PER_PFFUNC 64 + u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC]; +}; + +struct nix_bandprof_free_req { + struct mbox_msghdr hdr; + u8 free_all; + u16 prof_count[BAND_PROF_NUM_LAYERS]; + u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC]; +}; + /* NPC mbox message structs */ #define NPC_MCAM_ENTRY_INVALID 0xFFFF @@ -1228,6 +1318,22 @@ struct ptp_rsp { u64 clk; }; +struct set_vf_perm { + struct mbox_msghdr hdr; + u16 vf; +#define RESET_VF_PERM BIT_ULL(0) +#define VF_TRUSTED BIT_ULL(1) + u64 flags; +}; + +struct lmtst_tbl_setup_req { + struct mbox_msghdr hdr; + u16 base_pcifunc; + u8 use_local_lmt_region; + u64 lmt_iova; + u64 rsvd[4]; +}; + /* CPT mailbox error codes * Range 901 - 1000. */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 1e012e787260..243cf8070e77 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -33,6 +33,10 @@ enum npc_kpu_la_ltype { NPC_LT_LA_IH_2_ETHER, NPC_LT_LA_HIGIG2_ETHER, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_LT_LA_CH_LEN_90B_ETHER, + NPC_LT_LA_CPT_HDR, + NPC_LT_LA_CUSTOM_L2_24B_ETHER, NPC_LT_LA_CUSTOM0 = 0xE, NPC_LT_LA_CUSTOM1 = 0xF, }; @@ -42,7 +46,7 @@ enum npc_kpu_lb_ltype { NPC_LT_LB_CTAG, NPC_LT_LB_STAG_QINQ, NPC_LT_LB_BTAG, - NPC_LT_LB_ITAG, + NPC_LT_LB_PPPOE, NPC_LT_LB_DSA, NPC_LT_LB_DSA_VLAN, NPC_LT_LB_EDSA, @@ -50,6 +54,7 @@ enum npc_kpu_lb_ltype { NPC_LT_LB_EXDSA, NPC_LT_LB_EXDSA_VLAN, NPC_LT_LB_FDSA, + NPC_LT_LB_VLAN_EXDSA, NPC_LT_LB_CUSTOM0 = 0xE, NPC_LT_LB_CUSTOM1 = 0xF, }; @@ -65,6 +70,7 @@ enum npc_kpu_lc_ltype { NPC_LT_LC_NSH, NPC_LT_LC_PTP, NPC_LT_LC_FCOE, + NPC_LT_LC_NGIO, NPC_LT_LC_CUSTOM0 = 0xE, NPC_LT_LC_CUSTOM1 = 0xF, }; @@ -145,8 +151,18 @@ enum npc_kpu_lh_ltype { * Software assigns pkind for each incoming port such as CGX * Ethernet interfaces, LBK interfaces, etc. 
*/ +#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND + enum npc_pkind_type { - NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */ + NPC_RX_LBK_PKIND = 0ULL, + NPC_RX_VLAN_EXDSA_PKIND = 56ULL, + NPC_RX_CHLEN24B_PKIND = 57ULL, + NPC_RX_CPT_HDR_PKIND, + NPC_RX_CHLEN90B_PKIND, + NPC_TX_HIGIG_PKIND, + NPC_RX_HIGIG_PKIND, + NPC_RX_EDSA_PKIND, + NPC_TX_DEF_PKIND, /* NIX-TX PKIND */ }; /* list of known and supported fields in packet header and @@ -213,7 +229,7 @@ struct npc_kpu_profile_cam { u16 dp1_mask; u16 dp2; u16 dp2_mask; -}; +} __packed; struct npc_kpu_profile_action { u8 errlev; @@ -233,13 +249,13 @@ struct npc_kpu_profile_action { u8 mask; u8 right; u8 shift; -}; +} __packed; struct npc_kpu_profile { int cam_entries; int action_entries; - const struct npc_kpu_profile_cam *cam; - const struct npc_kpu_profile_action *action; + struct npc_kpu_profile_cam *cam; + struct npc_kpu_profile_action *action; }; /* NPC KPU register formats */ @@ -425,7 +441,19 @@ struct nix_tx_action { /* NPC MCAM reserved entry index per nixlf */ #define NIXLF_UCAST_ENTRY 0 #define NIXLF_BCAST_ENTRY 1 -#define NIXLF_PROMISC_ENTRY 2 +#define NIXLF_ALLMULTI_ENTRY 2 +#define NIXLF_PROMISC_ENTRY 3 + +struct npc_coalesced_kpu_prfl { +#define NPC_SIGN 0x00666f727063706e +#define NPC_PRFL_NAME "npc_prfls_array" +#define NPC_NAME_LEN 32 + __le64 signature; /* "npcprof\0" (8 bytes/ASCII characters) */ + u8 name[NPC_NAME_LEN]; /* KPU Profile name */ + u64 version; /* KPU firmware/profile version */ + u8 num_prfl; /* No of NPC profiles. */ + u16 prfl_sz[0]; +}; struct npc_mcam_kex { /* MKEX Profle Header */ @@ -445,6 +473,15 @@ struct npc_mcam_kex { u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL]; } __packed; +struct npc_kpu_fwdata { + int entries; + /* What follows is: + * struct npc_kpu_profile_cam[entries]; + * struct npc_kpu_profile_action[entries]; + */ + u8 data[0]; +} __packed; + struct npc_lt_def { u8 ltype_mask; u8 ltype_match; @@ -459,6 +496,29 @@ struct npc_lt_def_ipsec { u8 spi_nz; }; +struct npc_lt_def_apad { + u8 ltype_mask; + u8 ltype_match; + u8 lid; + u8 valid; +} __packed; + +struct npc_lt_def_color { + u8 ltype_mask; + u8 ltype_match; + u8 lid; + u8 noffset; + u8 offset; +} __packed; + +struct npc_lt_def_et { + u8 ltype_mask; + u8 ltype_match; + u8 lid; + u8 valid; + u8 offset; +} __packed; + struct npc_lt_def_cfg { struct npc_lt_def rx_ol2; struct npc_lt_def rx_oip4; @@ -476,7 +536,41 @@ struct npc_lt_def_cfg { struct npc_lt_def pck_oip4; struct npc_lt_def pck_oip6; struct npc_lt_def pck_iip4; -}; + struct npc_lt_def_apad rx_apad0; + struct npc_lt_def_apad rx_apad1; + struct npc_lt_def_color ovlan; + struct npc_lt_def_color ivlan; + struct npc_lt_def_color rx_gen0_color; + struct npc_lt_def_color rx_gen1_color; + struct npc_lt_def_et rx_et[2]; +} __packed; + +/* Loadable KPU profile firmware data */ +struct npc_kpu_profile_fwdata { +#define KPU_SIGN 0x00666f727075706b +#define KPU_NAME_LEN 32 +/** Maximum number of custom KPU entries supported by the built-in profile. */ +#define KPU_MAX_CST_ENT 2 + /* KPU Profle Header */ + __le64 signature; /* "kpuprof\0" (8 bytes/ASCII characters) */ + u8 name[KPU_NAME_LEN]; /* KPU Profile name */ + __le64 version; /* KPU profile version */ + u8 kpus; + u8 reserved[7]; + + /* Default MKEX profile to be used with this KPU profile. May be + * overridden with mkex_profile module parameter. Format is same as for + * the MKEX profile to streamline processing. + */ + struct npc_mcam_kex mkex; + /* LTYPE values for specific HW offloaded protocols. 
*/ + struct npc_lt_def_cfg lt_def; + /* Dynamically sized data: + * Custom KPU CAM and ACTION configuration entries. + * struct npc_kpu_fwdata kpu[kpus]; + */ + u8 data[0]; +} __packed; struct rvu_npc_mcam_rule { struct flow_msg packet; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index 5c372d2c24a1..fee655cc7523 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -11,7 +11,10 @@ #ifndef NPC_PROFILE_H #define NPC_PROFILE_H -#define NPC_KPU_PROFILE_VER 0x0000000100050000 +#define NPC_KPU_PROFILE_VER 0x0000000100060000 +#define NPC_KPU_VER_MAJ(ver) ((u16)(((ver) >> 32) & 0xFFFF)) +#define NPC_KPU_VER_MIN(ver) ((u16)(((ver) >> 16) & 0xFFFF)) +#define NPC_KPU_VER_PATCH(ver) ((u16)((ver) & 0xFFFF)) #define NPC_IH_W 0x8000 #define NPC_IH_UTAG 0x2000 @@ -20,6 +23,7 @@ #define NPC_ETYPE_IP6 0x86dd #define NPC_ETYPE_ARP 0x0806 #define NPC_ETYPE_RARP 0x8035 +#define NPC_ETYPE_NGIO 0x8842 #define NPC_ETYPE_MPLSU 0x8847 #define NPC_ETYPE_MPLSM 0x8848 #define NPC_ETYPE_ETAG 0x893f @@ -33,6 +37,10 @@ #define NPC_ETYPE_PPP 0x880b #define NPC_ETYPE_NSH 0x894f #define NPC_ETYPE_DSA 0xdada +#define NPC_ETYPE_PPPOE 0x8864 + +#define NPC_PPP_IP 0x0021 +#define NPC_PPP_IP6 0x0057 #define NPC_IPNH_HOP 0 #define NPC_IPNH_ICMP 1 @@ -142,14 +150,15 @@ #define NPC_DSA_EDSA 0x8000 #define NPC_DSA_FDSA 0xc000 -#define NPC_KEXOF_DMAC 8 -#define MKEX_SIGN 0x19bbfdbd15f /* strtoull of "mkexprof" with base:36 */ +#define NPC_KEXOF_DMAC 9 +#define MKEX_SIGN 0x19bbfdbd15f #define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \ (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \ ((flags_ena) << 6) | ((key_ofs) & 0x3F)) /* Rx parse key extract nibble enable */ #define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \ + NPC_PARSE_NIBBLE_ERRCODE | \ NPC_PARSE_NIBBLE_LA_LTYPE | \ NPC_PARSE_NIBBLE_LB_LTYPE | \ NPC_PARSE_NIBBLE_LC_LTYPE | \ @@ -170,25 +179,31 @@ enum npc_kpu_parser_state { NPC_S_KPU1_EXDSA, NPC_S_KPU1_HIGIG2, NPC_S_KPU1_IH_NIX_HIGIG2, + NPC_S_KPU1_CUSTOM_L2_90B, + NPC_S_KPU1_CPT_HDR, + NPC_S_KPU1_CUSTOM_L2_24B, + NPC_S_KPU1_VLAN_EXDSA, NPC_S_KPU2_CTAG, NPC_S_KPU2_CTAG2, NPC_S_KPU2_SBTAG, NPC_S_KPU2_QINQ, NPC_S_KPU2_ETAG, - NPC_S_KPU2_ITAG, NPC_S_KPU2_PREHEADER, NPC_S_KPU2_EXDSA, + NPC_S_KPU2_NGIO, NPC_S_KPU3_CTAG, NPC_S_KPU3_STAG, NPC_S_KPU3_QINQ, - NPC_S_KPU3_ITAG, NPC_S_KPU3_CTAG_C, NPC_S_KPU3_STAG_C, NPC_S_KPU3_QINQ_C, NPC_S_KPU3_DSA, + NPC_S_KPU3_VLAN_EXDSA, NPC_S_KPU4_MPLS, NPC_S_KPU4_NSH, NPC_S_KPU4_FDSA, + NPC_S_KPU4_VLAN_EXDSA, + NPC_S_KPU4_PPPOE, NPC_S_KPU5_IP, NPC_S_KPU5_IP6, NPC_S_KPU5_ARP, @@ -198,13 +213,19 @@ enum npc_kpu_parser_state { NPC_S_KPU5_MPLS, NPC_S_KPU5_MPLS_PL, NPC_S_KPU5_NSH, + NPC_S_KPU5_CPT_IP, + NPC_S_KPU5_CPT_IP6, NPC_S_KPU6_IP6_EXT, NPC_S_KPU6_IP6_HOP_DEST, NPC_S_KPU6_IP6_ROUT, NPC_S_KPU6_IP6_FRAG, + NPC_S_KPU6_IP6_CPT_FRAG, + NPC_S_KPU6_IP6_CPT_HOP_DEST, + NPC_S_KPU6_IP6_CPT_ROUT, NPC_S_KPU7_IP6_EXT, NPC_S_KPU7_IP6_ROUT, NPC_S_KPU7_IP6_FRAG, + NPC_S_KPU7_CPT_IP6_FRAG, NPC_S_KPU8_TCP, NPC_S_KPU8_UDP, NPC_S_KPU8_SCTP, @@ -265,7 +286,6 @@ enum npc_kpu_la_lflag { NPC_F_LA_L_UNK_ETYPE = 1, NPC_F_LA_L_WITH_VLAN, NPC_F_LA_L_WITH_ETAG, - NPC_F_LA_L_WITH_ITAG, NPC_F_LA_L_WITH_MPLS, NPC_F_LA_L_WITH_NSH, }; @@ -442,7 +462,28 @@ enum NPC_ERRLEV_E { NPC_ERRLEV_ENUM_LAST = 16, }; -static const struct npc_kpu_profile_action ikpu_action_entries[] = { +#define NPC_KPU_NOP_CAM \ + { \ + NPC_S_NA, 0xff, \ + 0x0000, \ + 
0x0000, \ + 0x0000, \ + 0x0000, \ + 0x0000, \ + 0x0000, \ + } + +#define NPC_KPU_NOP_ACTION \ + { \ + NPC_ERRLEV_RE, NPC_EC_NOERR, \ + 0, 0, 0, 0, 0, \ + NPC_S_NA, 0, 0, \ + NPC_LID_LA, NPC_LT_NA, \ + 0, \ + 0, 0, 0, 0, \ + } + +static struct npc_kpu_profile_action ikpu_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, 0, 0, @@ -950,7 +991,7 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, 0, 0, - NPC_S_KPU1_ETHER, 0, 0, + NPC_S_KPU1_VLAN_EXDSA, 0, 0, NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, @@ -958,8 +999,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 12, 16, 20, 0, 0, - NPC_S_KPU1_ETHER, 0, 0, + 36, 40, 44, 0, 0, + NPC_S_KPU1_CUSTOM_L2_24B, 0, 0, NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, @@ -967,8 +1008,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 12, 16, 20, 0, 0, - NPC_S_KPU1_ETHER, 0, 0, + 40, 54, 58, 0, 0, + NPC_S_KPU1_CPT_HDR, 0, 0, NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, @@ -976,8 +1017,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 12, 16, 20, 0, 0, - NPC_S_KPU1_ETHER, 0, 0, + 102, 106, 110, 0, 0, + NPC_S_KPU1_CUSTOM_L2_90B, 0, 0, NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, @@ -1021,7 +1062,9 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { +static struct npc_kpu_profile_cam kpu1_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP, @@ -1080,6 +1123,15 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_NGIO, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, NPC_ETYPE_CTAG, 0xffff, 0x0000, @@ -1123,7 +1175,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_ETHER, 0xff, - NPC_ETYPE_ITAG, + NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000, @@ -1132,7 +1184,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_ETHER, 0xff, - NPC_ETYPE_MPLSU, + NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000, @@ -1141,7 +1193,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_ETHER, 0xff, - NPC_ETYPE_MPLSM, + NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000, @@ -1150,7 +1202,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_ETHER, 0xff, - NPC_ETYPE_NSH, + NPC_ETYPE_DSA, 0xffff, 0x0000, 0x0000, @@ -1159,7 +1211,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_ETHER, 0xff, - NPC_ETYPE_DSA, + NPC_ETYPE_PPPOE, 0xffff, 0x0000, 0x0000, @@ -1294,15 +1346,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_IH_NIX, 0xff, - NPC_ETYPE_ITAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_IH_NIX, 0xff, NPC_ETYPE_MPLSU, 0xffff, 0x0000, @@ -1339,8 +1382,8 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_IH, 0xff, - NPC_IH_W|NPC_IH_UTAG, - NPC_IH_W|NPC_IH_UTAG, + NPC_IH_W | NPC_IH_UTAG, + NPC_IH_W | NPC_IH_UTAG, 0x0000, 0x0000, 0x0000, @@ -1349,7 +1392,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { { NPC_S_KPU1_IH, 0xff, NPC_IH_W, - NPC_IH_W|NPC_IH_UTAG, + NPC_IH_W | NPC_IH_UTAG, 0x0000, 0x0000, 0x0000, @@ -1358,7 +1401,7 @@ static const struct npc_kpu_profile_cam 
kpu1_cam_entries[] = { { NPC_S_KPU1_IH, 0xff, 0x0000, - NPC_IH_W|NPC_IH_UTAG, + NPC_IH_W | NPC_IH_UTAG, 0x0000, 0x0000, 0x0000, @@ -1501,15 +1544,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_HIGIG2, 0xff, - NPC_ETYPE_ITAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_HIGIG2, 0xff, NPC_ETYPE_MPLSU, 0xffff, 0x0000, @@ -1645,7 +1679,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, - NPC_ETYPE_ITAG, + NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000, @@ -1654,7 +1688,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, - NPC_ETYPE_MPLSU, + NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000, @@ -1663,7 +1697,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, - NPC_ETYPE_MPLSM, + NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000, @@ -1672,6 +1706,132 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_ETYPE_ETAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, NPC_ETYPE_NSH, 0xffff, 0x0000, @@ -1680,7 +1840,88 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CPT_HDR, 0xff, + 0x0000, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CPT_HDR, 0xff, + 0x0000, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CPT_HDR, 0xff, + 0x0000, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CPT_HDR, 0xff, + 0x0000, + 0xffff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CPT_HDR, 0xff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU1_CPT_HDR, 0xff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU1_CPT_HDR, 0xff, + 0x0000, + 0xffff, + 0x0000, 
+ 0x0000, + NPC_ETYPE_CTAG, + 0xffff, + }, + { + NPC_S_KPU1_CPT_HDR, 0xff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_QINQ, + 0xffff, + }, + { + NPC_S_KPU1_CPT_HDR, 0xff, 0x0000, 0x0000, 0x0000, @@ -1689,6 +1930,150 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_ETAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_VLAN_EXDSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { NPC_S_NA, 0X00, 0x0000, 0x0000, @@ -1699,7 +2084,9 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu2_cam_entries[] = { +static struct npc_kpu_profile_cam kpu2_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP, @@ -1783,6 +2170,24 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = { }, { NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_PPPOE, + 0xffff, + 0x0000, + 0x0000, + NPC_PPP_IP, + 0xffff, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_PPPOE, + 0xffff, + 0x0000, + 0x0000, + NPC_PPP_IP6, + 0xffff, + }, + { + NPC_S_KPU2_CTAG, 0xff, 0x0000, 0x0000, 0x0000, @@ -2226,15 +2631,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = { NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_ITAG, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ETAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, 0x0000, 0x0000, 0x0000, @@ -2313,159 +2709,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = { 0x0000, }, { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_IP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_ARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_RARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ITAG, 0xff, - 
NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_IP, - 0xffff, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_IP6, - 0xffff, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_ARP, - 0xffff, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_IP, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_ARP, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_IP, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_ARP, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ITAG, 0xff, - NPC_ETYPE_CTAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_ITAG, 0xff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { NPC_S_KPU2_CTAG2, 0xff, NPC_ETYPE_IP, 0xffff, @@ -2817,6 +3060,15 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = { 0x0000, }, { + NPC_S_KPU2_NGIO, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { NPC_S_NA, 0X00, 0x0000, 0x0000, @@ -2827,7 +3079,9 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu3_cam_entries[] = { +static struct npc_kpu_profile_cam kpu3_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP, @@ -3243,159 +3497,6 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = { 0x0000, }, { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_IP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_ARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_RARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_IP, - 0xffff, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_IP6, - 0xffff, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_ARP, - 0xffff, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_IP, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_ARP, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_IP, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 
0x0000, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_ARP, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU3_ITAG, 0xff, - NPC_ETYPE_CTAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU3_ITAG, 0xff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { NPC_S_KPU3_CTAG_C, 0xff, NPC_ETYPE_IP, 0xffff, @@ -3936,6 +4037,15 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = { 0x0000, }, { + NPC_S_KPU3_VLAN_EXDSA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { NPC_S_NA, 0X00, 0x0000, 0x0000, @@ -3946,7 +4056,9 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu4_cam_entries[] = { +static struct npc_kpu_profile_cam kpu4_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU4_MPLS, 0xff, NPC_MPLS_S, @@ -4084,6 +4196,78 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = { }, { NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_PPPOE, + 0xffff, + 0x0000, + 0x0000, + NPC_PPP_IP, + 0xffff, + }, + { + NPC_S_KPU4_FDSA, 0xff, + NPC_ETYPE_PPPOE, + 0xffff, + 0x0000, + 0x0000, + NPC_PPP_IP6, + 0xffff, + }, + { + NPC_S_KPU4_FDSA, 0xff, 0x0000, NPC_DSA_FDSA, 0x0000, @@ -4092,6 +4276,87 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = { 0x0000, }, { + NPC_S_KPU4_VLAN_EXDSA, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_VLAN_EXDSA, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_VLAN_EXDSA, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_VLAN_EXDSA, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_VLAN_EXDSA, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_VLAN_EXDSA, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_VLAN_EXDSA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_PPPOE, 0xff, + NPC_PPP_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_PPPOE, 0xff, + NPC_PPP_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { NPC_S_NA, 0X00, 0x0000, 0x0000, @@ -4102,7 +4367,9 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { +static struct npc_kpu_profile_cam kpu5_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU5_IP, 0xff, 0x0000, @@ -4125,116 +4392,116 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, - 
NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, }, @@ -4245,7 +4512,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 
0xff, @@ -4254,7 +4521,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, @@ -4263,7 +4530,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, @@ -4272,7 +4539,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, @@ -4281,7 +4548,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, @@ -4290,7 +4557,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, @@ -4299,7 +4566,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, @@ -4308,7 +4575,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, @@ -4317,7 +4584,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, @@ -4326,7 +4593,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, @@ -4335,7 +4602,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, @@ -4344,7 +4611,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, - NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF, }, { NPC_S_KPU5_IP, 0xff, @@ -4662,6 +4929,429 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { 0x0000, }, { + NPC_S_KPU5_CPT_IP, 0xff, + 0x0000, + NPC_IP_TTL_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0001, + NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_TCP, + 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_UDP, + 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_SCTP, + 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_ICMP, + 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_IGMP, + 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, + 
0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_ESP, + 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_AH, + 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_GRE, + 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_IP, + 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_IP6, + 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_MPLS, + 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_TCP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_UDP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_SCTP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_ICMP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_IGMP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_ESP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_AH, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_GRE, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_IP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_IP6, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_MPLS, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + 0x0000, + NPC_IP6_HOP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + NPC_IP_VER_6, + 
NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_HOP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_DEST << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_ROUT << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_FRAG << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_MOBILITY << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_HOSTID << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_SHIM6 << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { NPC_S_NA, 0X00, 0x0000, 0x0000, @@ -4672,7 +5362,9 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu6_cam_entries[] = { +static struct npc_kpu_profile_cam kpu6_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU6_IP6_EXT, 0xff, 0x0000, @@ -5007,6 +5699,330 @@ static const struct npc_kpu_profile_cam kpu6_cam_entries[] = { 0x0000, }, { + NPC_S_KPU6_IP6_CPT_FRAG, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_FRAG, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_FRAG, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_FRAG, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_FRAG, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_FRAG, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_FRAG, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_FRAG, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_FRAG, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_FRAG, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_FRAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + 
NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + NPC_IPNH_ROUT << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + NPC_IPNH_FRAG << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_ROUT, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_ROUT, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_ROUT, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_ROUT, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_ROUT, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_ROUT, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_ROUT, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_ROUT, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_ROUT, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_ROUT, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_ROUT, 0xff, + NPC_IPNH_FRAG << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_ROUT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { NPC_S_NA, 0X00, 0x0000, 0x0000, @@ -5017,7 +6033,9 @@ static const struct npc_kpu_profile_cam kpu6_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu7_cam_entries[] = { +static struct npc_kpu_profile_cam kpu7_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU7_IP6_EXT, 0xff, 0x0000, @@ -5226,6 +6244,105 @@ static const struct npc_kpu_profile_cam kpu7_cam_entries[] = { 0x0000, }, { + NPC_S_KPU7_CPT_IP6_FRAG, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_CPT_IP6_FRAG, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_CPT_IP6_FRAG, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_CPT_IP6_FRAG, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_CPT_IP6_FRAG, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_CPT_IP6_FRAG, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + 0x0000, + 
0x0000, + 0x0000, + }, + { + NPC_S_KPU7_CPT_IP6_FRAG, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_CPT_IP6_FRAG, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_CPT_IP6_FRAG, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_CPT_IP6_FRAG, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_CPT_IP6_FRAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { NPC_S_NA, 0X00, 0x0000, 0x0000, @@ -5236,7 +6353,9 @@ static const struct npc_kpu_profile_cam kpu7_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { +static struct npc_kpu_profile_cam kpu8_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU8_TCP, 0xff, 0x0000, @@ -5259,8 +6378,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000, - NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN, - NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN, 0x0000, 0x0000, }, @@ -5268,8 +6387,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000, - NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN, - NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN, 0x0000, 0x0000, }, @@ -5277,8 +6396,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000, - NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN, - NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN, 0x0000, 0x0000, }, @@ -5286,8 +6405,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000, - NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN, - NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN, 0x0000, 0x0000, }, @@ -5565,7 +6684,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, @@ -5574,7 +6693,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5583,7 +6702,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, - NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5592,7 +6711,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5637,7 +6756,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, @@ -5646,7 +6765,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5655,7 +6774,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { 
NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, - NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5664,7 +6783,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5709,7 +6828,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, @@ -5718,7 +6837,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5727,7 +6846,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, - NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5736,7 +6855,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5781,7 +6900,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, @@ -5790,7 +6909,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5799,7 +6918,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, - NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5808,7 +6927,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5853,7 +6972,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, @@ -5862,7 +6981,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5871,7 +6990,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, - NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5880,7 +6999,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, - NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, @@ -5916,7 +7035,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff, - NPC_GRE_F_KEY|NPC_GRE_VER_1, + NPC_GRE_F_KEY | NPC_GRE_VER_1, 0xffff, 0x0000, 0x0000, @@ -5925,7 +7044,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff, - NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_VER_1, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1, 
0xffff, 0x0000, 0x0000, @@ -5934,7 +7053,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff, - NPC_GRE_F_KEY|NPC_GRE_F_ACK|NPC_GRE_VER_1, + NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1, 0xffff, 0x0000, 0x0000, @@ -5943,7 +7062,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff, - NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_F_ACK|NPC_GRE_VER_1, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1, 0xffff, 0x0000, 0x0000, @@ -5977,7 +7096,9 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu9_cam_entries[] = { +static struct npc_kpu_profile_cam kpu9_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff, NPC_MPLS_S, @@ -6387,8 +7508,8 @@ static const struct npc_kpu_profile_cam kpu9_cam_entries[] = { NPC_S_KPU9_GTPU, 0xff, 0x0000, 0x0000, - 0x0000, - 0x0000, + NPC_GTP_PT_GTP | NPC_GTP_VER1, + NPC_GTP_PT_MASK | NPC_GTP_VER_MASK, 0x0000, 0x0000, }, @@ -6448,7 +7569,9 @@ static const struct npc_kpu_profile_cam kpu9_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu10_cam_entries[] = { +static struct npc_kpu_profile_cam kpu10_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, @@ -6613,7 +7736,9 @@ static const struct npc_kpu_profile_cam kpu10_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu11_cam_entries[] = { +static struct npc_kpu_profile_cam kpu11_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP, @@ -6922,13 +8047,15 @@ static const struct npc_kpu_profile_cam kpu11_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu12_cam_entries[] = { +static struct npc_kpu_profile_cam kpu12_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, }, @@ -6936,8 +8063,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = { NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, }, @@ -6945,8 +8072,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = { NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, }, @@ -6954,8 +8081,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = { NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, }, @@ -6963,8 +8090,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = { NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, }, @@ -6972,8 +8099,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = { NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, 
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, }, @@ -6981,8 +8108,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = { NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, }, @@ -6990,8 +8117,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = { NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000, - NPC_IP_VER_4|NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, }, @@ -7177,7 +8304,9 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu13_cam_entries[] = { +static struct npc_kpu_profile_cam kpu13_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU13_TU_IP6_EXT, 0xff, 0x0000, @@ -7189,7 +8318,9 @@ static const struct npc_kpu_profile_cam kpu13_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu14_cam_entries[] = { +static struct npc_kpu_profile_cam kpu14_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU14_TU_IP6_EXT, 0xff, 0x0000, @@ -7201,7 +8332,9 @@ static const struct npc_kpu_profile_cam kpu14_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu15_cam_entries[] = { +static struct npc_kpu_profile_cam kpu15_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU15_TU_TCP, 0xff, 0x0000, @@ -7224,8 +8357,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = { NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000, - NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN, - NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN, 0x0000, 0x0000, }, @@ -7233,8 +8366,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = { NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000, - NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN, - NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN, 0x0000, 0x0000, }, @@ -7242,8 +8375,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = { NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000, - NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN, - NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN, 0x0000, 0x0000, }, @@ -7251,8 +8384,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = { NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000, - NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN, - NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN, 0x0000, 0x0000, }, @@ -7402,7 +8535,9 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = { }, }; -static const struct npc_kpu_profile_cam kpu16_cam_entries[] = { +static struct npc_kpu_profile_cam kpu16_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, { NPC_S_KPU16_TCP_DATA, 0xff, 0x0000, @@ -7459,7 +8594,9 @@ static const struct npc_kpu_profile_cam kpu16_cam_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu1_action_entries[] = { +static struct npc_kpu_profile_action kpu1_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 3, 0, @@ -7511,6 +8648,14 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 12, 0, 0, 0, + 
NPC_S_KPU2_NGIO, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 0, 0, NPC_S_KPU2_CTAG2, 12, 1, NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, @@ -7518,7 +8663,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, + 4, 8, 12, 0, 0, NPC_S_KPU2_CTAG, 12, 1, NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, @@ -7550,14 +8695,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 18, 22, 26, 0, 0, - NPC_S_KPU2_ITAG, 12, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 2, 0, NPC_S_KPU4_MPLS, 14, 1, NPC_LID_LA, NPC_LT_LA_ETHER, @@ -7590,6 +8727,14 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 2, 0, + NPC_S_KPU4_PPPOE, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 1, NPC_LID_LA, NPC_LT_LA_8023, @@ -7707,15 +8852,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 18, 22, 26, 0, 0, - NPC_S_KPU2_ITAG, 20, 1, - NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, - NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG - | NPC_F_LA_L_WITH_ITAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 2, 0, NPC_S_KPU4_MPLS, 22, 1, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, @@ -7788,7 +8924,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 16, 2, 0, + 4, 8, 12, 2, 0, NPC_S_KPU4_FDSA, 12, 1, NPC_LID_LA, NPC_LT_LA_ETHER, 0, @@ -7897,15 +9033,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 18, 22, 26, 0, 0, - NPC_S_KPU2_ITAG, 28, 1, - NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, - NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG - | NPC_F_LA_L_WITH_ITAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 2, 0, NPC_S_KPU4_MPLS, 30, 1, NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, @@ -8031,15 +9158,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 18, 22, 26, 0, 0, - NPC_S_KPU2_ITAG, 36, 1, - NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, - NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 - | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 2, 0, NPC_S_KPU4_MPLS, 38, 1, NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, @@ -8075,6 +9193,326 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = { 0, 0, 0, 0, }, { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU5_IP, 104, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU5_IP6, 104, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_ARP, 104, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_RARP, 104, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_PTP, 104, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 
0, + NPC_S_KPU5_FCOE, 104, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 0, 0, + NPC_S_KPU2_CTAG2, 102, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CTAG, 102, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 22, 0, 0, + NPC_S_KPU2_SBTAG, 102, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_QINQ, 102, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 26, 0, 0, + NPC_S_KPU2_ETAG, 102, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 104, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 104, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU4_NSH, 104, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_F_LA_L_WITH_NSH, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_F_LA_L_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU5_CPT_IP, 56, 1, + NPC_LID_LA, NPC_LT_LA_CPT_HDR, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU5_CPT_IP6, 56, 1, + NPC_LID_LA, NPC_LT_LA_CPT_HDR, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CTAG, 54, 1, + NPC_LID_LA, NPC_LT_LA_CPT_HDR, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_QINQ, 54, 1, + NPC_LID_LA, NPC_LT_LA_CPT_HDR, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU5_CPT_IP, 60, 1, + NPC_LID_LA, NPC_LT_LA_CPT_HDR, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU5_CPT_IP6, 60, 1, + NPC_LID_LA, NPC_LT_LA_CPT_HDR, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CTAG, 58, 1, + NPC_LID_LA, NPC_LT_LA_CPT_HDR, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_QINQ, 58, 1, + NPC_LID_LA, NPC_LT_LA_CPT_HDR, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_CPT_HDR, + NPC_F_LA_L_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU5_IP, 38, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU5_IP6, 38, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_ARP, 38, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + 0, + 0, 0, 0, 0, + }, + { + 
NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_RARP, 38, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_PTP, 38, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_FCOE, 38, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 0, 0, + NPC_S_KPU2_CTAG2, 36, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CTAG, 36, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 22, 0, 0, + NPC_S_KPU2_SBTAG, 36, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_QINQ, 36, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 26, 0, 0, + NPC_S_KPU2_ETAG, 36, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 38, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 38, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU4_NSH, 38, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_F_LA_L_WITH_NSH, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_F_LA_L_UNK_ETYPE, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 0, 0, 1, 0, + NPC_S_KPU3_VLAN_EXDSA, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, + }, + { NPC_ERRLEV_LA, NPC_EC_L2_K1, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, @@ -8084,7 +9522,9 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu2_action_entries[] = { +static struct npc_kpu_profile_action kpu2_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 2, 0, @@ -8159,6 +9599,22 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 14, 1, + NPC_LID_LB, NPC_LT_LB_PPPOE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 14, 1, + NPC_LID_LB, NPC_LT_LB_PPPOE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 1, NPC_LID_LB, NPC_LT_LB_CTAG, @@ -8170,7 +9626,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 8, 0, 6, 2, 0, NPC_S_KPU5_IP, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, 0, 0, 0, 0, }, { @@ -8178,7 +9634,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 6, 0, 0, 2, 0, NPC_S_KPU5_IP6, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, 0, 0, 0, 0, }, { @@ -8186,7 
+9642,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 2, 0, NPC_S_KPU5_ARP, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, 0, 0, 0, 0, }, { @@ -8194,7 +9650,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 2, 0, NPC_S_KPU5_RARP, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, 0, 0, 0, 0, }, { @@ -8202,7 +9658,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 2, 0, NPC_S_KPU5_PTP, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, 0, 0, 0, 0, }, { @@ -8210,7 +9666,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 2, 0, NPC_S_KPU5_FCOE, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, 0, 0, 0, 0, }, { @@ -8218,7 +9674,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 6, 10, 1, 0, NPC_S_KPU4_MPLS, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, 0, 0, 0, 0, }, { @@ -8226,7 +9682,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 6, 10, 1, 0, NPC_S_KPU4_MPLS, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, 0, 0, 0, 0, }, { @@ -8234,7 +9690,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 0, 0, 1, 0, NPC_S_KPU4_NSH, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, 0, 0, 0, 0, }, { @@ -8242,7 +9698,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 0, 1, NPC_S_NA, 0, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG_UNK, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG_UNK, 0, 0, 0, 0, }, { @@ -8250,7 +9706,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 6, 0, 0, 0, NPC_S_KPU3_CTAG, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG_CTAG, 0, 0, 0, 0, }, { @@ -8258,7 +9714,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 6, 0, 0, 0, NPC_S_KPU3_STAG, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_STAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG_STAG, 0, 0, 0, 0, }, { @@ -8266,7 +9722,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 8, 0, 6, 2, 0, NPC_S_KPU5_IP, 24, 1, NPC_LID_LB, NPC_LT_LB_BTAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, 0, 0, 0, 0, }, { @@ -8274,7 +9730,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 6, 0, 0, 2, 0, NPC_S_KPU5_IP6, 24, 1, NPC_LID_LB, NPC_LT_LB_BTAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, 0, 0, 0, 0, }, { @@ -8282,7 +9738,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 2, 0, NPC_S_KPU5_ARP, 24, 1, NPC_LID_LB, NPC_LT_LB_BTAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, 0, 0, 0, 0, }, { @@ -8290,7 +9746,7 @@ static const struct 
npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 2, 0, NPC_S_KPU5_RARP, 24, 1, NPC_LID_LB, NPC_LT_LB_BTAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, 0, 0, 0, 0, }, { @@ -8298,7 +9754,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 2, 0, NPC_S_KPU5_PTP, 24, 1, NPC_LID_LB, NPC_LT_LB_BTAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, 0, 0, 0, 0, }, { @@ -8306,7 +9762,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 2, 0, NPC_S_KPU5_FCOE, 24, 1, NPC_LID_LB, NPC_LT_LB_BTAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, 0, 0, 0, 0, }, { @@ -8314,7 +9770,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 6, 10, 1, 0, NPC_S_KPU4_MPLS, 24, 1, NPC_LID_LB, NPC_LT_LB_BTAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, 0, 0, 0, 0, }, { @@ -8322,7 +9778,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 6, 10, 1, 0, NPC_S_KPU4_MPLS, 24, 1, NPC_LID_LB, NPC_LT_LB_BTAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, 0, 0, 0, 0, }, { @@ -8330,7 +9786,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 0, 0, 1, 0, NPC_S_KPU4_NSH, 24, 1, NPC_LID_LB, NPC_LT_LB_BTAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, 0, 0, 0, 0, }, { @@ -8338,7 +9794,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 0, 0, 0, 0, NPC_S_KPU3_STAG, 24, 1, NPC_LID_LB, NPC_LT_LB_BTAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_STAG, 0, 0, 0, 0, }, { @@ -8346,7 +9802,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 0, 0, 0, 0, NPC_S_KPU3_CTAG, 24, 1, NPC_LID_LB, NPC_LT_LB_BTAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_CTAG, 0, 0, 0, 0, }, { @@ -8354,7 +9810,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 0, 1, NPC_S_NA, 0, 1, NPC_LID_LB, NPC_LT_LB_BTAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_UNK, 0, 0, 0, 0, }, { @@ -8546,15 +10002,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 0, 0, 0, 0, NPC_S_KPU3_CTAG, 10, 1, NPC_LID_LB, NPC_LT_LB_ETAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 16, 20, 24, 0, 0, - NPC_S_KPU3_ITAG, 14, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_BTAG_ITAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, 0, 0, 0, 0, }, { @@ -8562,7 +10010,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 6, 0, 0, 0, NPC_S_KPU3_STAG, 10, 1, NPC_LID_LB, NPC_LT_LB_ETAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG, 0, 0, 0, 0, }, { @@ -8570,7 +10018,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 6, 0, 0, 0, NPC_S_KPU3_QINQ, 10, 1, NPC_LID_LB, NPC_LT_LB_ETAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ, 0, 0, 0, 0, }, { @@ -8578,7 +10026,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 8, 0, 6, 2, 0, NPC_S_KPU5_IP, 28, 1, NPC_LID_LB, NPC_LT_LB_ETAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, 0, 0, 0, 0, }, { @@ -8586,7 +10034,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 6, 0, 0, 2, 0, NPC_S_KPU5_IP6, 28, 1, NPC_LID_LB, NPC_LT_LB_ETAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, 0, 0, 0, 0, }, { @@ -8594,7 +10042,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 2, 0, NPC_S_KPU5_ARP, 28, 1, NPC_LID_LB, NPC_LT_LB_ETAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, 0, 0, 0, 0, }, { @@ -8602,7 +10050,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 0, 0, 0, 0, NPC_S_KPU3_STAG, 28, 1, NPC_LID_LB, NPC_LT_LB_ETAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_STAG, 0, 0, 0, 0, }, { @@ -8610,7 +10058,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 2, 0, 0, 0, 0, NPC_S_KPU3_CTAG, 28, 1, NPC_LID_LB, NPC_LT_LB_ETAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_CTAG, 0, 0, 0, 0, }, { @@ -8618,7 +10066,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 0, 1, NPC_S_NA, 0, 1, NPC_LID_LB, NPC_LT_LB_ETAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_UNK, 0, 0, 0, 0, }, { @@ -8632,142 +10080,6 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 2, 0, - NPC_S_KPU5_IP, 20, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, - NPC_S_KPU5_IP6, 20, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 2, 0, - NPC_S_KPU5_ARP, 20, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 2, 0, - NPC_S_KPU5_RARP, 20, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 2, 0, - NPC_S_KPU5_IP, 28, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, - NPC_S_KPU5_IP6, 28, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 2, 0, - NPC_S_KPU5_ARP, 28, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 2, 0, - NPC_S_KPU5_IP, 24, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, - NPC_S_KPU5_IP6, 24, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 2, 0, - NPC_S_KPU5_ARP, 24, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 2, 0, - NPC_S_KPU5_IP, 24, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, - NPC_S_KPU5_IP6, 24, 
1, - NPC_LID_LB, NPC_LT_LB_ITAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 2, 0, - NPC_S_KPU5_ARP, 24, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, - NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 2, 0, NPC_S_KPU5_IP, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, @@ -9078,6 +10390,14 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 0, }, { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_NGIO, + 0, + 0, 0, 0, 0, + }, + { NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, @@ -9087,11 +10407,13 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu3_action_entries[] = { +static struct npc_kpu_profile_action kpu3_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 1, 0, - NPC_S_KPU5_IP, 4, 0, + NPC_S_KPU5_IP, 6, 0, NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, @@ -9099,7 +10421,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, 1, 0, - NPC_S_KPU5_IP6, 4, 0, + NPC_S_KPU5_IP6, 6, 0, NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, @@ -9107,7 +10429,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 1, 0, - NPC_S_KPU5_ARP, 4, 0, + NPC_S_KPU5_ARP, 6, 0, NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, @@ -9115,7 +10437,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 1, 0, - NPC_S_KPU5_RARP, 4, 0, + NPC_S_KPU5_RARP, 6, 0, NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, @@ -9123,7 +10445,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 1, 0, - NPC_S_KPU5_PTP, 4, 0, + NPC_S_KPU5_PTP, 6, 0, NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, @@ -9131,7 +10453,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 1, 0, - NPC_S_KPU5_FCOE, 4, 0, + NPC_S_KPU5_FCOE, 6, 0, NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, @@ -9139,7 +10461,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 0, 0, - NPC_S_KPU4_MPLS, 4, 0, + NPC_S_KPU4_MPLS, 6, 0, NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, @@ -9147,7 +10469,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 0, 0, - NPC_S_KPU4_MPLS, 4, 0, + NPC_S_KPU4_MPLS, 6, 0, NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, @@ -9155,7 +10477,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, 0, 0, - NPC_S_KPU4_NSH, 4, 0, + NPC_S_KPU4_NSH, 6, 0, NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, @@ -9458,142 +10780,6 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 2, 0, - NPC_S_KPU5_IP, 18, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, - NPC_S_KPU5_IP6, 18, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 2, 0, - NPC_S_KPU5_ARP, 
18, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 2, 0, - NPC_S_KPU5_RARP, 18, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 1, 0, - NPC_S_KPU5_IP, 26, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, - NPC_S_KPU5_IP6, 26, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 1, 0, - NPC_S_KPU5_ARP, 26, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 1, 0, - NPC_S_KPU5_IP, 22, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, - NPC_S_KPU5_IP6, 22, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 1, 0, - NPC_S_KPU5_ARP, 22, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 1, 0, - NPC_S_KPU5_IP, 22, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, - NPC_S_KPU5_IP6, 22, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 1, 0, - NPC_S_KPU5_ARP, 22, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 1, 0, NPC_S_KPU5_IP, 4, 1, NPC_LID_LB, NPC_LT_LB_CTAG, @@ -10073,6 +11259,14 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = { 0, 0, 0, 0, }, { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU4_VLAN_EXDSA, 12, 1, + NPC_LID_LB, NPC_LT_LB_VLAN_EXDSA, + 0, + 0, 0, 0, 0, + }, + { NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, @@ -10082,7 +11276,9 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu4_action_entries[] = { +static struct npc_kpu_profile_action kpu4_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 0, @@ -10205,6 +11401,70 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU5_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_FDSA, + NPC_F_LB_L_FDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU5_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_FDSA, + NPC_F_LB_L_FDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU5_ARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_FDSA, + NPC_F_LB_L_FDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU5_RARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_FDSA, + NPC_F_LB_L_FDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU5_PTP, 10, 1, + NPC_LID_LB, NPC_LT_LB_FDSA, + NPC_F_LB_L_FDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU5_FCOE, 10, 1, + NPC_LID_LB, NPC_LT_LB_FDSA, + NPC_F_LB_L_FDSA, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, 
NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU5_IP, 14, 1, + NPC_LID_LB, NPC_LT_LB_PPPOE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU5_IP6, 14, 1, + NPC_LID_LB, NPC_LT_LB_PPPOE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 1, NPC_LID_LB, NPC_LT_LB_FDSA, @@ -10212,6 +11472,78 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = { 0, 0, 0, 0, }, { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU5_IP, 2, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU5_IP6, 2, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU5_ARP, 2, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU5_RARP, 2, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU5_PTP, 2, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU5_FCOE, 2, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU5_IP, 10, 0, + NPC_LID_LB, NPC_LT_LB_PPPOE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU5_IP6, 10, 0, + NPC_LID_LB, NPC_LT_LB_PPPOE, + 0, + 0, 0, 0, 0, + }, + { NPC_ERRLEV_LB, NPC_EC_L2_K4, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, @@ -10221,7 +11553,9 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu5_action_entries[] = { +static struct npc_kpu_profile_action kpu5_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_LC, NPC_EC_IP_TTL_0, 0, 0, 0, 0, 1, @@ -10719,6 +12053,382 @@ static const struct npc_kpu_profile_action kpu5_action_entries[] = { 0, 0, 0, 0, }, { + NPC_ERRLEV_LC, NPC_EC_IP_TTL_0, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_U_IP_FRAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU8_TCP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU8_UDP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_SCTP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ICMP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_IGMP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU9_ESP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_AH, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU8_GRE, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 6, 0, + NPC_S_KPU12_TU_IP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_L_IP_IN_IP, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, 
NPC_EC_NOERR, + 6, 0, 0, 6, 0, + NPC_S_KPU12_TU_IP6, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_L_6TO4, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 3, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_L_MPLS_IN_IP, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_U_UNK_PROTO, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU8_TCP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 2, 0, + NPC_S_KPU8_UDP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_SCTP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ICMP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_IGMP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU9_ESP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_AH, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU8_GRE, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 6, 0, + NPC_S_KPU12_TU_IP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_L_IP_IN_IP, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 6, 0, + NPC_S_KPU12_TU_IP6, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_L_6TO4, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 3, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_L_MPLS_IN_IP, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_U_UNK_PROTO, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LC, NPC_EC_IP_VER, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU8_TCP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU8_UDP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_SCTP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ICMP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ICMP6, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_GRE, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 6, 0, + NPC_S_KPU12_TU_IP6, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + NPC_F_LC_L_IP6_TUN_IP6, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 3, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + NPC_F_LC_L_IP6_MPLS_IN_IP, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + 
NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_HOP, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_DEST, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU6_IP6_CPT_ROUT, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_ROUT, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 2, 0, 0, 0, + NPC_S_KPU6_IP6_CPT_FRAG, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_U_IP6_FRAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU9_ESP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_AH, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_MOBILITY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_HOSTID, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_SHIM6, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + NPC_F_LC_U_UNK_PROTO, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LC, NPC_EC_IP6_VER, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { NPC_ERRLEV_LC, NPC_EC_UNK, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, @@ -10728,7 +12438,9 @@ static const struct npc_kpu_profile_action kpu5_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu6_action_entries[] = { +static struct npc_kpu_profile_action kpu6_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, @@ -11026,6 +12738,294 @@ static const struct npc_kpu_profile_action kpu6_action_entries[] = { 0, 0, 0, 0, }, { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 1, 0, + NPC_S_KPU8_TCP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 1, 0, + NPC_S_KPU8_UDP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_SCTP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU9_ESP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_AH, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_GRE, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 5, 0, + NPC_S_KPU12_TU_IP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 1, 0, + NPC_S_KPU8_TCP, 8, 0, + NPC_LID_LC, 
NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 1, 0, + NPC_S_KPU8_UDP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_SCTP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU9_ESP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_AH, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_GRE, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 5, 0, + NPC_S_KPU12_TU_IP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU7_IP6_ROUT, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 2, 0, 0, 0, + NPC_S_KPU7_CPT_IP6_FRAG, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 1, 0, + NPC_S_KPU8_TCP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 1, 0, + NPC_S_KPU8_UDP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_SCTP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU9_ESP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_AH, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_GRE, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 5, 0, + NPC_S_KPU12_TU_IP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 2, 0, 0, 0, + NPC_S_KPU7_CPT_IP6_FRAG, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { NPC_ERRLEV_LC, NPC_EC_UNK, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, @@ -11035,7 +13035,9 @@ static const struct npc_kpu_profile_action kpu6_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu7_action_entries[] = { +static struct npc_kpu_profile_action kpu7_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, @@ -11221,6 
+13223,94 @@ static const struct npc_kpu_profile_action kpu7_action_entries[] = { 0, 0, 0, 0, }, { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 0, 0, + NPC_S_KPU8_TCP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 0, 0, + NPC_S_KPU8_UDP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_SCTP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_ICMP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_ICMP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU9_ESP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_AH, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_GRE, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 4, 0, + NPC_S_KPU12_TU_IP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { NPC_ERRLEV_LC, NPC_EC_UNK, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, @@ -11230,7 +13320,9 @@ static const struct npc_kpu_profile_action kpu7_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu8_action_entries[] = { +static struct npc_kpu_profile_action kpu8_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY, 0, 0, 0, 0, 1, @@ -11889,7 +13981,9 @@ static const struct npc_kpu_profile_action kpu8_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu9_action_entries[] = { +static struct npc_kpu_profile_action kpu9_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 0, @@ -12252,10 +14346,10 @@ static const struct npc_kpu_profile_action kpu9_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, NPC_LID_LE, NPC_LT_LE_GTPU, - NPC_F_LE_L_GTPU_UNK, + 0, 0, 0, 0, 0, }, { @@ -12308,7 +14402,9 @@ static const struct npc_kpu_profile_action kpu9_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu10_action_entries[] = { +static struct npc_kpu_profile_action kpu10_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 1, 0, @@ -12455,7 +14551,9 @@ static const struct npc_kpu_profile_action kpu10_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu11_action_entries[] = { +static struct npc_kpu_profile_action kpu11_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 0, 0, @@ -12730,7 +14828,9 @@ static const struct npc_kpu_profile_action kpu11_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu12_action_entries[] = { +static struct npc_kpu_profile_action kpu12_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0, 2, 0, @@ -12957,7 +15057,9 @@ static const struct npc_kpu_profile_action kpu12_action_entries[] 
= { }, }; -static const struct npc_kpu_profile_action kpu13_action_entries[] = { +static struct npc_kpu_profile_action kpu13_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, @@ -12968,7 +15070,9 @@ static const struct npc_kpu_profile_action kpu13_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu14_action_entries[] = { +static struct npc_kpu_profile_action kpu14_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, @@ -12979,7 +15083,9 @@ static const struct npc_kpu_profile_action kpu14_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu15_action_entries[] = { +static struct npc_kpu_profile_action kpu15_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY, 0, 0, 0, 0, 1, @@ -13158,7 +15264,9 @@ static const struct npc_kpu_profile_action kpu15_action_entries[] = { }, }; -static const struct npc_kpu_profile_action kpu16_action_entries[] = { +static struct npc_kpu_profile_action kpu16_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, @@ -13209,7 +15317,7 @@ static const struct npc_kpu_profile_action kpu16_action_entries[] = { }, }; -static const struct npc_kpu_profile npc_kpu_profiles[] = { +static struct npc_kpu_profile npc_kpu_profiles[] = { { ARRAY_SIZE(kpu1_cam_entries), ARRAY_SIZE(kpu1_action_entries), @@ -13308,12 +15416,22 @@ static const struct npc_kpu_profile npc_kpu_profiles[] = { }, }; -static const struct npc_lt_def_cfg npc_lt_defaults = { +static struct npc_lt_def_cfg npc_lt_defaults = { .rx_ol2 = { .lid = NPC_LID_LA, .ltype_match = NPC_LT_LA_ETHER, .ltype_mask = 0x0F, }, + .ovlan = { + .lid = NPC_LID_LB, + .ltype_match = NPC_LT_LB_CTAG, + .ltype_mask = 0x0F, + }, + .ivlan = { + .lid = NPC_LID_LB, + .ltype_match = NPC_LT_LB_STAG_QINQ, + .ltype_mask = 0x0F, + }, .rx_oip4 = { .lid = NPC_LID_LC, .ltype_match = NPC_LT_LC_IP, @@ -13392,6 +15510,30 @@ static const struct npc_lt_def_cfg npc_lt_defaults = { .ltype_match = NPC_LT_LG_TU_IP, .ltype_mask = 0x0F, }, + .rx_apad0 = { + .valid = 0, + .lid = NPC_LID_LC, + .ltype_match = NPC_LT_LC_IP6, + .ltype_mask = 0x0F, + }, + .rx_apad1 = { + .valid = 0, + .lid = NPC_LID_LC, + .ltype_match = NPC_LT_LC_IP6, + .ltype_mask = 0x0F, + }, + .rx_et = { + { + .lid = NPC_LID_LB, + .ltype_match = NPC_LT_NA, + .ltype_mask = 0x0, + }, + { + .lid = NPC_LID_LB, + .ltype_match = NPC_LT_NA, + .ltype_mask = 0x0, + }, + }, }; static struct npc_mcam_kex npc_mkex_default = { @@ -13399,7 +15541,7 @@ static struct npc_mcam_kex npc_mkex_default = { .name = "default", .kpu_version = NPC_KPU_PROFILE_VER, .keyx_cfg = { - /* nibble: LA..LE (ltype only) + channel */ + /* nibble: LA..LE (ltype only) + Error code + Channel */ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX, /* nibble: LA..LE (ltype only) */ [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX, @@ -13410,30 +15552,40 @@ static struct npc_mcam_kex npc_mkex_default = { [NPC_LID_LA] = { /* Layer A: Ethernet: */ [NPC_LT_LA_ETHER] = { - /* DMAC: 6 bytes, KW1[47:0] */ + /* DMAC: 6 bytes, KW1[55:8] */ KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC), - /* Ethertype: 2 bytes, KW0[47:32] */ - KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4), + /* Ethertype: 2 bytes, KW0[55:40] */ + KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5), + }, + /* Layer A: HiGig2: */ + [NPC_LT_LA_HIGIG2_ETHER] = { + /* Classification: 2 bytes, 
KW1[23:8] */ + KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, NPC_KEXOF_DMAC), + /* VID: 2 bytes, KW1[39:24] */ + KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, + NPC_KEXOF_DMAC + 2), }, }, [NPC_LID_LB] = { /* Layer B: Single VLAN (CTAG) */ - /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */ [NPC_LT_LB_CTAG] = { - KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4), + /* CTAG VLAN: 2 bytes, KW1[7:0], KW0[63:56] */ + KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7), + /* Ethertype: 2 bytes, KW0[55:40] */ + KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5), }, /* Layer B: Stacked VLAN (STAG|QinQ) */ [NPC_LT_LB_STAG_QINQ] = { - /* Outer VLAN: 2 bytes, KW0[63:48] */ - KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6), - /* Ethertype: 2 bytes, KW0[47:32] */ - KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4), + /* Outer VLAN: 2 bytes, KW1[7:0], KW0[63:56] */ + KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7), + /* Ethertype: 2 bytes, KW0[55:40] */ + KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x5), }, [NPC_LT_LB_FDSA] = { - /* SWITCH PORT: 1 byte, KW0[63:48] */ - KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x6), - /* Ethertype: 2 bytes, KW0[47:32] */ - KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x4), + /* SWITCH PORT: 1 byte, KW0[63:56] */ + KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x7), + /* Ethertype: 2 bytes, KW0[55:40] */ + KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5), }, }, [NPC_LID_LC] = { @@ -13477,6 +15629,13 @@ static struct npc_mcam_kex npc_mkex_default = { /* DMAC: 6 bytes, KW1[63:16] */ KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa), }, + /* Layer A: HiGig2: */ + [NPC_LT_LA_IH_NIX_HIGIG2_ETHER] = { + /* PF_FUNC: 2B , KW0 [47:32] */ + KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4), + /* VID: 2 bytes, KW1[31:16] */ + KEX_LD_CFG(0x01, 0x10, 0x1, 0x0, 0xa), + }, }, [NPC_LID_LB] = { /* Layer B: Single VLAN (CTAG) */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index ab24a5e8ee8a..5fe277e354f7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -57,6 +57,10 @@ static char *mkex_profile; /* MKEX profile name */ module_param(mkex_profile, charp, 0000); MODULE_PARM_DESC(mkex_profile, "MKEX profile name string"); +static char *kpu_profile; /* KPU profile name */ +module_param(kpu_profile, charp, 0000); +MODULE_PARM_DESC(kpu_profile, "KPU profile name string"); + static void rvu_setup_hw_capabilities(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; @@ -180,6 +184,14 @@ int rvu_rsrc_free_count(struct rsrc_bmap *rsrc) return (rsrc->max - used); } +bool is_rsrc_free(struct rsrc_bmap *rsrc, int id) +{ + if (!rsrc->bmap) + return false; + + return !test_bit(id, rsrc->bmap); +} + int rvu_alloc_bitmap(struct rsrc_bmap *rsrc) { rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max), @@ -379,8 +391,10 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) /* Get numVFs attached to this PF and first HWVF */ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); - *numvfs = (cfg >> 12) & 0xFF; - *hwvf = cfg & 0xFFF; + if (numvfs) + *numvfs = (cfg >> 12) & 0xFF; + if (hwvf) + *hwvf = cfg & 0xFFF; } static int rvu_get_hwvf(struct rvu *rvu, int pcifunc) @@ -1302,7 +1316,7 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu, return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); } -static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) +int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int blkaddr = BLKADDR_NIX0, vf; @@ -1754,6 +1768,48 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, return 0; } +int rvu_mbox_handler_set_vf_perm(struct 
rvu *rvu, struct set_vf_perm *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + int blkaddr, nixlf; + u16 target; + + /* Only PF can add VF permissions */ + if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc)) + return -EOPNOTSUPP; + + target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1); + pfvf = rvu_get_pfvf(rvu, target); + + if (req->flags & RESET_VF_PERM) { + pfvf->flags &= RVU_CLEAR_VF_PERM; + } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^ + (req->flags & VF_TRUSTED)) { + change_bit(PF_SET_VF_TRUSTED, &pfvf->flags); + /* disable multicast and promisc entries */ + if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) { + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target); + if (blkaddr < 0) + return 0; + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], + target, 0); + if (nixlf < 0) + return 0; + npc_enadis_default_mce_entry(rvu, target, nixlf, + NIXLF_ALLMULTI_ENTRY, + false); + npc_enadis_default_mce_entry(rvu, target, nixlf, + NIXLF_PROMISC_ENTRY, + false); + } + } + + return 0; +} + static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, struct mbox_msghdr *req) { @@ -2279,6 +2335,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA); + rvu_reset_lmt_map_tbl(rvu, pcifunc); rvu_detach_rsrcs(rvu, NULL, pcifunc); mutex_unlock(&rvu->flr_lock); } @@ -2804,6 +2861,12 @@ static int rvu_enable_sriov(struct rvu *rvu) if (!vfs) return 0; + /* LBK channel number 63 is used for switching packets between + * CGX mapped VFs. Hence limit LBK pairs to 62. + */ + if (vfs > 62) + vfs = 62; + /* Save the number of VFs for reference in VF interrupt handlers. * Since interrupts might start arriving during SRIOV enablement, * the ordinary API cannot be used to get the number of enabled VFs. @@ -2842,6 +2905,8 @@ static void rvu_update_module_params(struct rvu *rvu) strscpy(rvu->mkex_pfl_name, mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN); + strscpy(rvu->kpu_pfl_name, + kpu_profile ? 
kpu_profile : default_pfl_name, KPU_NAME_LEN); } static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -2944,6 +3009,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Initialize debugfs */ rvu_dbg_init(rvu); + mutex_init(&rvu->rswitch.switch_lock); + return 0; err_dl: rvu_unregister_dl(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index c2cc4806d13c..91503fb2762c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -223,13 +223,17 @@ struct rvu_pfvf { u16 maxlen; u16 minlen; - u8 pf_set_vf_cfg; u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */ - /* Broadcast pkt replication info */ + /* Broadcast/Multicast/Promisc pkt replication info */ u16 bcast_mce_idx; + u16 mcast_mce_idx; + u16 promisc_mce_idx; struct nix_mce_list bcast_mce_list; + struct nix_mce_list mcast_mce_list; + struct nix_mce_list promisc_mce_list; + bool use_mce_list; struct rvu_npc_mcam_rule *def_ucast_rule; @@ -239,8 +243,19 @@ struct rvu_pfvf { u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ + u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */ + unsigned long flags; }; +enum rvu_pfvf_flags { + NIXLF_INITIALIZED = 0, + PF_SET_VF_MAC, + PF_SET_VF_CFG, + PF_SET_VF_TRUSTED, +}; + +#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC) + struct nix_txsch { struct rsrc_bmap schq; u8 lvl; @@ -282,6 +297,13 @@ struct nix_txvlan { struct mutex rsrc_lock; /* Serialize resource alloc/free */ }; +struct nix_ipolicer { + struct rsrc_bmap band_prof; + u16 *pfvf_map; + u16 *match_id; + u16 *ref_count; +}; + struct nix_hw { int blkaddr; struct rvu *rvu; @@ -291,6 +313,7 @@ struct nix_hw { struct nix_mark_format mark_format; struct nix_lso lso; struct nix_txvlan txvlan; + struct nix_ipolicer *ipolicer; }; /* RVU block's capabilities or functionality, @@ -308,6 +331,7 @@ struct hw_cap { bool nix_rx_multicast; /* Rx packet replication support */ bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */ bool programmable_chans; /* Channels programmable ? 
*/ + bool ipolicer; }; struct rvu_hwinfo { @@ -386,10 +410,21 @@ struct npc_kpu_profile_adapter { const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */ const struct npc_kpu_profile *kpu; /* array[kpus] */ struct npc_mcam_kex *mkex; + bool custom; size_t pkinds; size_t kpus; }; +#define RVU_SWITCH_LBK_CHAN 63 + +struct rvu_switch { + struct mutex switch_lock; /* Serialize flow installation */ + u32 used_entries; + u16 *entry2pcifunc; + u16 mode; + u16 start_entry; +}; + struct rvu { void __iomem *afreg_base; void __iomem *pfreg_base; @@ -420,6 +455,7 @@ struct rvu { /* CGX */ #define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */ + u16 cgx_mapped_vfs; /* maximum CGX mapped VFs */ u8 cgx_mapped_pfs; u8 cgx_cnt_max; /* CGX port count max */ u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */ @@ -435,9 +471,13 @@ struct rvu { struct mutex cgx_cfg_lock; /* serialize cgx configuration */ char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */ + char kpu_pfl_name[KPU_NAME_LEN]; /* Configured KPU profile name */ /* Firmware data */ struct rvu_fwdata *fwdata; + void *kpu_fwdata; + size_t kpu_fwdata_sz; + void __iomem *kpu_prfl_addr; /* NPC KPU data */ struct npc_kpu_profile_adapter kpu; @@ -448,6 +488,9 @@ struct rvu { struct rvu_debugfs rvu_dbg; #endif struct rvu_devlink *rvu_dl; + + /* RVU switch implementation over NPC with DMAC rules */ + struct rvu_switch rswitch; }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -543,11 +586,16 @@ static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan) /* Function Prototypes * RVU */ -static inline int is_afvf(u16 pcifunc) +static inline bool is_afvf(u16 pcifunc) { return !(pcifunc & ~RVU_PFVF_FUNC_MASK); } +static inline bool is_vf(u16 pcifunc) +{ + return !!(pcifunc & RVU_PFVF_FUNC_MASK); +} + /* check if PF_FUNC is AF */ static inline bool is_pffunc_af(u16 pcifunc) { @@ -563,6 +611,7 @@ static inline bool is_rvu_fwdata_valid(struct rvu *rvu) int rvu_alloc_bitmap(struct rsrc_bmap *rsrc); int rvu_alloc_rsrc(struct rsrc_bmap *rsrc); void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id); +bool is_rsrc_free(struct rsrc_bmap *rsrc, int id); int rvu_rsrc_free_count(struct rsrc_bmap *rsrc); int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc); bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc); @@ -603,6 +652,12 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) *lmac_id = (map & 0xF); } +static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc) +{ + return ((pcifunc & RVU_PFVF_FUNC_MASK) && + is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))); +} + #define M(_name, _id, fn_name, req, rsp) \ int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *); MBOX_MESSAGES @@ -616,6 +671,8 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable); int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start); int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index, int rxtxflag, u64 *stat); +void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc); + /* NPA APIs */ int rvu_npa_init(struct rvu *rvu); void rvu_npa_freemem(struct rvu *rvu); @@ -632,10 +689,23 @@ void rvu_nix_freemem(struct rvu *rvu); int rvu_get_nixlf_count(struct rvu *rvu); void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf); int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr); -int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add); +int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, + 
struct nix_mce_list *mce_list, + int mce_idx, int mcam_index, bool add); +void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, + struct nix_mce_list **mce_list, int *mce_idx); struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr); int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr); void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc); +int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc, + struct nix_hw **nix_hw, int *blkaddr); +int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, + u16 rq_idx, u16 match_id); +int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, + struct nix_cn10k_aq_enq_req *aq_req, + struct nix_cn10k_aq_enq_rsp *aq_rsp, + u16 pcifunc, u8 ctype, u32 qidx); +int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc); /* NPC APIs */ int rvu_npc_init(struct rvu *rvu); @@ -646,13 +716,19 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en); void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan, u8 *mac_addr); void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, - int nixlf, u64 chan, u8 chan_cnt, - bool allmulti); -void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); -void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); + int nixlf, u64 chan, u8 chan_cnt); +void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf, + bool enable); void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan); -void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable); +void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, + bool enable); +void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, + u64 chan); +void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, + bool enable); +void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, int type, bool enable); void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); @@ -683,6 +759,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); void *rvu_first_cgx_pdata(struct rvu *rvu); +int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id); int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type); @@ -696,6 +773,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot); int rvu_set_channels_base(struct rvu *rvu); void rvu_program_channels(struct rvu *rvu); +/* CN10K RVU - LMT*/ +void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc); + #ifdef CONFIG_DEBUG_FS void rvu_dbg_init(struct rvu *rvu); void rvu_dbg_exit(struct rvu *rvu); @@ -703,4 +783,10 @@ void rvu_dbg_exit(struct rvu *rvu); static inline void rvu_dbg_init(struct rvu *rvu) {} static inline void rvu_dbg_exit(struct rvu *rvu) {} #endif + +/* RVU Switch */ +void rvu_switch_enable(struct rvu *rvu); +void rvu_switch_disable(struct rvu *rvu); +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc); + #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 6e2bf4fcd29c..fe99ac4a4dd8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -63,7 +63,7 @@ static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id]; } -static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) +int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) { unsigned long pfmap; @@ -126,6 +126,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) unsigned long lmac_bmap; int size, free_pkind; int cgx, lmac, iter; + int numvfs, hwvfs; if (!cgx_cnt_max) return 0; @@ -166,6 +167,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16; rvu_map_cgx_nix_block(rvu, pf, cgx, lmac); rvu->cgx_mapped_pfs++; + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs); + rvu->cgx_mapped_vfs += numvfs; pf++; } } @@ -454,6 +457,31 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) return 0; } +void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc) +{ + int pf = rvu_get_pf(pcifunc); + int i = 0, lmac_count = 0; + u8 max_dmac_filters; + u8 cgx_id, lmac_id; + void *cgx_dev; + + if (!is_cgx_config_permitted(rvu, pcifunc)) + return; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgx_dev = cgx_get_pdata(cgx_id); + lmac_count = cgx_get_lmac_cnt(cgx_dev); + max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count; + + for (i = 0; i < max_dmac_filters; i++) + cgx_lmac_addr_del(cgx_id, lmac_id, i); + + /* As cgx_lmac_addr_del() does not clear the entry at index 0, + * it needs to be done explicitly. + */ + cgx_lmac_addr_reset(cgx_id, lmac_id); +} + int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { @@ -557,6 +585,63 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, return 0; } +int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu, + struct cgx_mac_addr_add_req *req, + struct cgx_mac_addr_add_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + int rc = 0; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr); + if (rc >= 0) { + rsp->index = rc; + return 0; + } + + return rc; +} + +int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu, + struct cgx_mac_addr_del_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_del(cgx_id, lmac_id, req->index); +} + +int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu, + struct msg_req *req, + struct cgx_max_dmac_entries_get_rsp + *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + /* If the msg is received from a PF which is not mapped to a CGX LMAC, + * or from a VF, then no entries are allocated for DMAC filters at the + * CGX level. So return zero. 
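+ * + * For CGX-mapped PFs the per-LMAC budget is the split computed in + * rvu_cgx_disable_dmac_entries() above: for illustration, assuming + * MAX_DMAC_ENTRIES_PER_CGX is 32 and the CGX has 4 LMACs, each LMAC + * gets 32 / 4 = 8 DMAC filter entries.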
+ */ + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) { + rsp->max_dmac_filters = 0; + return 0; + } + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id); + return 0; +} + int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) @@ -953,3 +1038,30 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac); return 0; } + +int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_reset(cgx_id, lmac_id); +} + +int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, + struct cgx_mac_addr_update_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 7d9e71c6965f..8d48b64485c6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -10,6 +10,206 @@ #include "cgx.h" #include "rvu_reg.h" +/* RVU LMTST */ +#define LMT_TBL_OP_READ 0 +#define LMT_TBL_OP_WRITE 1 +#define LMT_MAP_TABLE_SIZE (128 * 1024) +#define LMT_MAPTBL_ENTRY_SIZE 16 + +/* Function to perform operations (read/write) on lmtst map table */ +static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, + int lmt_tbl_op) +{ + void __iomem *lmt_map_base; + u64 tbl_base; + + tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + + lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); + if (!lmt_map_base) { + dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); + return -ENOMEM; + } + + if (lmt_tbl_op == LMT_TBL_OP_READ) { + *val = readq(lmt_map_base + index); + } else { + writeq((*val), (lmt_map_base + index)); + /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S + * changes effective. Write 1 for flush and read is being used as a + * barrier and sets up a data dependency. Write to 0 after a write + * to 1 to complete the flush. 
+ */ + rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0)); + rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL); + rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00); + } + + iounmap(lmt_map_base); + return 0; +} + +static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) +{ + return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; +} + +static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, + u64 iova, u64 *lmt_addr) +{ + u64 pa, val, pf; + int err; + + if (!iova) { + dev_err(rvu->dev, "%s Requested null address for translation\n", __func__); + return -EINVAL; + } + + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); + pf = rvu_get_pf(pcifunc) & 0x1F; + val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | + ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); + + err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false); + if (err) { + dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__); + return err; + } + val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS); + if (val & ~0x1ULL) { + dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val); + return -EIO; + } + /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21] + * PA[11:0] = IOVA[11:0] + */ + pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21; + pa &= GENMASK_ULL(39, 0); + *lmt_addr = (pa << 12) | (iova & 0xFFF); + + return 0; +} + +static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + u32 tbl_idx; + int err = 0; + u64 val; + + /* Read the current lmt addr of pcifunc */ + tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); + err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + tbl_idx, err); + return err; + } + + /* Storing the secondary's lmt base address as this needs to be + * reverted in FLR. Also making sure this default value doesn't + * get overwritten on multiple calls to this mailbox. + */ + if (!pfvf->lmt_base_addr) + pfvf->lmt_base_addr = val; + + /* Update the LMT table with new addr */ + err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE); + if (err) { + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx, err); + return err; + } + return 0; +} + +int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, + struct lmtst_tbl_setup_req *req, + struct msg_rsp *rsp) +{ + u64 lmt_addr, val; + u32 pri_tbl_idx; + int err = 0; + + /* Check if PF_FUNC wants to use its own local memory as the LMTLINE + * region; if so, convert that IOVA to a physical address and + * populate the LMT table with that address. + */ + if (req->use_local_lmt_region) { + err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc, + req->lmt_iova, &lmt_addr); + if (err < 0) + return err; + + /* Update the lmt addr for this PFFUNC in the LMT table */ + err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr); + if (err) + return err; + } + + /* Reconfigure the lmtst map table for lmt region shared mode, i.e. + * make multiple PF_FUNCs share an LMTLINE region. The primary/base + * pcifunc (passed as an argument to the mailbox) is the one whose + * lmt base address will be shared with the secondary pcifunc (the + * one calling this mailbox). 
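+ * + * Illustrative example (values assumed here, not defined by this patch): + * with hw->total_vfs = 128 and LMT_MAPTBL_ENTRY_SIZE = 16 bytes, the + * table entry for PF 2 / VF 3 (FUNC = 4) sits at byte offset + * (2 * 128 + 4) * 16 = 4160, as computed by rvu_get_lmtst_tbl_index().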
+ */ + if (req->base_pcifunc) { + /* Calculating the LMT table index equivalent to primary + * pcifunc. + */ + pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc); + + /* Read the base lmt addr of the primary pcifunc */ + err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val, + LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + pri_tbl_idx, err); + return err; + } + + /* Update the base lmt addr of secondary with primary's base + * lmt addr. + */ + err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val); + if (err) + return err; + } + + return 0; +} + +/* Resetting the lmtst map table to original base addresses */ +void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + u32 tbl_idx; + int err; + + if (is_rvu_otx2(rvu)) + return; + + if (pfvf->lmt_base_addr) { + /* This corresponds to lmt map table index */ + tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); + /* Reverting back original lmt base addr for respective + * pcifunc. + */ + err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr, + LMT_TBL_OP_WRITE); + if (err) + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx, err); + pfvf->lmt_base_addr = 0; + } +} + int rvu_set_channels_base(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 9bf8eaabf9ab..9b2dfbf90e51 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -1632,6 +1632,165 @@ static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused) RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write); +static void print_band_prof_ctx(struct seq_file *m, + struct nix_bandprof_s *prof) +{ + char *str; + + switch (prof->pc_mode) { + case NIX_RX_PC_MODE_VLAN: + str = "VLAN"; + break; + case NIX_RX_PC_MODE_DSCP: + str = "DSCP"; + break; + case NIX_RX_PC_MODE_GEN: + str = "Generic"; + break; + case NIX_RX_PC_MODE_RSVD: + str = "Reserved"; + break; + } + seq_printf(m, "W0: pc_mode\t\t%s\n", str); + str = (prof->icolor == 3) ? "Color blind" : + (prof->icolor == 0) ? "Green" : + (prof->icolor == 1) ? "Yellow" : "Red"; + seq_printf(m, "W0: icolor\t\t%s\n", str); + seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena); + seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent); + seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent); + seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent); + seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent); + seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa); + seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa); + seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa); + + seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa); + str = (prof->lmode == 0) ? "byte" : "packet"; + seq_printf(m, "W1: lmode\t\t%s\n", str); + seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect); + seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv); + seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent); + seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa); + str = (prof->gc_action == 0) ? "PASS" : + (prof->gc_action == 1) ? "DROP" : "RED"; + seq_printf(m, "W1: gc_action\t\t%s\n", str); + str = (prof->yc_action == 0) ? "PASS" : + (prof->yc_action == 1) ? 
"DROP" : "RED"; + seq_printf(m, "W1: yc_action\t\t%s\n", str); + str = (prof->rc_action == 0) ? "PASS" : + (prof->rc_action == 1) ? "DROP" : "RED"; + seq_printf(m, "W1: rc_action\t\t%s\n", str); + seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo); + seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id); + seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en); + + seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts); + seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum); + seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum); + seq_printf(m, "W4: green_pkt_pass\t%lld\n", + (u64)prof->green_pkt_pass); + seq_printf(m, "W5: yellow_pkt_pass\t%lld\n", + (u64)prof->yellow_pkt_pass); + seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass); + seq_printf(m, "W7: green_octs_pass\t%lld\n", + (u64)prof->green_octs_pass); + seq_printf(m, "W8: yellow_octs_pass\t%lld\n", + (u64)prof->yellow_octs_pass); + seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass); + seq_printf(m, "W10: green_pkt_drop\t%lld\n", + (u64)prof->green_pkt_drop); + seq_printf(m, "W11: yellow_pkt_drop\t%lld\n", + (u64)prof->yellow_pkt_drop); + seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop); + seq_printf(m, "W13: green_octs_drop\t%lld\n", + (u64)prof->green_octs_drop); + seq_printf(m, "W14: yellow_octs_drop\t%lld\n", + (u64)prof->yellow_octs_drop); + seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop); + seq_puts(m, "==============================\n"); +} + +static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused) +{ + struct nix_hw *nix_hw = m->private; + struct nix_cn10k_aq_enq_req aq_req; + struct nix_cn10k_aq_enq_rsp aq_rsp; + struct rvu *rvu = nix_hw->rvu; + struct nix_ipolicer *ipolicer; + int layer, prof_idx, idx, rc; + u16 pcifunc; + char *str; + + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + if (layer == BAND_PROF_INVAL_LAYER) + continue; + str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" : + (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top"; + + seq_printf(m, "\n%s bandwidth profiles\n", str); + seq_puts(m, "=======================\n"); + + ipolicer = &nix_hw->ipolicer[layer]; + + for (idx = 0; idx < ipolicer->band_prof.max; idx++) { + if (is_rsrc_free(&ipolicer->band_prof, idx)) + continue; + + prof_idx = (idx & 0x3FFF) | (layer << 14); + rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, + 0x00, NIX_AQ_CTYPE_BANDPROF, + prof_idx); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to fetch context of %s profile %d, err %d\n", + __func__, str, idx, rc); + return 0; + } + seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx); + pcifunc = ipolicer->pfvf_map[idx]; + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) + seq_printf(m, "Allocated to :: PF %d\n", + rvu_get_pf(pcifunc)); + else + seq_printf(m, "Allocated to :: PF %d VF %d\n", + rvu_get_pf(pcifunc), + (pcifunc & RVU_PFVF_FUNC_MASK) - 1); + print_band_prof_ctx(m, &aq_rsp.prof); + } + } + return 0; +} + +RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL); + +static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused) +{ + struct nix_hw *nix_hw = m->private; + struct nix_ipolicer *ipolicer; + int layer; + char *str; + + seq_puts(m, "\nBandwidth profile resource free count\n"); + seq_puts(m, "=====================================\n"); + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + if (layer == BAND_PROF_INVAL_LAYER) + continue; + str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" : + (layer == BAND_PROF_MID_LAYER) ? 
"Mid " : "Top "; + + ipolicer = &nix_hw->ipolicer[layer]; + seq_printf(m, "%s :: Max: %4d Free: %4d\n", str, + ipolicer->band_prof.max, + rvu_rsrc_free_count(&ipolicer->band_prof)); + } + seq_puts(m, "=====================================\n"); + + return 0; +} + +RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL); + static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) { struct nix_hw *nix_hw; @@ -1664,6 +1823,10 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) &rvu_dbg_nix_ndc_rx_hits_miss_fops); debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu, &rvu_dbg_nix_qsize_fops); + debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_band_prof_ctx_fops); + debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_band_prof_rsrc_fops); } static void rvu_dbg_npa_init(struct rvu *rvu) @@ -1808,10 +1971,9 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) return err; } -static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id) { struct dentry *current_dir; - int err, lmac_id; char *buf; current_dir = filp->file->f_path.dentry->d_parent; @@ -1819,17 +1981,87 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) if (!buf) return -EINVAL; - err = kstrtoint(buf + 1, 10, &lmac_id); - if (!err) { - err = cgx_print_stats(filp, lmac_id); - if (err) - return err; - } + return kstrtoint(buf + 1, 10, lmac_id); +} + +static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +{ + int lmac_id, err; + + err = rvu_dbg_derive_lmacid(filp, &lmac_id); + if (!err) + return cgx_print_stats(filp, lmac_id); + return err; } RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); +static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id) +{ + struct pci_dev *pdev = NULL; + void *cgxd = s->private; + char *bcast, *mcast; + u16 index, domain; + u8 dmac[ETH_ALEN]; + struct rvu *rvu; + u64 cfg, mac; + int pf; + + rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); + if (!rvu) + return -ENODEV; + + pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id); + domain = 2; + + pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0); + if (!pdev) + return 0; + + cfg = cgx_read_dmac_ctrl(cgxd, lmac_id); + bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT"; + mcast = cfg & CGX_DMAC_MCAST_MODE ? 
"ACCEPT" : "REJECT"; + + seq_puts(s, + "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n"); + seq_printf(s, "%s PF%d %9s %9s", + dev_name(&pdev->dev), pf, bcast, mcast); + if (cfg & CGX_DMAC_CAM_ACCEPT) + seq_printf(s, "%12s\n\n", "UNICAST"); + else + seq_printf(s, "%16s\n\n", "PROMISCUOUS"); + + seq_puts(s, "\nDMAC-INDEX ADDRESS\n"); + + for (index = 0 ; index < 32 ; index++) { + cfg = cgx_read_dmac_entry(cgxd, index); + /* Display enabled dmac entries associated with current lmac */ + if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) && + FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) { + mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg); + u64_to_ether_addr(mac, dmac); + seq_printf(s, "%7d %pM\n", index, dmac); + } + } + + return 0; +} + +static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused) +{ + int err, lmac_id; + + err = rvu_dbg_derive_lmacid(filp, &lmac_id); + if (!err) + return cgx_print_dmac_flt(filp, lmac_id); + + return err; +} + +RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); + static void rvu_dbg_cgx_init(struct rvu *rvu) { struct mac_ops *mac_ops; @@ -1866,6 +2098,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac, cgx, &rvu_dbg_cgx_stat_fops); + debugfs_create_file("mac_filter", 0600, + rvu->rvu_dbg.lmac, cgx, + &rvu_dbg_cgx_dmac_flt_fops); } } } @@ -1878,9 +2113,6 @@ static void rvu_print_npc_mcam_info(struct seq_file *s, int entry_acnt, entry_ecnt; int cntr_acnt, cntr_ecnt; - /* Skip PF0 */ - if (!pcifunc) - return; rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr, &entry_acnt, &entry_ecnt); rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr, @@ -2063,7 +2295,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, static void rvu_dbg_npc_mcam_show_action(struct seq_file *s, struct rvu_npc_mcam_rule *rule) { - if (rule->intf == NIX_INTF_TX) { + if (is_npc_intf_tx(rule->intf)) { switch (rule->tx_action.op) { case NIX_TX_ACTIONOP_DROP: seq_puts(s, "\taction: Drop\n"); @@ -2132,6 +2364,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) struct rvu *rvu = s->private; struct npc_mcam *mcam; int pf, vf = -1; + bool enabled; int blkaddr; u16 target; u64 hits; @@ -2173,7 +2406,9 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) } rvu_dbg_npc_mcam_show_action(s, iter); - seq_printf(s, "\tenabled: %s\n", iter->enable ? "yes" : "no"); + + enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry); + seq_printf(s, "\tenabled: %s\n", enabled ? 
"yes" : "no"); if (!iter->has_cntr) continue; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 10a98bcb7c54..2688186066d9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1364,6 +1364,44 @@ static void rvu_health_reporters_destroy(struct rvu *rvu) rvu_nix_health_reporters_destroy(rvu_dl); } +static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + struct rvu_switch *rswitch; + + rswitch = &rvu->rswitch; + *mode = rswitch->mode; + + return 0; +} + +static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + struct rvu_switch *rswitch; + + rswitch = &rvu->rswitch; + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + if (rswitch->mode == mode) + return 0; + rswitch->mode = mode; + if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) + rvu_switch_enable(rvu); + else + rvu_switch_disable(rvu); + break; + default: + return -EINVAL; + } + + return 0; +} + static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) { @@ -1372,6 +1410,8 @@ static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req static const struct devlink_ops rvu_devlink_ops = { .info_get = rvu_devlink_info_get, + .eswitch_mode_get = rvu_devlink_eswitch_mode_get, + .eswitch_mode_set = rvu_devlink_eswitch_mode_set, }; int rvu_register_dl(struct rvu *rvu) @@ -1380,14 +1420,9 @@ int rvu_register_dl(struct rvu *rvu) struct devlink *dl; int err; - rvu_dl = kzalloc(sizeof(*rvu_dl), GFP_KERNEL); - if (!rvu_dl) - return -ENOMEM; - dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink)); if (!dl) { dev_warn(rvu->dev, "devlink_alloc failed\n"); - kfree(rvu_dl); return -ENOMEM; } @@ -1395,10 +1430,10 @@ int rvu_register_dl(struct rvu *rvu) if (err) { dev_err(rvu->dev, "devlink register failed with error %d\n", err); devlink_free(dl); - kfree(rvu_dl); return err; } + rvu_dl = devlink_priv(dl); rvu_dl->dl = dl; rvu_dl->rvu = rvu; rvu->rvu_dl = rvu_dl; @@ -1417,5 +1452,4 @@ void rvu_unregister_dl(struct rvu *rvu) rvu_health_reporters_destroy(rvu); devlink_unregister(dl); devlink_free(dl); - kfree(rvu_dl); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 0a8bd667cb11..4bfbbdf38770 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -21,6 +21,16 @@ static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, int type, int chan_id); +static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, + int type, bool add); +static int nix_setup_ipolicers(struct rvu *rvu, + struct nix_hw *nix_hw, int blkaddr); +static void nix_ipolicer_freemem(struct nix_hw *nix_hw); +static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, + struct nix_hw *nix_hw, u16 pcifunc); +static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); +static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, + u32 leaf_prof); enum mc_tbl_sz { MC_TBL_SZ_256, @@ -132,6 +142,22 @@ int 
nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) return 0; } +int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc, + struct nix_hw **nix_hw, int *blkaddr) +{ + struct rvu_pfvf *pfvf; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || *blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + *nix_hw = get_nix_hw(rvu->hw, *blkaddr); + if (!*nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + return 0; +} + static void nix_mce_list_init(struct nix_mce_list *list, int max) { INIT_HLIST_HEAD(&list->head); @@ -170,11 +196,22 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr) { int err; - /*Sync all in flight RX packets to LLC/DRAM */ + /* Sync all in flight RX packets to LLC/DRAM */ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); if (err) - dev_err(rvu->dev, "NIX RX software sync failed\n"); + dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n"); + + /* SW_SYNC ensures all existing transactions are finished and pkts + * are written to LLC/DRAM; queues should be torn down only after + * a successful SW_SYNC. Due to a HW erratum, in some rare scenarios + * an existing transaction might end after the SW_SYNC operation. To + * ensure the operation is fully done, do the SW_SYNC twice. + */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); + err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); + if (err) + dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n"); } static bool is_valid_txschq(struct rvu *rvu, int blkaddr, @@ -272,9 +309,10 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) rvu_nix_chan_lbk(rvu, lbkid, vf + 1); pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; + rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf); rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, - pfvf->rx_chan_cnt, false); + pfvf->rx_chan_cnt); break; } @@ -285,16 +323,17 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) pfvf->rx_chan_base, pfvf->mac_addr); /* Add this PF_FUNC to bcast pkt replication list */ - err = nix_update_bcast_mce_list(rvu, pcifunc, true); + err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true); if (err) { dev_err(rvu->dev, "Bcast list, failed to enable PF_FUNC 0x%x\n", pcifunc); return err; } - + /* Install MCAM rule matching Ethernet broadcast mac address */ rvu_npc_install_bcast_match_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base); + pfvf->maxlen = NIC_HW_MIN_FRS; pfvf->minlen = NIC_HW_MIN_FRS; @@ -310,7 +349,7 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) pfvf->minlen = 0; /* Remove this PF_FUNC from bcast pkt replication list */ - err = nix_update_bcast_mce_list(rvu, pcifunc, false); + err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false); if (err) { dev_err(rvu->dev, "Bcast list, failed to disable PF_FUNC 0x%x\n", @@ -319,6 +358,9 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) /* Free and disable any MCAM entries used by this NIX LF */ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + + /* Disable DMAC filters used */ + rvu_cgx_disable_dmac_entries(rvu, pcifunc); } int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, @@ -680,8 +722,11 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, pfvf = rvu_get_pfvf(rvu, pcifunc); nixlf = rvu_get_lf(rvu, block, pcifunc, 0); - /* Skip NIXLF check for broadcast MCE entry init */ 
- if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) { + /* Skip NIXLF check for broadcast MCE entry and bandwidth profile + * operations done by AF itself. + */ + if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) || + (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) { if (!pfvf->nixlf || nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; } @@ -721,6 +766,11 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, if (rsp) rc = NIX_AF_ERR_AQ_ENQUEUE; break; + case NIX_AQ_CTYPE_BANDPROF: + if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req, + nix_hw, pcifunc)) + rc = NIX_AF_ERR_INVALID_BANDPROF; + break; default: rc = NIX_AF_ERR_AQ_ENQUEUE; } @@ -777,6 +827,9 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(mask, &req->mce_mask, sizeof(struct nix_rx_mce_s)); + else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) + memcpy(mask, &req->prof_mask, + sizeof(struct nix_bandprof_s)); fallthrough; case NIX_AQ_INSTOP_INIT: if (req->ctype == NIX_AQ_CTYPE_RQ) @@ -789,6 +842,8 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); + else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) + memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); break; case NIX_AQ_INSTOP_NOP: case NIX_AQ_INSTOP_READ: @@ -866,6 +921,9 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(&rsp->mce, ctx, sizeof(struct nix_rx_mce_s)); + else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) + memcpy(&rsp->prof, ctx, + sizeof(struct nix_bandprof_s)); } } @@ -1906,6 +1964,35 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); } +static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, + u16 pcifunc, struct nix_txsch *txsch) +{ + struct rvu_hwinfo *hw = rvu->hw; + int lbk_link_start, lbk_links; + u8 pf = rvu_get_pf(pcifunc); + int schq; + + if (!is_pf_cgxmapped(rvu, pf)) + return; + + lbk_link_start = hw->cgx_links; + + for (schq = 0; schq < txsch->schq.max; schq++) { + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) + continue; + /* Enable all LBK links with channel 63 by default so that + * packets can be sent to LBK with a NPC TX MCAM rule + */ + lbk_links = hw->lbk_links; + while (lbk_links--) + rvu_write64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(schq, + lbk_link_start + + lbk_links), + BIT_ULL(12) | RVU_SWITCH_LBK_CHAN); + } +} + int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, struct msg_rsp *rsp) @@ -1994,6 +2081,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, rvu_write64(rvu, blkaddr, reg, regval); } + rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc, + &nix_hw->txsch[NIX_TXSCH_LVL_TL2]); + return 0; } @@ -2203,8 +2293,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, aq_req.op = op; aq_req.qidx = mce; - /* Forward bcast pkts to RQ0, RSS not needed */ - aq_req.mce.op = 0; + /* Use RSS with RSS index 0 */ + aq_req.mce.op = 1; aq_req.mce.index = 0; aq_req.mce.eol = eol; aq_req.mce.pf_func = pcifunc; @@ -2222,8 +2312,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, return 0; } -static int nix_update_mce_list(struct nix_mce_list *mce_list, - u16 pcifunc, bool add) +static int nix_update_mce_list_entry(struct nix_mce_list *mce_list, 
+ u16 pcifunc, bool add) { struct mce *mce, *tail = NULL; bool delete = false; @@ -2234,6 +2324,9 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list, if (mce->pcifunc == pcifunc && !add) { delete = true; break; + } else if (mce->pcifunc == pcifunc && add) { + /* entry already exists */ + return 0; } tail = mce; } @@ -2261,36 +2354,23 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list, return 0; } -int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) +int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, + struct nix_mce_list *mce_list, + int mce_idx, int mcam_index, bool add) { - int err = 0, idx, next_idx, last_idx; - struct nix_mce_list *mce_list; + int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; + struct npc_mcam *mcam = &rvu->hw->mcam; struct nix_mcast *mcast; struct nix_hw *nix_hw; - struct rvu_pfvf *pfvf; struct mce *mce; - int blkaddr; - /* Broadcast pkt replication is not needed for AF's VFs, hence skip */ - if (is_afvf(pcifunc)) - return 0; - - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (blkaddr < 0) - return 0; - - nix_hw = get_nix_hw(rvu->hw, blkaddr); - if (!nix_hw) - return 0; - - mcast = &nix_hw->mcast; + if (!mce_list) + return -EINVAL; /* Get this PF/VF func's MCE index */ - pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); - idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); + idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); - mce_list = &pfvf->bcast_mce_list; - if (idx > (pfvf->bcast_mce_idx + mce_list->max)) { + if (idx > (mce_idx + mce_list->max)) { dev_err(rvu->dev, "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", __func__, idx, mce_list->max, @@ -2298,20 +2378,26 @@ int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) return -EINVAL; } + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + mcast = &nix_hw->mcast; mutex_lock(&mcast->mce_lock); - err = nix_update_mce_list(mce_list, pcifunc, add); + err = nix_update_mce_list_entry(mce_list, pcifunc, add); if (err) goto end; /* Disable MCAM entry in NPC */ if (!mce_list->count) { - rvu_npc_enable_bcast_entry(rvu, pcifunc, false); + npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); goto end; } /* Dump the updated list to HW */ - idx = pfvf->bcast_mce_idx; + idx = mce_idx; last_idx = idx + mce_list->count - 1; hlist_for_each_entry(mce, &mce_list->head, node) { if (idx > last_idx) @@ -2332,7 +2418,71 @@ end: return err; } -static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw) +void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, + struct nix_mce_list **mce_list, int *mce_idx) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_pfvf *pfvf; + + if (!hw->cap.nix_rx_multicast || + !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { + *mce_list = NULL; + *mce_idx = 0; + return; + } + + /* Get this PF/VF func's MCE index */ + pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); + + if (type == NIXLF_BCAST_ENTRY) { + *mce_list = &pfvf->bcast_mce_list; + *mce_idx = pfvf->bcast_mce_idx; + } else if (type == NIXLF_ALLMULTI_ENTRY) { + *mce_list = &pfvf->mcast_mce_list; + *mce_idx = pfvf->mcast_mce_idx; + } else if (type == NIXLF_PROMISC_ENTRY) { + *mce_list = &pfvf->promisc_mce_list; + *mce_idx = pfvf->promisc_mce_idx; + } else { + *mce_list = NULL; + *mce_idx = 0; + } +} + +static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, + int type, bool add) +{ + int err = 0, 
nixlf, blkaddr, mcam_index, mce_idx; + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_hwinfo *hw = rvu->hw; + struct nix_mce_list *mce_list; + + /* skip multicast pkt replication for AF's VFs */ + if (is_afvf(pcifunc)) + return 0; + + if (!hw->cap.nix_rx_multicast) + return 0; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return -EINVAL; + + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) + return -EINVAL; + + nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); + + mcam_index = npc_get_nixlf_mcam_index(mcam, + pcifunc & ~RVU_PFVF_FUNC_MASK, + nixlf, type); + err = nix_update_mce_list(rvu, pcifunc, mce_list, + mce_idx, mcam_index, add); + return err; +} + +static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) { struct nix_mcast *mcast = &nix_hw->mcast; int err, pf, numvfs, idx; @@ -2355,11 +2505,18 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw) if (pfvf->nix_blkaddr != nix_hw->blkaddr) continue; - /* Save the start MCE */ + /* save start idx of broadcast mce list */ pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); - nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); + /* save start idx of multicast mce list */ + pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); + nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); + + /* save the start idx of promisc mce list */ + pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); + nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); + for (idx = 0; idx < (numvfs + 1); idx++) { /* idx-0 is for PF, followed by VFs */ pcifunc = (pf << RVU_PFVF_PF_SHIFT); @@ -2375,6 +2532,22 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw) pcifunc, 0, true); if (err) return err; + + /* add dummy entries to multicast mce list */ + err = nix_blk_setup_mce(rvu, nix_hw, + pfvf->mcast_mce_idx + idx, + NIX_AQ_INSTOP_INIT, + pcifunc, 0, true); + if (err) + return err; + + /* add dummy entries to promisc mce list */ + err = nix_blk_setup_mce(rvu, nix_hw, + pfvf->promisc_mce_idx + idx, + NIX_AQ_INSTOP_INIT, + pcifunc, 0, true); + if (err) + return err; } } return 0; @@ -2421,7 +2594,7 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) mutex_init(&mcast->mce_lock); - return nix_setup_bcast_tables(rvu, nix_hw); + return nix_setup_mce_tables(rvu, nix_hw); } static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) @@ -3035,15 +3208,24 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, pfvf = rvu_get_pfvf(rvu, pcifunc); - /* VF can't overwrite admin(PF) changes */ - if (from_vf && pfvf->pf_set_vf_cfg) + /* untrusted VF can't overwrite admin(PF) changes */ + if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && + (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { + dev_warn(rvu->dev, + "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); return -EPERM; + } ether_addr_copy(pfvf->mac_addr, req->mac_addr); rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, req->mac_addr); + if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) + ether_addr_copy(pfvf->default_mac, req->mac_addr); + + rvu_switch_update_rules(rvu, pcifunc); + return 0; } @@ -3067,30 +3249,75 @@ int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, struct msg_rsp *rsp) { - bool allmulti = false, disable_promisc = false; + bool allmulti, promisc, nix_rx_multicast; u16 pcifunc = 
req->hdr.pcifunc; - int blkaddr, nixlf, err; struct rvu_pfvf *pfvf; + int nixlf, err; - err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + pfvf = rvu_get_pfvf(rvu, pcifunc); + promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; + allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; + pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false; + + nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; + + if (is_vf(pcifunc) && !nix_rx_multicast && + (promisc || allmulti)) { + dev_warn_ratelimited(rvu->dev, + "VF promisc/multicast not supported\n"); + return 0; + } + + /* untrusted VF can't configure promisc/allmulti */ + if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && + (promisc || allmulti)) + return 0; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); if (err) return err; - pfvf = rvu_get_pfvf(rvu, pcifunc); + if (nix_rx_multicast) { + /* add/del this PF_FUNC to/from mcast pkt replication list */ + err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, + allmulti); + if (err) { + dev_err(rvu->dev, + "Failed to update pcifunc 0x%x to multicast list\n", + pcifunc); + return err; + } - if (req->mode & NIX_RX_MODE_PROMISC) - allmulti = false; - else if (req->mode & NIX_RX_MODE_ALLMULTI) - allmulti = true; - else - disable_promisc = true; + /* add/del this PF_FUNC to/from promisc pkt replication list */ + err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, + promisc); + if (err) { + dev_err(rvu->dev, + "Failed to update pcifunc 0x%x to promisc list\n", + pcifunc); + return err; + } + } - if (disable_promisc) - rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); - else + /* install/uninstall allmulti entry */ + if (allmulti) { + rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base); + } else { + if (!nix_rx_multicast) + rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); + } + + /* install/uninstall promisc entry */ + if (promisc) { rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, - pfvf->rx_chan_cnt, allmulti); + pfvf->rx_chan_cnt); + } else { + if (!nix_rx_multicast) + rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); + } + return 0; } @@ -3470,6 +3697,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) if (err) return err; + err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); + if (err) + return err; + err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); if (err) return err; @@ -3523,6 +3754,40 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | ltdefs->rx_isctp.ltype_mask); + if (!is_rvu_otx2(rvu)) { + /* Enable APAD calculation for other protocols + * matching APAD0 and APAD1 lt def registers. + */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0, + (ltdefs->rx_apad0.valid << 11) | + (ltdefs->rx_apad0.lid << 8) | + (ltdefs->rx_apad0.ltype_match << 4) | + ltdefs->rx_apad0.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1, + (ltdefs->rx_apad1.valid << 11) | + (ltdefs->rx_apad1.lid << 8) | + (ltdefs->rx_apad1.ltype_match << 4) | + ltdefs->rx_apad1.ltype_mask); + + /* Receive ethertype definition register defines layer + * information in NPC_RESULT_S to identify the Ethertype + * location in the L2 header. Used for Ethertype overwriting + * in inline IPsec flow. 
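+ * + * Illustrative encoding (hypothetical field values, not taken + * from this patch): with offset 1, valid 1, lid 0, ltype_match 1 + * and ltype_mask 0xF, the value written below would be + * (1 << 12) | (1 << 11) | (0 << 8) | (1 << 4) | 0xF = 0x181F.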
+ */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0), + (ltdefs->rx_et[0].offset << 12) | + (ltdefs->rx_et[0].valid << 11) | + (ltdefs->rx_et[0].lid << 8) | + (ltdefs->rx_et[0].ltype_match << 4) | + ltdefs->rx_et[0].ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1), + (ltdefs->rx_et[1].offset << 12) | + (ltdefs->rx_et[1].valid << 11) | + (ltdefs->rx_et[1].lid << 8) | + (ltdefs->rx_et[1].ltype_match << 4) | + ltdefs->rx_et[1].ltype_mask); + } + err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); if (err) return err; @@ -3584,10 +3849,11 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, kfree(txsch->schq.bmap); } + nix_ipolicer_freemem(nix_hw); + vlan = &nix_hw->txvlan; kfree(vlan->rsrc.bmap); mutex_destroy(&vlan->rsrc_lock); - devm_kfree(rvu->dev, vlan->entry2pfvf_map); mcast = &nix_hw->mcast; qmem_free(rvu->dev, mcast->mce_ctx); @@ -3614,6 +3880,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; int nixlf, err; err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); @@ -3624,6 +3891,11 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, npc_mcam_enable_flows(rvu, pcifunc); + pfvf = rvu_get_pfvf(rvu, pcifunc); + set_bit(NIXLF_INITIALIZED, &pfvf->flags); + + rvu_switch_update_rules(rvu, pcifunc); + return rvu_cgx_start_stop_io(rvu, pcifunc, true); } @@ -3631,6 +3903,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; int nixlf, err; err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); @@ -3639,6 +3912,9 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + pfvf = rvu_get_pfvf(rvu, pcifunc); + clear_bit(NIXLF_INITIALIZED, &pfvf->flags); + return rvu_cgx_start_stop_io(rvu, pcifunc, false); } @@ -3657,6 +3933,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) nix_rx_sync(rvu, blkaddr); nix_txschq_free(rvu, pcifunc); + clear_bit(NIXLF_INITIALIZED, &pfvf->flags); + rvu_cgx_start_stop_io(rvu, pcifunc, false); if (pfvf->sq_ctx) { @@ -3681,6 +3959,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) } nix_ctx_free(rvu, pfvf); + + nix_free_all_bandprof(rvu, pcifunc); } #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) @@ -3789,3 +4069,586 @@ void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) if (from_vf) ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); } + +/* NIX ingress policers or bandwidth profiles APIs */ +static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr) +{ + struct npc_lt_def_cfg defs, *ltdefs; + + ltdefs = &defs; + memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg)); + + /* Extract PCP and DEI fields from outer VLAN from byte offset + * 2 from the start of LB_PTR (ie TAG). + * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN + * fields are considered when 'Tunnel enable' is set in profile. 
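+ * + * Illustrative encoding (hypothetical field values, not taken + * from this patch): with byte offset 2, lid 3, ltype_match 1 and + * ltype_mask 0xF, the VLAN0 register value below would be + * (2 << 12) | (3 << 8) | (1 << 4) | 0xF = 0x231F.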
+ */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI, + (2UL << 12) | (ltdefs->ovlan.lid << 8) | + (ltdefs->ovlan.ltype_match << 4) | + ltdefs->ovlan.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI, + (2UL << 12) | (ltdefs->ivlan.lid << 8) | + (ltdefs->ivlan.ltype_match << 4) | + ltdefs->ivlan.ltype_mask); + + /* DSCP field in outer and tunneled IPv4 packets */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP, + (1UL << 12) | (ltdefs->rx_oip4.lid << 8) | + (ltdefs->rx_oip4.ltype_match << 4) | + ltdefs->rx_oip4.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP, + (1UL << 12) | (ltdefs->rx_iip4.lid << 8) | + (ltdefs->rx_iip4.ltype_match << 4) | + ltdefs->rx_iip4.ltype_mask); + + /* DSCP field (traffic class) in outer and tunneled IPv6 packets */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP, + (1UL << 11) | (ltdefs->rx_oip6.lid << 8) | + (ltdefs->rx_oip6.ltype_match << 4) | + ltdefs->rx_oip6.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP, + (1UL << 11) | (ltdefs->rx_iip6.lid << 8) | + (ltdefs->rx_iip6.ltype_match << 4) | + ltdefs->rx_iip6.ltype_mask); +} + +static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw, + int layer, int prof_idx) +{ + struct nix_cn10k_aq_enq_req aq_req; + int rc; + + memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); + + aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14); + aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; + aq_req.op = NIX_AQ_INSTOP_INIT; + + /* Context is all zeros, submit to AQ */ + rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, + (struct nix_aq_enq_req *)&aq_req, NULL); + if (rc) + dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n", + layer, prof_idx); + return rc; +} + +static int nix_setup_ipolicers(struct rvu *rvu, + struct nix_hw *nix_hw, int blkaddr) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct nix_ipolicer *ipolicer; + int err, layer, prof_idx; + u64 cfg; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); + if (!(cfg & BIT_ULL(61))) { + hw->cap.ipolicer = false; + return 0; + } + + hw->cap.ipolicer = true; + nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS, + sizeof(*ipolicer), GFP_KERNEL); + if (!nix_hw->ipolicer) + return -ENOMEM; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST); + + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + ipolicer = &nix_hw->ipolicer[layer]; + switch (layer) { + case BAND_PROF_LEAF_LAYER: + ipolicer->band_prof.max = cfg & 0xFFFF; + break; + case BAND_PROF_MID_LAYER: + ipolicer->band_prof.max = (cfg >> 16) & 0xFFFF; + break; + case BAND_PROF_TOP_LAYER: + ipolicer->band_prof.max = (cfg >> 32) & 0xFFFF; + break; + } + + if (!ipolicer->band_prof.max) + continue; + + err = rvu_alloc_bitmap(&ipolicer->band_prof); + if (err) + return err; + + ipolicer->pfvf_map = devm_kcalloc(rvu->dev, + ipolicer->band_prof.max, + sizeof(u16), GFP_KERNEL); + if (!ipolicer->pfvf_map) + return -ENOMEM; + + ipolicer->match_id = devm_kcalloc(rvu->dev, + ipolicer->band_prof.max, + sizeof(u16), GFP_KERNEL); + if (!ipolicer->match_id) + return -ENOMEM; + + for (prof_idx = 0; + prof_idx < ipolicer->band_prof.max; prof_idx++) { + /* Set AF as current owner for INIT ops to succeed */ + ipolicer->pfvf_map[prof_idx] = 0x00; + + /* There is no enable bit in the profile context, + * so there is no context disable either. INIT them + * here so that PF/VF later on only have to do a + * WRITE to set up policer rates and config. 
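+ * + * E.g. (illustrative sketch, field names as in the structs + * used above): a PF/VF could later update just the committed + * rate by setting prof.cir_exponent/prof.cir_mantissa and the + * matching bits in prof_mask of a WRITE AQ request.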
+ */ + err = nix_init_policer_context(rvu, nix_hw, + layer, prof_idx); + if (err) + return err; + } + + /* Allocate memory for maintaining ref_counts for MID level + * profiles; this will be needed for leaf layer profiles' + * aggregation. + */ + if (layer != BAND_PROF_MID_LAYER) + continue; + + ipolicer->ref_count = devm_kcalloc(rvu->dev, + ipolicer->band_prof.max, + sizeof(u16), GFP_KERNEL); + if (!ipolicer->ref_count) + return -ENOMEM; + } + + /* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */ + rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19); + + nix_config_rx_pkt_policer_precolor(rvu, blkaddr); + + return 0; +} + +static void nix_ipolicer_freemem(struct nix_hw *nix_hw) +{ + struct nix_ipolicer *ipolicer; + int layer; + + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + ipolicer = &nix_hw->ipolicer[layer]; + + if (!ipolicer->band_prof.max) + continue; + + kfree(ipolicer->band_prof.bmap); + } +} + +static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, + struct nix_hw *nix_hw, u16 pcifunc) +{ + struct nix_ipolicer *ipolicer; + int layer, hi_layer, prof_idx; + + /* Bits [15:14] in profile index represent layer */ + layer = (req->qidx >> 14) & 0x03; + prof_idx = req->qidx & 0x3FFF; + + ipolicer = &nix_hw->ipolicer[layer]; + if (prof_idx >= ipolicer->band_prof.max) + return -EINVAL; + + /* Check if the profile is allocated to the requesting PCIFUNC or not, + * with the exception of AF. AF is allowed to read and update contexts. + */ + if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc) + return -EINVAL; + + /* If this profile is linked to a higher layer profile then check + * if that profile is also allocated to the requesting PCIFUNC + * or not. + */ + if (!req->prof.hl_en) + return 0; + + /* Leaf layer profile can link only to mid layer and + * mid layer to top layer. 
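+ * + * E.g. (illustrative): a leaf profile with hl_en set and + * band_prof_id 5 is valid only if mid layer profile 5 is + * owned by the same pcifunc; a mid layer profile may in turn + * link only to a top layer profile.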
+ */ + if (layer == BAND_PROF_LEAF_LAYER) + hi_layer = BAND_PROF_MID_LAYER; + else if (layer == BAND_PROF_MID_LAYER) + hi_layer = BAND_PROF_TOP_LAYER; + else + return -EINVAL; + + ipolicer = &nix_hw->ipolicer[hi_layer]; + prof_idx = req->prof.band_prof_id; + if (prof_idx >= ipolicer->band_prof.max || + ipolicer->pfvf_map[prof_idx] != pcifunc) + return -EINVAL; + + return 0; +} + +int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu, + struct nix_bandprof_alloc_req *req, + struct nix_bandprof_alloc_rsp *rsp) +{ + int blkaddr, layer, prof, idx, err; + u16 pcifunc = req->hdr.pcifunc; + struct nix_ipolicer *ipolicer; + struct nix_hw *nix_hw; + + if (!rvu->hw->cap.ipolicer) + return NIX_AF_ERR_IPOLICER_NOTSUPP; + + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + mutex_lock(&rvu->rsrc_lock); + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + if (layer == BAND_PROF_INVAL_LAYER) + continue; + if (!req->prof_count[layer]) + continue; + + ipolicer = &nix_hw->ipolicer[layer]; + for (idx = 0; idx < req->prof_count[layer]; idx++) { + /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */ + if (idx == MAX_BANDPROF_PER_PFFUNC) + break; + + prof = rvu_alloc_rsrc(&ipolicer->band_prof); + if (prof < 0) + break; + rsp->prof_count[layer]++; + rsp->prof_idx[layer][idx] = prof; + ipolicer->pfvf_map[prof] = pcifunc; + } + } + mutex_unlock(&rvu->rsrc_lock); + return 0; +} + +static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc) +{ + int blkaddr, layer, prof_idx, err; + struct nix_ipolicer *ipolicer; + struct nix_hw *nix_hw; + + if (!rvu->hw->cap.ipolicer) + return NIX_AF_ERR_IPOLICER_NOTSUPP; + + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + mutex_lock(&rvu->rsrc_lock); + /* Free all the profiles allocated to the PCIFUNC */ + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + if (layer == BAND_PROF_INVAL_LAYER) + continue; + ipolicer = &nix_hw->ipolicer[layer]; + + for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) { + if (ipolicer->pfvf_map[prof_idx] != pcifunc) + continue; + + /* Clear ratelimit aggregation, if any */ + if (layer == BAND_PROF_LEAF_LAYER && + ipolicer->match_id[prof_idx]) + nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); + + ipolicer->pfvf_map[prof_idx] = 0x00; + ipolicer->match_id[prof_idx] = 0; + rvu_free_rsrc(&ipolicer->band_prof, prof_idx); + } + } + mutex_unlock(&rvu->rsrc_lock); + return 0; +} + +int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, + struct nix_bandprof_free_req *req, + struct msg_rsp *rsp) +{ + int blkaddr, layer, prof_idx, idx, err; + u16 pcifunc = req->hdr.pcifunc; + struct nix_ipolicer *ipolicer; + struct nix_hw *nix_hw; + + if (req->free_all) + return nix_free_all_bandprof(rvu, pcifunc); + + if (!rvu->hw->cap.ipolicer) + return NIX_AF_ERR_IPOLICER_NOTSUPP; + + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + mutex_lock(&rvu->rsrc_lock); + /* Free the requested profile indices */ + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + if (layer == BAND_PROF_INVAL_LAYER) + continue; + if (!req->prof_count[layer]) + continue; + + ipolicer = &nix_hw->ipolicer[layer]; + for (idx = 0; idx < req->prof_count[layer]; idx++) { + prof_idx = req->prof_idx[layer][idx]; + if (prof_idx >= ipolicer->band_prof.max || + ipolicer->pfvf_map[prof_idx] != pcifunc) + continue; + + /* Clear ratelimit aggregation, if any */ + if (layer == BAND_PROF_LEAF_LAYER && + ipolicer->match_id[prof_idx]) + 
nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); + + ipolicer->pfvf_map[prof_idx] = 0x00; + ipolicer->match_id[prof_idx] = 0; + rvu_free_rsrc(&ipolicer->band_prof, prof_idx); + if (idx == MAX_BANDPROF_PER_PFFUNC) + break; + } + } + mutex_unlock(&rvu->rsrc_lock); + return 0; +} + +int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, + struct nix_cn10k_aq_enq_req *aq_req, + struct nix_cn10k_aq_enq_rsp *aq_rsp, + u16 pcifunc, u8 ctype, u32 qidx) +{ + memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); + aq_req->hdr.pcifunc = pcifunc; + aq_req->ctype = ctype; + aq_req->op = NIX_AQ_INSTOP_READ; + aq_req->qidx = qidx; + + return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, + (struct nix_aq_enq_req *)aq_req, + (struct nix_aq_enq_rsp *)aq_rsp); +} + +static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, + struct nix_hw *nix_hw, + struct nix_cn10k_aq_enq_req *aq_req, + struct nix_cn10k_aq_enq_rsp *aq_rsp, + u32 leaf_prof, u16 mid_prof) +{ + memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); + aq_req->hdr.pcifunc = 0x00; + aq_req->ctype = NIX_AQ_CTYPE_BANDPROF; + aq_req->op = NIX_AQ_INSTOP_WRITE; + aq_req->qidx = leaf_prof; + + aq_req->prof.band_prof_id = mid_prof; + aq_req->prof_mask.band_prof_id = GENMASK(6, 0); + aq_req->prof.hl_en = 1; + aq_req->prof_mask.hl_en = 1; + + return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, + (struct nix_aq_enq_req *)aq_req, + (struct nix_aq_enq_rsp *)aq_rsp); +} + +int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, + u16 rq_idx, u16 match_id) +{ + int leaf_prof, mid_prof, leaf_match; + struct nix_cn10k_aq_enq_req aq_req; + struct nix_cn10k_aq_enq_rsp aq_rsp; + struct nix_ipolicer *ipolicer; + struct nix_hw *nix_hw; + int blkaddr, idx, rc; + + if (!rvu->hw->cap.ipolicer) + return 0; + + rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (rc) + return rc; + + /* Fetch the RQ's context to see if policing is enabled */ + rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc, + NIX_AQ_CTYPE_RQ, rq_idx); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n", + __func__, rq_idx, pcifunc); + return rc; + } + + if (!aq_rsp.rq.policer_ena) + return 0; + + /* Get the bandwidth profile ID mapped to this RQ */ + leaf_prof = aq_rsp.rq.band_prof_id; + + ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; + ipolicer->match_id[leaf_prof] = match_id; + + /* Check if any other leaf profile is marked with same match_id */ + for (idx = 0; idx < ipolicer->band_prof.max; idx++) { + if (idx == leaf_prof) + continue; + if (ipolicer->match_id[idx] != match_id) + continue; + + leaf_match = idx; + break; + } + + if (idx == ipolicer->band_prof.max) + return 0; + + /* Fetch the matching profile's context to check if it's already + * mapped to a mid level profile. 
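+ * + * E.g. (illustrative): two RQs steered by different flow rules + * but marked with the same match_id end up with both of their + * leaf profiles linked to one shared mid layer profile, so + * their combined traffic is policed against a single rate.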
+ */ + rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, + NIX_AQ_CTYPE_BANDPROF, leaf_match); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to fetch context of leaf profile %d\n", + __func__, leaf_match); + return rc; + } + + ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; + if (aq_rsp.prof.hl_en) { + /* Get mid layer prof index and map leaf_prof index + * to it as well, so that flows that are being steered + * to different RQs and marked with the same match_id + * are rate limited in an aggregate fashion + */ + mid_prof = aq_rsp.prof.band_prof_id; + rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, + &aq_req, &aq_rsp, + leaf_prof, mid_prof); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to map leaf(%d) and mid(%d) profiles\n", + __func__, leaf_prof, mid_prof); + goto exit; + } + + mutex_lock(&rvu->rsrc_lock); + ipolicer->ref_count[mid_prof]++; + mutex_unlock(&rvu->rsrc_lock); + goto exit; + } + + /* Allocate a mid layer profile and + * map both 'leaf_prof' and 'leaf_match' profiles to it. + */ + mutex_lock(&rvu->rsrc_lock); + mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof); + if (mid_prof < 0) { + dev_err(rvu->dev, + "%s: Unable to allocate mid layer profile\n", __func__); + mutex_unlock(&rvu->rsrc_lock); + goto exit; + } + mutex_unlock(&rvu->rsrc_lock); + ipolicer->pfvf_map[mid_prof] = 0x00; + ipolicer->ref_count[mid_prof] = 0; + + /* Initialize mid layer profile same as 'leaf_prof' */ + rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, + NIX_AQ_CTYPE_BANDPROF, leaf_prof); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to fetch context of leaf profile %d\n", + __func__, leaf_prof); + goto exit; + } + + memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); + aq_req.hdr.pcifunc = 0x00; + aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14); + aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; + aq_req.op = NIX_AQ_INSTOP_WRITE; + memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s)); + /* Clear higher layer enable bit in the mid profile, just in case */ + aq_req.prof.hl_en = 0; + aq_req.prof_mask.hl_en = 1; + + rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, + (struct nix_aq_enq_req *)&aq_req, NULL); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to INIT context of mid layer profile %d\n", + __func__, mid_prof); + goto exit; + } + + /* Map both leaf profiles to this mid layer profile */ + rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, + &aq_req, &aq_rsp, + leaf_prof, mid_prof); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to map leaf(%d) and mid(%d) profiles\n", + __func__, leaf_prof, mid_prof); + goto exit; + } + + mutex_lock(&rvu->rsrc_lock); + ipolicer->ref_count[mid_prof]++; + mutex_unlock(&rvu->rsrc_lock); + + rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, + &aq_req, &aq_rsp, + leaf_match, mid_prof); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to map leaf(%d) and mid(%d) profiles\n", + __func__, leaf_match, mid_prof); + ipolicer->ref_count[mid_prof]--; + goto exit; + } + + mutex_lock(&rvu->rsrc_lock); + ipolicer->ref_count[mid_prof]++; + mutex_unlock(&rvu->rsrc_lock); + +exit: + return rc; +} + +/* Called with mutex rsrc_lock */ +static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, + u32 leaf_prof) +{ + struct nix_cn10k_aq_enq_req aq_req; + struct nix_cn10k_aq_enq_rsp aq_rsp; + struct nix_ipolicer *ipolicer; + u16 mid_prof; + int rc; + + mutex_unlock(&rvu->rsrc_lock); + + rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, + NIX_AQ_CTYPE_BANDPROF, leaf_prof); + + mutex_lock(&rvu->rsrc_lock); + if (rc) { + dev_err(rvu->dev, + 
"%s: Failed to fetch context of leaf profile %d\n", + __func__, leaf_prof); + return; + } + + if (!aq_rsp.prof.hl_en) + return; + + mid_prof = aq_rsp.prof.band_prof_id; + ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; + ipolicer->ref_count[mid_prof]--; + /* If ref_count is zero, free mid layer profile */ + if (!ipolicer->ref_count[mid_prof]) { + ipolicer->pfvf_map[mid_prof] = 0x00; + rvu_free_rsrc(&ipolicer->band_prof, mid_prof); + } +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 0bc4529691ec..52b255426c22 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -19,7 +19,7 @@ #include "cgx.h" #include "npc_profile.h" -#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */ +#define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */ #define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */ #define NPC_PARSE_RESULT_DMAC_OFFSET 8 @@ -27,6 +27,8 @@ #define NPC_KEX_CHAN_MASK 0xFFFULL #define NPC_KEX_PF_FUNC_MASK 0xFFFFULL +#define ALIGN_8B_CEIL(__a) (((__a) + 7) & (-8)) + static const char def_pfl_name[] = "default"; static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, @@ -212,8 +214,10 @@ int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, */ if (type == NIXLF_BCAST_ENTRY) return index; - else if (type == NIXLF_PROMISC_ENTRY) + else if (type == NIXLF_ALLMULTI_ENTRY) return index + 1; + else if (type == NIXLF_PROMISC_ENTRY) + return index + 2; } return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf); @@ -411,37 +415,50 @@ static void npc_fill_entryword(struct mcam_entry *entry, int idx, } } -static void npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam, - int blkaddr, int index, - struct mcam_entry *entry) +static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 pf_func) +{ + int bank, nixlf, index; + + /* get ucast entry rule entry index */ + nix_get_nixlf(rvu, pf_func, &nixlf, NULL); + index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf, + NIXLF_UCAST_ENTRY); + bank = npc_get_bank(mcam, index); + index &= (mcam->banksize - 1); + + return rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); +} + +static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index, struct mcam_entry *entry, + bool *enable) { u16 owner, target_func; struct rvu_pfvf *pfvf; - int bank, nixlf; u64 rx_action; owner = mcam->entry2pfvf_map[index]; target_func = (entry->action >> 4) & 0xffff; - /* return incase target is PF or LBK or rule owner is not PF */ - if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) || + /* do nothing when target is LBK/PF or owner is not PF */ + if (is_pffunc_af(owner) || is_afvf(target_func) || + (owner & RVU_PFVF_FUNC_MASK) || !(target_func & RVU_PFVF_FUNC_MASK)) return; + /* save entry2target_pffunc */ pfvf = rvu_get_pfvf(rvu, target_func); mcam->entry2target_pffunc[index] = target_func; - /* return if nixlf is not attached or initialized */ - if (!is_nixlf_attached(rvu, target_func) || !pfvf->def_ucast_rule) - return; - /* get VF ucast entry rule */ - nix_get_nixlf(rvu, target_func, &nixlf, NULL); - index = npc_get_nixlf_mcam_index(mcam, target_func, - nixlf, NIXLF_UCAST_ENTRY); - bank = npc_get_bank(mcam, index); - index &= (mcam->banksize - 1); + /* don't enable rule when nixlf not attached or initialized */ + if (!(is_nixlf_attached(rvu, target_func) && + 
test_bit(NIXLF_INITIALIZED, &pfvf->flags))) + *enable = false; - rx_action = rvu_read64(rvu, blkaddr, - NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); + /* copy VF default entry action to the VF mcam entry */ + rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr, + target_func); if (rx_action) entry->action = rx_action; } @@ -452,6 +469,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, { int bank = npc_get_bank(mcam, index); int kw = 0, actbank, actindex; + u8 tx_intf_mask = ~intf & 0x3; + u8 tx_intf = intf; u64 cam0, cam1; actbank = bank; /* Save bank id, to set action later on */ @@ -472,12 +491,21 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, */ for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) { /* Interface should be set in all banks */ + if (is_npc_intf_tx(intf)) { + /* Last bit must be set and rest don't care + * for TX interfaces + */ + tx_intf_mask = 0x1; + tx_intf = intf & tx_intf_mask; + tx_intf_mask = ~tx_intf & tx_intf_mask; + } + rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), - intf); + tx_intf); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), - ~intf & 0x3); + tx_intf_mask); /* Set the match key */ npc_get_keyword(entry, kw, &cam0, &cam1); @@ -493,10 +521,9 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0); } - /* copy VF default entry action to the VF mcam entry */ + /* PF installing VF rule */ if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries) - npc_get_default_entry_action(rvu, mcam, blkaddr, actindex, - entry); + npc_fixup_vf_rule(rvu, mcam, blkaddr, index, entry, &enable); /* Set 'action' */ rvu_write64(rvu, blkaddr, @@ -635,6 +662,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, eth_broadcast_addr((u8 *)&req.mask.dmac); req.features = BIT_ULL(NPC_DMAC); req.channel = chan; + req.chan_mask = 0xFFFU; req.intf = pfvf->nix_rx_intf; req.op = action.op; req.hdr.pcifunc = 0; /* AF is requester */ @@ -647,30 +675,32 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, } void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, - int nixlf, u64 chan, u8 chan_cnt, - bool allmulti) + int nixlf, u64 chan, u8 chan_cnt) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_install_flow_req req = { 0 }; struct npc_install_flow_rsp rsp = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_hwinfo *hw = rvu->hw; int blkaddr, ucast_idx, index; - u8 mac_addr[ETH_ALEN] = { 0 }; struct nix_rx_action action; u64 relaxed_mask; - /* Only PF or AF VF can add a promiscuous entry */ - if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc)) + if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; - *(u64 *)&action = 0x00; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_PROMISC_ENTRY); + if (is_cgx_vf(rvu, pcifunc)) + index = npc_get_nixlf_mcam_index(mcam, + pcifunc & ~RVU_PFVF_FUNC_MASK, + nixlf, NIXLF_PROMISC_ENTRY); + /* If the corresponding PF's ucast action is RSS, * use the same action for promisc also */ @@ -678,19 +708,20 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, nixlf, NIXLF_UCAST_ENTRY); if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx)) *(u64 *)&action = npc_get_mcam_action(rvu, mcam, - blkaddr, ucast_idx); + blkaddr, ucast_idx); if (action.op != NIX_RX_ACTIONOP_RSS) { *(u64 *)&action = 
0x00; action.op = NIX_RX_ACTIONOP_UCAST; - action.pf_func = pcifunc; } - if (allmulti) { - mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */ - ether_addr_copy(req.packet.dmac, mac_addr); - ether_addr_copy(req.mask.dmac, mac_addr); - req.features = BIT_ULL(NPC_DMAC); + /* RX_ACTION set to MCAST for CGX PF's */ + if (hw->cap.nix_rx_multicast && pfvf->use_mce_list && + is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { + *(u64 *)&action = 0x00; + action.op = NIX_RX_ACTIONOP_MCAST; + pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); + action.index = pfvf->promisc_mce_idx; } req.chan_mask = 0xFFFU; @@ -718,8 +749,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } -static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc, - int nixlf, bool enable) +void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, bool enable) { struct npc_mcam *mcam = &rvu->hw->mcam; int blkaddr, index; @@ -728,25 +759,14 @@ static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc, if (blkaddr < 0) return; - /* Only PF's have a promiscuous entry */ - if (pcifunc & RVU_PFVF_FUNC_MASK) - return; + /* Get 'pcifunc' of PF device */ + pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_PROMISC_ENTRY); npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); } -void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf) -{ - npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false); -} - -void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf) -{ - npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true); -} - void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan) { @@ -756,8 +776,6 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; int blkaddr, index; - u32 req_index = 0; - u8 op; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) @@ -770,7 +788,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, /* If pkt replication is not supported, * then only PF is allowed to add a bcast match entry. */ - if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK) + if (!hw->cap.nix_rx_multicast && is_vf(pcifunc)) return; /* Get 'pcifunc' of PF device */ @@ -784,27 +802,27 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, * so install entry with UCAST action, so that PF * receives all broadcast packets. 
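+ * + * E.g. on hardware without replication support, a broadcast + * frame arriving on the PF channel matches this entry and is + * delivered to the PF alone via the UCAST action instead of + * being replicated through an MCE list.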
*/ - op = NIX_RX_ACTIONOP_UCAST; + req.op = NIX_RX_ACTIONOP_UCAST; } else { - op = NIX_RX_ACTIONOP_MCAST; - req_index = pfvf->bcast_mce_idx; + req.op = NIX_RX_ACTIONOP_MCAST; + req.index = pfvf->bcast_mce_idx; } eth_broadcast_addr((u8 *)&req.packet.dmac); eth_broadcast_addr((u8 *)&req.mask.dmac); req.features = BIT_ULL(NPC_DMAC); req.channel = chan; + req.chan_mask = 0xFFFU; req.intf = pfvf->nix_rx_intf; req.entry = index; - req.op = op; req.hdr.pcifunc = 0; /* AF is requester */ req.vf = pcifunc; - req.index = req_index; rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } -void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable) +void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, + bool enable) { struct npc_mcam *mcam = &rvu->hw->mcam; int blkaddr, index; @@ -816,7 +834,104 @@ void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable) /* Get 'pcifunc' of PF device */ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; - index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY); + index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, + NIXLF_BCAST_ENTRY); npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); +} + +void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, + u64 chan) +{ + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_hwinfo *hw = rvu->hw; + int blkaddr, ucast_idx, index; + u8 mac_addr[ETH_ALEN] = { 0 }; + struct nix_rx_action action; + struct rvu_pfvf *pfvf; + u16 vf_func; + + /* Only CGX PF/VF can add allmulticast entry */ + if (is_afvf(pcifunc)) + return; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Get 'pcifunc' of PF device */ + vf_func = pcifunc & RVU_PFVF_FUNC_MASK; + pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; + pfvf = rvu_get_pfvf(rvu, pcifunc); + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_ALLMULTI_ENTRY); + + /* If the corresponding PF's ucast action is RSS, + * use the same action for multicast entry also + */ + ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx)) + *(u64 *)&action = npc_get_mcam_action(rvu, mcam, + blkaddr, ucast_idx); + + if (action.op != NIX_RX_ACTIONOP_RSS) { + *(u64 *)&action = 0x00; + action.op = NIX_RX_ACTIONOP_UCAST; + action.pf_func = pcifunc; + } + + /* RX_ACTION set to MCAST for CGX PFs */ + if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) { + *(u64 *)&action = 0x00; + action.op = NIX_RX_ACTIONOP_MCAST; + action.index = pfvf->mcast_mce_idx; + } + + mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */ + ether_addr_copy(req.packet.dmac, mac_addr); + ether_addr_copy(req.mask.dmac, mac_addr); + req.features = BIT_ULL(NPC_DMAC); + + /* For cn10k, the upper two bits of the channel number are the + * CPT channel number. By masking out these bits in the MCAM + * entry, the same entry used for NIX will also allow packets + * received from CPT to be parsed. 
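+ * + * Illustrative example (assuming the CPT bits are the top two + * bits of a 12-bit channel id): with a channel mask of 0x3FF, + * channel 0x123 and channel (0x123 | 0x800) would both match + * this entry.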
+ */ + if (!is_rvu_otx2(rvu)) + req.chan_mask = NIX_CHAN_CPT_X2P_MASK; + else + req.chan_mask = 0xFFFU; + + req.channel = chan; + req.intf = pfvf->nix_rx_intf; + req.entry = index; + req.op = action.op; + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = pcifunc | vf_func; + req.index = action.index; + req.match_id = action.match_id; + req.flow_key_alg = action.flow_key_alg; + + rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, + bool enable) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, index; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Get 'pcifunc' of PF device */ + pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, + NIXLF_ALLMULTI_ENTRY); npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); } @@ -858,6 +973,7 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, int group, int alg_idx, int mcam_index) { struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_hwinfo *hw = rvu->hw; struct nix_rx_action action; int blkaddr, index, bank; struct rvu_pfvf *pfvf; @@ -913,7 +1029,8 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, /* If PF's promiscuous entry is enabled, * Set RSS action for that entry as well */ - if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { + if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) && + is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { bank = npc_get_bank(mcam, index); index &= (mcam->banksize - 1); @@ -923,12 +1040,47 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, } } +void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, int type, bool enable) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_hwinfo *hw = rvu->hw; + struct nix_mce_list *mce_list; + int index, blkaddr, mce_idx; + struct rvu_pfvf *pfvf; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK, + nixlf, type); + + /* disable MCAM entry when packet replication is not supported by hw */ + if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) { + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); + return; + } + + /* return in case mce list is not enabled */ + pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); + if (hw->cap.nix_rx_multicast && is_vf(pcifunc) && + type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list) + return; + + nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); + + nix_update_mce_list(rvu, pcifunc, mce_list, + mce_idx, index, enable); + if (enable) + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); +} + static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable) { struct npc_mcam *mcam = &rvu->hw->mcam; - struct nix_rx_action action; - int index, bank, blkaddr; + int index, blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) @@ -939,48 +1091,33 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, nixlf, NIXLF_UCAST_ENTRY); npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); - /* For PF, ena/dis promisc and bcast MCAM match entries. - * For VFs add/delete from bcast list when RX multicast - * feature is present.
+ /* Nothing to do for VFs, on platforms where pkt replication + * is not supported */ - if (pcifunc & RVU_PFVF_FUNC_MASK && !rvu->hw->cap.nix_rx_multicast) + if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast) return; - /* For bcast, enable/disable only if it's action is not - * packet replication, incase if action is replication - * then this PF/VF's nixlf is removed from bcast replication - * list. - */ - index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK, - nixlf, NIXLF_BCAST_ENTRY); - bank = npc_get_bank(mcam, index); - *(u64 *)&action = rvu_read64(rvu, blkaddr, - NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank)); - - /* VFs will not have BCAST entry */ - if (action.op != NIX_RX_ACTIONOP_MCAST && - !(pcifunc & RVU_PFVF_FUNC_MASK)) { - npc_enable_mcam_entry(rvu, mcam, - blkaddr, index, enable); - } else { - nix_update_bcast_mce_list(rvu, pcifunc, enable); - /* Enable PF's BCAST entry for packet replication */ - rvu_npc_enable_bcast_entry(rvu, pcifunc, enable); - } - - if (enable) - rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf); - else - rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); + /* add/delete pf_func to broadcast MCE list */ + npc_enadis_default_mce_entry(rvu, pcifunc, nixlf, + NIXLF_BCAST_ENTRY, enable); } void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) { npc_enadis_default_entries(rvu, pcifunc, nixlf, false); + + /* Delete multicast and promisc MCAM entries */ + npc_enadis_default_mce_entry(rvu, pcifunc, nixlf, + NIXLF_ALLMULTI_ENTRY, false); + npc_enadis_default_mce_entry(rvu, pcifunc, nixlf, + NIXLF_PROMISC_ENTRY, false); } void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) { + /* Enables only broadcast match entry. Promisc/Allmulti are enabled + * in set_rx_mode mbox handler. 
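+ * This avoids a freshly initialized NIXLF receiving promisc or + * allmulti traffic before the netdev has actually requested it.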
+ */ npc_enadis_default_entries(rvu, pcifunc, nixlf, true); } @@ -1000,7 +1137,8 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) /* Disable MCAM entries directing traffic to this 'pcifunc' */ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) { if (is_npc_intf_rx(rule->intf) && - rule->rx_action.pf_func == pcifunc) { + rule->rx_action.pf_func == pcifunc && + rule->rx_action.op != NIX_RX_ACTIONOP_MCAST) { npc_enable_mcam_entry(rvu, mcam, blkaddr, rule->entry, false); rule->enable = false; @@ -1134,6 +1272,30 @@ static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr, } } +static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr, + u64 *size) +{ + u64 prfl_addr, prfl_sz; + + if (!rvu->fwdata) + return -EINVAL; + + prfl_addr = rvu->fwdata->mcam_addr; + prfl_sz = rvu->fwdata->mcam_sz; + + if (!prfl_addr || !prfl_sz) + return -EINVAL; + + *prfl_img_addr = ioremap_wc(prfl_addr, prfl_sz); + if (!(*prfl_img_addr)) + return -ENOMEM; + + *size = prfl_sz; + + return 0; +} + +/* strtoull of "mkexprof" with base:36 */ #define MKEX_END_SIGN 0xdeadbeef static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr, @@ -1141,26 +1303,21 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr, { struct device *dev = &rvu->pdev->dev; struct npc_mcam_kex *mcam_kex; - void *mkex_prfl_addr = NULL; - u64 prfl_addr, prfl_sz; + void __iomem *mkex_prfl_addr = NULL; + u64 prfl_sz; + int ret; /* If user not selected mkex profile */ - if (!strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN)) + if (rvu->kpu_fwdata_sz || + !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN)) goto program_mkex; - if (!rvu->fwdata) + /* Setting up the mapping for mkex profile image */ + ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz); + if (ret < 0) goto program_mkex; - prfl_addr = rvu->fwdata->mcam_addr; - prfl_sz = rvu->fwdata->mcam_sz; - if (!prfl_addr || !prfl_sz) - goto program_mkex; - - mkex_prfl_addr = memremap(prfl_addr, prfl_sz, MEMREMAP_WC); - if (!mkex_prfl_addr) - goto program_mkex; - - mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr; + mcam_kex = (struct npc_mcam_kex __force *)mkex_prfl_addr; while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) { /* Compare with mkex mod_param name string */ @@ -1186,7 +1343,7 @@ program_mkex: /* Program selected mkex profile */ npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex); if (mkex_prfl_addr) - memunmap(mkex_prfl_addr); + iounmap(mkex_prfl_addr); } static void npc_config_kpuaction(struct rvu *rvu, int blkaddr, @@ -1263,6 +1420,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu, const struct npc_kpu_profile *profile) { int entry, num_entries, max_entries; + u64 entry_mask; if (profile->cam_entries != profile->action_entries) { dev_err(rvu->dev, @@ -1286,8 +1444,12 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu, /* Enable all programmed entries */ num_entries = min_t(int, profile->action_entries, profile->cam_entries); + entry_mask = enable_mask(num_entries); + /* Disable first KPU_MAX_CST_ENT entries for built-in profile */ + if (!rvu->kpu.custom) + entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0); rvu_write64(rvu, blkaddr, - NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries)); + NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask); if (num_entries > 64) { rvu_write64(rvu, blkaddr, NPC_AF_KPUX_ENTRY_DISX(kpu, 1), @@ -1300,6 +1462,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu, static int 
npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile) { + profile->custom = 0; profile->name = def_pfl_name; profile->version = NPC_KPU_PROFILE_VER; profile->ikpu = ikpu_action_entries; @@ -1312,10 +1475,245 @@ static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile) return 0; } +static int npc_apply_custom_kpu(struct rvu *rvu, + struct npc_kpu_profile_adapter *profile) +{ + size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0; + struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata; + struct npc_kpu_profile_action *action; + struct npc_kpu_profile_cam *cam; + struct npc_kpu_fwdata *fw_kpu; + int entries; + u16 kpu, entry; + + if (rvu->kpu_fwdata_sz < hdr_sz) { + dev_warn(rvu->dev, "Invalid KPU profile size\n"); + return -EINVAL; + } + if (le64_to_cpu(fw->signature) != KPU_SIGN) { + dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n", + fw->signature); + return -EINVAL; + } + /* Verify that a known profile structure is being used */ + if (NPC_KPU_VER_MAJ(profile->version) > + NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) { + dev_warn(rvu->dev, "Unsupported major version: %d > %d\n", + NPC_KPU_VER_MAJ(profile->version), + NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)); + return -EINVAL; + } + /* Verify if profile is aligned with the required kernel changes */ + if (NPC_KPU_VER_MIN(profile->version) < + NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) { + dev_warn(rvu->dev, + "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n", + NPC_KPU_VER_MAJ(profile->version), + NPC_KPU_VER_MIN(profile->version), + NPC_KPU_VER_PATCH(profile->version), + NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER), + NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER), + NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER)); + return -EINVAL; + } + /* Verify if profile fits the HW */ + if (fw->kpus > profile->kpus) { + dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus, + profile->kpus); + return -EINVAL; + } + + profile->custom = 1; + profile->name = fw->name; + profile->version = le64_to_cpu(fw->version); + profile->mkex = &fw->mkex; + profile->lt_def = &fw->lt_def; + + for (kpu = 0; kpu < fw->kpus; kpu++) { + fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset); + if (fw_kpu->entries > KPU_MAX_CST_ENT) + dev_warn(rvu->dev, + "Too many custom entries on KPU%d: %d > %d\n", + kpu, fw_kpu->entries, KPU_MAX_CST_ENT); + entries = min(fw_kpu->entries, KPU_MAX_CST_ENT); + cam = (struct npc_kpu_profile_cam *)fw_kpu->data; + offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam); + action = (struct npc_kpu_profile_action *)(fw->data + offset); + offset += fw_kpu->entries * sizeof(*action); + if (rvu->kpu_fwdata_sz < hdr_sz + offset) { + dev_warn(rvu->dev, + "Profile size mismatch on KPU%i parsing.\n", + kpu + 1); + return -EINVAL; + } + for (entry = 0; entry < entries; entry++) { + profile->kpu[kpu].cam[entry] = cam[entry]; + profile->kpu[kpu].action[entry] = action[entry]; + } + } + + return 0; +} + +static int npc_load_kpu_prfl_img(struct rvu *rvu, void __iomem *prfl_addr, + u64 prfl_sz, const char *kpu_profile) +{ + struct npc_kpu_profile_fwdata *kpu_data = NULL; + int rc = -EINVAL; + + kpu_data = (struct npc_kpu_profile_fwdata __force *)prfl_addr; + if (le64_to_cpu(kpu_data->signature) == KPU_SIGN && + !strncmp(kpu_data->name, kpu_profile, KPU_NAME_LEN)) { + dev_info(rvu->dev, "Loading KPU profile from firmware db: %s\n", + kpu_profile); + rvu->kpu_fwdata = kpu_data; + rvu->kpu_fwdata_sz = prfl_sz; + rvu->kpu_prfl_addr = prfl_addr; + rc = 0; + } + + return rc; +} + +static int npc_fwdb_detect_load_prfl_img(struct
rvu *rvu, uint64_t prfl_sz, + const char *kpu_profile) +{ + struct npc_coalesced_kpu_prfl *img_data = NULL; + int i = 0, rc = -EINVAL; + void __iomem *kpu_prfl_addr; + u16 offset; + + img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr; + if (le64_to_cpu(img_data->signature) == KPU_SIGN && + !strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) { + /* Loaded profile is a single KPU profile. */ + rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr, + prfl_sz, kpu_profile); + goto done; + } + + /* Loaded profile is a coalesced image; compute offset of the first KPU profile. */ + offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) + + (img_data->num_prfl * sizeof(uint16_t)); + /* Iterate over the profiles in the coalesced image. */ + while (i < img_data->num_prfl) { + /* Profile image offsets are rounded up to the next multiple of 8. */ + offset = ALIGN_8B_CEIL(offset); + kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr + + offset); + rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr, + img_data->prfl_sz[i], kpu_profile); + if (!rc) + break; + /* Calculate the offset of the next profile image from the profile size. */ + offset += img_data->prfl_sz[i]; + i++; + } +done: + return rc; +} + +static int npc_load_kpu_profile_fwdb(struct rvu *rvu, const char *kpu_profile) +{ + int ret = -EINVAL; + u64 prfl_sz; + + /* Setting up the mapping for NPC profile image */ + ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz); + if (ret < 0) + goto done; + + /* Detect if profile is coalesced or single KPU profile and load */ + ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile); + if (ret == 0) + goto done; + + /* Cleaning up if KPU profile image from fwdata is not valid. */ + if (rvu->kpu_prfl_addr) { + iounmap(rvu->kpu_prfl_addr); + rvu->kpu_prfl_addr = NULL; + rvu->kpu_fwdata_sz = 0; + rvu->kpu_fwdata = NULL; + } + +done: + return ret; +} + static void npc_load_kpu_profile(struct rvu *rvu) { struct npc_kpu_profile_adapter *profile = &rvu->kpu; + const char *kpu_profile = rvu->kpu_pfl_name; + const struct firmware *fw = NULL; + bool retry_fwdb = false; + + /* If the user has not specified profile customization */ + if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN)) + goto revert_to_default; + /* First prepare default KPU, then we'll customize top entries. */ + npc_prepare_default_kpu(profile); + + /* Order of precedence for loading the NPC profile (high to low): + * Firmware binary in filesystem. + * Firmware database method. + * Default KPU profile. + */ + if (!request_firmware(&fw, kpu_profile, rvu->dev)) { + dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n", + kpu_profile); + rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL); + if (rvu->kpu_fwdata) { + memcpy(rvu->kpu_fwdata, fw->data, fw->size); + rvu->kpu_fwdata_sz = fw->size; + } + release_firmware(fw); + retry_fwdb = true; + goto program_kpu; + } + +load_image_fwdb: + /* Loading the KPU profile using firmware database */ + if (npc_load_kpu_profile_fwdb(rvu, kpu_profile)) + goto revert_to_default; + +program_kpu: + /* Apply profile customization if firmware was loaded. */ + if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) { + /* If the image from the firmware filesystem fails to load or is + * invalid, retry with the firmware database method. + */ + if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) { + /* Loading image from firmware database failed.
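+ * kpu_prfl_addr is only set by the fwdb path; when it is set, + * kpu_fwdata points into that ioremap'd image rather than a + * kzalloc'd copy, so it must be iounmap'd instead of kfree'd.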
*/ + if (rvu->kpu_prfl_addr) { + iounmap(rvu->kpu_prfl_addr); + rvu->kpu_prfl_addr = NULL; + } else { + kfree(rvu->kpu_fwdata); + } + rvu->kpu_fwdata = NULL; + rvu->kpu_fwdata_sz = 0; + if (retry_fwdb) { + retry_fwdb = false; + goto load_image_fwdb; + } + } + + dev_warn(rvu->dev, + "Can't load KPU profile %s. Using default.\n", + kpu_profile); + kfree(rvu->kpu_fwdata); + rvu->kpu_fwdata = NULL; + goto revert_to_default; + } + dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n", + profile->name, NPC_KPU_VER_MAJ(profile->version), + NPC_KPU_VER_MIN(profile->version), + NPC_KPU_VER_PATCH(profile->version)); + + return; + +revert_to_default: npc_prepare_default_kpu(profile); } @@ -1323,7 +1721,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr) { struct rvu_hwinfo *hw = rvu->hw; int num_pkinds, num_kpus, idx; - struct npc_pkind *pkind; /* Disable all KPUs and their entries */ for (idx = 0; idx < hw->npc_kpus; idx++) { @@ -1341,9 +1738,8 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr) * Check HW max count to avoid configuring junk or * writing to unsupported CSR addresses. */ - pkind = &hw->pkind; num_pkinds = rvu->kpu.pkinds; - num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds); + num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds); for (idx = 0; idx < num_pkinds; idx++) npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true); @@ -1361,6 +1757,8 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) int nixlf_count = rvu_get_nixlf_count(rvu); struct npc_mcam *mcam = &rvu->hw->mcam; int rsvd, err; + u16 index; + int cntr; u64 cfg; /* Actual number of MCAM entries vary by entry size */ @@ -1461,6 +1859,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) if (!mcam->entry2target_pffunc) goto free_mem; + for (index = 0; index < mcam->bmap_entries; index++) { + mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; + mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP; + } + + for (cntr = 0; cntr < mcam->counters.max; cntr++) + mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP; + mutex_init(&mcam->lock); return 0; @@ -1483,7 +1889,8 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr) if (npc_const1 & BIT_ULL(63)) npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2); - pkind->rsrc.max = (npc_const1 >> 12) & 0xFFULL; + pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT; + hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL; hw->npc_kpu_entries = npc_const1 & 0xFFFULL; hw->npc_kpus = (npc_const >> 8) & 0x1FULL; hw->npc_intfs = npc_const & 0xFULL; @@ -1594,6 +2001,10 @@ int rvu_npc_init(struct rvu *rvu) err = rvu_alloc_bitmap(&pkind->rsrc); if (err) return err; + /* Reserve PKIND#0 for LBKs. Power reset value of LBK_CH_PKIND is '0', + * no need to configure PKIND for all LBKs separately. 
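+ * rvu_alloc_rsrc() below returns the lowest free index, 0 on the + * freshly created bitmap, so PKIND#0 is reserved before any other + * allocation can claim it.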
+ */ + rvu_alloc_rsrc(&pkind->rsrc); /* Allocate mem for pkind to PF and channel mapping info */ pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max, @@ -1654,6 +2065,10 @@ void rvu_npc_freemem(struct rvu *rvu) kfree(pkind->rsrc.bmap); kfree(mcam->counters.bmap); + if (rvu->kpu_prfl_addr) + iounmap(rvu->kpu_prfl_addr); + else + kfree(rvu->kpu_fwdata); mutex_destroy(&mcam->lock); } @@ -2149,8 +2564,11 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, rsp->free_count = 0; /* Check if ref_entry is within range */ - if (req->priority && req->ref_entry >= mcam->bmap_entries) + if (req->priority && req->ref_entry >= mcam->bmap_entries) { + dev_err(rvu->dev, "%s: reference entry %d is out of range\n", + __func__, req->ref_entry); return NPC_MCAM_INVALID_REQ; + } /* ref_entry can't be '0' if requested priority is high. * Can't be last entry if requested priority is low. @@ -2163,11 +2581,15 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, /* Since list of allocated indices needs to be sent to requester, * max number of non-contiguous entries per mbox msg is limited. */ - if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) + if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) { + dev_err(rvu->dev, + "%s: %d Non-contiguous MCAM entries requested is more than max (%d) allowed\n", + __func__, req->count, NPC_MAX_NONCONTIG_ENTRIES); return NPC_MCAM_INVALID_REQ; + } /* Alloc request from PFFUNC with no NIXLF attached should be denied */ - if (!is_nixlf_attached(rvu, pcifunc)) + if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_ALLOC_DENIED; return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp); @@ -2187,7 +2609,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, return NPC_MCAM_INVALID_REQ; /* Free request from PFFUNC with no NIXLF attached, ignore */ - if (!is_nixlf_attached(rvu, pcifunc)) + if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); @@ -2199,7 +2621,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, if (rc) goto exit; - mcam->entry2pfvf_map[req->entry] = 0; + mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP; mcam->entry2target_pffunc[req->entry] = 0x0; npc_mcam_clear_bit(mcam, req->entry); npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); @@ -2284,13 +2706,14 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, else nix_intf = pfvf->nix_rx_intf; - if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { + if (!is_pffunc_af(pcifunc) && + npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { rc = NPC_MCAM_INVALID_REQ; goto exit; } - if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, - pcifunc)) { + if (!is_pffunc_af(pcifunc) && + npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { rc = NPC_MCAM_INVALID_REQ; goto exit; } @@ -2441,7 +2864,7 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, return NPC_MCAM_INVALID_REQ; /* If the request is from a PFFUNC with no NIXLF attached, ignore */ - if (!is_nixlf_attached(rvu, pcifunc)) + if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_INVALID_REQ; /* Since list of allocated counter IDs needs to be sent to requester, @@ -2686,7 +3109,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, if (rc) { /* Free allocated MCAM entry */ mutex_lock(&mcam->lock); - mcam->entry2pfvf_map[entry] = 0; + mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP; 
npc_mcam_clear_bit(mcam, entry); mutex_unlock(&mcam->lock); return rc; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 7f35b62eea13..5c01cf4a9c5b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -123,11 +123,8 @@ static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf) static bool npc_is_same(struct npc_key_field *input, struct npc_key_field *field) { - int ret; - - ret = memcmp(&input->layer_mdata, &field->layer_mdata, - sizeof(struct npc_layer_mdata)); - return ret == 0; + return memcmp(&input->layer_mdata, &field->layer_mdata, + sizeof(struct npc_layer_mdata)) == 0; } static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type, @@ -913,14 +910,17 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, struct mcam_entry *entry, - struct npc_install_flow_req *req, u16 target) + struct npc_install_flow_req *req, + u16 target, bool pf_set_vfs_mac) { + struct rvu_switch *rswitch = &rvu->rswitch; struct nix_rx_action action; - u64 chan_mask; - chan_mask = req->chan_mask ? req->chan_mask : ~0ULL; - npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, chan_mask, 0, - NIX_INTF_RX); + if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac) + req->chan_mask = 0x0; /* Do not care channel */ + + npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask, + 0, NIX_INTF_RX); *(u64 *)&action = 0x00; action.pf_func = target; @@ -952,9 +952,16 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, struct npc_install_flow_req *req, u16 target) { struct nix_tx_action action; + u64 mask = ~0ULL; + + /* If AF is installing then do not care about + * PF_FUNC in Send Descriptor + */ + if (is_pffunc_af(req->hdr.pcifunc)) + mask = 0; npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target), - 0, ~0ULL, 0, NIX_INTF_TX); + 0, mask, 0, NIX_INTF_TX); *(u64 *)&action = 0x00; action.op = req->op; @@ -1005,7 +1012,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, req->intf); if (is_npc_intf_rx(req->intf)) - npc_update_rx_entry(rvu, pfvf, entry, req, target); + npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac); else npc_update_tx_entry(rvu, pfvf, entry, req, target); @@ -1103,11 +1110,18 @@ find_rule: if (pf_set_vfs_mac) { ether_addr_copy(pfvf->default_mac, req->packet.dmac); ether_addr_copy(pfvf->mac_addr, req->packet.dmac); + set_bit(PF_SET_VF_MAC, &pfvf->flags); } - if (pfvf->pf_set_vf_cfg && req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7) + if (test_bit(PF_SET_VF_CFG, &pfvf->flags) && + req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7) rule->vfvlan_cfg = true; + if (is_npc_intf_rx(req->intf) && req->match_id && + (req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS)) + return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc, + req->index, req->match_id); + return 0; } @@ -1160,14 +1174,16 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, if (err) return err; - if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) + /* Skip channel validation if AF is installing */ + if (!is_pffunc_af(req->hdr.pcifunc) && + npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, target); /* PF installing for its VF */ if (req->hdr.pcifunc && !from_vf && req->vf) - 
pfvf->pf_set_vf_cfg = 1; + set_bit(PF_SET_VF_CFG, &pfvf->flags); /* update req destination mac addr */ if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) && @@ -1176,10 +1192,14 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, eth_broadcast_addr((u8 *)&req->mask.dmac); } + /* Proceed if NIXLF is attached or not for TX rules */ err = nix_get_nixlf(rvu, target, &nixlf, NULL); + if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac) + return -EINVAL; - /* If interface is uninitialized then do not enable entry */ - if (err || (!req->default_rule && !pfvf->def_ucast_rule)) + /* don't enable rule when nixlf not attached or initialized */ + if (!(is_nixlf_attached(rvu, target) && + test_bit(NIXLF_INITIALIZED, &pfvf->flags))) enable = false; /* Packets reaching NPC in Tx path implies that a @@ -1193,6 +1213,14 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, if (from_vf && !enable) return -EINVAL; + /* PF sets VF mac & VF NIXLF is not attached, update the mac addr */ + if (pf_set_vfs_mac && !enable) { + ether_addr_copy(pfvf->default_mac, req->packet.dmac); + ether_addr_copy(pfvf->mac_addr, req->packet.dmac); + set_bit(PF_SET_VF_MAC, &pfvf->flags); + return 0; + } + /* If message is from VF then its flow should not overlap with * reserved unicast flow. */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index ac71c0f2f960..8b01ef6e2c99 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -49,6 +49,11 @@ #define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4) #define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4) #define RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4) +#define RVU_AF_SMMU_ADDR_REQ (0x6000) +#define RVU_AF_SMMU_TXN_REQ (0x6008) +#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010) +#define RVU_AF_SMMU_ADDR_TLN (0x6018) +#define RVU_AF_SMMU_TLN_FLIT1 (0x6030) /* Admin function's privileged PF/VF registers */ #define RVU_PRIV_CONST (0x8000000) @@ -171,6 +176,7 @@ #define NIX_AF_SQ_CONST (0x0040) #define NIX_AF_CQ_CONST (0x0048) #define NIX_AF_RQ_CONST (0x0050) +#define NIX_AF_PL_CONST (0x0058) #define NIX_AF_PSE_CONST (0x0060) #define NIX_AF_TL1_CONST (0x0070) #define NIX_AF_TL2_CONST (0x0078) @@ -181,6 +187,7 @@ #define NIX_AF_LSO_CFG (0x00A8) #define NIX_AF_BLK_RST (0x00B0) #define NIX_AF_TX_TSTMP_CFG (0x00C0) +#define NIX_AF_PL_TS (0x00C8) #define NIX_AF_RX_CFG (0x00D0) #define NIX_AF_AVG_DELAY (0x00E0) #define NIX_AF_CINT_DELAY (0x00F0) @@ -208,19 +215,27 @@ #define NIX_AF_RVU_INT_ENA_W1S (0x01D0) #define NIX_AF_RVU_INT_ENA_W1C (0x01D8) #define NIX_AF_TCP_TIMER (0x01E0) -#define NIX_AF_RX_WQE_TAG_CTL (0x01F0) +#define NIX_AF_RX_DEF_ET(a) (0x01F0ull | (uint64_t)(a) << 3) #define NIX_AF_RX_DEF_OL2 (0x0200) #define NIX_AF_RX_DEF_OIP4 (0x0210) #define NIX_AF_RX_DEF_IIP4 (0x0220) +#define NIX_AF_RX_DEF_VLAN0_PCP_DEI (0x0228) #define NIX_AF_RX_DEF_OIP6 (0x0230) +#define NIX_AF_RX_DEF_VLAN1_PCP_DEI (0x0238) #define NIX_AF_RX_DEF_IIP6 (0x0240) #define NIX_AF_RX_DEF_OTCP (0x0250) #define NIX_AF_RX_DEF_ITCP (0x0260) #define NIX_AF_RX_DEF_OUDP (0x0270) #define NIX_AF_RX_DEF_IUDP (0x0280) #define NIX_AF_RX_DEF_OSCTP (0x0290) +#define NIX_AF_RX_DEF_CST_APAD0 (0x0298) #define NIX_AF_RX_DEF_ISCTP (0x02A0) #define NIX_AF_RX_DEF_IPSECX (0x02B0) +#define NIX_AF_RX_DEF_CST_APAD1 (0x02A8) +#define NIX_AF_RX_DEF_IIP4_DSCP (0x02E0) +#define NIX_AF_RX_DEF_OIP4_DSCP (0x02E8) +#define NIX_AF_RX_DEF_IIP6_DSCP (0x02F0) +#define NIX_AF_RX_DEF_OIP6_DSCP 
(0x02F8) #define NIX_AF_RX_IPSEC_GEN_CFG (0x0300) #define NIX_AF_RX_CPTX_INST_ADDR (0x0310) #define NIX_AF_NDC_TX_SYNC (0x03F0) @@ -682,4 +697,9 @@ #define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6) #define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0) +/* APR */ +#define APR_AF_LMT_CFG (0x000ull) +#define APR_AF_LMT_MAP_BASE (0x008ull) +#define APR_AF_LMT_CTL (0x010ull) + #endif /* RVU_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 5e5f45c7eab0..5bbe6727d11d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -35,7 +35,8 @@ enum rvu_block_addr_e { BLKADDR_NDC_NPA0 = 0xeULL, BLKADDR_NDC_NIX1_RX = 0x10ULL, BLKADDR_NDC_NIX1_TX = 0x11ULL, - BLK_COUNT = 0x12ULL, + BLKADDR_APR = 0x16ULL, + BLK_COUNT = 0x17ULL, }; /* RVU Block Type Enumeration */ @@ -286,7 +287,7 @@ enum nix_aq_ctype { NIX_AQ_CTYPE_MCE = 0x3, NIX_AQ_CTYPE_RSS = 0x4, NIX_AQ_CTYPE_DYNO = 0x5, - NIX_AQ_CTYPE_BAND_PROF = 0x6, + NIX_AQ_CTYPE_BANDPROF = 0x6, }; /* NIX admin queue instruction opcodes */ @@ -665,6 +666,89 @@ struct nix_rx_mce_s { uint64_t next : 16; }; +enum nix_band_prof_layers { + BAND_PROF_LEAF_LAYER = 0, + BAND_PROF_INVAL_LAYER = 1, + BAND_PROF_MID_LAYER = 2, + BAND_PROF_TOP_LAYER = 3, + BAND_PROF_NUM_LAYERS = 4, +}; + +enum NIX_RX_BAND_PROF_ACTIONRESULT_E { + NIX_RX_BAND_PROF_ACTIONRESULT_PASS = 0x0, + NIX_RX_BAND_PROF_ACTIONRESULT_DROP = 0x1, + NIX_RX_BAND_PROF_ACTIONRESULT_RED = 0x2, +}; + +enum nix_band_prof_pc_mode { + NIX_RX_PC_MODE_VLAN = 0, + NIX_RX_PC_MODE_DSCP = 1, + NIX_RX_PC_MODE_GEN = 2, + NIX_RX_PC_MODE_RSVD = 3, +}; + +/* NIX ingress policer bandwidth profile structure */ +struct nix_bandprof_s { + uint64_t pc_mode : 2; /* W0 */ + uint64_t icolor : 2; + uint64_t tnl_ena : 1; + uint64_t reserved_5_7 : 3; + uint64_t peir_exponent : 5; + uint64_t reserved_13_15 : 3; + uint64_t pebs_exponent : 5; + uint64_t reserved_21_23 : 3; + uint64_t cir_exponent : 5; + uint64_t reserved_29_31 : 3; + uint64_t cbs_exponent : 5; + uint64_t reserved_37_39 : 3; + uint64_t peir_mantissa : 8; + uint64_t pebs_mantissa : 8; + uint64_t cir_mantissa : 8; + uint64_t cbs_mantissa : 8; /* W1 */ + uint64_t lmode : 1; + uint64_t l_sellect : 3; + uint64_t rdiv : 4; + uint64_t adjust_exponent : 5; + uint64_t reserved_85_86 : 2; + uint64_t adjust_mantissa : 9; + uint64_t gc_action : 2; + uint64_t yc_action : 2; + uint64_t rc_action : 2; + uint64_t meter_algo : 2; + uint64_t band_prof_id : 7; + uint64_t reserved_111_118 : 8; + uint64_t hl_en : 1; + uint64_t reserved_120_127 : 8; + uint64_t ts : 48; /* W2 */ + uint64_t reserved_176_191 : 16; + uint64_t pe_accum : 32; /* W3 */ + uint64_t c_accum : 32; + uint64_t green_pkt_pass : 48; /* W4 */ + uint64_t reserved_304_319 : 16; + uint64_t yellow_pkt_pass : 48; /* W5 */ + uint64_t reserved_368_383 : 16; + uint64_t red_pkt_pass : 48; /* W6 */ + uint64_t reserved_432_447 : 16; + uint64_t green_octs_pass : 48; /* W7 */ + uint64_t reserved_496_511 : 16; + uint64_t yellow_octs_pass : 48; /* W8 */ + uint64_t reserved_560_575 : 16; + uint64_t red_octs_pass : 48; /* W9 */ + uint64_t reserved_624_639 : 16; + uint64_t green_pkt_drop : 48; /* W10 */ + uint64_t reserved_688_703 : 16; + uint64_t yellow_pkt_drop : 48; /* W11 */ + uint64_t reserved_752_767 : 16; + uint64_t red_pkt_drop : 48; /* W12 */ + uint64_t reserved_816_831 : 16; + uint64_t green_octs_drop : 48; /* W13 */ + uint64_t reserved_880_895 : 16; + uint64_t yellow_octs_drop : 48; 
/* W14 */ + uint64_t reserved_944_959 : 16; + uint64_t red_octs_drop : 48; /* W15 */ + uint64_t reserved_1008_1023 : 16; +}; + enum nix_lsoalg { NIX_LSOALG_NOP, NIX_LSOALG_ADD_SEGNUM, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c new file mode 100644 index 000000000000..820adf390b8e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2021 Marvell. + */ + +#include <linux/bitfield.h> +#include "rvu.h" + +static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc, + u16 chan_mask) +{ + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; + struct rvu_pfvf *pfvf; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + /* If the pcifunc is not initialized then nothing to do. + * This same function will be called again via rvu_switch_update_rules + * after pcifunc is initialized. + */ + if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags)) + return 0; + + ether_addr_copy(req.packet.dmac, pfvf->mac_addr); + eth_broadcast_addr((u8 *)&req.mask.dmac); + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = pcifunc; + req.features = BIT_ULL(NPC_DMAC); + req.channel = pfvf->rx_chan_base; + req.chan_mask = chan_mask; + req.intf = pfvf->nix_rx_intf; + req.op = NIX_RX_ACTION_DEFAULT; + req.default_rule = 1; + + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry) +{ + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; + struct rvu_pfvf *pfvf; + u8 lbkid; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + /* If the pcifunc is not initialized then nothing to do. + * This same function will be called again via rvu_switch_update_rules + * after pcifunc is initialized. + */ + if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags)) + return 0; + + lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1; + ether_addr_copy(req.packet.dmac, pfvf->mac_addr); + eth_broadcast_addr((u8 *)&req.mask.dmac); + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = pcifunc; + req.entry = entry; + req.features = BIT_ULL(NPC_DMAC); + req.intf = pfvf->nix_tx_intf; + req.op = NIX_TX_ACTIONOP_UCAST_CHAN; + req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN; + req.set_cntr = 1; + + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +static int rvu_switch_install_rules(struct rvu *rvu) +{ + struct rvu_switch *rswitch = &rvu->rswitch; + u16 start = rswitch->start_entry; + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc, entry = 0; + int pf, vf, numvfs; + int err; + + for (pf = 1; pf < hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + + pcifunc = pf << 10; + /* rvu_get_nix_blkaddr sets up the corresponding NIX block + * address and NIX RX and TX interfaces for a pcifunc. + * Generally it is called during attach call of a pcifunc but it + * is called here since we are pre-installing rules before + * nixlfs are attached + */ + rvu_get_nix_blkaddr(rvu, pcifunc); + + /* MCAM RX rule for a PF/VF already exists as default unicast + * rules installed by AF. Hence change the channel in those + * rules to ignore channel so that packets with the required + * DMAC received from LBK(by other PF/VFs in system) or from + * external world (from wire) are accepted. 
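+ * Passing chan_mask as 0x0 below makes the channel field a + * don't-care; rvu_switch_disable() restores the 0xFFF mask later.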
+ */ + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); + if (err) { + dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n", + pf, err); + return err; + } + + err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry); + if (err) { + dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n", + pf, err); + return err; + } + + rswitch->entry2pcifunc[entry++] = pcifunc; + + rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); + for (vf = 0; vf < numvfs; vf++) { + pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + rvu_get_nix_blkaddr(rvu, pcifunc); + + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); + if (err) { + dev_err(rvu->dev, + "RX rule for PF%dVF%d failed(%d)\n", + pf, vf, err); + return err; + } + + err = rvu_switch_install_tx_rule(rvu, pcifunc, + start + entry); + if (err) { + dev_err(rvu->dev, + "TX rule for PF%dVF%d failed(%d)\n", + pf, vf, err); + return err; + } + + rswitch->entry2pcifunc[entry++] = pcifunc; + } + } + + return 0; +} + +void rvu_switch_enable(struct rvu *rvu) +{ + struct npc_mcam_alloc_entry_req alloc_req = { 0 }; + struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 }; + struct npc_delete_flow_req uninstall_req = { 0 }; + struct npc_mcam_free_entry_req free_req = { 0 }; + struct rvu_switch *rswitch = &rvu->rswitch; + struct msg_rsp rsp; + int ret; + + alloc_req.contig = true; + alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs; + ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, + &alloc_rsp); + if (ret) { + dev_err(rvu->dev, + "Unable to allocate MCAM entries\n"); + goto exit; + } + + if (alloc_rsp.count != alloc_req.count) { + dev_err(rvu->dev, + "Unable to allocate %d MCAM entries, got %d\n", + alloc_req.count, alloc_rsp.count); + goto free_entries; + } + + rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16), + GFP_KERNEL); + if (!rswitch->entry2pcifunc) + goto free_entries; + + rswitch->used_entries = alloc_rsp.count; + rswitch->start_entry = alloc_rsp.entry; + + ret = rvu_switch_install_rules(rvu); + if (ret) + goto uninstall_rules; + + return; + +uninstall_rules: + uninstall_req.start = rswitch->start_entry; + uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; + rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp); + kfree(rswitch->entry2pcifunc); +free_entries: + free_req.all = 1; + rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp); +exit: + return; +} + +void rvu_switch_disable(struct rvu *rvu) +{ + struct npc_delete_flow_req uninstall_req = { 0 }; + struct npc_mcam_free_entry_req free_req = { 0 }; + struct rvu_switch *rswitch = &rvu->rswitch; + struct rvu_hwinfo *hw = rvu->hw; + int pf, vf, numvfs; + struct msg_rsp rsp; + u16 pcifunc; + int err; + + if (!rswitch->used_entries) + return; + + for (pf = 1; pf < hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + + pcifunc = pf << 10; + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); + if (err) + dev_err(rvu->dev, + "Reverting RX rule for PF%d failed(%d)\n", + pf, err); + + rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); + for (vf = 0; vf < numvfs; vf++) { + pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); + if (err) + dev_err(rvu->dev, + "Reverting RX rule for PF%dVF%d failed(%d)\n", + pf, vf, err); + } + } + + uninstall_req.start = rswitch->start_entry; + uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; + free_req.all = 1; + rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp); + rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp); + rswitch->used_entries = 0; 
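+ /* Clearing used_entries also makes rvu_switch_update_rules() a no-op */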
+ kfree(rswitch->entry2pcifunc); +} + +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_switch *rswitch = &rvu->rswitch; + u32 max = rswitch->used_entries; + u16 entry; + + if (!rswitch->used_entries) + return; + + for (entry = 0; entry < max; entry++) { + if (rswitch->entry2pcifunc[entry] == pcifunc) + break; + } + + if (entry >= max) + return; + + rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry); + rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h index e6609068e81b..64aa7d350df1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h @@ -21,7 +21,7 @@ TRACE_EVENT(otx2_msg_alloc, __field(u16, id) __field(u64, size) ), - TP_fast_assign(__assign_str(dev, pci_name(pdev)) + TP_fast_assign(__assign_str(dev, pci_name(pdev)); __entry->id = id; __entry->size = size; ), @@ -36,7 +36,7 @@ TRACE_EVENT(otx2_msg_send, __field(u16, num_msgs) __field(u64, msg_size) ), - TP_fast_assign(__assign_str(dev, pci_name(pdev)) + TP_fast_assign(__assign_str(dev, pci_name(pdev)); __entry->num_msgs = num_msgs; __entry->msg_size = msg_size; ), @@ -52,7 +52,7 @@ TRACE_EVENT(otx2_msg_check, __field(u16, rspid) __field(int, rc) ), - TP_fast_assign(__assign_str(dev, pci_name(pdev)) + TP_fast_assign(__assign_str(dev, pci_name(pdev)); __entry->reqid = reqid; __entry->rspid = rspid; __entry->rc = rc; @@ -69,8 +69,8 @@ TRACE_EVENT(otx2_msg_interrupt, __string(str, msg) __field(u64, intr) ), - TP_fast_assign(__assign_str(dev, pci_name(pdev)) - __assign_str(str, msg) + TP_fast_assign(__assign_str(dev, pci_name(pdev)); + __assign_str(str, msg); __entry->intr = intr; ), TP_printk("[%s] mbox interrupt %s (0x%llx)\n", __get_str(dev), @@ -84,7 +84,7 @@ TRACE_EVENT(otx2_msg_process, __field(u16, id) __field(int, err) ), - TP_fast_assign(__assign_str(dev, pci_name(pdev)) + TP_fast_assign(__assign_str(dev, pci_name(pdev)); __entry->id = id; __entry->err = err; ), diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 457c94793e63..3254b02205ca 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o + otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o rvu_nicvf-y := otx2_vf.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 9ec0313f13fc..184de9466286 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -22,69 +22,52 @@ static struct dev_hw_ops cn10k_hw_ops = { .refill_pool_ptrs = cn10k_refill_pool_ptrs, }; -int cn10k_pf_lmtst_init(struct otx2_nic *pf) +int cn10k_lmtst_init(struct otx2_nic *pfvf) { - int size, num_lines; - u64 base; - if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) { - pf->hw_ops = &otx2_hw_ops; + struct lmtst_tbl_setup_req *req; + int qcount, err; + + if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { + pfvf->hw_ops = &otx2_hw_ops; return 0; } - pf->hw_ops = &cn10k_hw_ops; - base = pci_resource_start(pf->pdev, 
PCI_MBOX_BAR_NUM) + - (MBOX_SIZE * (pf->total_vfs + 1)); - - size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) - - (MBOX_SIZE * (pf->total_vfs + 1)); - - pf->hw.lmt_base = ioremap(base, size); + pfvf->hw_ops = &cn10k_hw_ops; + qcount = pfvf->hw.max_queues; + /* LMTST lines allocation + * qcount = num_online_cpus(); + * NPA = TX + RX + XDP. + * NIX = TX * 32 (For Burst SQE flush). + */ + pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32); + pfvf->npa_lmt_lines = qcount * 3; + pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE; - if (!pf->hw.lmt_base) { - dev_err(pf->dev, "Unable to map PF LMTST region\n"); + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } - /* FIXME: Get the num of LMTST lines from LMT table */ - pf->tot_lmt_lines = size / LMT_LINE_SIZE; - num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) / - pf->hw.tx_queues; - /* Number of LMT lines per SQ queues */ - pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines; - - pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE; - return 0; -} - -int cn10k_vf_lmtst_init(struct otx2_nic *vf) -{ - int size, num_lines; + req->use_local_lmt_region = true; - if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) { - vf->hw_ops = &otx2_hw_ops; - return 0; + err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines, + LMT_LINE_SIZE); + if (err) { + mutex_unlock(&pfvf->mbox.lock); + return err; } + pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base; + req->lmt_iova = (u64)pfvf->dync_lmt->iova; - vf->hw_ops = &cn10k_hw_ops; - size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM); - vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev, - PCI_MBOX_BAR_NUM), - size); - if (!vf->hw.lmt_base) { - dev_err(vf->dev, "Unable to map VF LMTST region\n"); - return -ENOMEM; - } + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); - vf->tot_lmt_lines = size / LMT_LINE_SIZE; - /* LMTST lines per SQ */ - num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) / - vf->hw.tx_queues; - vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines; - vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE; return 0; } -EXPORT_SYMBOL(cn10k_vf_lmtst_init); +EXPORT_SYMBOL(cn10k_lmtst_init); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) { @@ -93,9 +76,11 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) struct otx2_snd_queue *sq; sq = &pfvf->qset.sq[qidx]; - sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base + + sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base + (qidx * pfvf->nix_lmt_size)); + sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE); + /* Get memory to put this msg */ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); if (!aq) @@ -158,15 +143,13 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) { - struct otx2_nic *pfvf = dev; - int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines); u64 val = 0, tar_addr = 0; /* FIXME: val[0:10] LMT_ID. * [12:15] no of LMTST - 1 in the burst. * [19:63] data size of each LMTST in the burst except first. */ - val = (lmt_id & 0x7FF); + val = (sq->lmt_id & 0x7FF); /* Target address for LMTST flush tells HW how many 128bit * words are present. * tar_addr[6:4] size of first LMTST - 1 in units of 128b. 
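* e.g. one full 128B LMT line holds 8 128-bit words, so for a * single-line burst tar_addr[6:4] = 7 and val[0:10] carries the LMT_ID.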
@@ -179,3 +162,326 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) sq->head++; sq->head &= (sq->sqe_cnt - 1); } + +int cn10k_free_all_ipolicers(struct otx2_nic *pfvf) +{ + struct nix_bandprof_free_req *req; + int rc; + + if (is_dev_otx2(pfvf->pdev)) + return 0; + + mutex_lock(&pfvf->mbox.lock); + + req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox); + if (!req) { + rc = -ENOMEM; + goto out; + } + + /* Free all bandwidth profiles allocated */ + req->free_all = true; + + rc = otx2_sync_mbox_msg(&pfvf->mbox); +out: + mutex_unlock(&pfvf->mbox.lock); + return rc; +} + +int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf) +{ + struct nix_bandprof_alloc_req *req; + struct nix_bandprof_alloc_rsp *rsp; + int rc; + + req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox); + if (!req) + return -ENOMEM; + + req->prof_count[BAND_PROF_LEAF_LAYER] = 1; + + rc = otx2_sync_mbox_msg(&pfvf->mbox); + if (rc) + goto out; + + rsp = (struct nix_bandprof_alloc_rsp *) + otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) { + rc = -EIO; + goto out; + } + + *leaf = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0]; +out: + if (rc) { + dev_warn(pfvf->dev, + "Failed to allocate ingress bandwidth policer\n"); + } + + return rc; +} + +int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf) +{ + struct otx2_hw *hw = &pfvf->hw; + int ret; + + mutex_lock(&pfvf->mbox.lock); + + ret = cn10k_alloc_leaf_profile(pfvf, &hw->matchall_ipolicer); + + mutex_unlock(&pfvf->mbox.lock); + + return ret; +} + +#define POLICER_TIMESTAMP 1 /* 1 second */ +#define MAX_RATE_EXP 22 /* Valid rate exponent range: 0 - 22 */ + +static void cn10k_get_ingress_burst_cfg(u32 burst, u32 *burst_exp, + u32 *burst_mantissa) +{ + int tmp; + + /* Burst is calculated as + * (1+[BURST_MANTISSA]/256)*2^[BURST_EXPONENT] + * This is the upper limit on the number of tokens (bytes) that + * can be accumulated in the bucket. + */ + *burst_exp = ilog2(burst); + if (burst < 256) { + /* No float: can't express mantissa in this case */ + *burst_mantissa = 0; + return; + } + + if (*burst_exp > MAX_RATE_EXP) + *burst_exp = MAX_RATE_EXP; + + /* Calculate mantissa + * Find remaining bytes 'burst - 2^burst_exp' + * mantissa = (remaining bytes) / 2^ (burst_exp - 8) + */ + tmp = burst - rounddown_pow_of_two(burst); + *burst_mantissa = tmp / (1UL << (*burst_exp - 8)); +} + +static void cn10k_get_ingress_rate_cfg(u64 rate, u32 *rate_exp, + u32 *rate_mantissa, u32 *rdiv) +{ + u32 div = 0; + u32 exp = 0; + u64 tmp; + + /* Figure out mantissa, exponent and divider from given max pkt rate + * + * To achieve desired rate HW adds + * (1+[RATE_MANTISSA]/256)*2^[RATE_EXPONENT] tokens (bytes) at every + * policer timeunit * 2^rdiv i.e. 2 * 2^rdiv usecs, to the token bucket. + * Here policer timeunit is 2 usecs and rate is in bits per sec. + * Since floating point cannot be used, the algorithm below uses a + * scale factor of 1000000 to support rates up to 100Gbps.
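+ * Worked example (from the arithmetic below): for rate = 1Gbps, + * tmp = 1e9 * 64 = 6.4e10; halving 7 times gives tmp = 5e8, exp = 7, + * mantissa = (500 - 256) = 244, rdiv = 0. Check: (1 + 244/256) * 2^7 + * = 250 bytes per 2 usecs = 1Gbps.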
+ */ + tmp = rate * 32 * 2; + if (tmp < 256000000) { + while (tmp < 256000000) { + tmp = tmp * 2; + div++; + } + } else { + for (exp = 0; tmp >= 512000000 && exp <= MAX_RATE_EXP; exp++) + tmp = tmp / 2; + + if (exp > MAX_RATE_EXP) + exp = MAX_RATE_EXP; + } + + *rate_mantissa = (tmp - 256000000) / 1000000; + *rate_exp = exp; + *rdiv = div; +} + +int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx, + u16 policer, bool map) +{ + struct nix_cn10k_aq_enq_req *aq; + + aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + + /* Enable policing and set the bandwidth profile (policer) index */ + if (map) + aq->rq.policer_ena = 1; + else + aq->rq.policer_ena = 0; + aq->rq_mask.policer_ena = 1; + + aq->rq.band_prof_id = policer; + aq->rq_mask.band_prof_id = GENMASK(9, 0); + + /* Fill AQ info */ + aq->qidx = rq_idx; + aq->ctype = NIX_AQ_CTYPE_RQ; + aq->op = NIX_AQ_INSTOP_WRITE; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf) +{ + struct nix_bandprof_free_req *req; + + req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox); + if (!req) + return -ENOMEM; + + req->prof_count[BAND_PROF_LEAF_LAYER] = 1; + req->prof_idx[BAND_PROF_LEAF_LAYER][0] = leaf; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf) +{ + struct otx2_hw *hw = &pfvf->hw; + int qidx, rc; + + mutex_lock(&pfvf->mbox.lock); + + /* Remove RQ's policer mapping */ + for (qidx = 0; qidx < hw->rx_queues; qidx++) + cn10k_map_unmap_rq_policer(pfvf, qidx, + hw->matchall_ipolicer, false); + + rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer); + + mutex_unlock(&pfvf->mbox.lock); + return rc; +} + +int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile, + u32 burst, u64 rate, bool pps) +{ + struct nix_cn10k_aq_enq_req *aq; + u32 burst_exp, burst_mantissa; + u32 rate_exp, rate_mantissa; + u32 rdiv; + + /* Get exponent and mantissa values for the desired rate */ + cn10k_get_ingress_burst_cfg(burst, &burst_exp, &burst_mantissa); + cn10k_get_ingress_rate_cfg(rate, &rate_exp, &rate_mantissa, &rdiv); + + /* Init bandwidth profile */ + aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + + /* Set initial color mode to blind */ + aq->prof.icolor = 0x03; + aq->prof_mask.icolor = 0x03; + + /* Set rate and burst values */ + aq->prof.cir_exponent = rate_exp; + aq->prof_mask.cir_exponent = 0x1F; + + aq->prof.cir_mantissa = rate_mantissa; + aq->prof_mask.cir_mantissa = 0xFF; + + aq->prof.cbs_exponent = burst_exp; + aq->prof_mask.cbs_exponent = 0x1F; + + aq->prof.cbs_mantissa = burst_mantissa; + aq->prof_mask.cbs_mantissa = 0xFF; + + aq->prof.rdiv = rdiv; + aq->prof_mask.rdiv = 0xF; + + if (pps) { + /* The number of tokens decremented is calculated according to + * the following equation: + * max([ LMODE ? 0 : (packet_length - LXPTR)] + + * ([ADJUST_MANTISSA]/256 - 1) * 2^[ADJUST_EXPONENT], + * 1/256) + * If LMODE is 1 then rate limiting is based on + * PPS, otherwise on bps. + * The aim of the ADJUST value is to specify a token cost per + * packet in contrast to the packet length that specifies a + * cost per byte. To rate limit based on PPS adjust mantissa + * is set as 384 and exponent as 1 so that the number of tokens + * decremented becomes 1, i.e., 1 token per packet.
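+ * e.g. with LMODE = 1 the packet-length term is zero, so + * max((384/256 - 1) * 2^1, 1/256) = max(1, 1/256) = 1 token per packet.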
+ */ + aq->prof.adjust_exponent = 1; + aq->prof_mask.adjust_exponent = 0x1F; + + aq->prof.adjust_mantissa = 384; + aq->prof_mask.adjust_mantissa = 0x1FF; + + aq->prof.lmode = 0x1; + aq->prof_mask.lmode = 0x1; + } + + /* Two rate three color marker + * With PEIR/EIR set to zero, color will be either green or red + */ + aq->prof.meter_algo = 2; + aq->prof_mask.meter_algo = 0x3; + + aq->prof.rc_action = NIX_RX_BAND_PROF_ACTIONRESULT_DROP; + aq->prof_mask.rc_action = 0x3; + + aq->prof.yc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS; + aq->prof_mask.yc_action = 0x3; + + aq->prof.gc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS; + aq->prof_mask.gc_action = 0x3; + + /* Setting exponent value as 24 and mantissa as 0 configures + * the bucket with zero values making bucket unused. Peak + * information rate and Excess information rate buckets are + * unused here. + */ + aq->prof.peir_exponent = 24; + aq->prof_mask.peir_exponent = 0x1F; + + aq->prof.peir_mantissa = 0; + aq->prof_mask.peir_mantissa = 0xFF; + + aq->prof.pebs_exponent = 24; + aq->prof_mask.pebs_exponent = 0x1F; + + aq->prof.pebs_mantissa = 0; + aq->prof_mask.pebs_mantissa = 0xFF; + + /* Fill AQ info */ + aq->qidx = profile; + aq->ctype = NIX_AQ_CTYPE_BANDPROF; + aq->op = NIX_AQ_INSTOP_WRITE; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf, + u32 burst, u64 rate) +{ + struct otx2_hw *hw = &pfvf->hw; + int qidx, rc; + + mutex_lock(&pfvf->mbox.lock); + + rc = cn10k_set_ipolicer_rate(pfvf, hw->matchall_ipolicer, burst, + rate, false); + if (rc) + goto out; + + for (qidx = 0; qidx < hw->rx_queues; qidx++) { + rc = cn10k_map_unmap_rq_policer(pfvf, qidx, + hw->matchall_ipolicer, true); + if (rc) + break; + } + +out: + mutex_unlock(&pfvf->mbox.lock); + return rc; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index e0bc595cbb78..1a1ae334477d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -12,6 +12,16 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); -int cn10k_pf_lmtst_init(struct otx2_nic *pf); -int cn10k_vf_lmtst_init(struct otx2_nic *vf); +int cn10k_lmtst_init(struct otx2_nic *pfvf); +int cn10k_free_all_ipolicers(struct otx2_nic *pfvf); +int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf); +int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf); +int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf, + u32 burst, u64 rate); +int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx, + u16 policer, bool map); +int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf); +int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile, + u32 burst, u64 rate, bool pps); +int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf); #endif /* CN10K_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index cf7875d51d87..70fcc1fd962f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -210,6 +210,9 @@ int otx2_set_mac_address(struct net_device *netdev, void *p) /* update dmac field in vlan offload rule */ if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) otx2_install_rxvlan_offload_flow(pfvf); + /* update dmac 
address in ntuple and DMAC filter list */ + if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) + otx2_dmacflt_update_pfmac_flow(pfvf); } else { return -EPERM; } @@ -921,12 +924,14 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt); aq->cq.drop_ena = 1; - /* Enable receive CQ backpressure */ - aq->cq.bp_ena = 1; - aq->cq.bpid = pfvf->bpid[0]; + if (!is_otx2_lbkvf(pfvf->pdev)) { + /* Enable receive CQ backpressure */ + aq->cq.bp_ena = 1; + aq->cq.bpid = pfvf->bpid[0]; - /* Set backpressure level is same as cq pass level */ - aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); + /* Set backpressure level is same as cq pass level */ + aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); + } } /* Fill AQ info */ @@ -1183,7 +1188,7 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, aq->aura.fc_hyst_bits = 0; /* Store count on all updates */ /* Enable backpressure for RQ aura */ - if (aura_id < pfvf->hw.rqpool_cnt) { + if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) { aq->aura.bp_ena = 0; aq->aura.nix0_bpid = pfvf->bpid[0]; /* Set backpressure level for RQ's Aura */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 45730d0d92f2..8fd58cd07f50 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -180,6 +180,7 @@ struct otx2_hw { /* NIX */ u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; + u16 matchall_ipolicer; /* HW settings, coalescing etc */ u16 rx_chan_base; @@ -217,12 +218,17 @@ struct otx2_hw { unsigned long cap_flag; #define LMT_LINE_SIZE 128 -#define NIX_LMTID_BASE 72 /* RX + TX + XDP */ - void __iomem *lmt_base; +#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */ + u64 *lmt_base; u64 *npa_lmt_base; u64 *nix_lmt_base; }; +enum vfperm { + OTX2_RESET_VF_PERM, + OTX2_TRUSTED_VF, +}; + struct otx2_vf_config { struct otx2_nic *pf; struct delayed_work link_event_work; @@ -230,6 +236,7 @@ struct otx2_vf_config { u8 mac[ETH_ALEN]; u16 vlan; int tx_vtag_idx; + bool trusted; }; struct flr_work { @@ -261,24 +268,29 @@ struct otx2_mac_table { struct otx2_flow_config { u16 entry[NPC_MAX_NONCONTIG_ENTRIES]; - u32 nr_flows; -#define OTX2_MAX_NTUPLE_FLOWS 32 -#define OTX2_MAX_UNICAST_FLOWS 8 -#define OTX2_MAX_VLAN_FLOWS 1 -#define OTX2_MAX_TC_FLOWS OTX2_MAX_NTUPLE_FLOWS -#define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \ + u16 *flow_ent; + u16 *def_ent; + u16 nr_flows; +#define OTX2_DEFAULT_FLOWCOUNT 16 +#define OTX2_MAX_UNICAST_FLOWS 8 +#define OTX2_MAX_VLAN_FLOWS 1 +#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT +#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \ OTX2_MAX_UNICAST_FLOWS + \ OTX2_MAX_VLAN_FLOWS) - u32 ntuple_offset; - u32 unicast_offset; - u32 rx_vlan_offset; - u32 vf_vlan_offset; -#define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */ + u16 ntuple_offset; + u16 unicast_offset; + u16 rx_vlan_offset; + u16 vf_vlan_offset; +#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */ #define OTX2_VF_VLAN_RX_INDEX 0 #define OTX2_VF_VLAN_TX_INDEX 1 - u32 tc_flower_offset; - u32 ntuple_max_flows; - u32 tc_max_flows; + u16 tc_flower_offset; + u16 ntuple_max_flows; + u16 tc_max_flows; + u8 dmacflt_max_flows; + u8 *bmap_to_dmacindex; + unsigned long dmacflt_bmap; struct list_head flow_list; }; @@ -319,6 +331,8 @@ struct otx2_nic { #define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10) #define 
OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
 #define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED	BIT_ULL(12)
+#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED	BIT_ULL(13)
+#define OTX2_FLAG_DMACFLTR_SUPPORT		BIT_ULL(14)
 	u64 flags;
 
 	struct otx2_qset	qset;
@@ -353,8 +367,9 @@ struct otx2_nic {
 	/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
 	int			nix_blkaddr;
 	/* LMTST Lines info */
+	struct qmem		*dync_lmt;
 	u16			tot_lmt_lines;
-	u16			nix_lmt_lines;
+	u16			npa_lmt_lines;
 	u32			nix_lmt_size;
 
 	struct otx2_ptp		*ptp;
@@ -362,6 +377,7 @@ struct otx2_nic {
 
 	struct otx2_flow_config	*flow_cfg;
 	struct otx2_tc_info	tc_info;
+	unsigned long		rq_bmap;
 };
 
 static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -822,4 +838,11 @@ int otx2_init_tc(struct otx2_nic *nic);
 void otx2_shutdown_tc(struct otx2_nic *nic);
 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 		  void *type_data);
+/* CGX/RPM DMAC filters support */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
 #endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
new file mode 100644
index 000000000000..383a6b5cb698
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "otx2_common.h"
+
+static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
+			       u8 *dmac_index)
+{
+	struct cgx_mac_addr_add_req *req;
+	struct cgx_mac_addr_add_rsp *rsp;
+	int err;
+
+	mutex_lock(&pf->mbox.lock);
+
+	req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
+	if (!req) {
+		mutex_unlock(&pf->mbox.lock);
+		return -ENOMEM;
+	}
+
+	ether_addr_copy(req->mac_addr, mac);
+	err = otx2_sync_mbox_msg(&pf->mbox);
+
+	if (!err) {
+		rsp = (struct cgx_mac_addr_add_rsp *)
+			otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+		*dmac_index = rsp->index;
+	}
+
+	mutex_unlock(&pf->mbox.lock);
+	return err;
+}
+
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
+{
+	struct cgx_mac_addr_set_or_get *req;
+	int err;
+
+	mutex_lock(&pf->mbox.lock);
+
+	req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
+	if (!req) {
+		mutex_unlock(&pf->mbox.lock);
+		return -ENOMEM;
+	}
+
+	ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
+	err = otx2_sync_mbox_msg(&pf->mbox);
+
+	mutex_unlock(&pf->mbox.lock);
+	return err;
+}
+
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
+{
+	u8 *dmacindex;
+
+	/* Store dmacindex returned by CGX/RPM driver which will
+	 * be used for macaddr update/remove
+	 */
+	dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+	if (ether_addr_equal(mac, pf->netdev->dev_addr))
+		return otx2_dmacflt_add_pfmac(pf);
+	else
+		return otx2_dmacflt_do_add(pf, mac, dmacindex);
+}
+
+static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
+				  u8 dmac_index)
+{
+	struct cgx_mac_addr_del_req *req;
+	int err;
+
+	mutex_lock(&pfvf->mbox.lock);
+	req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
+	if (!req) {
+		mutex_unlock(&pfvf->mbox.lock);
+		return -ENOMEM;
+	}
+
+	req->index = dmac_index;
+
+	err =
otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + + return err; +} + +static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf) +{ + struct msg_req *req; + int err; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox); + + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, + u8 bit_pos) +{ + u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + + if (ether_addr_equal(mac, pf->netdev->dev_addr)) + return otx2_dmacflt_remove_pfmac(pf); + else + return otx2_dmacflt_do_remove(pf, mac, dmacindex); +} + +/* CGX/RPM blocks support max unicast entries of 32. + * on typical configuration MAC block associated + * with 4 lmacs, each lmac will have 8 dmac entries + */ +int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf) +{ + struct cgx_max_dmac_entries_get_rsp *rsp; + struct msg_req *msg; + int err; + + mutex_lock(&pf->mbox.lock); + msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox); + + if (!msg) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox); + if (err) + goto out; + + rsp = (struct cgx_max_dmac_entries_get_rsp *) + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr); + pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters; + +out: + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos) +{ + struct cgx_mac_addr_update_req *req; + int rc; + + mutex_lock(&pf->mbox.lock); + + req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox); + + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + ether_addr_copy(req->mac_addr, mac); + req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + rc = otx2_sync_mbox_msg(&pf->mbox); + + mutex_unlock(&pf->mbox.lock); + return rc; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index f4962a97a075..b906a0eb6e0d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -286,21 +286,26 @@ static int otx2_set_channels(struct net_device *dev, if (!channel->rx_count || !channel->tx_count) return -EINVAL; + if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) { + netdev_err(dev, + "Receive queues are in use by TC police action\n"); + return -EINVAL; + } + if (if_up) dev->netdev_ops->ndo_stop(dev); err = otx2_set_real_num_queues(dev, channel->tx_count, channel->rx_count); if (err) - goto fail; + return err; pfvf->hw.rx_queues = channel->rx_count; pfvf->hw.tx_queues = channel->tx_count; pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues; -fail: if (if_up) - dev->netdev_ops->ndo_open(dev); + err = dev->netdev_ops->ndo_open(dev); netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", pfvf->hw.tx_queues, pfvf->hw.rx_queues); @@ -404,7 +409,7 @@ static int otx2_set_ringparam(struct net_device *netdev, qs->rqe_cnt = rx_count; if (if_up) - netdev->netdev_ops->ndo_open(netdev); + return netdev->netdev_ops->ndo_open(netdev); return 0; } @@ -786,6 +791,10 @@ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir, if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; + if (*rss_context != ETH_RXFH_CONTEXT_ALLOC && + *rss_context >= MAX_RSS_GROUPS) + return -EINVAL; + rss = 
&pfvf->hw.rss_info; if (!rss->enable) { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 0b4fa92ba821..4d9de525802d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -18,15 +18,133 @@ struct otx2_flow { bool is_vf; u8 rss_ctx_id; int vf; + bool dmac_filter; }; +enum dmac_req { + DMAC_ADDR_UPDATE, + DMAC_ADDR_DEL +}; + +static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg) +{ + devm_kfree(pfvf->dev, flow_cfg->flow_ent); + flow_cfg->flow_ent = NULL; + flow_cfg->ntuple_max_flows = 0; + flow_cfg->tc_max_flows = 0; +} + +static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf) +{ + struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; + struct npc_mcam_free_entry_req *req; + int ent, err; + + if (!flow_cfg->ntuple_max_flows) + return 0; + + mutex_lock(&pfvf->mbox.lock); + for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) { + req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox); + if (!req) + break; + + req->entry = flow_cfg->flow_ent[ent]; + + /* Send message to AF to free MCAM entries */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + break; + } + mutex_unlock(&pfvf->mbox.lock); + otx2_clear_ntuple_flow_info(pfvf, flow_cfg); + return 0; +} + +static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count) +{ + struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; + struct npc_mcam_alloc_entry_req *req; + struct npc_mcam_alloc_entry_rsp *rsp; + int ent, allocated = 0; + + /* Free current ones and allocate new ones with requested count */ + otx2_free_ntuple_mcam_entries(pfvf); + + if (!count) + return 0; + + flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count, + sizeof(u16), GFP_KERNEL); + if (!flow_cfg->flow_ent) + return -ENOMEM; + + mutex_lock(&pfvf->mbox.lock); + + /* In a single request a max of NPC_MAX_NONCONTIG_ENTRIES MCAM entries + * can only be allocated. + */ + while (allocated < count) { + req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox); + if (!req) + goto exit; + + req->contig = false; + req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ? + NPC_MAX_NONCONTIG_ENTRIES : count - allocated; + req->priority = NPC_MCAM_HIGHER_PRIO; + req->ref_entry = flow_cfg->def_ent[0]; + + /* Send message to AF */ + if (otx2_sync_mbox_msg(&pfvf->mbox)) + goto exit; + + rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp + (&pfvf->mbox.mbox, 0, &req->hdr); + + for (ent = 0; ent < rsp->count; ent++) + flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent]; + + allocated += rsp->count; + + /* If this request is not fulfilled, no need to send + * further requests. 
+ */ + if (rsp->count != req->count) + break; + } + +exit: + mutex_unlock(&pfvf->mbox.lock); + + flow_cfg->ntuple_offset = 0; + flow_cfg->ntuple_max_flows = allocated; + flow_cfg->tc_max_flows = allocated; + + if (allocated != count) + netdev_info(pfvf->netdev, + "Unable to allocate %d MCAM entries for ntuple, got %d\n", + count, allocated); + + return allocated; +} + int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_mcam_alloc_entry_req *req; struct npc_mcam_alloc_entry_rsp *rsp; int vf_vlan_max_flows; - int i; + int ent, count; + + vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS; + count = OTX2_MAX_UNICAST_FLOWS + + OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows; + + flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count, + sizeof(u16), GFP_KERNEL); + if (!flow_cfg->def_ent) + return -ENOMEM; mutex_lock(&pfvf->mbox.lock); @@ -36,9 +154,8 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) return -ENOMEM; } - vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS; req->contig = false; - req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows; + req->count = count; /* Send message to AF */ if (otx2_sync_mbox_msg(&pfvf->mbox)) { @@ -51,37 +168,36 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) if (rsp->count != req->count) { netdev_info(pfvf->netdev, - "Unable to allocate %d MCAM entries, got %d\n", - req->count, rsp->count); - /* support only ntuples here */ - flow_cfg->ntuple_max_flows = rsp->count; - flow_cfg->ntuple_offset = 0; - pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT; - flow_cfg->tc_max_flows = flow_cfg->ntuple_max_flows; - pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; - } else { - flow_cfg->vf_vlan_offset = 0; - flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset + - vf_vlan_max_flows; - flow_cfg->tc_flower_offset = flow_cfg->ntuple_offset; - flow_cfg->unicast_offset = flow_cfg->ntuple_offset + - OTX2_MAX_NTUPLE_FLOWS; - flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset + - OTX2_MAX_UNICAST_FLOWS; - pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT; - pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT; - pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT; - pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT; - pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; - } - - for (i = 0; i < rsp->count; i++) - flow_cfg->entry[i] = rsp->entry_list[i]; + "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n"); + mutex_unlock(&pfvf->mbox.lock); + devm_kfree(pfvf->dev, flow_cfg->def_ent); + return 0; + } - pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC; + for (ent = 0; ent < rsp->count; ent++) + flow_cfg->def_ent[ent] = rsp->entry_list[ent]; + + flow_cfg->vf_vlan_offset = 0; + flow_cfg->unicast_offset = vf_vlan_max_flows; + flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset + + OTX2_MAX_UNICAST_FLOWS; + pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT; + pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT; + pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT; + pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC; mutex_unlock(&pfvf->mbox.lock); + /* Allocate entries for Ntuple filters */ + count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT); + if (count <= 0) { + otx2_clear_ntuple_flow_info(pfvf, flow_cfg); + return 0; + } + + pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT; + pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; + return 0; } @@ -96,18 +212,35 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) INIT_LIST_HEAD(&pf->flow_cfg->flow_list); - pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS; - pf->flow_cfg->tc_max_flows = pf->flow_cfg->ntuple_max_flows; - err = 
otx2_alloc_mcam_entries(pf);
 	if (err)
 		return err;
 
+	/* Check if MCAM entries are allocated or not */
+	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
+		return 0;
+
 	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
 					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
 	if (!pf->mac_table)
 		return -ENOMEM;
+
+	otx2_dmacflt_get_max_cnt(pf);
+
+	/* DMAC filters are not allocated */
+	if (!pf->flow_cfg->dmacflt_max_flows)
+		return 0;
+
+	pf->flow_cfg->bmap_to_dmacindex =
+			devm_kzalloc(pf->dev, sizeof(u8) *
+				     pf->flow_cfg->dmacflt_max_flows,
+				     GFP_KERNEL);
+
+	if (!pf->flow_cfg->bmap_to_dmacindex)
+		return -ENOMEM;
+
+	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
+
 	return 0;
 }
@@ -146,7 +279,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
 			ether_addr_copy(pf->mac_table[i].addr, mac);
 			pf->mac_table[i].inuse = true;
 			pf->mac_table[i].mcam_entry =
-				flow_cfg->entry[i + flow_cfg->unicast_offset];
+				flow_cfg->def_ent[i + flow_cfg->unicast_offset];
 			req->entry = pf->mac_table[i].mcam_entry;
 			break;
 		}
@@ -169,6 +302,12 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
 {
 	struct otx2_nic *pf = netdev_priv(netdev);
 
+	if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+			  pf->flow_cfg->dmacflt_max_flows))
+		netdev_warn(netdev,
+			    "Add %pM to CGX/RPM DMAC filters list as well\n",
+			    mac);
+
 	return otx2_do_add_macfilter(pf, mac);
 }
@@ -240,12 +379,22 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
 	list_add(&flow->list, head);
 }
 
+static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
+{
+	if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
+	    bitmap_weight(&flow_cfg->dmacflt_bmap,
+			  flow_cfg->dmacflt_max_flows))
+		return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
+	else
+		return flow_cfg->ntuple_max_flows;
+}
+
 int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
 		  u32 location)
 {
 	struct otx2_flow *iter;
 
-	if (location >= pfvf->flow_cfg->ntuple_max_flows)
+	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
 		return -EINVAL;
 
 	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
@@ -267,7 +416,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
 	int idx = 0;
 	int err = 0;
 
-	nfc->data = pfvf->flow_cfg->ntuple_max_flows;
+	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
 	while ((!err || err == -ENOENT) && idx < rule_cnt) {
 		err = otx2_get_flow(pfvf, nfc, location);
 		if (!err)
@@ -551,6 +700,7 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
 			req->features |= BIT_ULL(NPC_IPPROTO_AH);
 		else
 			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+		break;
 	default:
 		break;
 	}
@@ -648,6 +798,32 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
 	return 0;
 }
 
+static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
+					struct ethtool_rx_flow_spec *fsp)
+{
+	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+	u64 ring_cookie = fsp->ring_cookie;
+	u32 flow_type;
+
+	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
+		return false;
+
+	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+
+	/* CGX/RPM block DMAC filtering is configured for white listing;
+	 * check for an action other than DROP
+	 */
+	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
+	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
+		if (is_zero_ether_addr(eth_mask->h_dest) &&
+		    is_valid_ether_addr(eth_hdr->h_dest))
+			return true;
+	}
+
+	return false;
+}
+
 static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
 {
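otx2_is_flow_rule_dmacfilter() above decides whether an ethtool ntuple rule should be steered to the CGX/RPM DMAC filter table instead of an NPC MCAM entry. The following standalone sketch mirrors that decision in plain C; struct flow_spec, is_zero_mac() and is_valid_unicast() are simplified stand-ins for the kernel's ethtool and etherdevice helpers, and the VF-steering check is omitted for brevity:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETHER_FLOW       0x12			/* ethtool flow type for L2 rules */
#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL	/* "drop" ring cookie sentinel */

struct flow_spec {
	uint32_t flow_type;
	uint64_t ring_cookie;
	uint8_t  dst_mac[6];	/* h_u.ether_spec.h_dest */
	uint8_t  dst_mask[6];	/* m_u.ether_spec.h_dest */
};

static bool is_zero_mac(const uint8_t *m)
{
	static const uint8_t zero[6];

	return !memcmp(m, zero, 6);
}

/* valid == non-zero unicast (multicast bit of the first octet clear) */
static bool is_valid_unicast(const uint8_t *m)
{
	return !is_zero_mac(m) && !(m[0] & 1);
}

/* Mirrors the checks in otx2_is_flow_rule_dmacfilter(): an ETHER_FLOW
 * rule that carries a valid unicast destination MAC and does not drop
 * can be served from the DMAC filter table.
 */
static bool rule_is_dmac_filter(const struct flow_spec *fs)
{
	if (fs->flow_type != ETHER_FLOW)
		return false;
	if (fs->ring_cookie == RX_CLS_FLOW_DISC)
		return false;
	return is_zero_mac(fs->dst_mask) && is_valid_unicast(fs->dst_mac);
}

int main(void)
{
	struct flow_spec fs = {
		.flow_type = ETHER_FLOW,
		.ring_cookie = 0,
		.dst_mac = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	printf("DMAC filter candidate: %s\n",
	       rule_is_dmac_filter(&fs) ? "yes" : "no");
	return 0;
}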
u64 ring_cookie = flow->flow_spec.ring_cookie; @@ -706,14 +882,46 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) return err; } +static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf, + struct otx2_flow *flow) +{ + struct otx2_flow *pf_mac; + struct ethhdr *eth_hdr; + + pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL); + if (!pf_mac) + return -ENOMEM; + + pf_mac->entry = 0; + pf_mac->dmac_filter = true; + pf_mac->location = pfvf->flow_cfg->ntuple_max_flows; + memcpy(&pf_mac->flow_spec, &flow->flow_spec, + sizeof(struct ethtool_rx_flow_spec)); + pf_mac->flow_spec.location = pf_mac->location; + + /* Copy PF mac address */ + eth_hdr = &pf_mac->flow_spec.h_u.ether_spec; + ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr); + + /* Install DMAC filter with PF mac address */ + otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0); + + otx2_add_flow_to_list(pfvf, pf_mac); + pfvf->flow_cfg->nr_flows++; + set_bit(0, &pfvf->flow_cfg->dmacflt_bmap); + + return 0; +} + int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct ethtool_rx_flow_spec *fsp = &nfc->fs; struct otx2_flow *flow; + struct ethhdr *eth_hdr; bool new = false; + int err = 0; u32 ring; - int err; ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) @@ -722,17 +930,15 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) return -EINVAL; - if (fsp->location >= flow_cfg->ntuple_max_flows) + if (fsp->location >= otx2_get_maxflows(flow_cfg)) return -EINVAL; flow = otx2_find_flow(pfvf, fsp->location); if (!flow) { - flow = kzalloc(sizeof(*flow), GFP_ATOMIC); + flow = kzalloc(sizeof(*flow), GFP_KERNEL); if (!flow) return -ENOMEM; flow->location = fsp->location; - flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset + - flow->location]; new = true; } /* struct copy */ @@ -741,7 +947,54 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (fsp->flow_type & FLOW_RSS) flow->rss_ctx_id = nfc->rss_context; - err = otx2_add_flow_msg(pfvf, flow); + if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) { + eth_hdr = &flow->flow_spec.h_u.ether_spec; + + /* Sync dmac filter table with updated fields */ + if (flow->dmac_filter) + return otx2_dmacflt_update(pfvf, eth_hdr->h_dest, + flow->entry); + + if (bitmap_full(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows)) { + netdev_warn(pfvf->netdev, + "Can't insert the rule %d as max allowed dmac filters are %d\n", + flow->location + + flow_cfg->dmacflt_max_flows, + flow_cfg->dmacflt_max_flows); + err = -EINVAL; + if (new) + kfree(flow); + return err; + } + + /* Install PF mac address to DMAC filter list */ + if (!test_bit(0, &flow_cfg->dmacflt_bmap)) + otx2_add_flow_with_pfmac(pfvf, flow); + + flow->dmac_filter = true; + flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows); + fsp->location = flow_cfg->ntuple_max_flows + flow->entry; + flow->flow_spec.location = fsp->location; + flow->location = fsp->location; + + set_bit(flow->entry, &flow_cfg->dmacflt_bmap); + otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry); + + } else { + if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) { + netdev_warn(pfvf->netdev, + "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n", + flow->location, + flow_cfg->ntuple_max_flows - 1); + err = -EINVAL; + } else { + flow->entry = flow_cfg->flow_ent[flow->location]; 
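The DMAC branch a few lines above reserves one bit in dmacflt_bmap per CGX/RPM filter slot and offsets the user-visible rule location past the ntuple range. A minimal userspace sketch of that slot allocation, assuming a single-word bitmap and made-up NTUPLE_MAX/DMACFLT_MAX sizes (the real counts come from the AF and from otx2_dmacflt_get_max_cnt()):

#include <stdio.h>

#define DMACFLT_MAX	8UL	/* assumed DMAC filter count */
#define NTUPLE_MAX	16UL	/* assumed ntuple MCAM slots */

static unsigned long dmacflt_bmap;	/* one bit per DMAC filter slot */

/* single-word stand-in for the kernel's find_first_zero_bit() */
static unsigned long first_zero_bit(unsigned long map, unsigned long size)
{
	unsigned long bit;

	for (bit = 0; bit < size; bit++)
		if (!(map & (1UL << bit)))
			return bit;
	return size;	/* table full */
}

/* Reserve a DMAC slot and derive the rule location, mirroring how
 * otx2_add_flow() places DMAC rules after the ntuple range
 * (location = ntuple_max_flows + entry).
 */
static long dmac_slot_alloc(void)
{
	unsigned long entry = first_zero_bit(dmacflt_bmap, DMACFLT_MAX);

	if (entry == DMACFLT_MAX)
		return -1;	/* like the -EINVAL path above */

	dmacflt_bmap |= 1UL << entry;
	return NTUPLE_MAX + entry;
}

static void dmac_slot_free(unsigned long location)
{
	dmacflt_bmap &= ~(1UL << (location - NTUPLE_MAX));
}

int main(void)
{
	long loc = dmac_slot_alloc();

	printf("rule location %ld, bmap 0x%lx\n", loc, dmacflt_bmap);
	dmac_slot_free(loc);
	printf("after free, bmap 0x%lx\n", dmacflt_bmap);
	return 0;
}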
+ err = otx2_add_flow_msg(pfvf, flow); + } + } + if (err) { if (new) kfree(flow); @@ -779,20 +1032,70 @@ static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all) return err; } +static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req) +{ + struct otx2_flow *iter; + struct ethhdr *eth_hdr; + bool found = false; + + list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { + if (iter->dmac_filter && iter->entry == 0) { + eth_hdr = &iter->flow_spec.h_u.ether_spec; + if (req == DMAC_ADDR_DEL) { + otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, + 0); + clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap); + found = true; + } else { + ether_addr_copy(eth_hdr->h_dest, + pfvf->netdev->dev_addr); + otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0); + } + break; + } + } + + if (found) { + list_del(&iter->list); + kfree(iter); + pfvf->flow_cfg->nr_flows--; + } +} + int otx2_remove_flow(struct otx2_nic *pfvf, u32 location) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct otx2_flow *flow; int err; - if (location >= flow_cfg->ntuple_max_flows) + if (location >= otx2_get_maxflows(flow_cfg)) return -EINVAL; flow = otx2_find_flow(pfvf, location); if (!flow) return -ENOENT; - err = otx2_remove_flow_msg(pfvf, flow->entry, false); + if (flow->dmac_filter) { + struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec; + + /* user not allowed to remove dmac filter with interface mac */ + if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest)) + return -EPERM; + + err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, + flow->entry); + clear_bit(flow->entry, &flow_cfg->dmacflt_bmap); + /* If all dmac filters are removed delete macfilter with + * interface mac address and configure CGX/RPM block in + * promiscuous mode + */ + if (bitmap_weight(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows) == 1) + otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL); + } else { + err = otx2_remove_flow_msg(pfvf, flow->entry, false); + } + if (err) return err; @@ -836,9 +1139,8 @@ int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf) return -ENOMEM; } - req->start = flow_cfg->entry[flow_cfg->ntuple_offset]; - req->end = flow_cfg->entry[flow_cfg->ntuple_offset + - flow_cfg->ntuple_max_flows - 1]; + req->start = flow_cfg->flow_ent[0]; + req->end = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1]; err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); @@ -905,7 +1207,7 @@ int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf) return -ENOMEM; } - req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset]; + req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset]; req->intf = NIX_INTF_RX; ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr); eth_broadcast_addr((u8 *)&req->mask.dmac); @@ -934,7 +1236,7 @@ static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf) return -ENOMEM; } - req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset]; + req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset]; /* Send message to AF */ err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); @@ -990,3 +1292,22 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable) mutex_unlock(&pf->mbox.lock); return rsp_hdr->rc; } + +void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf) +{ + struct otx2_flow *iter; + struct ethhdr *eth_hdr; + + list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) { + if (iter->dmac_filter) { + eth_hdr = &iter->flow_spec.h_u.ether_spec; + otx2_dmacflt_add(pf, eth_hdr->h_dest, + iter->entry); + } + } +} + +void otx2_dmacflt_update_pfmac_flow(struct otx2_nic 
*pfvf) +{ + otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 03004fdac0c6..2c24944a4dba 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -39,6 +39,8 @@ MODULE_DESCRIPTION(DRV_STRING); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(pci, otx2_pf_id_table); +static void otx2_vf_link_event_task(struct work_struct *work); + enum { TYPE_PFAF, TYPE_PFVF, @@ -1108,6 +1110,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) struct msg_req *msg; int err; + if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap, + pf->flow_cfg->dmacflt_max_flows)) + netdev_warn(pf->netdev, + "CGX/RPM internal loopback might not work as DMAC filters are active\n"); + mutex_lock(&pf->mbox.lock); if (enable) msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox); @@ -1459,6 +1466,9 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) otx2_free_cq_res(pf); + /* Free all ingress bandwidth profiles allocated */ + cn10k_free_all_ipolicers(pf); + mutex_lock(&mbox->lock); /* Reset NIX LF */ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox); @@ -1528,10 +1538,10 @@ int otx2_open(struct net_device *netdev) if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) { /* Reserve LMT lines for NPA AURA batch free */ - pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base; + pf->hw.npa_lmt_base = pf->hw.lmt_base; /* Reserve LMT lines for NIX TX */ - pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base + - (NIX_LMTID_BASE * LMT_LINE_SIZE)); + pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base + + (pf->npa_lmt_lines * LMT_LINE_SIZE)); } err = otx2_init_hw_resources(pf); @@ -1639,6 +1649,10 @@ int otx2_open(struct net_device *netdev) /* Restore pause frame settings */ otx2_config_pause_frm(pf); + /* Install DMAC Filters */ + if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) + otx2_dmacflt_reinstall_flows(pf); + err = otx2_rxtx_enable(pf, true); if (err) goto err_tx_stop_queues; @@ -1648,6 +1662,7 @@ int otx2_open(struct net_device *netdev) err_tx_stop_queues: netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); + pf->flags |= OTX2_FLAG_INTF_DOWN; err_free_cints: otx2_free_cints(pf, qidx); vec = pci_irq_vector(pf->pdev, @@ -1675,6 +1690,10 @@ int otx2_stop(struct net_device *netdev) struct otx2_rss_info *rss; int qidx, vec, wrk; + /* If the DOWN flag is set resources are already freed */ + if (pf->flags & OTX2_FLAG_INTF_DOWN) + return 0; + netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); @@ -1820,9 +1839,11 @@ static void otx2_do_set_rx_mode(struct work_struct *work) if (promisc) req->mode |= NIX_RX_MODE_PROMISC; - else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) + if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) req->mode |= NIX_RX_MODE_ALLMULTI; + req->mode |= NIX_RX_MODE_USE_MCE; + otx2_sync_mbox_msg(&pf->mbox); mutex_unlock(&pf->mbox.lock); } @@ -2044,7 +2065,7 @@ static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) if (!netif_running(netdev)) return -EAGAIN; - if (vf >= pci_num_vf(pdev)) + if (vf >= pf->total_vfs) return -EINVAL; if (!is_valid_ether_addr(mac)) @@ -2055,7 +2076,8 @@ static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) ret = otx2_do_set_vf_mac(pf, vf, mac); if (ret == 0) - dev_info(&pdev->dev, "Reload VF driver to apply the changes\n"); + dev_info(&pdev->dev, + "Load/Reload VF driver\n"); return ret; } @@ -2104,7 +2126,7 @@ 
static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos, } idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX); del_req->entry = - flow_cfg->entry[flow_cfg->vf_vlan_offset + idx]; + flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; err = otx2_sync_mbox_msg(&pf->mbox); if (err) goto out; @@ -2117,7 +2139,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos, } idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX); del_req->entry = - flow_cfg->entry[flow_cfg->vf_vlan_offset + idx]; + flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; err = otx2_sync_mbox_msg(&pf->mbox); goto out; @@ -2131,7 +2153,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos, } idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX); - req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx]; + req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; req->packet.vlan_tci = htons(vlan); req->mask.vlan_tci = htons(VLAN_VID_MASK); /* af fills the destination mac addr */ @@ -2182,7 +2204,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos, eth_zero_addr((u8 *)&req->mask.dmac); idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX); - req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx]; + req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; req->features = BIT_ULL(NPC_DMAC); req->channel = pf->hw.tx_chan_base; req->intf = NIX_INTF_TX; @@ -2241,10 +2263,63 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf, ivi->vf = vf; ether_addr_copy(ivi->mac, config->mac); ivi->vlan = config->vlan; + ivi->trusted = config->trusted; return 0; } +static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf, + int req_perm) +{ + struct set_vf_perm *req; + int rc; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox); + if (!req) { + rc = -ENOMEM; + goto out; + } + + /* Let AF reset VF permissions as sriov is disabled */ + if (req_perm == OTX2_RESET_VF_PERM) { + req->flags |= RESET_VF_PERM; + } else if (req_perm == OTX2_TRUSTED_VF) { + if (pf->vf_configs[vf].trusted) + req->flags |= VF_TRUSTED; + } + + req->vf = vf; + rc = otx2_sync_mbox_msg(&pf->mbox); +out: + mutex_unlock(&pf->mbox.lock); + return rc; +} + +static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf, + bool enable) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct pci_dev *pdev = pf->pdev; + int rc; + + if (vf >= pci_num_vf(pdev)) + return -EINVAL; + + if (pf->vf_configs[vf].trusted == enable) + return 0; + + pf->vf_configs[vf].trusted = enable; + rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF); + + if (rc) + pf->vf_configs[vf].trusted = !enable; + else + netdev_info(pf->netdev, "VF %d is %strusted\n", + vf, enable ? 
"" : "not "); + return rc; +} + static const struct net_device_ops otx2_netdev_ops = { .ndo_open = otx2_open, .ndo_stop = otx2_stop, @@ -2261,6 +2336,7 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_get_vf_config = otx2_get_vf_config, .ndo_setup_tc = otx2_setup_tc, + .ndo_set_vf_trust = otx2_ndo_set_vf_trust, }; static int otx2_wq_init(struct otx2_nic *pf) @@ -2315,6 +2391,40 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf) return otx2_register_mbox_intr(pf, false); } +static int otx2_sriov_vfcfg_init(struct otx2_nic *pf) +{ + int i; + + pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs, + sizeof(struct otx2_vf_config), + GFP_KERNEL); + if (!pf->vf_configs) + return -ENOMEM; + + for (i = 0; i < pf->total_vfs; i++) { + pf->vf_configs[i].pf = pf; + pf->vf_configs[i].intf_down = true; + pf->vf_configs[i].trusted = false; + INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work, + otx2_vf_link_event_task); + } + + return 0; +} + +static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf) +{ + int i; + + if (!pf->vf_configs) + return; + + for (i = 0; i < pf->total_vfs; i++) { + cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work); + otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM); + } +} + static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; @@ -2430,7 +2540,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_detach_rsrc; - err = cn10k_pf_lmtst_init(pf); + err = cn10k_lmtst_init(pf); if (err) goto err_detach_rsrc; @@ -2509,6 +2619,11 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_mcam_flow_del; + /* Initialize SR-IOV resources */ + err = otx2_sriov_vfcfg_init(pf); + if (err) + goto err_pf_sriov_init; + /* Enable link notifications */ otx2_cgx_config_linkevents(pf, true); @@ -2518,6 +2633,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; +err_pf_sriov_init: + otx2_shutdown_tc(pf); err_mcam_flow_del: otx2_mcam_flow_del(pf); err_unreg_netdev: @@ -2527,8 +2644,8 @@ err_del_mcam_entries: err_ptp_destroy: otx2_ptp_destroy(pf); err_detach_rsrc: - if (hw->lmt_base) - iounmap(hw->lmt_base); + if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) + qmem_free(pf->dev, pf->dync_lmt); otx2_detach_resources(&pf->mbox); err_disable_mbox_intr: otx2_disable_mbox_intr(pf); @@ -2576,7 +2693,7 @@ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs) { struct net_device *netdev = pci_get_drvdata(pdev); struct otx2_nic *pf = netdev_priv(netdev); - int ret, i; + int ret; /* Init PF <=> VF mailbox stuff */ ret = otx2_pfvf_mbox_init(pf, numvfs); @@ -2587,23 +2704,9 @@ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs) if (ret) goto free_mbox; - pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config), - GFP_KERNEL); - if (!pf->vf_configs) { - ret = -ENOMEM; - goto free_intr; - } - - for (i = 0; i < numvfs; i++) { - pf->vf_configs[i].pf = pf; - pf->vf_configs[i].intf_down = true; - INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work, - otx2_vf_link_event_task); - } - ret = otx2_pf_flr_init(pf, numvfs); if (ret) - goto free_configs; + goto free_intr; ret = otx2_register_flr_me_intr(pf, numvfs); if (ret) @@ -2618,8 +2721,6 @@ free_flr_intr: otx2_disable_flr_me_intr(pf); free_flr: otx2_flr_wq_destroy(pf); -free_configs: - kfree(pf->vf_configs); free_intr: otx2_disable_pfvf_mbox_intr(pf, numvfs); free_mbox: @@ -2632,17 +2733,12 @@ static 
int otx2_sriov_disable(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct otx2_nic *pf = netdev_priv(netdev); int numvfs = pci_num_vf(pdev); - int i; if (!numvfs) return 0; pci_disable_sriov(pdev); - for (i = 0; i < pci_num_vf(pdev); i++) - cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work); - kfree(pf->vf_configs); - otx2_disable_flr_me_intr(pf); otx2_flr_wq_destroy(pf); otx2_disable_pfvf_mbox_intr(pf, numvfs); @@ -2682,6 +2778,7 @@ static void otx2_remove(struct pci_dev *pdev) unregister_netdev(netdev); otx2_sriov_disable(pf->pdev); + otx2_sriov_vfcfg_cleanup(pf); if (pf->otx2_wq) destroy_workqueue(pf->otx2_wq); @@ -2689,9 +2786,8 @@ static void otx2_remove(struct pci_dev *pdev) otx2_mcam_flow_del(pf); otx2_shutdown_tc(pf); otx2_detach_resources(&pf->mbox); - if (pf->hw.lmt_base) - iounmap(pf->hw.lmt_base); - + if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) + qmem_free(pf->dev, pf->dync_lmt); otx2_disable_mbox_intr(pf); otx2_pfaf_mbox_destroy(pf); pci_free_irq_vectors(pf->pdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 51157b283f6f..972b202b9884 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -15,6 +15,7 @@ #include <net/tc_act/tc_vlan.h> #include <net/ipv6.h> +#include "cn10k.h" #include "otx2_common.h" /* Egress rate limiting definitions */ @@ -41,11 +42,14 @@ struct otx2_tc_flow_stats { struct otx2_tc_flow { struct rhash_head node; unsigned long cookie; - u16 entry; unsigned int bitpos; struct rcu_head rcu; struct otx2_tc_flow_stats stats; spinlock_t lock; /* lock for stats */ + u16 rq; + u16 entry; + u16 leaf_profile; + bool is_act_police; }; static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp, @@ -220,17 +224,76 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, return err; } +static int otx2_tc_act_set_police(struct otx2_nic *nic, + struct otx2_tc_flow *node, + struct flow_cls_offload *f, + u64 rate, u32 burst, u32 mark, + struct npc_install_flow_req *req, bool pps) +{ + struct netlink_ext_ack *extack = f->common.extack; + struct otx2_hw *hw = &nic->hw; + int rq_idx, rc; + + rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); + if (rq_idx >= hw->rx_queues) { + NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); + return -EINVAL; + } + + mutex_lock(&nic->mbox.lock); + + rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile); + if (rc) { + mutex_unlock(&nic->mbox.lock); + return rc; + } + + rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps); + if (rc) + goto free_leaf; + + rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true); + if (rc) + goto free_leaf; + + mutex_unlock(&nic->mbox.lock); + + req->match_id = mark & 0xFFFFULL; + req->index = rq_idx; + req->op = NIX_RX_ACTIONOP_UCAST; + set_bit(rq_idx, &nic->rq_bmap); + node->is_act_police = true; + node->rq = rq_idx; + + return 0; + +free_leaf: + if (cn10k_free_leaf_profile(nic, node->leaf_profile)) + netdev_err(nic->netdev, + "Unable to free leaf bandwidth profile(%d)\n", + node->leaf_profile); + mutex_unlock(&nic->mbox.lock); + return rc; +} + static int otx2_tc_parse_actions(struct otx2_nic *nic, struct flow_action *flow_action, - struct npc_install_flow_req *req) + struct npc_install_flow_req *req, + struct flow_cls_offload *f, + struct otx2_tc_flow *node) { + struct netlink_ext_ack *extack = f->common.extack; struct flow_action_entry *act; struct 
net_device *target;
 	struct otx2_nic *priv;
+	u32 burst, mark = 0;
+	u8 nr_police = 0;
+	bool pps = false;
+	u64 rate;
 	int i;
 
 	if (!flow_action_has_entries(flow_action)) {
-		netdev_info(nic->netdev, "no tc actions specified");
+		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
 		return -EINVAL;
 	}
@@ -247,8 +310,8 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
 			priv = netdev_priv(target);
 			/* npc_install_flow_req doesn't support passing a target pcifunc */
 			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
-				netdev_info(nic->netdev,
-					    "can't redirect to other pf/vf\n");
+				NL_SET_ERR_MSG_MOD(extack,
+						   "can't redirect to other pf/vf");
 				return -EOPNOTSUPP;
 			}
 			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
@@ -259,18 +322,55 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
 			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
 			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
 			break;
+		case FLOW_ACTION_POLICE:
+			/* Ingress ratelimiting is not supported on OcteonTx2 */
+			if (is_dev_otx2(nic->pdev)) {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "Ingress policing not supported on this platform");
+				return -EOPNOTSUPP;
+			}
+
+			if (act->police.rate_bytes_ps > 0) {
+				rate = act->police.rate_bytes_ps * 8;
+				burst = act->police.burst;
+			} else if (act->police.rate_pkt_ps > 0) {
+				/* The algorithm used to calculate rate
+				 * mantissa, exponent values for a given token
+				 * rate (token can be byte or packet) requires
+				 * token rate to be multiplied by 8.
+				 */
+				rate = act->police.rate_pkt_ps * 8;
+				burst = act->police.burst_pkt;
+				pps = true;
+			}
+			nr_police++;
+			break;
+		case FLOW_ACTION_MARK:
+			mark = act->mark;
+			break;
 		default:
 			return -EOPNOTSUPP;
 		}
 	}
 
+	if (nr_police > 1) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "rate limit police offload requires a single action");
+		return -EOPNOTSUPP;
+	}
+
+	if (nr_police)
+		return otx2_tc_act_set_police(nic, node, f, rate, burst,
+					      mark, req, pps);
+
 	return 0;
 }
 
-static int otx2_tc_prepare_flow(struct otx2_nic *nic,
+static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
 				struct flow_cls_offload *f,
 				struct npc_install_flow_req *req)
 {
+	struct netlink_ext_ack *extack = f->common.extack;
 	struct flow_msg *flow_spec = &req->packet;
 	struct flow_msg *flow_mask = &req->mask;
 	struct flow_dissector *dissector;
@@ -335,7 +435,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
 		flow_rule_match_eth_addrs(rule, &match);
 
 		if (!is_zero_ether_addr(match.mask->src)) {
-			netdev_err(nic->netdev, "src mac match not supported\n");
+			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
 			return -EOPNOTSUPP;
 		}
@@ -353,11 +453,11 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
 		flow_rule_match_ip(rule, &match);
 		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
 		    match.mask->tos) {
-			netdev_err(nic->netdev, "tos not supported\n");
+			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
 			return -EOPNOTSUPP;
 		}
 		if (match.mask->ttl) {
-			netdev_err(nic->netdev, "ttl not supported\n");
+			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
 			return -EOPNOTSUPP;
 		}
 		flow_spec->tos = match.key->tos;
@@ -413,8 +513,8 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
 
 		if (ipv6_addr_loopback(&match.key->dst) ||
 		    ipv6_addr_loopback(&match.key->src)) {
-			netdev_err(nic->netdev,
-				   "Flow matching on IPv6 loopback addr is not supported\n");
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Flow matching IPv6 loopback addr not supported");
 			return -EOPNOTSUPP;
 		}
@@ -463,7 +563,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
 		req->features |= BIT_ULL(NPC_SPORT_SCTP);
 	}
 
-	return
otx2_tc_parse_actions(nic, &rule->action, req); + return otx2_tc_parse_actions(nic, &rule->action, req, f, node); } static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry) @@ -498,6 +598,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, { struct otx2_tc_info *tc_info = &nic->tc_info; struct otx2_tc_flow *flow_node; + int err; flow_node = rhashtable_lookup_fast(&tc_info->flow_table, &tc_flow_cmd->cookie, @@ -508,6 +609,27 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, return -EINVAL; } + if (flow_node->is_act_police) { + mutex_lock(&nic->mbox.lock); + + err = cn10k_map_unmap_rq_policer(nic, flow_node->rq, + flow_node->leaf_profile, false); + if (err) + netdev_err(nic->netdev, + "Unmapping RQ %d & profile %d failed\n", + flow_node->rq, flow_node->leaf_profile); + + err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile); + if (err) + netdev_err(nic->netdev, + "Unable to free leaf bandwidth profile(%d)\n", + flow_node->leaf_profile); + + __clear_bit(flow_node->rq, &nic->rq_bmap); + + mutex_unlock(&nic->mbox.lock); + } + otx2_del_mcam_flow_entry(nic, flow_node->entry); WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table, @@ -524,14 +646,21 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, static int otx2_tc_add_flow(struct otx2_nic *nic, struct flow_cls_offload *tc_flow_cmd) { + struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; struct otx2_tc_info *tc_info = &nic->tc_info; struct otx2_tc_flow *new_node, *old_node; - struct npc_install_flow_req *req; - int rc; + struct npc_install_flow_req *req, dummy; + int rc, err; if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) return -ENOMEM; + if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) { + NL_SET_ERR_MSG_MOD(extack, + "Not enough MCAM space to add the flow"); + return -ENOMEM; + } + /* allocate memory for the new flow and it's node */ new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); if (!new_node) @@ -539,17 +668,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, spin_lock_init(&new_node->lock); new_node->cookie = tc_flow_cmd->cookie; - mutex_lock(&nic->mbox.lock); - req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); - if (!req) { - mutex_unlock(&nic->mbox.lock); - return -ENOMEM; - } + memset(&dummy, 0, sizeof(struct npc_install_flow_req)); - rc = otx2_tc_prepare_flow(nic, tc_flow_cmd, req); + rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy); if (rc) { - otx2_mbox_reset(&nic->mbox.mbox, 0); - mutex_unlock(&nic->mbox.lock); + kfree_rcu(new_node, rcu); return rc; } @@ -560,18 +683,22 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, if (old_node) otx2_tc_del_flow(nic, tc_flow_cmd); - if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) { - netdev_err(nic->netdev, "Not enough MCAM space to add the flow\n"); - otx2_mbox_reset(&nic->mbox.mbox, 0); + mutex_lock(&nic->mbox.lock); + req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); + if (!req) { mutex_unlock(&nic->mbox.lock); - return -ENOMEM; + rc = -ENOMEM; + goto free_leaf; } + memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr)); + memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); + new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows); req->channel = nic->hw.rx_chan_base; - req->entry = nic->flow_cfg->entry[nic->flow_cfg->tc_flower_offset + - nic->flow_cfg->tc_max_flows - new_node->bitpos]; + req->entry = nic->flow_cfg->flow_ent[nic->flow_cfg->tc_flower_offset + + nic->flow_cfg->tc_max_flows - new_node->bitpos]; req->intf = 
NIX_INTF_RX;
 	req->set_cntr = 1;
 	new_node->entry = req->entry;
@@ -579,9 +706,10 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
 	/* Send message to AF */
 	rc = otx2_sync_mbox_msg(&nic->mbox);
 	if (rc) {
-		netdev_err(nic->netdev, "Failed to install MCAM flow entry\n");
+		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
 		mutex_unlock(&nic->mbox.lock);
-		goto out;
+		kfree_rcu(new_node, rcu);
+		goto free_leaf;
 	}
 	mutex_unlock(&nic->mbox.lock);
@@ -591,12 +719,35 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
 	if (rc) {
 		otx2_del_mcam_flow_entry(nic, req->entry);
 		kfree_rcu(new_node, rcu);
-		goto out;
+		goto free_leaf;
 	}
 
 	set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
 	tc_info->num_entries++;
-out:
+
+	return 0;
+
+free_leaf:
+	if (new_node->is_act_police) {
+		mutex_lock(&nic->mbox.lock);
+
+		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
						 new_node->leaf_profile, false);
+		if (err)
+			netdev_err(nic->netdev,
+				   "Unmapping RQ %d & profile %d failed\n",
+				   new_node->rq, new_node->leaf_profile);
+		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
+		if (err)
+			netdev_err(nic->netdev,
+				   "Unable to free leaf bandwidth profile(%d)\n",
+				   new_node->leaf_profile);
+
+		__clear_bit(new_node->rq, &nic->rq_bmap);
+
+		mutex_unlock(&nic->mbox.lock);
+	}
+
 	return rc;
 }
@@ -675,6 +826,87 @@ static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
 	}
 }
 
+static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
+					    struct tc_cls_matchall_offload *cls)
+{
+	struct netlink_ext_ack *extack = cls->common.extack;
+	struct flow_action *actions = &cls->rule->action;
+	struct flow_action_entry *entry;
+	u64 rate;
+	int err;
+
+	err = otx2_tc_validate_flow(nic, actions, extack);
+	if (err)
+		return err;
+
+	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Only one ingress MATCHALL ratelimiter can be offloaded");
+		return -ENOMEM;
+	}
+
+	entry = &cls->rule->action.entries[0];
+	switch (entry->id) {
+	case FLOW_ACTION_POLICE:
+		/* Ingress ratelimiting is not supported on OcteonTx2 */
+		if (is_dev_otx2(nic->pdev)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Ingress policing not supported on this platform");
+			return -EOPNOTSUPP;
+		}
+
+		err = cn10k_alloc_matchall_ipolicer(nic);
+		if (err)
+			return err;
+
+		/* Convert to bits per second */
+		rate = entry->police.rate_bytes_ps * 8;
+		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
+		if (err)
+			return err;
+		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+		break;
+	default:
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Only police action supported with Ingress MATCHALL offload");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
+					   struct tc_cls_matchall_offload *cls)
+{
+	struct netlink_ext_ack *extack = cls->common.extack;
+	int err;
+
+	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+		return -EINVAL;
+	}
+
+	err = cn10k_free_matchall_ipolicer(nic);
+	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+	return err;
+}
+
+static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
+					  struct tc_cls_matchall_offload *cls_matchall)
+{
+	switch (cls_matchall->command) {
+	case TC_CLSMATCHALL_REPLACE:
+		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
+	case TC_CLSMATCHALL_DESTROY:
+		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
+	case TC_CLSMATCHALL_STATS:
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static int
otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { @@ -686,6 +918,8 @@ static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, switch (type) { case TC_SETUP_CLSFLOWER: return otx2_setup_tc_cls_flower(nic, type_data); + case TC_SETUP_CLSMATCHALL: + return otx2_setup_tc_ingress_matchall(nic, type_data); default: break; } @@ -775,6 +1009,9 @@ int otx2_init_tc(struct otx2_nic *nic) { struct otx2_tc_info *tc = &nic->tc_info; + /* Exclude receive queue 0 being used for police action */ + set_bit(0, &nic->rq_bmap); + tc->flow_ht_params = tc_flow_ht_params; return rhashtable_init(&tc->flow_table, &tc->flow_ht_params); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 52486c1f0973..2f144e2cf436 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -83,6 +83,7 @@ struct otx2_snd_queue { u16 num_sqbs; u16 sqe_thresh; u8 sqe_per_sqb; + u32 lmt_id; u64 io_addr; u64 *aura_fc_addr; u64 *lmt_addr; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 085be90a03eb..a8bee5aefec1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -395,6 +395,42 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } +static void otx2vf_set_rx_mode(struct net_device *netdev) +{ + struct otx2_nic *vf = netdev_priv(netdev); + + queue_work(vf->otx2_wq, &vf->rx_mode_work); +} + +static void otx2vf_do_set_rx_mode(struct work_struct *work) +{ + struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work); + struct net_device *netdev = vf->netdev; + unsigned int flags = netdev->flags; + struct nix_rx_mode *req; + + mutex_lock(&vf->mbox.lock); + + req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox); + if (!req) { + mutex_unlock(&vf->mbox.lock); + return; + } + + req->mode = NIX_RX_MODE_UCAST; + + if (flags & IFF_PROMISC) + req->mode |= NIX_RX_MODE_PROMISC; + if (flags & (IFF_ALLMULTI | IFF_MULTICAST)) + req->mode |= NIX_RX_MODE_ALLMULTI; + + req->mode |= NIX_RX_MODE_USE_MCE; + + otx2_sync_mbox_msg(&vf->mbox); + + mutex_unlock(&vf->mbox.lock); +} + static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu) { bool if_up = netif_running(netdev); @@ -432,12 +468,24 @@ static const struct net_device_ops otx2vf_netdev_ops = { .ndo_open = otx2vf_open, .ndo_stop = otx2vf_stop, .ndo_start_xmit = otx2vf_xmit, + .ndo_set_rx_mode = otx2vf_set_rx_mode, .ndo_set_mac_address = otx2_set_mac_address, .ndo_change_mtu = otx2vf_change_mtu, .ndo_get_stats64 = otx2_get_stats64, .ndo_tx_timeout = otx2_tx_timeout, }; +static int otx2_wq_init(struct otx2_nic *vf) +{ + vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq"); + if (!vf->otx2_wq) + return -ENOMEM; + + INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode); + INIT_WORK(&vf->reset_task, otx2vf_reset_task); + return 0; +} + static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf) { struct otx2_hw *hw = &vf->hw; @@ -561,7 +609,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_detach_rsrc; - err = cn10k_vf_lmtst_init(vf); + err = cn10k_lmtst_init(vf); if (err) goto err_detach_rsrc; @@ -588,8 +636,6 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) netdev->min_mtu = OTX2_MIN_MTU; netdev->max_mtu 
= otx2_get_max_mtu(vf); - INIT_WORK(&vf->reset_task, otx2vf_reset_task); - /* To distinguish, for LBK VFs set netdev name explicitly */ if (is_otx2_lbkvf(vf->pdev)) { int n; @@ -606,6 +652,10 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_detach_rsrc; } + err = otx2_wq_init(vf); + if (err) + goto err_unreg_netdev; + otx2vf_set_ethtool_ops(netdev); /* Enable pause frames by default */ @@ -614,9 +664,11 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; +err_unreg_netdev: + unregister_netdev(netdev); err_detach_rsrc: - if (hw->lmt_base) - iounmap(hw->lmt_base); + if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) + qmem_free(vf->dev, vf->dync_lmt); otx2_detach_resources(&vf->mbox); err_disable_mbox_intr: otx2vf_disable_mbox_intr(vf); @@ -644,12 +696,12 @@ static void otx2vf_remove(struct pci_dev *pdev) cancel_work_sync(&vf->reset_task); unregister_netdev(netdev); + if (vf->otx2_wq) + destroy_workqueue(vf->otx2_wq); otx2vf_disable_mbox_intr(vf); otx2_detach_resources(&vf->mbox); - - if (vf->hw.lmt_base) - iounmap(vf->hw.lmt_base); - + if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) + qmem_free(vf->dev, vf->dync_lmt); otx2vf_vfaf_mbox_destroy(vf); pci_free_irq_vectors(vf->pdev); pci_set_drvdata(pdev, NULL); diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile index 93129e32ebc5..0609df8b913d 100644 --- a/drivers/net/ethernet/marvell/prestera/Makefile +++ b/drivers/net/ethernet/marvell/prestera/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_PRESTERA) += prestera.o prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \ prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \ - prestera_switchdev.o + prestera_switchdev.o prestera_acl.o prestera_flow.o \ + prestera_flower.o prestera_span.o obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h index 55aa4bf8a27c..f18fe664b373 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera.h +++ b/drivers/net/ethernet/marvell/prestera/prestera.h @@ -60,10 +60,22 @@ struct prestera_port_caps { u8 transceiver; }; +struct prestera_lag { + struct net_device *dev; + struct list_head members; + u16 member_count; + u16 lag_id; +}; + +struct prestera_flow_block; + struct prestera_port { struct net_device *dev; struct prestera_switch *sw; + struct prestera_flow_block *flow_block; struct devlink_port dl_port; + struct list_head lag_member; + struct prestera_lag *lag; u32 id; u32 hw_id; u32 dev_id; @@ -127,6 +139,12 @@ struct prestera_port_event { } data; }; +enum prestera_fdb_entry_type { + PRESTERA_FDB_ENTRY_TYPE_REG_PORT, + PRESTERA_FDB_ENTRY_TYPE_LAG, + PRESTERA_FDB_ENTRY_TYPE_MAX +}; + enum prestera_fdb_event_id { PRESTERA_FDB_EVENT_UNSPEC, PRESTERA_FDB_EVENT_LEARNED, @@ -134,7 +152,11 @@ enum prestera_fdb_event_id { }; struct prestera_fdb_event { - u32 port_id; + enum prestera_fdb_entry_type type; + union { + u32 port_id; + u16 lag_id; + } dest; u32 vid; union { u8 mac[ETH_ALEN]; @@ -150,14 +172,20 @@ struct prestera_event { }; struct prestera_switchdev; +struct prestera_span; struct prestera_rxtx; +struct prestera_trap_data; +struct prestera_acl; struct prestera_switch { struct prestera_device *dev; struct prestera_switchdev *swdev; struct prestera_rxtx *rxtx; + struct prestera_acl *acl; + struct prestera_span *span; struct list_head event_handlers; struct notifier_block netdev_nb; + struct prestera_trap_data *trap_data; char 
base_mac[ETH_ALEN]; struct list_head port_list; rwlock_t port_list_lock; @@ -165,6 +193,9 @@ struct prestera_switch { u32 mtu_min; u32 mtu_max; u8 id; + struct prestera_lag *lags; + u8 lag_member_max; + u8 lag_max; }; struct prestera_rxtx_params { @@ -203,4 +234,10 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid); bool prestera_netdev_check(const struct net_device *dev); +bool prestera_port_is_lag_member(const struct prestera_port *port); + +struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id); + +u16 prestera_port_lag_id(const struct prestera_port *port); + #endif /* _PRESTERA_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c new file mode 100644 index 000000000000..83c75ffb1a1c --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */ + +#include <linux/rhashtable.h> + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_acl.h" +#include "prestera_span.h" + +struct prestera_acl { + struct prestera_switch *sw; + struct list_head rules; +}; + +struct prestera_acl_ruleset { + struct rhashtable rule_ht; + struct prestera_switch *sw; + u16 id; +}; + +struct prestera_acl_rule { + struct rhash_head ht_node; + struct list_head list; + struct list_head match_list; + struct list_head action_list; + struct prestera_flow_block *block; + unsigned long cookie; + u32 priority; + u8 n_actions; + u8 n_matches; + u32 id; +}; + +static const struct rhashtable_params prestera_acl_rule_ht_params = { + .key_len = sizeof(unsigned long), + .key_offset = offsetof(struct prestera_acl_rule, cookie), + .head_offset = offsetof(struct prestera_acl_rule, ht_node), + .automatic_shrinking = true, +}; + +static struct prestera_acl_ruleset * +prestera_acl_ruleset_create(struct prestera_switch *sw) +{ + struct prestera_acl_ruleset *ruleset; + int err; + + ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL); + if (!ruleset) + return ERR_PTR(-ENOMEM); + + err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params); + if (err) + goto err_rhashtable_init; + + err = prestera_hw_acl_ruleset_create(sw, &ruleset->id); + if (err) + goto err_ruleset_create; + + ruleset->sw = sw; + + return ruleset; + +err_ruleset_create: + rhashtable_destroy(&ruleset->rule_ht); +err_rhashtable_init: + kfree(ruleset); + return ERR_PTR(err); +} + +static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset) +{ + prestera_hw_acl_ruleset_del(ruleset->sw, ruleset->id); + rhashtable_destroy(&ruleset->rule_ht); + kfree(ruleset); +} + +struct prestera_flow_block * +prestera_acl_block_create(struct prestera_switch *sw, struct net *net) +{ + struct prestera_flow_block *block; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return NULL; + INIT_LIST_HEAD(&block->binding_list); + block->net = net; + block->sw = sw; + + block->ruleset = prestera_acl_ruleset_create(sw); + if (IS_ERR(block->ruleset)) { + kfree(block); + return NULL; + } + + return block; +} + +void prestera_acl_block_destroy(struct prestera_flow_block *block) +{ + prestera_acl_ruleset_destroy(block->ruleset); + WARN_ON(!list_empty(&block->binding_list)); + kfree(block); +} + +static struct prestera_flow_block_binding * +prestera_acl_block_lookup(struct prestera_flow_block *block, + struct prestera_port *port) +{ + struct prestera_flow_block_binding *binding; + + 
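The rules of a prestera ACL ruleset are kept in an rhashtable keyed by the unsigned long flower cookie (see prestera_acl_rule_ht_params above), so prestera_acl_rule_lookup() is a plain hash lookup. Below is a self-contained approximation with a fixed-size chained hash table; the bucket count and hash function are arbitrary stand-ins, and the kernel's rhashtable additionally grows and shrinks automatically:

#include <stdio.h>
#include <stdlib.h>

#define HT_SIZE 16	/* toy bucket count; rhashtable resizes as needed */

struct acl_rule {
	unsigned long cookie;	/* hash key, as in struct prestera_acl_rule */
	unsigned int priority;
	struct acl_rule *next;	/* bucket chain */
};

static struct acl_rule *buckets[HT_SIZE];

static unsigned int hash_cookie(unsigned long cookie)
{
	/* Knuth-style multiplicative hash, folded into the bucket range */
	return (unsigned int)(cookie * 2654435761UL) % HT_SIZE;
}

static void rule_insert(struct acl_rule *rule)
{
	unsigned int b = hash_cookie(rule->cookie);

	rule->next = buckets[b];
	buckets[b] = rule;
}

/* Rough equivalent of prestera_acl_rule_lookup(): resolve a flower
 * cookie to the driver's per-rule state.
 */
static struct acl_rule *rule_lookup(unsigned long cookie)
{
	struct acl_rule *r;

	for (r = buckets[hash_cookie(cookie)]; r; r = r->next)
		if (r->cookie == cookie)
			return r;
	return NULL;
}

int main(void)
{
	struct acl_rule r = { .cookie = 0xdeadbeef, .priority = 10 };

	rule_insert(&r);
	printf("lookup %s\n", rule_lookup(0xdeadbeef) ? "hit" : "miss");
	return 0;
}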
list_for_each_entry(binding, &block->binding_list, list) + if (binding->port == port) + return binding; + + return NULL; +} + +int prestera_acl_block_bind(struct prestera_flow_block *block, + struct prestera_port *port) +{ + struct prestera_flow_block_binding *binding; + int err; + + if (WARN_ON(prestera_acl_block_lookup(block, port))) + return -EEXIST; + + binding = kzalloc(sizeof(*binding), GFP_KERNEL); + if (!binding) + return -ENOMEM; + binding->span_id = PRESTERA_SPAN_INVALID_ID; + binding->port = port; + + err = prestera_hw_acl_port_bind(port, block->ruleset->id); + if (err) + goto err_rules_bind; + + list_add(&binding->list, &block->binding_list); + return 0; + +err_rules_bind: + kfree(binding); + return err; +} + +int prestera_acl_block_unbind(struct prestera_flow_block *block, + struct prestera_port *port) +{ + struct prestera_flow_block_binding *binding; + + binding = prestera_acl_block_lookup(block, port); + if (!binding) + return -ENOENT; + + list_del(&binding->list); + + prestera_hw_acl_port_unbind(port, block->ruleset->id); + + kfree(binding); + return 0; +} + +struct prestera_acl_ruleset * +prestera_acl_block_ruleset_get(struct prestera_flow_block *block) +{ + return block->ruleset; +} + +u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule) +{ + return rule->block->ruleset->id; +} + +struct net *prestera_acl_block_net(struct prestera_flow_block *block) +{ + return block->net; +} + +struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block) +{ + return block->sw; +} + +struct prestera_acl_rule * +prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset, + unsigned long cookie) +{ + return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie, + prestera_acl_rule_ht_params); +} + +struct prestera_acl_rule * +prestera_acl_rule_create(struct prestera_flow_block *block, + unsigned long cookie) +{ + struct prestera_acl_rule *rule; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&rule->match_list); + INIT_LIST_HEAD(&rule->action_list); + rule->cookie = cookie; + rule->block = block; + + return rule; +} + +struct list_head * +prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule) +{ + return &rule->match_list; +} + +struct list_head * +prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule) +{ + return &rule->action_list; +} + +int prestera_acl_rule_action_add(struct prestera_acl_rule *rule, + struct prestera_acl_rule_action_entry *entry) +{ + struct prestera_acl_rule_action_entry *a_entry; + + a_entry = kmalloc(sizeof(*a_entry), GFP_KERNEL); + if (!a_entry) + return -ENOMEM; + + memcpy(a_entry, entry, sizeof(*entry)); + list_add(&a_entry->list, &rule->action_list); + + rule->n_actions++; + return 0; +} + +u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule) +{ + return rule->n_actions; +} + +u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule) +{ + return rule->priority; +} + +void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, + u32 priority) +{ + rule->priority = priority; +} + +int prestera_acl_rule_match_add(struct prestera_acl_rule *rule, + struct prestera_acl_rule_match_entry *entry) +{ + struct prestera_acl_rule_match_entry *m_entry; + + m_entry = kmalloc(sizeof(*m_entry), GFP_KERNEL); + if (!m_entry) + return -ENOMEM; + + memcpy(m_entry, entry, sizeof(*entry)); + list_add(&m_entry->list, &rule->match_list); + + rule->n_matches++; + return 0; +} + +u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule) +{ 
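+	/* Annotation (not part of the commit): n_matches is kept in sync with match_list by prestera_acl_rule_match_add() and is later used to size the rule message sent to hardware. */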
+ return rule->n_matches; +} + +void prestera_acl_rule_destroy(struct prestera_acl_rule *rule) +{ + struct prestera_acl_rule_action_entry *a_entry; + struct prestera_acl_rule_match_entry *m_entry; + struct list_head *pos, *n; + + list_for_each_safe(pos, n, &rule->match_list) { + m_entry = list_entry(pos, typeof(*m_entry), list); + list_del(pos); + kfree(m_entry); + } + + list_for_each_safe(pos, n, &rule->action_list) { + a_entry = list_entry(pos, typeof(*a_entry), list); + list_del(pos); + kfree(a_entry); + } + + kfree(rule); +} + +int prestera_acl_rule_add(struct prestera_switch *sw, + struct prestera_acl_rule *rule) +{ + u32 rule_id; + int err; + + /* try to add rule to hash table first */ + err = rhashtable_insert_fast(&rule->block->ruleset->rule_ht, + &rule->ht_node, + prestera_acl_rule_ht_params); + if (err) + return err; + + /* add rule to hw */ + err = prestera_hw_acl_rule_add(sw, rule, &rule_id); + if (err) + goto err_rule_add; + + rule->id = rule_id; + + list_add_tail(&rule->list, &sw->acl->rules); + + return 0; + +err_rule_add: + rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node, + prestera_acl_rule_ht_params); + return err; +} + +void prestera_acl_rule_del(struct prestera_switch *sw, + struct prestera_acl_rule *rule) +{ + rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node, + prestera_acl_rule_ht_params); + list_del(&rule->list); + prestera_hw_acl_rule_del(sw, rule->id); +} + +int prestera_acl_rule_get_stats(struct prestera_switch *sw, + struct prestera_acl_rule *rule, + u64 *packets, u64 *bytes, u64 *last_use) +{ + u64 current_packets; + u64 current_bytes; + int err; + + err = prestera_hw_acl_rule_stats_get(sw, rule->id, &current_packets, + &current_bytes); + if (err) + return err; + + *packets = current_packets; + *bytes = current_bytes; + *last_use = jiffies; + + return 0; +} + +int prestera_acl_init(struct prestera_switch *sw) +{ + struct prestera_acl *acl; + + acl = kzalloc(sizeof(*acl), GFP_KERNEL); + if (!acl) + return -ENOMEM; + + INIT_LIST_HEAD(&acl->rules); + sw->acl = acl; + acl->sw = sw; + + return 0; +} + +void prestera_acl_fini(struct prestera_switch *sw) +{ + struct prestera_acl *acl = sw->acl; + + WARN_ON(!list_empty(&acl->rules)); + kfree(acl); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h new file mode 100644 index 000000000000..39b7869be659 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2020 Marvell International Ltd. All rights reserved.
*/ + +#ifndef _PRESTERA_ACL_H_ +#define _PRESTERA_ACL_H_ + +enum prestera_acl_rule_match_entry_type { + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE = 1, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE, + PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE +}; + +enum prestera_acl_rule_action { + PRESTERA_ACL_RULE_ACTION_ACCEPT, + PRESTERA_ACL_RULE_ACTION_DROP, + PRESTERA_ACL_RULE_ACTION_TRAP +}; + +struct prestera_switch; +struct prestera_port; +struct prestera_acl_rule; +struct prestera_acl_ruleset; + +struct prestera_flow_block_binding { + struct list_head list; + struct prestera_port *port; + int span_id; +}; + +struct prestera_flow_block { + struct list_head binding_list; + struct prestera_switch *sw; + struct net *net; + struct prestera_acl_ruleset *ruleset; + struct flow_block_cb *block_cb; +}; + +struct prestera_acl_rule_action_entry { + struct list_head list; + enum prestera_acl_rule_action id; +}; + +struct prestera_acl_rule_match_entry { + struct list_head list; + enum prestera_acl_rule_match_entry_type type; + union { + struct { + u8 key; + u8 mask; + } u8; + struct { + u16 key; + u16 mask; + } u16; + struct { + u32 key; + u32 mask; + } u32; + struct { + u64 key; + u64 mask; + } u64; + struct { + u8 key[ETH_ALEN]; + u8 mask[ETH_ALEN]; + } mac; + } keymask; +}; + +int prestera_acl_init(struct prestera_switch *sw); +void prestera_acl_fini(struct prestera_switch *sw); +struct prestera_flow_block * +prestera_acl_block_create(struct prestera_switch *sw, struct net *net); +void prestera_acl_block_destroy(struct prestera_flow_block *block); +struct net *prestera_acl_block_net(struct prestera_flow_block *block); +struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block); +int prestera_acl_block_bind(struct prestera_flow_block *block, + struct prestera_port *port); +int prestera_acl_block_unbind(struct prestera_flow_block *block, + struct prestera_port *port); +struct prestera_acl_ruleset * +prestera_acl_block_ruleset_get(struct prestera_flow_block *block); +struct prestera_acl_rule * +prestera_acl_rule_create(struct prestera_flow_block *block, + unsigned long cookie); +u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule); +void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, + u32 priority); +u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule); +struct list_head * +prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule); +u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule); +u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule); +int prestera_acl_rule_action_add(struct prestera_acl_rule *rule, + struct prestera_acl_rule_action_entry *entry); +struct list_head * +prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule); +int prestera_acl_rule_match_add(struct prestera_acl_rule *rule, + struct prestera_acl_rule_match_entry *entry); +void prestera_acl_rule_destroy(struct prestera_acl_rule *rule); +struct 
prestera_acl_rule * +prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset, + unsigned long cookie); +int prestera_acl_rule_add(struct prestera_switch *sw, + struct prestera_acl_rule *rule); +void prestera_acl_rule_del(struct prestera_switch *sw, + struct prestera_acl_rule *rule); +int prestera_acl_rule_get_stats(struct prestera_switch *sw, + struct prestera_acl_rule *rule, + u64 *packets, u64 *bytes, u64 *last_use); + +#endif /* _PRESTERA_ACL_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c index 94c185a0e2b8..fa7a0682ad1e 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c @@ -4,6 +4,352 @@ #include <net/devlink.h> #include "prestera_devlink.h" +#include "prestera_hw.h" + +/* All driver-specific traps must be documented in + * Documentation/networking/devlink/prestera.rst + */ +enum { + DEVLINK_PRESTERA_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX, + DEVLINK_PRESTERA_TRAP_ID_ARP_BC, + DEVLINK_PRESTERA_TRAP_ID_IS_IS, + DEVLINK_PRESTERA_TRAP_ID_OSPF, + DEVLINK_PRESTERA_TRAP_ID_IP_BC_MAC, + DEVLINK_PRESTERA_TRAP_ID_ROUTER_MC, + DEVLINK_PRESTERA_TRAP_ID_VRRP, + DEVLINK_PRESTERA_TRAP_ID_DHCP, + DEVLINK_PRESTERA_TRAP_ID_MAC_TO_ME, + DEVLINK_PRESTERA_TRAP_ID_IPV4_OPTIONS, + DEVLINK_PRESTERA_TRAP_ID_IP_DEFAULT_ROUTE, + DEVLINK_PRESTERA_TRAP_ID_IP_TO_ME, + DEVLINK_PRESTERA_TRAP_ID_IPV4_ICMP_REDIRECT, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_0, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_1, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_2, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_3, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_4, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_5, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_6, + DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_7, + DEVLINK_PRESTERA_TRAP_ID_BGP, + DEVLINK_PRESTERA_TRAP_ID_SSH, + DEVLINK_PRESTERA_TRAP_ID_TELNET, + DEVLINK_PRESTERA_TRAP_ID_ICMP, + DEVLINK_PRESTERA_TRAP_ID_MET_RED, + DEVLINK_PRESTERA_TRAP_ID_IP_SIP_IS_ZERO, + DEVLINK_PRESTERA_TRAP_ID_IP_UC_DIP_DA_MISMATCH, + DEVLINK_PRESTERA_TRAP_ID_ILLEGAL_IPV4_HDR, + DEVLINK_PRESTERA_TRAP_ID_ILLEGAL_IP_ADDR, + DEVLINK_PRESTERA_TRAP_ID_INVALID_SA, + DEVLINK_PRESTERA_TRAP_ID_LOCAL_PORT, + DEVLINK_PRESTERA_TRAP_ID_PORT_NO_VLAN, + DEVLINK_PRESTERA_TRAP_ID_RXDMA_DROP, +}; + +#define DEVLINK_PRESTERA_TRAP_NAME_ARP_BC \ + "arp_bc" +#define DEVLINK_PRESTERA_TRAP_NAME_IS_IS \ + "is_is" +#define DEVLINK_PRESTERA_TRAP_NAME_OSPF \ + "ospf" +#define DEVLINK_PRESTERA_TRAP_NAME_IP_BC_MAC \ + "ip_bc_mac" +#define DEVLINK_PRESTERA_TRAP_NAME_ROUTER_MC \ + "router_mc" +#define DEVLINK_PRESTERA_TRAP_NAME_VRRP \ + "vrrp" +#define DEVLINK_PRESTERA_TRAP_NAME_DHCP \ + "dhcp" +#define DEVLINK_PRESTERA_TRAP_NAME_MAC_TO_ME \ + "mac_to_me" +#define DEVLINK_PRESTERA_TRAP_NAME_IPV4_OPTIONS \ + "ipv4_options" +#define DEVLINK_PRESTERA_TRAP_NAME_IP_DEFAULT_ROUTE \ + "ip_default_route" +#define DEVLINK_PRESTERA_TRAP_NAME_IP_TO_ME \ + "ip_to_me" +#define DEVLINK_PRESTERA_TRAP_NAME_IPV4_ICMP_REDIRECT \ + "ipv4_icmp_redirect" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_0 \ + "acl_code_0" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_1 \ + "acl_code_1" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_2 \ + "acl_code_2" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_3 \ + "acl_code_3" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_4 \ + "acl_code_4" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_5 \ + "acl_code_5" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_6 \ + "acl_code_6" +#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_7 \ + 
"acl_code_7" +#define DEVLINK_PRESTERA_TRAP_NAME_BGP \ + "bgp" +#define DEVLINK_PRESTERA_TRAP_NAME_SSH \ + "ssh" +#define DEVLINK_PRESTERA_TRAP_NAME_TELNET \ + "telnet" +#define DEVLINK_PRESTERA_TRAP_NAME_ICMP \ + "icmp" +#define DEVLINK_PRESTERA_TRAP_NAME_RXDMA_DROP \ + "rxdma_drop" +#define DEVLINK_PRESTERA_TRAP_NAME_PORT_NO_VLAN \ + "port_no_vlan" +#define DEVLINK_PRESTERA_TRAP_NAME_LOCAL_PORT \ + "local_port" +#define DEVLINK_PRESTERA_TRAP_NAME_INVALID_SA \ + "invalid_sa" +#define DEVLINK_PRESTERA_TRAP_NAME_ILLEGAL_IP_ADDR \ + "illegal_ip_addr" +#define DEVLINK_PRESTERA_TRAP_NAME_ILLEGAL_IPV4_HDR \ + "illegal_ipv4_hdr" +#define DEVLINK_PRESTERA_TRAP_NAME_IP_UC_DIP_DA_MISMATCH \ + "ip_uc_dip_da_mismatch" +#define DEVLINK_PRESTERA_TRAP_NAME_IP_SIP_IS_ZERO \ + "ip_sip_is_zero" +#define DEVLINK_PRESTERA_TRAP_NAME_MET_RED \ + "met_red" + +struct prestera_trap { + struct devlink_trap trap; + u8 cpu_code; +}; + +struct prestera_trap_item { + enum devlink_trap_action action; + void *trap_ctx; +}; + +struct prestera_trap_data { + struct prestera_switch *sw; + struct prestera_trap_item *trap_items_arr; + u32 traps_count; +}; + +#define PRESTERA_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT + +#define PRESTERA_TRAP_CONTROL(_id, _group_id, _action) \ + DEVLINK_TRAP_GENERIC(CONTROL, _action, _id, \ + DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ + PRESTERA_TRAP_METADATA) + +#define PRESTERA_TRAP_DRIVER_CONTROL(_id, _group_id) \ + DEVLINK_TRAP_DRIVER(CONTROL, TRAP, DEVLINK_PRESTERA_TRAP_ID_##_id, \ + DEVLINK_PRESTERA_TRAP_NAME_##_id, \ + DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ + PRESTERA_TRAP_METADATA) + +#define PRESTERA_TRAP_EXCEPTION(_id, _group_id) \ + DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \ + DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ + PRESTERA_TRAP_METADATA) + +#define PRESTERA_TRAP_DRIVER_EXCEPTION(_id, _group_id) \ + DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, DEVLINK_PRESTERA_TRAP_ID_##_id, \ + DEVLINK_PRESTERA_TRAP_NAME_##_id, \ + DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ + PRESTERA_TRAP_METADATA) + +#define PRESTERA_TRAP_DRIVER_DROP(_id, _group_id) \ + DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_PRESTERA_TRAP_ID_##_id, \ + DEVLINK_PRESTERA_TRAP_NAME_##_id, \ + DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ + PRESTERA_TRAP_METADATA) + +static const struct devlink_trap_group prestera_trap_groups_arr[] = { + /* No policer is associated with following groups (policerid == 0)*/ + DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0), + DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 0), + DEVLINK_TRAP_GROUP_GENERIC(L3_EXCEPTIONS, 0), + DEVLINK_TRAP_GROUP_GENERIC(NEIGH_DISCOVERY, 0), + DEVLINK_TRAP_GROUP_GENERIC(ACL_TRAP, 0), + DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 0), + DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0), + DEVLINK_TRAP_GROUP_GENERIC(OSPF, 0), + DEVLINK_TRAP_GROUP_GENERIC(STP, 0), + DEVLINK_TRAP_GROUP_GENERIC(LACP, 0), + DEVLINK_TRAP_GROUP_GENERIC(LLDP, 0), + DEVLINK_TRAP_GROUP_GENERIC(VRRP, 0), + DEVLINK_TRAP_GROUP_GENERIC(DHCP, 0), + DEVLINK_TRAP_GROUP_GENERIC(BGP, 0), + DEVLINK_TRAP_GROUP_GENERIC(LOCAL_DELIVERY, 0), + DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 0), +}; + +/* Initialize trap list, as well as associate CPU code with them. 
*/ +static struct prestera_trap prestera_trap_items_arr[] = { + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ARP_BC, NEIGH_DISCOVERY), + .cpu_code = 5, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(IS_IS, LOCAL_DELIVERY), + .cpu_code = 13, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(OSPF, OSPF), + .cpu_code = 16, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(IP_BC_MAC, LOCAL_DELIVERY), + .cpu_code = 19, + }, + { + .trap = PRESTERA_TRAP_CONTROL(STP, STP, TRAP), + .cpu_code = 26, + }, + { + .trap = PRESTERA_TRAP_CONTROL(LACP, LACP, TRAP), + .cpu_code = 27, + }, + { + .trap = PRESTERA_TRAP_CONTROL(LLDP, LLDP, TRAP), + .cpu_code = 28, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ROUTER_MC, LOCAL_DELIVERY), + .cpu_code = 29, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(VRRP, VRRP), + .cpu_code = 30, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(DHCP, DHCP), + .cpu_code = 33, + }, + { + .trap = PRESTERA_TRAP_EXCEPTION(MTU_ERROR, L3_EXCEPTIONS), + .cpu_code = 63, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(MAC_TO_ME, LOCAL_DELIVERY), + .cpu_code = 65, + }, + { + .trap = PRESTERA_TRAP_EXCEPTION(TTL_ERROR, L3_EXCEPTIONS), + .cpu_code = 133, + }, + { + .trap = PRESTERA_TRAP_DRIVER_EXCEPTION(IPV4_OPTIONS, + L3_EXCEPTIONS), + .cpu_code = 141, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(IP_DEFAULT_ROUTE, + LOCAL_DELIVERY), + .cpu_code = 160, + }, + { + .trap = PRESTERA_TRAP_CONTROL(LOCAL_ROUTE, LOCAL_DELIVERY, + TRAP), + .cpu_code = 161, + }, + { + .trap = PRESTERA_TRAP_DRIVER_EXCEPTION(IPV4_ICMP_REDIRECT, + L3_EXCEPTIONS), + .cpu_code = 180, + }, + { + .trap = PRESTERA_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY, + TRAP), + .cpu_code = 188, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_0, ACL_TRAP), + .cpu_code = 192, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_1, ACL_TRAP), + .cpu_code = 193, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_2, ACL_TRAP), + .cpu_code = 194, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_3, ACL_TRAP), + .cpu_code = 195, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_4, ACL_TRAP), + .cpu_code = 196, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_5, ACL_TRAP), + .cpu_code = 197, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_6, ACL_TRAP), + .cpu_code = 198, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_7, ACL_TRAP), + .cpu_code = 199, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(BGP, BGP), + .cpu_code = 206, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(SSH, LOCAL_DELIVERY), + .cpu_code = 207, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(TELNET, LOCAL_DELIVERY), + .cpu_code = 208, + }, + { + .trap = PRESTERA_TRAP_DRIVER_CONTROL(ICMP, LOCAL_DELIVERY), + .cpu_code = 209, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(RXDMA_DROP, BUFFER_DROPS), + .cpu_code = 37, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(PORT_NO_VLAN, L2_DROPS), + .cpu_code = 39, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(LOCAL_PORT, L2_DROPS), + .cpu_code = 56, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(INVALID_SA, L2_DROPS), + .cpu_code = 60, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(ILLEGAL_IP_ADDR, L3_DROPS), + .cpu_code = 136, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(ILLEGAL_IPV4_HDR, L3_DROPS), + .cpu_code = 137, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(IP_UC_DIP_DA_MISMATCH, + L3_DROPS), + .cpu_code = 138, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(IP_SIP_IS_ZERO, L3_DROPS), + .cpu_code = 145, + }, + { + .trap = PRESTERA_TRAP_DRIVER_DROP(MET_RED, BUFFER_DROPS), + .cpu_code = 
185, + }, +}; + +static void prestera_devlink_traps_fini(struct prestera_switch *sw); + +static int prestera_drop_counter_get(struct devlink *devlink, + const struct devlink_trap *trap, + u64 *p_drops); static int prestera_dl_info_get(struct devlink *dl, struct devlink_info_req *req, @@ -27,8 +373,21 @@ static int prestera_dl_info_get(struct devlink *dl, buf); } +static int prestera_trap_init(struct devlink *devlink, + const struct devlink_trap *trap, void *trap_ctx); + +static int prestera_trap_action_set(struct devlink *devlink, + const struct devlink_trap *trap, + enum devlink_trap_action action, + struct netlink_ext_ack *extack); + +static int prestera_devlink_traps_register(struct prestera_switch *sw); + static const struct devlink_ops prestera_dl_ops = { .info_get = prestera_dl_info_get, + .trap_init = prestera_trap_init, + .trap_action_set = prestera_trap_action_set, + .trap_drop_counter_get = prestera_drop_counter_get, }; struct prestera_switch *prestera_devlink_alloc(void) @@ -53,17 +412,32 @@ int prestera_devlink_register(struct prestera_switch *sw) int err; err = devlink_register(dl, sw->dev->dev); - if (err) + if (err) { dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err); + return err; + } - return err; + err = prestera_devlink_traps_register(sw); + if (err) { + devlink_unregister(dl); + dev_err(sw->dev->dev, "devlink_traps_register failed: %d\n", + err); + return err; + } + + return 0; } void prestera_devlink_unregister(struct prestera_switch *sw) { + struct prestera_trap_data *trap_data = sw->trap_data; struct devlink *dl = priv_to_devlink(sw); + prestera_devlink_traps_fini(sw); devlink_unregister(dl); + + kfree(trap_data->trap_items_arr); + kfree(trap_data); } int prestera_devlink_port_register(struct prestera_port *port) @@ -110,3 +484,157 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev) return &port->dl_port; } + +static int prestera_devlink_traps_register(struct prestera_switch *sw) +{ + const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr); + const u32 traps_count = ARRAY_SIZE(prestera_trap_items_arr); + struct devlink *devlink = priv_to_devlink(sw); + struct prestera_trap_data *trap_data; + struct prestera_trap *prestera_trap; + int err, i; + + trap_data = kzalloc(sizeof(*trap_data), GFP_KERNEL); + if (!trap_data) + return -ENOMEM; + + trap_data->trap_items_arr = kcalloc(traps_count, + sizeof(struct prestera_trap_item), + GFP_KERNEL); + if (!trap_data->trap_items_arr) { + err = -ENOMEM; + goto err_trap_items_alloc; + } + + trap_data->sw = sw; + trap_data->traps_count = traps_count; + sw->trap_data = trap_data; + + err = devlink_trap_groups_register(devlink, prestera_trap_groups_arr, + groups_count); + if (err) + goto err_groups_register; + + for (i = 0; i < traps_count; i++) { + prestera_trap = &prestera_trap_items_arr[i]; + err = devlink_traps_register(devlink, &prestera_trap->trap, 1, + sw); + if (err) + goto err_trap_register; + } + + return 0; + +err_trap_register: + for (i--; i >= 0; i--) { + prestera_trap = &prestera_trap_items_arr[i]; + devlink_traps_unregister(devlink, &prestera_trap->trap, 1); + } + devlink_trap_groups_unregister(devlink, prestera_trap_groups_arr, + groups_count); +err_groups_register: + kfree(trap_data->trap_items_arr); +err_trap_items_alloc: + kfree(trap_data); + return err; +} + +static struct prestera_trap_item * +prestera_get_trap_item_by_cpu_code(struct prestera_switch *sw, u8 cpu_code) +{ + struct prestera_trap_data *trap_data = sw->trap_data; + struct prestera_trap *prestera_trap; + int i; + 
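+	/* Annotation (not part of the commit): the static prestera_trap_items_arr and the per-switch trap_items_arr are index-aligned, so a CPU-code hit at index i maps directly to the runtime trap item. */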
+ for (i = 0; i < trap_data->traps_count; i++) { + prestera_trap = &prestera_trap_items_arr[i]; + if (cpu_code == prestera_trap->cpu_code) + return &trap_data->trap_items_arr[i]; + } + + return NULL; +} + +void prestera_devlink_trap_report(struct prestera_port *port, + struct sk_buff *skb, u8 cpu_code) +{ + struct prestera_trap_item *trap_item; + struct devlink *devlink; + + devlink = port->dl_port.devlink; + + trap_item = prestera_get_trap_item_by_cpu_code(port->sw, cpu_code); + if (unlikely(!trap_item)) + return; + + devlink_trap_report(devlink, skb, trap_item->trap_ctx, + &port->dl_port, NULL); +} + +static struct prestera_trap_item * +prestera_devlink_trap_item_lookup(struct prestera_switch *sw, u16 trap_id) +{ + struct prestera_trap_data *trap_data = sw->trap_data; + int i; + + for (i = 0; i < ARRAY_SIZE(prestera_trap_items_arr); i++) { + if (prestera_trap_items_arr[i].trap.id == trap_id) + return &trap_data->trap_items_arr[i]; + } + + return NULL; +} + +static int prestera_trap_init(struct devlink *devlink, + const struct devlink_trap *trap, void *trap_ctx) +{ + struct prestera_switch *sw = devlink_priv(devlink); + struct prestera_trap_item *trap_item; + + trap_item = prestera_devlink_trap_item_lookup(sw, trap->id); + if (WARN_ON(!trap_item)) + return -EINVAL; + + trap_item->trap_ctx = trap_ctx; + trap_item->action = trap->init_action; + + return 0; +} + +static int prestera_trap_action_set(struct devlink *devlink, + const struct devlink_trap *trap, + enum devlink_trap_action action, + struct netlink_ext_ack *extack) +{ + /* Currently, driver does not support trap action altering */ + return -EOPNOTSUPP; +} + +static int prestera_drop_counter_get(struct devlink *devlink, + const struct devlink_trap *trap, + u64 *p_drops) +{ + struct prestera_switch *sw = devlink_priv(devlink); + enum prestera_hw_cpu_code_cnt_t cpu_code_type = + PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP; + struct prestera_trap *prestera_trap = + container_of(trap, struct prestera_trap, trap); + + return prestera_hw_cpu_code_counters_get(sw, prestera_trap->cpu_code, + cpu_code_type, p_drops); +} + +static void prestera_devlink_traps_fini(struct prestera_switch *sw) +{ + struct devlink *dl = priv_to_devlink(sw); + const struct devlink_trap *trap; + int i; + + for (i = 0; i < ARRAY_SIZE(prestera_trap_items_arr); ++i) { + trap = &prestera_trap_items_arr[i].trap; + devlink_traps_unregister(dl, trap, 1); + } + + devlink_trap_groups_unregister(dl, prestera_trap_groups_arr, + ARRAY_SIZE(prestera_trap_groups_arr)); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h index 51bee9f75415..5d73aa9db897 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h @@ -20,4 +20,7 @@ void prestera_devlink_port_clear(struct prestera_port *port); struct devlink_port *prestera_devlink_get_port(struct net_device *dev); +void prestera_devlink_trap_report(struct prestera_port *port, + struct sk_buff *skb, u8 cpu_code); + #endif /* _PRESTERA_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.c b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c index a5e01c7a307b..b7e89c0ca5c0 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_dsa.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c @@ -19,6 +19,7 @@ #define PRESTERA_DSA_W1_EXT_BIT BIT(31) #define PRESTERA_DSA_W1_CFI_BIT BIT(30) #define PRESTERA_DSA_W1_PORT_NUM GENMASK(11, 10) +#define 
PRESTERA_DSA_W1_MASK_CPU_CODE GENMASK(7, 0) #define PRESTERA_DSA_W2_EXT_BIT BIT(31) #define PRESTERA_DSA_W2_PORT_NUM BIT(20) @@ -74,6 +75,8 @@ int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf) (FIELD_GET(PRESTERA_DSA_W1_PORT_NUM, words[1]) << 5) | (FIELD_GET(PRESTERA_DSA_W2_PORT_NUM, words[2]) << 7); + dsa->cpu_code = FIELD_GET(PRESTERA_DSA_W1_MASK_CPU_CODE, words[1]); + return 0; } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.h b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h index 67018629bdd2..c99342f475cf 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_dsa.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h @@ -27,6 +27,7 @@ struct prestera_dsa { struct prestera_dsa_vlan vlan; u32 hw_dev_num; u32 port_num; + u8 cpu_code; }; int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c new file mode 100644 index 000000000000..c9891e968259 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */ + +#include <linux/kernel.h> +#include <linux/list.h> + +#include "prestera.h" +#include "prestera_acl.h" +#include "prestera_flow.h" +#include "prestera_span.h" +#include "prestera_flower.h" + +static LIST_HEAD(prestera_block_cb_list); + +static int prestera_flow_block_mall_cb(struct prestera_flow_block *block, + struct tc_cls_matchall_offload *f) +{ + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: + return prestera_span_replace(block, f); + case TC_CLSMATCHALL_DESTROY: + prestera_span_destroy(block); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int prestera_flow_block_flower_cb(struct prestera_flow_block *block, + struct flow_cls_offload *f) +{ + if (f->common.chain_index != 0) + return -EOPNOTSUPP; + + switch (f->command) { + case FLOW_CLS_REPLACE: + return prestera_flower_replace(block, f); + case FLOW_CLS_DESTROY: + prestera_flower_destroy(block, f); + return 0; + case FLOW_CLS_STATS: + return prestera_flower_stats(block, f); + default: + return -EOPNOTSUPP; + } +} + +static int prestera_flow_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct prestera_flow_block *block = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return prestera_flow_block_flower_cb(block, type_data); + case TC_SETUP_CLSMATCHALL: + return prestera_flow_block_mall_cb(block, type_data); + default: + return -EOPNOTSUPP; + } +} + +static void prestera_flow_block_release(void *cb_priv) +{ + struct prestera_flow_block *block = cb_priv; + + prestera_acl_block_destroy(block); +} + +static struct prestera_flow_block * +prestera_flow_block_get(struct prestera_switch *sw, + struct flow_block_offload *f, + bool *register_block) +{ + struct prestera_flow_block *block; + struct flow_block_cb *block_cb; + + block_cb = flow_block_cb_lookup(f->block, + prestera_flow_block_cb, sw); + if (!block_cb) { + block = prestera_acl_block_create(sw, f->net); + if (!block) + return ERR_PTR(-ENOMEM); + + block_cb = flow_block_cb_alloc(prestera_flow_block_cb, + sw, block, + prestera_flow_block_release); + if (IS_ERR(block_cb)) { + prestera_acl_block_destroy(block); + return ERR_CAST(block_cb); + } + + block->block_cb = block_cb; + *register_block = true; + } else { + block = flow_block_cb_priv(block_cb); + *register_block = false; + } + + 
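+	/* Annotation (not part of the commit): every binder takes a reference on the block here; the matching decref in prestera_flow_block_put() frees the block once the last user is gone. */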
flow_block_cb_incref(block_cb); + + return block; +} + +static void prestera_flow_block_put(struct prestera_flow_block *block) +{ + struct flow_block_cb *block_cb = block->block_cb; + + if (flow_block_cb_decref(block_cb)) + return; + + flow_block_cb_free(block_cb); + prestera_acl_block_destroy(block); +} + +static int prestera_setup_flow_block_bind(struct prestera_port *port, + struct flow_block_offload *f) +{ + struct prestera_switch *sw = port->sw; + struct prestera_flow_block *block; + struct flow_block_cb *block_cb; + bool register_block; + int err; + + block = prestera_flow_block_get(sw, f, &register_block); + if (IS_ERR(block)) + return PTR_ERR(block); + + block_cb = block->block_cb; + + err = prestera_acl_block_bind(block, port); + if (err) + goto err_block_bind; + + if (register_block) { + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, &prestera_block_cb_list); + } + + port->flow_block = block; + return 0; + +err_block_bind: + prestera_flow_block_put(block); + + return err; +} + +static void prestera_setup_flow_block_unbind(struct prestera_port *port, + struct flow_block_offload *f) +{ + struct prestera_switch *sw = port->sw; + struct prestera_flow_block *block; + struct flow_block_cb *block_cb; + int err; + + block_cb = flow_block_cb_lookup(f->block, prestera_flow_block_cb, sw); + if (!block_cb) + return; + + block = flow_block_cb_priv(block_cb); + + prestera_span_destroy(block); + + err = prestera_acl_block_unbind(block, port); + if (err) + goto error; + + if (!flow_block_cb_decref(block_cb)) { + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + } +error: + port->flow_block = NULL; +} + +int prestera_flow_block_setup(struct prestera_port *port, + struct flow_block_offload *f) +{ + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + f->driver_block_list = &prestera_block_cb_list; + + switch (f->command) { + case FLOW_BLOCK_BIND: + return prestera_setup_flow_block_bind(port, f); + case FLOW_BLOCK_UNBIND: + prestera_setup_flow_block_unbind(port, f); + return 0; + default: + return -EOPNOTSUPP; + } +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h new file mode 100644 index 000000000000..467c7038cace --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_FLOW_H_ +#define _PRESTERA_FLOW_H_ + +#include <net/flow_offload.h> + +struct prestera_port; + +int prestera_flow_block_setup(struct prestera_port *port, + struct flow_block_offload *f); + +#endif /* _PRESTERA_FLOW_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c new file mode 100644 index 000000000000..e571ba09ec08 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2020 Marvell International Ltd.
All rights reserved */ + +#include "prestera.h" +#include "prestera_acl.h" +#include "prestera_flower.h" + +static int prestera_flower_parse_actions(struct prestera_flow_block *block, + struct prestera_acl_rule *rule, + struct flow_action *flow_action, + struct netlink_ext_ack *extack) +{ + struct prestera_acl_rule_action_entry a_entry; + const struct flow_action_entry *act; + int err, i; + + if (!flow_action_has_entries(flow_action)) + return 0; + + flow_action_for_each(i, act, flow_action) { + memset(&a_entry, 0, sizeof(a_entry)); + + switch (act->id) { + case FLOW_ACTION_ACCEPT: + a_entry.id = PRESTERA_ACL_RULE_ACTION_ACCEPT; + break; + case FLOW_ACTION_DROP: + a_entry.id = PRESTERA_ACL_RULE_ACTION_DROP; + break; + case FLOW_ACTION_TRAP: + a_entry.id = PRESTERA_ACL_RULE_ACTION_TRAP; + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); + pr_err("Unsupported action\n"); + return -EOPNOTSUPP; + } + + err = prestera_acl_rule_action_add(rule, &a_entry); + if (err) + return err; + } + + return 0; +} + +static int prestera_flower_parse_meta(struct prestera_acl_rule *rule, + struct flow_cls_offload *f, + struct prestera_flow_block *block) +{ + struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); + struct prestera_acl_rule_match_entry m_entry = {0}; + struct net_device *ingress_dev; + struct flow_match_meta match; + struct prestera_port *port; + + flow_rule_match_meta(f_rule, &match); + if (match.mask->ingress_ifindex != 0xFFFFFFFF) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Unsupported ingress ifindex mask"); + return -EINVAL; + } + + ingress_dev = __dev_get_by_index(prestera_acl_block_net(block), + match.key->ingress_ifindex); + if (!ingress_dev) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Can't find specified ingress port to match on"); + return -EINVAL; + } + + if (!prestera_netdev_check(ingress_dev)) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "Can't match on switchdev ingress port"); + return -EINVAL; + } + port = netdev_priv(ingress_dev); + + m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT; + m_entry.keymask.u64.key = port->hw_id | ((u64)port->dev_id << 32); + m_entry.keymask.u64.mask = ~(u64)0; + + return prestera_acl_rule_match_add(rule, &m_entry); +} + +static int prestera_flower_parse(struct prestera_flow_block *block, + struct prestera_acl_rule *rule, + struct flow_cls_offload *f) +{ + struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); + struct flow_dissector *dissector = f_rule->match.dissector; + struct prestera_acl_rule_match_entry m_entry; + u16 n_proto_mask = 0; + u16 n_proto_key = 0; + u16 addr_type = 0; + u8 ip_proto = 0; + int err; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_META) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ICMP) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_VLAN))) { + NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key"); + return -EOPNOTSUPP; + } + + prestera_acl_rule_priority_set(rule, f->common.prio); + + if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) { + err = prestera_flower_parse_meta(rule, f, block); + if (err) + return err; + } + + if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(f_rule, &match); + addr_type = match.key->addr_type; + } + + if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic 
match; + + flow_rule_match_basic(f_rule, &match); + n_proto_key = ntohs(match.key->n_proto); + n_proto_mask = ntohs(match.mask->n_proto); + + if (n_proto_key == ETH_P_ALL) { + n_proto_key = 0; + n_proto_mask = 0; + } + + /* add eth type key,mask */ + memset(&m_entry, 0, sizeof(m_entry)); + m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE; + m_entry.keymask.u16.key = n_proto_key; + m_entry.keymask.u16.mask = n_proto_mask; + err = prestera_acl_rule_match_add(rule, &m_entry); + if (err) + return err; + + /* add ip proto key,mask */ + memset(&m_entry, 0, sizeof(m_entry)); + m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO; + m_entry.keymask.u8.key = match.key->ip_proto; + m_entry.keymask.u8.mask = match.mask->ip_proto; + err = prestera_acl_rule_match_add(rule, &m_entry); + if (err) + return err; + + ip_proto = match.key->ip_proto; + } + + if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(f_rule, &match); + + /* add ethernet dst key,mask */ + memset(&m_entry, 0, sizeof(m_entry)); + m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC; + memcpy(&m_entry.keymask.mac.key, + &match.key->dst, sizeof(match.key->dst)); + memcpy(&m_entry.keymask.mac.mask, + &match.mask->dst, sizeof(match.mask->dst)); + err = prestera_acl_rule_match_add(rule, &m_entry); + if (err) + return err; + + /* add ethernet src key,mask */ + memset(&m_entry, 0, sizeof(m_entry)); + m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC; + memcpy(&m_entry.keymask.mac.key, + &match.key->src, sizeof(match.key->src)); + memcpy(&m_entry.keymask.mac.mask, + &match.mask->src, sizeof(match.mask->src)); + err = prestera_acl_rule_match_add(rule, &m_entry); + if (err) + return err; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(f_rule, &match); + + memset(&m_entry, 0, sizeof(m_entry)); + m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC; + memcpy(&m_entry.keymask.u32.key, + &match.key->src, sizeof(match.key->src)); + memcpy(&m_entry.keymask.u32.mask, + &match.mask->src, sizeof(match.mask->src)); + err = prestera_acl_rule_match_add(rule, &m_entry); + if (err) + return err; + + memset(&m_entry, 0, sizeof(m_entry)); + m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST; + memcpy(&m_entry.keymask.u32.key, + &match.key->dst, sizeof(match.key->dst)); + memcpy(&m_entry.keymask.u32.mask, + &match.mask->dst, sizeof(match.mask->dst)); + err = prestera_acl_rule_match_add(rule, &m_entry); + if (err) + return err; + } + + if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { + NL_SET_ERR_MSG_MOD + (f->common.extack, + "Only UDP and TCP keys are supported"); + return -EINVAL; + } + + flow_rule_match_ports(f_rule, &match); + + memset(&m_entry, 0, sizeof(m_entry)); + m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC; + m_entry.keymask.u16.key = ntohs(match.key->src); + m_entry.keymask.u16.mask = ntohs(match.mask->src); + err = prestera_acl_rule_match_add(rule, &m_entry); + if (err) + return err; + + memset(&m_entry, 0, sizeof(m_entry)); + m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST; + m_entry.keymask.u16.key = ntohs(match.key->dst); + m_entry.keymask.u16.mask = ntohs(match.mask->dst); + err = prestera_acl_rule_match_add(rule, &m_entry); + if (err) + return err; + } + + if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) { 
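+		/* Annotation (not part of the commit): the VLAN ID key is only programmed when the filter actually masks it; the TPID key/mask pair is always added below. */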
+ struct flow_match_vlan match; + + flow_rule_match_vlan(f_rule, &match); + + if (match.mask->vlan_id != 0) { + memset(&m_entry, 0, sizeof(m_entry)); + m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID; + m_entry.keymask.u16.key = match.key->vlan_id; + m_entry.keymask.u16.mask = match.mask->vlan_id; + err = prestera_acl_rule_match_add(rule, &m_entry); + if (err) + return err; + } + + memset(&m_entry, 0, sizeof(m_entry)); + m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID; + m_entry.keymask.u16.key = ntohs(match.key->vlan_tpid); + m_entry.keymask.u16.mask = ntohs(match.mask->vlan_tpid); + err = prestera_acl_rule_match_add(rule, &m_entry); + if (err) + return err; + } + + if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) { + struct flow_match_icmp match; + + flow_rule_match_icmp(f_rule, &match); + + memset(&m_entry, 0, sizeof(m_entry)); + m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE; + m_entry.keymask.u8.key = match.key->type; + m_entry.keymask.u8.mask = match.mask->type; + err = prestera_acl_rule_match_add(rule, &m_entry); + if (err) + return err; + + memset(&m_entry, 0, sizeof(m_entry)); + m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE; + m_entry.keymask.u8.key = match.key->code; + m_entry.keymask.u8.mask = match.mask->code; + err = prestera_acl_rule_match_add(rule, &m_entry); + if (err) + return err; + } + + return prestera_flower_parse_actions(block, rule, + &f->rule->action, + f->common.extack); +} + +int prestera_flower_replace(struct prestera_flow_block *block, + struct flow_cls_offload *f) +{ + struct prestera_switch *sw = prestera_acl_block_sw(block); + struct prestera_acl_rule *rule; + int err; + + rule = prestera_acl_rule_create(block, f->cookie); + if (IS_ERR(rule)) + return PTR_ERR(rule); + + err = prestera_flower_parse(block, rule, f); + if (err) + goto err_flower_parse; + + err = prestera_acl_rule_add(sw, rule); + if (err) + goto err_rule_add; + + return 0; + +err_rule_add: +err_flower_parse: + prestera_acl_rule_destroy(rule); + return err; +} + +void prestera_flower_destroy(struct prestera_flow_block *block, + struct flow_cls_offload *f) +{ + struct prestera_acl_rule *rule; + struct prestera_switch *sw; + + rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block), + f->cookie); + if (rule) { + sw = prestera_acl_block_sw(block); + prestera_acl_rule_del(sw, rule); + prestera_acl_rule_destroy(rule); + } +} + +int prestera_flower_stats(struct prestera_flow_block *block, + struct flow_cls_offload *f) +{ + struct prestera_switch *sw = prestera_acl_block_sw(block); + struct prestera_acl_rule *rule; + u64 packets; + u64 lastuse; + u64 bytes; + int err; + + rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block), + f->cookie); + if (!rule) + return -EINVAL; + + err = prestera_acl_rule_get_stats(sw, rule, &packets, &bytes, &lastuse); + if (err) + return err; + + flow_stats_update(&f->stats, bytes, packets, 0, lastuse, + FLOW_ACTION_HW_STATS_IMMEDIATE); + return 0; +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h new file mode 100644 index 000000000000..91e045eec58b --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. 
*/ + +#ifndef _PRESTERA_FLOWER_H_ +#define _PRESTERA_FLOWER_H_ + +#include <net/pkt_cls.h> + +struct prestera_flow_block; + +int prestera_flower_replace(struct prestera_flow_block *block, + struct flow_cls_offload *f); +void prestera_flower_destroy(struct prestera_flow_block *block, + struct flow_cls_offload *f); +int prestera_flower_stats(struct prestera_flow_block *block, + struct flow_cls_offload *f); + +#endif /* _PRESTERA_FLOWER_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c index 0424718d5998..c1297859e471 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c @@ -2,11 +2,13 @@ /* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */ #include <linux/etherdevice.h> +#include <linux/if_bridge.h> #include <linux/ethtool.h> #include <linux/list.h> #include "prestera.h" #include "prestera_hw.h" +#include "prestera_acl.h" #define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000) @@ -36,11 +38,31 @@ enum prestera_cmd_type_t { PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403, + PRESTERA_CMD_TYPE_ACL_RULE_ADD = 0x500, + PRESTERA_CMD_TYPE_ACL_RULE_DELETE = 0x501, + PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET = 0x510, + PRESTERA_CMD_TYPE_ACL_RULESET_CREATE = 0x520, + PRESTERA_CMD_TYPE_ACL_RULESET_DELETE = 0x521, + PRESTERA_CMD_TYPE_ACL_PORT_BIND = 0x530, + PRESTERA_CMD_TYPE_ACL_PORT_UNBIND = 0x531, + PRESTERA_CMD_TYPE_RXTX_INIT = 0x800, PRESTERA_CMD_TYPE_RXTX_PORT_INIT = 0x801, + PRESTERA_CMD_TYPE_LAG_MEMBER_ADD = 0x900, + PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE = 0x901, + PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE = 0x902, + PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE = 0x903, + PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000, + PRESTERA_CMD_TYPE_SPAN_GET = 0x1100, + PRESTERA_CMD_TYPE_SPAN_BIND = 0x1101, + PRESTERA_CMD_TYPE_SPAN_UNBIND = 0x1102, + PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103, + + PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET = 0x2000, + PRESTERA_CMD_TYPE_ACK = 0x10000, PRESTERA_CMD_TYPE_MAX }; @@ -86,6 +108,11 @@ enum { }; enum { + PRESTERA_PORT_FLOOD_TYPE_UC = 0, + PRESTERA_PORT_FLOOD_TYPE_MC = 1, +}; + +enum { PRESTERA_PORT_GOOD_OCTETS_RCV_CNT, PRESTERA_PORT_BAD_OCTETS_RCV_CNT, PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT, @@ -127,6 +154,12 @@ enum { PRESTERA_FC_SYMM_ASYMM, }; +enum { + PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT = 0, + PRESTERA_HW_FDB_ENTRY_TYPE_LAG = 1, + PRESTERA_HW_FDB_ENTRY_TYPE_MAX = 2, +}; + struct prestera_fw_event_handler { struct list_head list; struct rcu_head rcu; @@ -168,6 +201,8 @@ struct prestera_msg_switch_init_resp { u32 port_count; u32 mtu_max; u8 switch_id; + u8 lag_max; + u8 lag_member_max; }; struct prestera_msg_port_autoneg_param { @@ -188,6 +223,11 @@ struct prestera_msg_port_mdix_param { u8 admin_mode; }; +struct prestera_msg_port_flood_param { + u8 type; + u8 enable; +}; + union prestera_msg_port_param { u8 admin_state; u8 oper_state; @@ -205,6 +245,7 @@ union prestera_msg_port_param { struct prestera_msg_port_mdix_param mdix; struct prestera_msg_port_autoneg_param autoneg; struct prestera_msg_port_cap_param cap; + struct prestera_msg_port_flood_param flood_ext; }; struct prestera_msg_port_attr_req { @@ -249,8 +290,13 @@ struct prestera_msg_vlan_req { struct prestera_msg_fdb_req { struct prestera_msg_cmd cmd; u8 dest_type; - u32 port; - u32 dev; + union { + struct { + u32 port; + u32 dev; + }; + u16 lag_id; + } dest; u8 mac[ETH_ALEN]; u16 vid; u8 dynamic; @@ -269,6 +315,85 @@ struct 
prestera_msg_bridge_resp { u16 bridge; }; +struct prestera_msg_acl_action { + u32 id; +}; + +struct prestera_msg_acl_match { + u32 type; + union { + struct { + u8 key; + u8 mask; + } u8; + struct { + u16 key; + u16 mask; + } u16; + struct { + u32 key; + u32 mask; + } u32; + struct { + u64 key; + u64 mask; + } u64; + struct { + u8 key[ETH_ALEN]; + u8 mask[ETH_ALEN]; + } mac; + } __packed keymask; +}; + +struct prestera_msg_acl_rule_req { + struct prestera_msg_cmd cmd; + u32 id; + u32 priority; + u16 ruleset_id; + u8 n_actions; + u8 n_matches; +}; + +struct prestera_msg_acl_rule_resp { + struct prestera_msg_ret ret; + u32 id; +}; + +struct prestera_msg_acl_rule_stats_resp { + struct prestera_msg_ret ret; + u64 packets; + u64 bytes; +}; + +struct prestera_msg_acl_ruleset_bind_req { + struct prestera_msg_cmd cmd; + u32 port; + u32 dev; + u16 ruleset_id; +}; + +struct prestera_msg_acl_ruleset_req { + struct prestera_msg_cmd cmd; + u16 id; +}; + +struct prestera_msg_acl_ruleset_resp { + struct prestera_msg_ret ret; + u16 id; +}; + +struct prestera_msg_span_req { + struct prestera_msg_cmd cmd; + u32 port; + u32 dev; + u8 id; +} __packed __aligned(4); + +struct prestera_msg_span_resp { + struct prestera_msg_ret ret; + u8 id; +} __packed __aligned(4); + struct prestera_msg_stp_req { struct prestera_msg_cmd cmd; u32 port; @@ -293,6 +418,24 @@ struct prestera_msg_rxtx_port_req { u32 dev; }; +struct prestera_msg_lag_req { + struct prestera_msg_cmd cmd; + u32 port; + u32 dev; + u16 lag_id; +}; + +struct prestera_msg_cpu_code_counter_req { + struct prestera_msg_cmd cmd; + u8 counter_type; + u8 code; +}; + +struct mvsw_msg_cpu_code_counter_ret { + struct prestera_msg_ret ret; + u64 packet_count; +}; + struct prestera_msg_event { u16 type; u16 id; @@ -315,7 +458,10 @@ union prestera_msg_event_fdb_param { struct prestera_msg_event_fdb { struct prestera_msg_event id; u8 dest_type; - u32 port_id; + union { + u32 port_id; + u16 lag_id; + } dest; u32 vid; union prestera_msg_event_fdb_param param; }; @@ -386,7 +532,19 @@ static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt) { struct prestera_msg_event_fdb *hw_evt = msg; - evt->fdb_evt.port_id = hw_evt->port_id; + switch (hw_evt->dest_type) { + case PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT: + evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_REG_PORT; + evt->fdb_evt.dest.port_id = hw_evt->dest.port_id; + break; + case PRESTERA_HW_FDB_ENTRY_TYPE_LAG: + evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_LAG; + evt->fdb_evt.dest.lag_id = hw_evt->dest.lag_id; + break; + default: + return -EINVAL; + } + evt->fdb_evt.vid = hw_evt->vid; ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac); @@ -531,6 +689,8 @@ int prestera_hw_switch_init(struct prestera_switch *sw) sw->mtu_min = PRESTERA_MIN_MTU; sw->mtu_max = resp.mtu_max; sw->id = resp.switch_id; + sw->lag_member_max = resp.lag_member_max; + sw->lag_max = resp.lag_max; return 0; } @@ -696,6 +856,274 @@ int prestera_hw_port_remote_fc_get(const struct prestera_port *port, return 0; } +int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id) +{ + struct prestera_msg_acl_ruleset_resp resp; + struct prestera_msg_acl_ruleset_req req; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULESET_CREATE, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *ruleset_id = resp.id; + + return 0; +} + +int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, u16 ruleset_id) +{ + struct prestera_msg_acl_ruleset_req req = { + .id = ruleset_id, + }; + + return 
prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULESET_DELETE, + &req.cmd, sizeof(req)); +} + +static int prestera_hw_acl_actions_put(struct prestera_msg_acl_action *action, + struct prestera_acl_rule *rule) +{ + struct list_head *a_list = prestera_acl_rule_action_list_get(rule); + struct prestera_acl_rule_action_entry *a_entry; + int i = 0; + + list_for_each_entry(a_entry, a_list, list) { + action[i].id = a_entry->id; + + switch (a_entry->id) { + case PRESTERA_ACL_RULE_ACTION_ACCEPT: + case PRESTERA_ACL_RULE_ACTION_DROP: + case PRESTERA_ACL_RULE_ACTION_TRAP: + /* just rule action id, no specific data */ + break; + default: + return -EINVAL; + } + + i++; + } + + return 0; +} + +static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match, + struct prestera_acl_rule *rule) +{ + struct list_head *m_list = prestera_acl_rule_match_list_get(rule); + struct prestera_acl_rule_match_entry *m_entry; + int i = 0; + + list_for_each_entry(m_entry, m_list, list) { + match[i].type = m_entry->type; + + switch (m_entry->type) { + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE: + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC: + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST: + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID: + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID: + match[i].keymask.u16.key = m_entry->keymask.u16.key; + match[i].keymask.u16.mask = m_entry->keymask.u16.mask; + break; + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE: + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE: + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO: + match[i].keymask.u8.key = m_entry->keymask.u8.key; + match[i].keymask.u8.mask = m_entry->keymask.u8.mask; + break; + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC: + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC: + memcpy(match[i].keymask.mac.key, + m_entry->keymask.mac.key, + sizeof(match[i].keymask.mac.key)); + memcpy(match[i].keymask.mac.mask, + m_entry->keymask.mac.mask, + sizeof(match[i].keymask.mac.mask)); + break; + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC: + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST: + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC: + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST: + match[i].keymask.u32.key = m_entry->keymask.u32.key; + match[i].keymask.u32.mask = m_entry->keymask.u32.mask; + break; + case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT: + match[i].keymask.u64.key = m_entry->keymask.u64.key; + match[i].keymask.u64.mask = m_entry->keymask.u64.mask; + break; + default: + return -EINVAL; + } + + i++; + } + + return 0; +} + +int prestera_hw_acl_rule_add(struct prestera_switch *sw, + struct prestera_acl_rule *rule, + u32 *rule_id) +{ + struct prestera_msg_acl_action *actions; + struct prestera_msg_acl_match *matches; + struct prestera_msg_acl_rule_resp resp; + struct prestera_msg_acl_rule_req *req; + u8 n_actions; + u8 n_matches; + void *buff; + u32 size; + int err; + + n_actions = prestera_acl_rule_action_len(rule); + n_matches = prestera_acl_rule_match_len(rule); + + size = sizeof(*req) + sizeof(*actions) * n_actions + + sizeof(*matches) * n_matches; + + buff = kzalloc(size, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + req = buff; + actions = buff + sizeof(*req); + matches = buff + sizeof(*req) + sizeof(*actions) * n_actions; + + /* put acl actions into the message */ + err = prestera_hw_acl_actions_put(actions, rule); + if (err) + goto free_buff; + + /* put acl matches into the message */ + err = prestera_hw_acl_matches_put(matches, rule); + if (err) + goto 
free_buff; + + req->ruleset_id = prestera_acl_rule_ruleset_id_get(rule); + req->priority = prestera_acl_rule_priority_get(rule); + req->n_actions = prestera_acl_rule_action_len(rule); + req->n_matches = prestera_acl_rule_match_len(rule); + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_ADD, + &req->cmd, size, &resp.ret, sizeof(resp)); + if (err) + goto free_buff; + + *rule_id = resp.id; +free_buff: + kfree(buff); + return err; +} + +int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id) +{ + struct prestera_msg_acl_rule_req req = { + .id = rule_id + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULE_DELETE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id, + u64 *packets, u64 *bytes) +{ + struct prestera_msg_acl_rule_stats_resp resp; + struct prestera_msg_acl_rule_req req = { + .id = rule_id + }; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *packets = resp.packets; + *bytes = resp.bytes; + + return 0; +} + +int prestera_hw_acl_port_bind(const struct prestera_port *port, u16 ruleset_id) +{ + struct prestera_msg_acl_ruleset_bind_req req = { + .port = port->hw_id, + .dev = port->dev_id, + .ruleset_id = ruleset_id, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_BIND, + &req.cmd, sizeof(req)); +} + +int prestera_hw_acl_port_unbind(const struct prestera_port *port, + u16 ruleset_id) +{ + struct prestera_msg_acl_ruleset_bind_req req = { + .port = port->hw_id, + .dev = port->dev_id, + .ruleset_id = ruleset_id, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_UNBIND, + &req.cmd, sizeof(req)); +} + +int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id) +{ + struct prestera_msg_span_resp resp; + struct prestera_msg_span_req req = { + .port = port->hw_id, + .dev = port->dev_id, + }; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_SPAN_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *span_id = resp.id; + + return 0; +} + +int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id) +{ + struct prestera_msg_span_req req = { + .port = port->hw_id, + .dev = port->dev_id, + .id = span_id, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_BIND, + &req.cmd, sizeof(req)); +} + +int prestera_hw_span_unbind(const struct prestera_port *port) +{ + struct prestera_msg_span_req req = { + .port = port->hw_id, + .dev = port->dev_id, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_UNBIND, + &req.cmd, sizeof(req)); +} + +int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id) +{ + struct prestera_msg_span_req req = { + .id = span_id + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_SPAN_RELEASE, + &req.cmd, sizeof(req)); +} + int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type) { struct prestera_msg_port_attr_req req = { @@ -988,7 +1416,43 @@ int prestera_hw_port_learning_set(struct prestera_port *port, bool enable) &req.cmd, sizeof(req)); } -int prestera_hw_port_flood_set(struct prestera_port *port, bool flood) +static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_FLOOD, + .port = port->hw_id, + .dev = port->dev_id, + .param = { + .flood_ext = { + .type = PRESTERA_PORT_FLOOD_TYPE_UC, + .enable = flood, + } + } + }; + + return 
prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood) +{ + struct prestera_msg_port_attr_req req = { + .attr = PRESTERA_CMD_PORT_ATTR_FLOOD, + .port = port->hw_id, + .dev = port->dev_id, + .param = { + .flood_ext = { + .type = PRESTERA_PORT_FLOOD_TYPE_MC, + .enable = flood, + } + } + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + +static int prestera_hw_port_flood_set_v2(struct prestera_port *port, bool flood) { struct prestera_msg_port_attr_req req = { .attr = PRESTERA_CMD_PORT_ATTR_FLOOD, @@ -1003,6 +1467,41 @@ int prestera_hw_port_flood_set(struct prestera_port *port, bool flood) &req.cmd, sizeof(req)); } +int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask, + unsigned long val) +{ + int err; + + if (port->sw->dev->fw_rev.maj <= 2) { + if (!(mask & BR_FLOOD)) + return 0; + + return prestera_hw_port_flood_set_v2(port, val & BR_FLOOD); + } + + if (mask & BR_FLOOD) { + err = prestera_hw_port_uc_flood_set(port, val & BR_FLOOD); + if (err) + goto err_uc_flood; + } + + if (mask & BR_MCAST_FLOOD) { + err = prestera_hw_port_mc_flood_set(port, val & BR_MCAST_FLOOD); + if (err) + goto err_mc_flood; + } + + return 0; + +err_mc_flood: + prestera_hw_port_mc_flood_set(port, 0); +err_uc_flood: + if (mask & BR_FLOOD) + prestera_hw_port_uc_flood_set(port, 0); + + return err; +} + int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid) { struct prestera_msg_vlan_req req = { @@ -1067,8 +1566,10 @@ int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac, u16 vid, bool dynamic) { struct prestera_msg_fdb_req req = { - .port = port->hw_id, - .dev = port->dev_id, + .dest = { + .dev = port->dev_id, + .port = port->hw_id, + }, .vid = vid, .dynamic = dynamic, }; @@ -1083,8 +1584,10 @@ int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac, u16 vid) { struct prestera_msg_fdb_req req = { - .port = port->hw_id, - .dev = port->dev_id, + .dest = { + .dev = port->dev_id, + .port = port->hw_id, + }, .vid = vid, }; @@ -1094,11 +1597,48 @@ int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac, &req.cmd, sizeof(req)); } +int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id, + const unsigned char *mac, u16 vid, bool dynamic) +{ + struct prestera_msg_fdb_req req = { + .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, + .dest = { + .lag_id = lag_id, + }, + .vid = vid, + .dynamic = dynamic, + }; + + ether_addr_copy(req.mac, mac); + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_ADD, + &req.cmd, sizeof(req)); +} + +int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id, + const unsigned char *mac, u16 vid) +{ + struct prestera_msg_fdb_req req = { + .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, + .dest = { + .lag_id = lag_id, + }, + .vid = vid, + }; + + ether_addr_copy(req.mac, mac); + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_DELETE, + &req.cmd, sizeof(req)); +} + int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode) { struct prestera_msg_fdb_req req = { - .port = port->hw_id, - .dev = port->dev_id, + .dest = { + .dev = port->dev_id, + .port = port->hw_id, + }, .flush_mode = mode, }; @@ -1121,8 +1661,10 @@ int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid, u32 mode) { struct prestera_msg_fdb_req req = { - .port = port->hw_id, - .dev = port->dev_id, + .dest = { + .dev = port->dev_id, + .port = 
port->hw_id, + }, .vid = vid, .flush_mode = mode, }; @@ -1131,6 +1673,37 @@ int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid, &req.cmd, sizeof(req)); } +int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id, + u32 mode) +{ + struct prestera_msg_fdb_req req = { + .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, + .dest = { + .lag_id = lag_id, + }, + .flush_mode = mode, + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT, + &req.cmd, sizeof(req)); +} + +int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw, + u16 lag_id, u16 vid, u32 mode) +{ + struct prestera_msg_fdb_req req = { + .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, + .dest = { + .lag_id = lag_id, + }, + .vid = vid, + .flush_mode = mode, + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN, + &req.cmd, sizeof(req)); +} + int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id) { struct prestera_msg_bridge_resp resp; @@ -1212,6 +1785,68 @@ int prestera_hw_rxtx_port_init(struct prestera_port *port) &req.cmd, sizeof(req)); } +int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id) +{ + struct prestera_msg_lag_req req = { + .port = port->hw_id, + .dev = port->dev_id, + .lag_id = lag_id, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_ADD, + &req.cmd, sizeof(req)); +} + +int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id) +{ + struct prestera_msg_lag_req req = { + .port = port->hw_id, + .dev = port->dev_id, + .lag_id = lag_id, + }; + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id, + bool enable) +{ + struct prestera_msg_lag_req req = { + .port = port->hw_id, + .dev = port->dev_id, + .lag_id = lag_id, + }; + u32 cmd; + + cmd = enable ? 
PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE : + PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE; + + return prestera_cmd(port->sw, cmd, &req.cmd, sizeof(req)); +} + +int +prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code, + enum prestera_hw_cpu_code_cnt_t counter_type, + u64 *packet_count) +{ + struct prestera_msg_cpu_code_counter_req req = { + .counter_type = counter_type, + .code = code, + }; + struct mvsw_msg_cpu_code_counter_ret resp; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *packet_count = resp.packet_count; + + return 0; +} + int prestera_hw_event_handler_register(struct prestera_switch *sw, enum prestera_event_type type, prestera_event_cb_t fn, diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h index b2b5ac95b4e3..546d5fd8240d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h @@ -89,12 +89,18 @@ enum { PRESTERA_STP_FORWARD, }; +enum prestera_hw_cpu_code_cnt_t { + PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP = 0, + PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1, +}; + struct prestera_switch; struct prestera_port; struct prestera_port_stats; struct prestera_port_caps; enum prestera_event_type; struct prestera_event; +struct prestera_acl_rule; typedef void (*prestera_event_cb_t) (struct prestera_switch *sw, struct prestera_event *evt, void *arg); @@ -138,7 +144,8 @@ int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status, int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode); int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed); int prestera_hw_port_learning_set(struct prestera_port *port, bool enable); -int prestera_hw_port_flood_set(struct prestera_port *port, bool flood); +int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask, + unsigned long val); int prestera_hw_port_accept_frm_type(struct prestera_port *port, enum prestera_accept_frm_type type); /* Vlan API */ @@ -165,6 +172,28 @@ int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id); int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id); int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id); +/* ACL API */ +int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, + u16 *ruleset_id); +int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, + u16 ruleset_id); +int prestera_hw_acl_rule_add(struct prestera_switch *sw, + struct prestera_acl_rule *rule, + u32 *rule_id); +int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id); +int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, + u32 rule_id, u64 *packets, u64 *bytes); +int prestera_hw_acl_port_bind(const struct prestera_port *port, + u16 ruleset_id); +int prestera_hw_acl_port_unbind(const struct prestera_port *port, + u16 ruleset_id); + +/* SPAN API */ +int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id); +int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id); +int prestera_hw_span_unbind(const struct prestera_port *port); +int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id); + /* Event handlers */ int prestera_hw_event_handler_register(struct prestera_switch *sw, enum prestera_event_type type, @@ -179,4 +208,24 @@ int prestera_hw_rxtx_init(struct prestera_switch *sw, struct prestera_rxtx_params *params); 
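A possible caller of the ACL API declared above (an illustrative sketch only, not code from this patch; "sw" and "port" stand for an initialized struct prestera_switch * and struct prestera_port *): create a ruleset, bind it to a port, and drop the ruleset again if the bind fails:

	u16 ruleset_id;
	int err;

	err = prestera_hw_acl_ruleset_create(sw, &ruleset_id);
	if (err)
		return err;

	err = prestera_hw_acl_port_bind(port, ruleset_id);
	if (err)
		/* roll back the unused ruleset on bind failure */
		prestera_hw_acl_ruleset_del(sw, ruleset_id);
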
int prestera_hw_rxtx_port_init(struct prestera_port *port); +/* LAG API */ +int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id); +int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id); +int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id, + bool enable); +int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id, + const unsigned char *mac, u16 vid, bool dynamic); +int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id, + const unsigned char *mac, u16 vid); +int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id, + u32 mode); +int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw, + u16 lag_id, u16 vid, u32 mode); + +/* HW trap/drop counters API */ +int +prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code, + enum prestera_hw_cpu_code_cnt_t counter_type, + u64 *packet_count); + #endif /* _PRESTERA_HW_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 2768c78528a5..226f4ff29f6e 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -8,9 +8,13 @@ #include <linux/netdev_features.h> #include <linux/of.h> #include <linux/of_net.h> +#include <linux/if_vlan.h> #include "prestera.h" #include "prestera_hw.h" +#include "prestera_acl.h" +#include "prestera_flow.h" +#include "prestera_span.h" #include "prestera_rxtx.h" #include "prestera_devlink.h" #include "prestera_ethtool.h" @@ -199,10 +203,25 @@ static void prestera_port_stats_update(struct work_struct *work) msecs_to_jiffies(PRESTERA_STATS_DELAY_MS)); } +static int prestera_port_setup_tc(struct net_device *dev, + enum tc_setup_type type, + void *type_data) +{ + struct prestera_port *port = netdev_priv(dev); + + switch (type) { + case TC_SETUP_BLOCK: + return prestera_flow_block_setup(port, type_data); + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops prestera_netdev_ops = { .ndo_open = prestera_port_open, .ndo_stop = prestera_port_close, .ndo_start_xmit = prestera_port_xmit, + .ndo_setup_tc = prestera_port_setup_tc, .ndo_change_mtu = prestera_port_change_mtu, .ndo_get_stats64 = prestera_port_get_stats64, .ndo_set_mac_address = prestera_port_set_mac_address, @@ -281,6 +300,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id) INIT_LIST_HEAD(&port->vlans_list); port->pvid = PRESTERA_DEFAULT_VID; + port->lag = NULL; port->dev = dev; port->id = id; port->sw = sw; @@ -296,7 +316,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id) if (err) goto err_dl_port_register; - dev->features |= NETIF_F_NETNS_LOCAL; + dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC; dev->netdev_ops = &prestera_netdev_ops; dev->ethtool_ops = &prestera_ethtool_ops; @@ -472,6 +492,149 @@ static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw) return prestera_hw_switch_mac_set(sw, sw->base_mac); } +struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id) +{ + return id < sw->lag_max ? 
&sw->lags[id] : NULL;
+}
+
+static struct prestera_lag *prestera_lag_by_dev(struct prestera_switch *sw,
+						struct net_device *dev)
+{
+	struct prestera_lag *lag;
+	u16 id;
+
+	for (id = 0; id < sw->lag_max; id++) {
+		lag = &sw->lags[id];
+		if (lag->dev == dev)
+			return lag;
+	}
+
+	return NULL;
+}
+
+static struct prestera_lag *prestera_lag_create(struct prestera_switch *sw,
+						struct net_device *lag_dev)
+{
+	struct prestera_lag *lag = NULL;
+	u16 id;
+
+	/* take the first free slot; lag stays NULL when all are in use */
+	for (id = 0; id < sw->lag_max; id++) {
+		if (!sw->lags[id].dev) {
+			lag = &sw->lags[id];
+			break;
+		}
+	}
+	if (lag) {
+		INIT_LIST_HEAD(&lag->members);
+		lag->dev = lag_dev;
+	}
+
+	return lag;
+}
+
+static void prestera_lag_destroy(struct prestera_switch *sw,
+				 struct prestera_lag *lag)
+{
+	WARN_ON(!list_empty(&lag->members));
+	lag->member_count = 0;
+	lag->dev = NULL;
+}
+
+static int prestera_lag_port_add(struct prestera_port *port,
+				 struct net_device *lag_dev)
+{
+	struct prestera_switch *sw = port->sw;
+	struct prestera_lag *lag;
+	int err;
+
+	lag = prestera_lag_by_dev(sw, lag_dev);
+	if (!lag) {
+		lag = prestera_lag_create(sw, lag_dev);
+		if (!lag)
+			return -ENOSPC;
+	}
+
+	if (lag->member_count >= sw->lag_member_max)
+		return -ENOSPC;
+
+	err = prestera_hw_lag_member_add(port, lag->lag_id);
+	if (err) {
+		if (!lag->member_count)
+			prestera_lag_destroy(sw, lag);
+		return err;
+	}
+
+	list_add(&port->lag_member, &lag->members);
+	lag->member_count++;
+	port->lag = lag;
+
+	return 0;
+}
+
+static int prestera_lag_port_del(struct prestera_port *port)
+{
+	struct prestera_switch *sw = port->sw;
+	struct prestera_lag *lag = port->lag;
+	int err;
+
+	if (!lag || !lag->member_count)
+		return -EINVAL;
+
+	err = prestera_hw_lag_member_del(port, lag->lag_id);
+	if (err)
+		return err;
+
+	list_del(&port->lag_member);
+	lag->member_count--;
+	port->lag = NULL;
+
+	if (netif_is_bridge_port(lag->dev)) {
+		struct net_device *br_dev;
+
+		br_dev = netdev_master_upper_dev_get(lag->dev);
+
+		prestera_bridge_port_leave(br_dev, port);
+	}
+
+	if (!lag->member_count)
+		prestera_lag_destroy(sw, lag);
+
+	return 0;
+}
+
+bool prestera_port_is_lag_member(const struct prestera_port *port)
+{
+	return !!port->lag;
+}
+
+u16 prestera_port_lag_id(const struct prestera_port *port)
+{
+	return port->lag->lag_id;
+}
+
+static int prestera_lag_init(struct prestera_switch *sw)
+{
+	u16 id;
+
+	sw->lags = kcalloc(sw->lag_max, sizeof(*sw->lags), GFP_KERNEL);
+	if (!sw->lags)
+		return -ENOMEM;
+
+	for (id = 0; id < sw->lag_max; id++)
+		sw->lags[id].lag_id = id;
+
+	return 0;
+}
+
+static void prestera_lag_fini(struct prestera_switch *sw)
+{
+	u16 idx;
+
+	for (idx = 0; idx < sw->lag_max; idx++)
+		WARN_ON(sw->lags[idx].member_count);
+
+	kfree(sw->lags);
+}
+
 bool prestera_netdev_check(const struct net_device *dev)
 {
 	return dev->netdev_ops == &prestera_netdev_ops;
@@ -505,16 +668,119 @@ struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev)
 	return port;
 }
 
-static int prestera_netdev_port_event(struct net_device *dev,
+static int prestera_netdev_port_lower_event(struct net_device *dev,
+					    unsigned long event, void *ptr)
+{
+	struct netdev_notifier_changelowerstate_info *info = ptr;
+	struct netdev_lag_lower_state_info *lower_state_info;
+	struct prestera_port *port = netdev_priv(dev);
+	bool enabled;
+
+	if (!netif_is_lag_port(dev))
+		return 0;
+	if (!prestera_port_is_lag_member(port))
+		return 0;
+
+	lower_state_info = info->lower_state_info;
+	enabled = lower_state_info->link_up && lower_state_info->tx_enabled;
+
+	return prestera_hw_lag_member_enable(port,
port->lag->lag_id, enabled);
+}
+
+static bool prestera_lag_master_check(struct net_device *lag_dev,
+				      struct netdev_lag_upper_info *info,
+				      struct netlink_ext_ack *ext_ack)
+{
+	if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+		NL_SET_ERR_MSG_MOD(ext_ack, "Unsupported LAG Tx type");
+		return false;
+	}
+
+	return true;
+}
+
+static int prestera_netdev_port_event(struct net_device *lower,
+				      struct net_device *dev,
 				      unsigned long event, void *ptr)
 {
+	struct netdev_notifier_changeupper_info *info = ptr;
+	struct prestera_port *port = netdev_priv(dev);
+	struct netlink_ext_ack *extack;
+	struct net_device *upper;
+
+	extack = netdev_notifier_info_to_extack(&info->info);
+	upper = info->upper_dev;
+
 	switch (event) {
 	case NETDEV_PRECHANGEUPPER:
+		if (!netif_is_bridge_master(upper) &&
+		    !netif_is_lag_master(upper)) {
+			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+			return -EINVAL;
+		}
+
+		if (!info->linking)
+			break;
+
+		if (netdev_has_any_upper_dev(upper)) {
+			NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved");
+			return -EINVAL;
+		}
+
+		if (netif_is_lag_master(upper) &&
+		    !prestera_lag_master_check(upper, info->upper_info, extack))
+			return -EOPNOTSUPP;
+		if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Master device is a LAG master and port has a VLAN");
+			return -EINVAL;
+		}
+		if (netif_is_lag_port(dev) && is_vlan_dev(upper) &&
+		    !netif_is_lag_master(vlan_dev_real_dev(upper))) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Cannot put a VLAN on a LAG port");
+			return -EINVAL;
+		}
+		break;
+
 	case NETDEV_CHANGEUPPER:
-		return prestera_bridge_port_event(dev, event, ptr);
-	default:
-		return 0;
+		if (netif_is_bridge_master(upper)) {
+			if (info->linking)
+				return prestera_bridge_port_join(upper, port);
+			else
+				prestera_bridge_port_leave(upper, port);
+		} else if (netif_is_lag_master(upper)) {
+			if (info->linking)
+				return prestera_lag_port_add(port, upper);
+			else
+				prestera_lag_port_del(port);
+		}
+		break;
+
+	case NETDEV_CHANGELOWERSTATE:
+		return prestera_netdev_port_lower_event(dev, event, ptr);
 	}
+
+	return 0;
+}
+
+static int prestera_netdevice_lag_event(struct net_device *lag_dev,
+					unsigned long event, void *ptr)
+{
+	struct net_device *dev;
+	struct list_head *iter;
+	int err;
+
+	netdev_for_each_lower_dev(lag_dev, dev, iter) {
+		if (prestera_netdev_check(dev)) {
+			err = prestera_netdev_port_event(lag_dev, dev, event,
+							 ptr);
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
 }
 
 static int prestera_netdev_event_handler(struct notifier_block *nb,
@@ -524,7 +790,9 @@ static int prestera_netdev_event_handler(struct notifier_block *nb,
 	int err = 0;
 
 	if (prestera_netdev_check(dev))
-		err = prestera_netdev_port_event(dev, event, ptr);
+		err = prestera_netdev_port_event(dev, dev, event, ptr);
+	else if (netif_is_lag_master(dev))
+		err = prestera_netdevice_lag_event(dev, event, ptr);
 
 	return notifier_from_errno(err);
 }
@@ -574,10 +842,22 @@ static int prestera_switch_init(struct prestera_switch *sw)
 	if (err)
 		goto err_handlers_register;
 
+	err = prestera_acl_init(sw);
+	if (err)
+		goto err_acl_init;
+
+	err = prestera_span_init(sw);
+	if (err)
+		goto err_span_init;
+
 	err = prestera_devlink_register(sw);
 	if (err)
 		goto err_dl_register;
 
+	err = prestera_lag_init(sw);
+	if (err)
+		goto err_lag_init;
+
 	err = prestera_create_ports(sw);
 	if (err)
 		goto err_ports_create;
@@ -585,8 +865,14 @@ static int prestera_switch_init(struct prestera_switch *sw)
 	return 0;
 
 err_ports_create:
+	prestera_lag_fini(sw);
+err_lag_init:
 	prestera_devlink_unregister(sw);
err_dl_register: + prestera_span_fini(sw); +err_span_init: + prestera_acl_fini(sw); +err_acl_init: prestera_event_handlers_unregister(sw); err_handlers_register: prestera_rxtx_switch_fini(sw); @@ -602,7 +888,10 @@ err_swdev_register: static void prestera_switch_fini(struct prestera_switch *sw) { prestera_destroy_ports(sw); + prestera_lag_fini(sw); prestera_devlink_unregister(sw); + prestera_span_fini(sw); + prestera_acl_fini(sw); prestera_event_handlers_unregister(sw); prestera_rxtx_switch_fini(sw); prestera_switchdev_fini(sw); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c index 298110119272..a250d394da38 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */ +#include <linux/bitfield.h> #include <linux/circ_buf.h> #include <linux/device.h> #include <linux/firmware.h> @@ -13,9 +14,12 @@ #define PRESTERA_MSG_MAX_SIZE 1500 -#define PRESTERA_SUPP_FW_MAJ_VER 2 +#define PRESTERA_SUPP_FW_MAJ_VER 3 #define PRESTERA_SUPP_FW_MIN_VER 0 +#define PRESTERA_PREV_FW_MAJ_VER 2 +#define PRESTERA_PREV_FW_MIN_VER 0 + #define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img" #define PRESTERA_FW_HDR_MAGIC 0x351D9D06 @@ -144,6 +148,11 @@ struct prestera_fw_regs { /* PRESTERA_CMD_RCV_CTL_REG flags */ #define PRESTERA_CMD_F_REPL_SENT BIT(0) +#define PRESTERA_FW_EVT_CTL_STATUS_MASK GENMASK(1, 0) + +#define PRESTERA_FW_EVT_CTL_STATUS_ON 0 +#define PRESTERA_FW_EVT_CTL_STATUS_OFF 1 + #define PRESTERA_EVTQ_REG_OFFSET(q, f) \ (PRESTERA_FW_REG_OFFSET(evtq_list) + \ (q) * sizeof(struct prestera_fw_evtq_regs) + \ @@ -166,6 +175,8 @@ struct prestera_fw_evtq { }; struct prestera_fw { + struct prestera_fw_rev rev_supp; + const struct firmware *bin; struct workqueue_struct *wq; struct prestera_device dev; u8 __iomem *ldr_regs; @@ -260,6 +271,15 @@ static u8 prestera_fw_evtq_pick(struct prestera_fw *fw) return PRESTERA_EVT_QNUM_MAX; } +static void prestera_fw_evt_ctl_status_set(struct prestera_fw *fw, u32 val) +{ + u32 status = prestera_fw_read(fw, PRESTERA_FW_STATUS_REG); + + u32p_replace_bits(&status, val, PRESTERA_FW_EVT_CTL_STATUS_MASK); + + prestera_fw_write(fw, PRESTERA_FW_STATUS_REG, status); +} + static void prestera_fw_evt_work_fn(struct work_struct *work) { struct prestera_fw *fw; @@ -269,6 +289,8 @@ static void prestera_fw_evt_work_fn(struct work_struct *work) fw = container_of(work, struct prestera_fw, evt_work); msg = fw->evt_msg; + prestera_fw_evt_ctl_status_set(fw, PRESTERA_FW_EVT_CTL_STATUS_OFF); + while ((qid = prestera_fw_evtq_pick(fw)) < PRESTERA_EVT_QNUM_MAX) { u32 idx; u32 len; @@ -288,6 +310,8 @@ static void prestera_fw_evt_work_fn(struct work_struct *work) if (fw->dev.recv_msg) fw->dev.recv_msg(&fw->dev, msg, len); } + + prestera_fw_evt_ctl_status_set(fw, PRESTERA_FW_EVT_CTL_STATUS_ON); } static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp, @@ -576,25 +600,24 @@ static void prestera_fw_rev_parse(const struct prestera_fw_header *hdr, static int prestera_fw_rev_check(struct prestera_fw *fw) { struct prestera_fw_rev *rev = &fw->dev.fw_rev; - u16 maj_supp = PRESTERA_SUPP_FW_MAJ_VER; - u16 min_supp = PRESTERA_SUPP_FW_MIN_VER; - if (rev->maj == maj_supp && rev->min >= min_supp) + if (rev->maj == fw->rev_supp.maj && rev->min >= fw->rev_supp.min) return 0; dev_err(fw->dev.dev, "Driver 
supports FW version only '%u.%u.x'",
-		PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
+		fw->rev_supp.maj, fw->rev_supp.min);
 
 	return -EINVAL;
 }
 
-static int prestera_fw_hdr_parse(struct prestera_fw *fw,
-				 const struct firmware *img)
+static int prestera_fw_hdr_parse(struct prestera_fw *fw)
 {
-	struct prestera_fw_header *hdr = (struct prestera_fw_header *)img->data;
 	struct prestera_fw_rev *rev = &fw->dev.fw_rev;
+	struct prestera_fw_header *hdr;
 	u32 magic;
 
+	hdr = (struct prestera_fw_header *)fw->bin->data;
+
 	magic = be32_to_cpu(hdr->magic_number);
 	if (magic != PRESTERA_FW_HDR_MAGIC) {
 		dev_err(fw->dev.dev, "FW img hdr magic is invalid");
@@ -609,11 +632,52 @@ static int prestera_fw_hdr_parse(struct prestera_fw *fw,
 	return prestera_fw_rev_check(fw);
 }
 
+static int prestera_fw_get(struct prestera_fw *fw)
+{
+	int ver_maj = PRESTERA_SUPP_FW_MAJ_VER;
+	int ver_min = PRESTERA_SUPP_FW_MIN_VER;
+	char fw_path[128];
+	int err;
+
+pick_fw_ver:
+	snprintf(fw_path, sizeof(fw_path), PRESTERA_FW_PATH_FMT,
+		 ver_maj, ver_min);
+
+	err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev);
+	if (err) {
+		if (ver_maj == PRESTERA_SUPP_FW_MAJ_VER) {
+			ver_maj = PRESTERA_PREV_FW_MAJ_VER;
+			ver_min = PRESTERA_PREV_FW_MIN_VER;
+
+			dev_warn(fw->dev.dev,
+				 "missing latest %s firmware, falling back to previous %u.%u version\n",
+				 fw_path, ver_maj, ver_min);
+
+			goto pick_fw_ver;
+		} else {
+			dev_err(fw->dev.dev, "failed to request previous firmware: %s\n",
				fw_path);
+			return err;
+		}
+	}
+
+	dev_info(fw->dev.dev, "Loading %s ...", fw_path);
+
+	fw->rev_supp.maj = ver_maj;
+	fw->rev_supp.min = ver_min;
+	fw->rev_supp.sub = 0;
+
+	return 0;
+}
+
+static void prestera_fw_put(struct prestera_fw *fw)
+{
+	release_firmware(fw->bin);
+}
+
 static int prestera_fw_load(struct prestera_fw *fw)
 {
 	size_t hlen = sizeof(struct prestera_fw_header);
-	const struct firmware *f;
-	char fw_path[128];
 	int err;
 
 	err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_READY_REG,
@@ -632,30 +696,24 @@ static int prestera_fw_load(struct prestera_fw *fw)
 
 	fw->ldr_wr_idx = 0;
 
-	snprintf(fw_path, sizeof(fw_path), PRESTERA_FW_PATH_FMT,
-		 PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
-
-	err = request_firmware_direct(&f, fw_path, fw->dev.dev);
-	if (err) {
-		dev_err(fw->dev.dev, "failed to request firmware file\n");
+	err = prestera_fw_get(fw);
+	if (err)
 		return err;
-	}
 
-	err = prestera_fw_hdr_parse(fw, f);
+	err = prestera_fw_hdr_parse(fw);
 	if (err) {
 		dev_err(fw->dev.dev, "FW image header is invalid\n");
 		goto out_release;
 	}
 
-	prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, f->size - hlen);
+	prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, fw->bin->size - hlen);
 	prestera_ldr_write(fw, PRESTERA_LDR_CTL_REG, PRESTERA_LDR_CTL_DL_START);
 
-	dev_info(fw->dev.dev, "Loading %s ...", fw_path);
-
-	err = prestera_ldr_fw_send(fw, f->data + hlen, f->size - hlen);
+	err = prestera_ldr_fw_send(fw, fw->bin->data + hlen,
+				   fw->bin->size - hlen);
 
 out_release:
-	release_firmware(f);
+	prestera_fw_put(fw);
 
 	return err;
 }
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index 2a13c318048c..73d2eba5262f 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -14,6 +14,7 @@
 #include "prestera.h"
 #include "prestera_hw.h"
 #include "prestera_rxtx.h"
+#include "prestera_devlink.h"
 
 #define PRESTERA_SDMA_WAIT_MUL 10
 
@@ -214,9 +215,10 @@ static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
 
 static
int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
				     struct sk_buff *skb)
 {
-	const struct prestera_port *port;
+	struct prestera_port *port;
 	struct prestera_dsa dsa;
 	u32 hw_port, dev_id;
+	u8 cpu_code;
 	int err;
 
 	skb_pull(skb, ETH_HLEN);
@@ -259,6 +261,9 @@ static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
 	}
 
+	cpu_code = dsa.cpu_code;
+	prestera_devlink_trap_report(port, skb, cpu_code);
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c
new file mode 100644
index 000000000000..3cafca827bb7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_span.h"
+
+struct prestera_span_entry {
+	struct list_head list;
+	struct prestera_port *port;
+	refcount_t ref_count;
+	u8 id;
+};
+
+struct prestera_span {
+	struct prestera_switch *sw;
+	struct list_head entries;
+};
+
+static struct prestera_span_entry *
+prestera_span_entry_create(struct prestera_port *port, u8 span_id)
+{
+	struct prestera_span_entry *entry;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return ERR_PTR(-ENOMEM);
+
+	refcount_set(&entry->ref_count, 1);
+	entry->port = port;
+	entry->id = span_id;
+	list_add_tail(&entry->list, &port->sw->span->entries);
+
+	return entry;
+}
+
+static void prestera_span_entry_del(struct prestera_span_entry *entry)
+{
+	list_del(&entry->list);
+	kfree(entry);
+}
+
+static struct prestera_span_entry *
+prestera_span_entry_find_by_id(struct prestera_span *span, u8 span_id)
+{
+	struct prestera_span_entry *entry;
+
+	list_for_each_entry(entry, &span->entries, list) {
+		if (entry->id == span_id)
+			return entry;
+	}
+
+	return NULL;
+}
+
+static struct prestera_span_entry *
+prestera_span_entry_find_by_port(struct prestera_span *span,
+				 struct prestera_port *port)
+{
+	struct prestera_span_entry *entry;
+
+	list_for_each_entry(entry, &span->entries, list) {
+		if (entry->port == port)
+			return entry;
+	}
+
+	return NULL;
+}
+
+static int prestera_span_get(struct prestera_port *port, u8 *span_id)
+{
+	u8 new_span_id;
+	struct prestera_switch *sw = port->sw;
+	struct prestera_span_entry *entry;
+	int err;
+
+	entry = prestera_span_entry_find_by_port(sw->span, port);
+	if (entry) {
+		refcount_inc(&entry->ref_count);
+		*span_id = entry->id;
+		return 0;
+	}
+
+	err = prestera_hw_span_get(port, &new_span_id);
+	if (err)
+		return err;
+
+	entry = prestera_span_entry_create(port, new_span_id);
+	if (IS_ERR(entry)) {
+		prestera_hw_span_release(sw, new_span_id);
+		return PTR_ERR(entry);
+	}
+
+	*span_id = new_span_id;
+	return 0;
+}
+
+static int prestera_span_put(struct prestera_switch *sw, u8 span_id)
+{
+	struct prestera_span_entry *entry;
+	int err;
+
+	entry = prestera_span_entry_find_by_id(sw->span, span_id);
+	if (!entry)
+		return -ENOENT;
+
+	if (!refcount_dec_and_test(&entry->ref_count))
+		return 0;
+
+	err = prestera_hw_span_release(sw, span_id);
+	if (err)
+		return err;
+
+	prestera_span_entry_del(entry);
+	return 0;
+}
+
+static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
+				  struct prestera_port *to_port)
+{
+	struct prestera_switch *sw = binding->port->sw;
+	u8 span_id;
+	int err;
+
+	if
(binding->span_id != PRESTERA_SPAN_INVALID_ID) + /* port already in mirroring */ + return -EEXIST; + + err = prestera_span_get(to_port, &span_id); + if (err) + return err; + + err = prestera_hw_span_bind(binding->port, span_id); + if (err) { + prestera_span_put(sw, span_id); + return err; + } + + binding->span_id = span_id; + return 0; +} + +static int prestera_span_rule_del(struct prestera_flow_block_binding *binding) +{ + int err; + + err = prestera_hw_span_unbind(binding->port); + if (err) + return err; + + err = prestera_span_put(binding->port->sw, binding->span_id); + if (err) + return err; + + binding->span_id = PRESTERA_SPAN_INVALID_ID; + return 0; +} + +int prestera_span_replace(struct prestera_flow_block *block, + struct tc_cls_matchall_offload *f) +{ + struct prestera_flow_block_binding *binding; + __be16 protocol = f->common.protocol; + struct flow_action_entry *act; + struct prestera_port *port; + int err; + + if (!flow_offload_has_one_action(&f->rule->action)) { + NL_SET_ERR_MSG(f->common.extack, + "Only singular actions are supported"); + return -EOPNOTSUPP; + } + + act = &f->rule->action.entries[0]; + + if (!prestera_netdev_check(act->dev)) { + NL_SET_ERR_MSG(f->common.extack, + "Only Marvell Prestera port is supported"); + return -EINVAL; + } + if (!tc_cls_can_offload_and_chain0(act->dev, &f->common)) + return -EOPNOTSUPP; + if (act->id != FLOW_ACTION_MIRRED) + return -EOPNOTSUPP; + if (protocol != htons(ETH_P_ALL)) + return -EOPNOTSUPP; + + port = netdev_priv(act->dev); + + list_for_each_entry(binding, &block->binding_list, list) { + err = prestera_span_rule_add(binding, port); + if (err) + goto rollback; + } + + return 0; + +rollback: + list_for_each_entry_continue_reverse(binding, + &block->binding_list, list) + prestera_span_rule_del(binding); + return err; +} + +void prestera_span_destroy(struct prestera_flow_block *block) +{ + struct prestera_flow_block_binding *binding; + + list_for_each_entry(binding, &block->binding_list, list) + prestera_span_rule_del(binding); +} + +int prestera_span_init(struct prestera_switch *sw) +{ + struct prestera_span *span; + + span = kzalloc(sizeof(*span), GFP_KERNEL); + if (!span) + return -ENOMEM; + + INIT_LIST_HEAD(&span->entries); + + sw->span = span; + span->sw = sw; + + return 0; +} + +void prestera_span_fini(struct prestera_switch *sw) +{ + struct prestera_span *span = sw->span; + + WARN_ON(!list_empty(&span->entries)); + kfree(span); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.h b/drivers/net/ethernet/marvell/prestera/prestera_span.h new file mode 100644 index 000000000000..f0644521f78a --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_span.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. 
*/ + +#ifndef _PRESTERA_SPAN_H_ +#define _PRESTERA_SPAN_H_ + +#include <net/pkt_cls.h> + +#define PRESTERA_SPAN_INVALID_ID -1 + +struct prestera_switch; +struct prestera_flow_block; + +int prestera_span_init(struct prestera_switch *sw); +void prestera_span_fini(struct prestera_switch *sw); +int prestera_span_replace(struct prestera_flow_block *block, + struct tc_cls_matchall_offload *f); +void prestera_span_destroy(struct prestera_flow_block *block); + +#endif /* _PRESTERA_SPAN_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c index cb564890a3dc..9a309169dbae 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c @@ -180,6 +180,45 @@ err_port_vlan_alloc: return ERR_PTR(err); } +static int prestera_fdb_add(struct prestera_port *port, + const unsigned char *mac, u16 vid, bool dynamic) +{ + if (prestera_port_is_lag_member(port)) + return prestera_hw_lag_fdb_add(port->sw, prestera_port_lag_id(port), + mac, vid, dynamic); + + return prestera_hw_fdb_add(port, mac, vid, dynamic); +} + +static int prestera_fdb_del(struct prestera_port *port, + const unsigned char *mac, u16 vid) +{ + if (prestera_port_is_lag_member(port)) + return prestera_hw_lag_fdb_del(port->sw, prestera_port_lag_id(port), + mac, vid); + else + return prestera_hw_fdb_del(port, mac, vid); +} + +static int prestera_fdb_flush_port_vlan(struct prestera_port *port, u16 vid, + u32 mode) +{ + if (prestera_port_is_lag_member(port)) + return prestera_hw_fdb_flush_lag_vlan(port->sw, prestera_port_lag_id(port), + vid, mode); + else + return prestera_hw_fdb_flush_port_vlan(port, vid, mode); +} + +static int prestera_fdb_flush_port(struct prestera_port *port, u32 mode) +{ + if (prestera_port_is_lag_member(port)) + return prestera_hw_fdb_flush_lag(port->sw, prestera_port_lag_id(port), + mode); + else + return prestera_hw_fdb_flush_port(port, mode); +} + static void prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan) { @@ -199,11 +238,11 @@ prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan) last_port = port_count == 1; if (last_vlan) - prestera_hw_fdb_flush_port(port, fdb_flush_mode); + prestera_fdb_flush_port(port, fdb_flush_mode); else if (last_port) prestera_hw_fdb_flush_vlan(port->sw, vid, fdb_flush_mode); else - prestera_hw_fdb_flush_port_vlan(port, vid, fdb_flush_mode); + prestera_fdb_flush_port_vlan(port, vid, fdb_flush_mode); list_del(&port_vlan->br_vlan_head); prestera_bridge_vlan_put(br_vlan); @@ -312,11 +351,29 @@ __prestera_bridge_port_by_dev(struct prestera_bridge *bridge, return NULL; } +static int prestera_match_upper_bridge_dev(struct net_device *dev, + struct netdev_nested_priv *priv) +{ + if (netif_is_bridge_master(dev)) + priv->data = dev; + + return 0; +} + +static struct net_device *prestera_get_upper_bridge_dev(struct net_device *dev) +{ + struct netdev_nested_priv priv = { }; + + netdev_walk_all_upper_dev_rcu(dev, prestera_match_upper_bridge_dev, + &priv); + return priv.data; +} + static struct prestera_bridge_port * prestera_bridge_port_by_dev(struct prestera_switchdev *swdev, struct net_device *dev) { - struct net_device *br_dev = netdev_master_upper_dev_get(dev); + struct net_device *br_dev = prestera_get_upper_bridge_dev(dev); struct prestera_bridge *bridge; if (!br_dev) @@ -404,7 +461,8 @@ prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port) if (err) return err; - err = 
prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD); + err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD, + br_port->flags); if (err) goto err_port_flood_set; @@ -415,24 +473,23 @@ prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port) return 0; err_port_learning_set: - prestera_hw_port_flood_set(port, false); err_port_flood_set: prestera_hw_bridge_port_delete(port, bridge->bridge_id); return err; } -static int prestera_port_bridge_join(struct prestera_port *port, - struct net_device *upper) +int prestera_bridge_port_join(struct net_device *br_dev, + struct prestera_port *port) { struct prestera_switchdev *swdev = port->sw->swdev; struct prestera_bridge_port *br_port; struct prestera_bridge *bridge; int err; - bridge = prestera_bridge_by_dev(swdev, upper); + bridge = prestera_bridge_by_dev(swdev, br_dev); if (!bridge) { - bridge = prestera_bridge_create(swdev, upper); + bridge = prestera_bridge_create(swdev, br_dev); if (IS_ERR(bridge)) return PTR_ERR(bridge); } @@ -505,14 +562,14 @@ static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid, return prestera_hw_vlan_port_stp_set(port, vid, hw_state); } -static void prestera_port_bridge_leave(struct prestera_port *port, - struct net_device *upper) +void prestera_bridge_port_leave(struct net_device *br_dev, + struct prestera_port *port) { struct prestera_switchdev *swdev = port->sw->swdev; struct prestera_bridge_port *br_port; struct prestera_bridge *bridge; - bridge = prestera_bridge_by_dev(swdev, upper); + bridge = prestera_bridge_by_dev(swdev, br_dev); if (!bridge) return; @@ -528,57 +585,11 @@ static void prestera_port_bridge_leave(struct prestera_port *port, prestera_bridge_1d_port_leave(br_port); prestera_hw_port_learning_set(port, false); - prestera_hw_port_flood_set(port, false); + prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD, 0); prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING); prestera_bridge_port_put(br_port); } -int prestera_bridge_port_event(struct net_device *dev, unsigned long event, - void *ptr) -{ - struct netdev_notifier_changeupper_info *info = ptr; - struct netlink_ext_ack *extack; - struct prestera_port *port; - struct net_device *upper; - int err; - - extack = netdev_notifier_info_to_extack(&info->info); - port = netdev_priv(dev); - upper = info->upper_dev; - - switch (event) { - case NETDEV_PRECHANGEUPPER: - if (!netif_is_bridge_master(upper)) { - NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); - return -EINVAL; - } - - if (!info->linking) - break; - - if (netdev_has_any_upper_dev(upper)) { - NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved"); - return -EINVAL; - } - break; - - case NETDEV_CHANGEUPPER: - if (!netif_is_bridge_master(upper)) - break; - - if (info->linking) { - err = prestera_port_bridge_join(port, upper); - if (err) - return err; - } else { - prestera_port_bridge_leave(port, upper); - } - break; - } - - return 0; -} - static int prestera_port_attr_br_flags_set(struct prestera_port *port, struct net_device *dev, struct switchdev_brport_flags flags) @@ -590,11 +601,9 @@ static int prestera_port_attr_br_flags_set(struct prestera_port *port, if (!br_port) return 0; - if (flags.mask & BR_FLOOD) { - err = prestera_hw_port_flood_set(port, flags.val & BR_FLOOD); - if (err) - return err; - } + err = prestera_hw_port_flood_set(port, flags.mask, flags.val); + if (err) + return err; if (flags.mask & BR_LEARNING) { err = prestera_hw_port_learning_set(port, @@ -699,7 +708,7 @@ err_port_stp_set: return err; } 
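A minimal usage sketch of the reworked flood control seen above (illustrative only, not part of the patch; "port" stands for any valid struct prestera_port *): the mask selects which bridge flood flags to update and val carries their new state, so one flag can be changed without disturbing the other:

	int err;

	/* enable unicast flooding; BR_MCAST_FLOOD keeps its current state */
	err = prestera_hw_port_flood_set(port, BR_FLOOD, BR_FLOOD);
	if (err)
		return err;
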
-static int prestera_port_obj_attr_set(struct net_device *dev, +static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx, const struct switchdev_attr *attr, struct netlink_ext_ack *extack) { @@ -739,7 +748,7 @@ static void prestera_fdb_offload_notify(struct prestera_port *port, struct switchdev_notifier_fdb_info *info) { - struct switchdev_notifier_fdb_info send_info; + struct switchdev_notifier_fdb_info send_info = {}; send_info.addr = info->addr; send_info.vid = info->vid; @@ -771,9 +780,9 @@ static int prestera_port_fdb_set(struct prestera_port *port, vid = bridge->bridge_id; if (adding) - err = prestera_hw_fdb_add(port, fdb_info->addr, vid, false); + err = prestera_fdb_add(port, fdb_info->addr, vid, false); else - err = prestera_hw_fdb_del(port, fdb_info->addr, vid); + err = prestera_fdb_del(port, fdb_info->addr, vid); return err; } @@ -901,7 +910,8 @@ prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan, if (port_vlan->br_port) return 0; - err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD); + err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD, + br_port->flags); if (err) return err; @@ -1009,15 +1019,15 @@ static int prestera_port_vlans_add(struct prestera_port *port, { bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; - struct net_device *dev = vlan->obj.orig_dev; + struct net_device *orig_dev = vlan->obj.orig_dev; struct prestera_bridge_port *br_port; struct prestera_switch *sw = port->sw; struct prestera_bridge *bridge; - if (netif_is_bridge_master(dev)) + if (netif_is_bridge_master(orig_dev)) return 0; - br_port = prestera_bridge_port_by_dev(sw->swdev, dev); + br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev); if (WARN_ON(!br_port)) return -EINVAL; @@ -1030,7 +1040,7 @@ static int prestera_port_vlans_add(struct prestera_port *port, flag_pvid, extack); } -static int prestera_port_obj_add(struct net_device *dev, +static int prestera_port_obj_add(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj, struct netlink_ext_ack *extack) { @@ -1049,14 +1059,14 @@ static int prestera_port_obj_add(struct net_device *dev, static int prestera_port_vlans_del(struct prestera_port *port, const struct switchdev_obj_port_vlan *vlan) { - struct net_device *dev = vlan->obj.orig_dev; + struct net_device *orig_dev = vlan->obj.orig_dev; struct prestera_bridge_port *br_port; struct prestera_switch *sw = port->sw; - if (netif_is_bridge_master(dev)) + if (netif_is_bridge_master(orig_dev)) return -EOPNOTSUPP; - br_port = prestera_bridge_port_by_dev(sw->swdev, dev); + br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev); if (WARN_ON(!br_port)) return -EINVAL; @@ -1068,7 +1078,7 @@ static int prestera_port_vlans_del(struct prestera_port *port, return 0; } -static int prestera_port_obj_del(struct net_device *dev, +static int prestera_port_obj_del(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj) { struct prestera_port *port = netdev_priv(dev); @@ -1113,11 +1123,27 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused, static void prestera_fdb_event(struct prestera_switch *sw, struct prestera_event *evt, void *arg) { - struct switchdev_notifier_fdb_info info; + struct switchdev_notifier_fdb_info info = {}; + struct net_device *dev = NULL; struct prestera_port *port; + struct prestera_lag *lag; - port = prestera_find_port(sw, evt->fdb_evt.port_id); - if (!port) + switch (evt->fdb_evt.type) { + case 
PRESTERA_FDB_ENTRY_TYPE_REG_PORT: + port = prestera_find_port(sw, evt->fdb_evt.dest.port_id); + if (port) + dev = port->dev; + break; + case PRESTERA_FDB_ENTRY_TYPE_LAG: + lag = prestera_lag_by_id(sw, evt->fdb_evt.dest.lag_id); + if (lag) + dev = lag->dev; + break; + default: + return; + } + + if (!dev) return; info.addr = evt->fdb_evt.data.mac; @@ -1129,11 +1155,11 @@ static void prestera_fdb_event(struct prestera_switch *sw, switch (evt->id) { case PRESTERA_FDB_EVENT_LEARNED: call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, - port->dev, &info.info, NULL); + dev, &info.info, NULL); break; case PRESTERA_FDB_EVENT_AGED: call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, - port->dev, &info.info, NULL); + dev, &info.info, NULL); break; } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h index 606e21d2355b..a91bc35d235f 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h @@ -7,7 +7,10 @@ int prestera_switchdev_init(struct prestera_switch *sw); void prestera_switchdev_fini(struct prestera_switch *sw); -int prestera_bridge_port_event(struct net_device *dev, unsigned long event, - void *ptr); +int prestera_bridge_port_join(struct net_device *br_dev, + struct prestera_port *port); + +void prestera_bridge_port_leave(struct net_device *br_dev, + struct prestera_port *port); #endif /* _PRESTERA_SWITCHDEV_H_ */ diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index e967867828d8..9b48ae4bac39 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1528,6 +1528,7 @@ static int pxa168_eth_remove(struct platform_device *pdev) struct net_device *dev = platform_get_drvdata(pdev); struct pxa168_eth_private *pep = netdev_priv(dev); + cancel_work_sync(&pep->tx_timeout_task); if (pep->htpr) { dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE, pep->htpr, pep->htpr_dma); @@ -1539,7 +1540,6 @@ static int pxa168_eth_remove(struct platform_device *pdev) clk_disable_unprepare(pep->clk); mdiobus_unregister(pep->smi_bus); mdiobus_free(pep->smi_bus); - cancel_work_sync(&pep->tx_timeout_task); unregister_netdev(dev); free_netdev(dev); return 0; diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h index 6928abcec0a3..f72217348eb4 100644 --- a/drivers/net/ethernet/marvell/skge.h +++ b/drivers/net/ethernet/marvell/skge.h @@ -263,7 +263,7 @@ enum { CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */ CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */ CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */ - CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */ + CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */ CHIP_REV_YU_LITE_A1 = 3, /* Chip Rev. for YUKON-Lite A1,A2 */ CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. for YUKON-Lite A3 */ diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 222c32367b2c..8b8bff59c8fe 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -471,7 +471,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) adv |= fiber_fc_adv[sky2->flow_mode]; } else { reg |= GM_GPCR_AU_FCT_DIS; - reg |= gm_fc_disable[sky2->flow_mode]; + reg |= gm_fc_disable[sky2->flow_mode]; /* Forward pause packets to GMAC? 
*/
 		if (sky2->flow_mode & FC_RX)
@@ -1656,16 +1656,16 @@ static void sky2_hw_up(struct sky2_port *sky2)
 	tx_init(sky2);
 
 	/*
-	 * On dual port PCI-X card, there is an problem where status
+	 * On dual port PCI-X card, there is a problem where status
 	 * can be received out of order due to split transactions
 	 */
 	if (otherdev && netif_running(otherdev) &&
-	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
-		u16 cmd;
+	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
+		u16 cmd;
 
 		cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
-		cmd &= ~PCI_X_CMD_MAX_SPLIT;
-		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
+		cmd &= ~PCI_X_CMD_MAX_SPLIT;
+		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
 	}
 
 	sky2_mac_init(hw, port);
@@ -1836,8 +1836,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
 	u16 mss;
 	u8 ctrl;
 
-	if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
-		return NETDEV_TX_BUSY;
+	if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
+		return NETDEV_TX_BUSY;
 
 	len = skb_headlen(skb);
 	mapping = dma_map_single(&hw->pdev->dev, skb->data, len,
@@ -1866,9 +1866,9 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
 		if (!(hw->flags & SKY2_HW_NEW_LE))
 			mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
 
-		if (mss != sky2->tx_last_mss) {
+		if (mss != sky2->tx_last_mss) {
 			le = get_tx_le(sky2, &slot);
-			le->addr = cpu_to_le32(mss);
+			le->addr = cpu_to_le32(mss);
 
 			if (hw->flags & SKY2_HW_NEW_LE)
 				le->opcode = OP_MSS | HW_OWNER;
@@ -1895,8 +1895,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
 	/* Handle TCP checksum offload */
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		/* On Yukon EX (some versions) encoding change. */
-		if (hw->flags & SKY2_HW_AUTO_TX_SUM)
-			ctrl |= CALSUM; /* auto checksum */
+		if (hw->flags & SKY2_HW_AUTO_TX_SUM)
+			ctrl |= CALSUM; /* auto checksum */
 		else {
 			const unsigned offset = skb_transport_offset(skb);
 			u32 tcpsum;
@@ -2503,7 +2503,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
 
 		if (length == 0) {
 			/* don't need this page */
-			__skb_frag_unref(frag);
+			__skb_frag_unref(frag, false);
 			--skb_shinfo(skb)->nr_frags;
 		} else {
 			size = min(length, (unsigned) PAGE_SIZE);
@@ -2557,7 +2557,7 @@ nobuf:
 static struct sk_buff *sky2_receive(struct net_device *dev,
 				    u16 length, u32 status)
 {
-	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_port *sky2 = netdev_priv(dev);
 	struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
 	struct sk_buff *skb = NULL;
 	u16 count = (status & GMR_FS_LEN) >> 16;
@@ -5063,11 +5063,11 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!disable_msi && pci_enable_msi(pdev) == 0) {
 		err = sky2_test_msi(hw);
 		if (err) {
-			pci_disable_msi(pdev);
+			pci_disable_msi(pdev);
 			if (err != -EOPNOTSUPP)
 				goto err_out_free_netdev;
 		}
-	}
+	}
 
 	netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
 
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index b2dddd8a246c..ddec1627f1a7 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -538,8 +538,8 @@ enum {
 	CHIP_ID_YUKON_EC_U = 0xb4, /* YUKON-2 EC Ultra */
 	CHIP_ID_YUKON_EX   = 0xb5, /* YUKON-2 Extreme */
 	CHIP_ID_YUKON_EC   = 0xb6, /* YUKON-2 EC */
-	CHIP_ID_YUKON_FE   = 0xb7, /* YUKON-2 FE */
-	CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
+	CHIP_ID_YUKON_FE   = 0xb7, /* YUKON-2 FE */
+	CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
 	CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */
 	CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
 	CHIP_ID_YUKON_OPT  = 0xbc, /* YUKON-2 Optima */
@@ -2262,8 +2262,8 @@ struct
sky2_port {
 #define SKY2_FLAG_AUTO_SPEED	0x0002
 #define SKY2_FLAG_AUTO_PAUSE	0x0004
 
-	enum flow_control flow_mode;
-	enum flow_control flow_status;
+	enum flow_control flow_mode;
+	enum flow_control flow_status;
 
 #ifdef CONFIG_SKY2_DEBUG
 	struct dentry *debugfs;
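A hedged sketch of reading the new CPU-code counters declared earlier in prestera_hw.h (hypothetical caller; the code value 0x10 is an arbitrary example, not taken from this patch):

	u64 packets;
	int err;

	/* fetch the per-CPU-code drop counter kept by the firmware */
	err = prestera_hw_cpu_code_counters_get(sw, 0x10,
						PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP,
						&packets);
	if (err)
		return err;
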