Diffstat (limited to 'drivers/net/ethernet')
123 files changed, 20322 insertions(+), 1432 deletions(-)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 21407a26f806..5fc94c2f638e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -582,16 +582,12 @@ static int xgbe_get_ts_info(struct net_device *netdev,
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
 	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
-				   SOF_TIMESTAMPING_RX_SOFTWARE |
-				   SOF_TIMESTAMPING_SOFTWARE |
 				   SOF_TIMESTAMPING_TX_HARDWARE |
 				   SOF_TIMESTAMPING_RX_HARDWARE |
 				   SOF_TIMESTAMPING_RAW_HARDWARE;
 
 	if (pdata->ptp_clock)
 		ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
-	else
-		ts_info->phc_index = -1;
 
 	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
 			    (1 << HWTSTAMP_TX_ON);
 	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 482c58c4c584..bec5cdf8d1da 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -6,7 +6,7 @@ config NET_VENDOR_ATHEROS
 	bool "Atheros devices"
 	default y
-	depends on (PCI || ATH79)
+	depends on PCI || ATH79 || COMPILE_TEST
 	help
 	  If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,7 +19,7 @@ if NET_VENDOR_ATHEROS
 
 config AG71XX
 	tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support"
-	depends on ATH79
+	depends on ATH79 || COMPILE_TEST
 	select PHYLINK
 	imply NET_SELFTESTS
 	help
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index db2a8ade6205..96a6189cc31e 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -149,11 +149,11 @@
 #define FIFO_CFG4_MC		BIT(8)	/* Multicast Packet */
 #define FIFO_CFG4_BC		BIT(9)	/* Broadcast Packet */
 #define FIFO_CFG4_DR		BIT(10)	/* Dribble */
-#define FIFO_CFG4_LE		BIT(11)	/* Long Event */
-#define FIFO_CFG4_CF		BIT(12)	/* Control Frame */
-#define FIFO_CFG4_PF		BIT(13)	/* Pause Frame */
-#define FIFO_CFG4_UO		BIT(14)	/* Unsupported Opcode */
-#define FIFO_CFG4_VT		BIT(15)	/* VLAN tag detected */
+#define FIFO_CFG4_CF		BIT(11)	/* Control Frame */
+#define FIFO_CFG4_PF		BIT(12)	/* Pause Frame */
+#define FIFO_CFG4_UO		BIT(13)	/* Unsupported Opcode */
+#define FIFO_CFG4_VT		BIT(14)	/* VLAN tag detected */
+#define FIFO_CFG4_LE		BIT(15)	/* Long Event */
 #define FIFO_CFG4_FT		BIT(16)	/* Frame Truncated */
 #define FIFO_CFG4_UC		BIT(17)	/* Unicast Packet */
 #define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
@@ -168,28 +168,28 @@
 #define FIFO_CFG5_DV		BIT(1)	/* RX_DV Event */
 #define FIFO_CFG5_FC		BIT(2)	/* False Carrier */
 #define FIFO_CFG5_CE		BIT(3)	/* Code Error */
-#define FIFO_CFG5_LM		BIT(4)	/* Length Mismatch */
-#define FIFO_CFG5_LO		BIT(5)	/* Length Out of Range */
-#define FIFO_CFG5_OK		BIT(6)	/* Packet is OK */
-#define FIFO_CFG5_MC		BIT(7)	/* Multicast Packet */
-#define FIFO_CFG5_BC		BIT(8)	/* Broadcast Packet */
-#define FIFO_CFG5_DR		BIT(9)	/* Dribble */
-#define FIFO_CFG5_CF		BIT(10)	/* Control Frame */
-#define FIFO_CFG5_PF		BIT(11)	/* Pause Frame */
-#define FIFO_CFG5_UO		BIT(12)	/* Unsupported Opcode */
-#define FIFO_CFG5_VT		BIT(13)	/* VLAN tag detected */
-#define FIFO_CFG5_LE		BIT(14)	/* Long Event */
-#define FIFO_CFG5_FT		BIT(15)	/* Frame Truncated */
-#define FIFO_CFG5_16		BIT(16)	/* unknown */
-#define FIFO_CFG5_17		BIT(17)	/* unknown */
+#define FIFO_CFG5_CR		BIT(4)	/* CRC error */
+#define FIFO_CFG5_LM		BIT(5)	/* Length Mismatch */
+#define FIFO_CFG5_LO		BIT(6)	/* Length Out of Range */
+#define FIFO_CFG5_OK		BIT(7)	/* Packet is OK */
+#define FIFO_CFG5_MC		BIT(8)	/* Multicast Packet */
+#define FIFO_CFG5_BC		BIT(9)	/* Broadcast Packet */
+#define FIFO_CFG5_DR		BIT(10)	/* Dribble */
+#define FIFO_CFG5_CF		BIT(11)	/* Control Frame */
+#define FIFO_CFG5_PF		BIT(12)	/* Pause Frame */
+#define FIFO_CFG5_UO		BIT(13)	/* Unsupported Opcode */
+#define FIFO_CFG5_VT		BIT(14)	/* VLAN tag detected */
+#define FIFO_CFG5_LE		BIT(15)	/* Long Event */
+#define FIFO_CFG5_FT		BIT(16)	/* Frame Truncated */
+#define FIFO_CFG5_UC		BIT(17)	/* Unicast Packet */
 #define FIFO_CFG5_SF		BIT(18)	/* Short Frame */
 #define FIFO_CFG5_BM		BIT(19)	/* Byte Mode */
 #define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
-			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
-			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
-			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
-			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
-			 FIFO_CFG5_17 | FIFO_CFG5_SF)
+			 FIFO_CFG5_CE | FIFO_CFG5_LM | FIFO_CFG5_LO | \
+			 FIFO_CFG5_OK | FIFO_CFG5_MC | FIFO_CFG5_BC | \
+			 FIFO_CFG5_DR | FIFO_CFG5_CF | FIFO_CFG5_UO | \
+			 FIFO_CFG5_VT | FIFO_CFG5_LE | FIFO_CFG5_FT | \
+			 FIFO_CFG5_UC | FIFO_CFG5_SF)
 
 #define AG71XX_REG_TX_CTRL	0x0180
 #define TX_CTRL_TXE		BIT(0)	/* Tx Enable */
@@ -379,7 +379,6 @@ struct ag71xx {
 	u32 fifodata[3];
 	int mac_idx;
 
-	struct reset_control *mdio_reset;
 	struct clk *clk_mdio;
 };
@@ -509,8 +508,7 @@ static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
 	switch (sset) {
 	case ETH_SS_STATS:
 		for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
-			memcpy(data + i * ETH_GSTRING_LEN,
-			       ag71xx_statistics[i].name, ETH_GSTRING_LEN);
+			ethtool_puts(&data, ag71xx_statistics[i].name);
 		break;
 	case ETH_SS_TEST:
 		net_selftest_get_strings(data);
@@ -690,6 +688,7 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
 {
 	struct device *dev = &ag->pdev->dev;
 	struct net_device *ndev = ag->ndev;
+	struct reset_control *mdio_reset;
 	static struct mii_bus *mii_bus;
 	struct device_node *np, *mnp;
 	int err;
@@ -706,10 +705,10 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
 	if (!mii_bus)
 		return -ENOMEM;
 
-	ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
-	if (IS_ERR(ag->mdio_reset)) {
+	mdio_reset = devm_reset_control_get_exclusive(dev, "mdio");
+	if (IS_ERR(mdio_reset)) {
 		netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
-		return PTR_ERR(ag->mdio_reset);
+		return PTR_ERR(mdio_reset);
 	}
 
 	mii_bus->name = "ag71xx_mdio";
@@ -720,12 +719,10 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
 	mii_bus->parent = dev;
 	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
 
-	if (!IS_ERR(ag->mdio_reset)) {
-		reset_control_assert(ag->mdio_reset);
-		msleep(100);
-		reset_control_deassert(ag->mdio_reset);
-		msleep(200);
-	}
+	reset_control_assert(mdio_reset);
+	msleep(100);
+	reset_control_deassert(mdio_reset);
+	msleep(200);
 
 	mnp = of_get_child_by_name(np, "mdio");
 	err = devm_of_mdiobus_register(dev, mii_bus, mnp);
@@ -1853,6 +1850,12 @@ static int ag71xx_probe(struct platform_device *pdev)
 	if (!ag->mac_base)
 		return -ENOMEM;
 
+	/* ensure that HW is in manual polling mode before interrupts are
+	 * activated. Otherwise ag71xx_interrupt might call napi_schedule
+	 * before it is initialized by netif_napi_add.
+	 */
+	ag71xx_int_disable(ag, AG71XX_INT_POLL);
+
 	ndev->irq = platform_get_irq(pdev, 0);
 	err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
 			       0x0, dev_name(&pdev->dev), ndev);
@@ -2033,4 +2036,5 @@ static struct platform_driver ag71xx_driver = {
 };
 
 module_platform_driver(ag71xx_driver);
+MODULE_DESCRIPTION("Atheros AR71xx built-in ethernet mac driver");
 MODULE_LICENSE("GPL v2");
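
The ag71xx_ethtool_get_strings() hunk above swaps a manual memcpy() into fixed ETH_GSTRING_LEN slots for ethtool_puts(), which copies one name and advances the output cursor in a single step. A minimal sketch of the idiom (the stat table and names here are hypothetical, not from ag71xx):

	static const char my_stats[][ETH_GSTRING_LEN] = {
		"rx_ok",	/* hypothetical counters */
		"tx_ok",
	};

	static void my_get_strings(struct net_device *ndev, u32 sset, u8 *data)
	{
		int i;

		if (sset != ETH_SS_STATS)
			return;

		for (i = 0; i < ARRAY_SIZE(my_stats); i++)
			ethtool_puts(&data, my_stats[i]); /* copy + advance data */
	}
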
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c9248ed9330c..6e422e24750a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -13803,6 +13803,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 	int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
 	struct bnxt_hw_rings hwr = {0};
 	int rx_rings = rx;
+	int rc;
 
 	if (tcs)
 		tx_sets = tcs;
@@ -13835,7 +13836,23 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 	}
 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 		hwr.cp_p5 = hwr.tx + rx;
-	return bnxt_hwrm_check_rings(bp, &hwr);
+	rc = bnxt_hwrm_check_rings(bp, &hwr);
+	if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
+		if (!bnxt_ulp_registered(bp->edev)) {
+			hwr.cp += bnxt_get_ulp_msix_num(bp);
+			hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
+		}
+		if (hwr.cp > bp->total_irqs) {
+			int total_msix = bnxt_change_msix(bp, hwr.cp);
+
+			if (total_msix < hwr.cp) {
+				netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
+					    hwr.cp, total_msix);
+				rc = -ENOSPC;
+			}
+		}
+	}
+	return rc;
 }
 
 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3b805ed433ed..69231e85140b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1217,12 +1217,15 @@ struct bnxt_napi {
 	bool			in_reset;
 };
 
+/* "TxRx", 2 hyphens, plus maximum integer */
+#define BNXT_IRQ_NAME_EXTRA	17
+
 struct bnxt_irq {
 	irq_handler_t	handler;
 	unsigned int	vector;
 	u8		requested:1;
 	u8		have_cpumask:1;
-	char		name[IFNAMSIZ + 2];
+	char		name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
 	cpumask_var_t	cpu_mask;
 };
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7392a716f28d..f71cc8188b4e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -955,11 +955,6 @@ static int bnxt_set_channels(struct net_device *dev,
 		}
 		tx_xdp = req_rx_rings;
 	}
-	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
-	if (rc) {
-		netdev_warn(dev, "Unable to allocate the requested rings\n");
-		return rc;
-	}
 
 	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
 	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
@@ -968,6 +963,12 @@ static int bnxt_set_channels(struct net_device *dev,
 		return -EINVAL;
 	}
 
+	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
+	if (rc) {
+		netdev_warn(dev, "Unable to allocate the requested rings\n");
+		return rc;
+	}
+
 	if (netif_running(dev)) {
 		if (BNXT_PF(bp)) {
 			/* TODO CHIMP_FW: Send message to all VF's
@@ -5043,11 +5044,8 @@ static int bnxt_get_ts_info(struct net_device *dev,
 	struct bnxt_ptp_cfg *ptp;
 
 	ptp = bp->ptp_cfg;
-	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
-				SOF_TIMESTAMPING_RX_SOFTWARE |
-				SOF_TIMESTAMPING_SOFTWARE;
+	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
 
-	info->phc_index = -1;
 	if (!ptp)
 		return 0;
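
For the bnxt.h change above, the 17 bytes of BNXT_IRQ_NAME_EXTRA break down as 4 for "TxRx", 2 for the hyphens, 10 for the decimal digits of INT_MAX and 1 for the NUL terminator. A sketch of the worst case (the exact format string bnxt uses may differ; "netdev_name" is a placeholder):

	char name[IFNAMSIZ + 17];

	/* "eth0-TxRx-2147483647": 4 + 2 + 10 + 1 = 17 bytes beyond IFNAMSIZ */
	snprintf(name, sizeof(name), "%s-%s-%d", netdev_name, "TxRx", INT_MAX);
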
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b9e7d3e7b15d..fdd6356f21ef 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -176,11 +176,17 @@ EXPORT_SYMBOL(bnxt_unregister_dev);
 
 static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
 {
-	u32 roce_msix = BNXT_VF(bp) ?
-			BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
+	int roce_msix = BNXT_MAX_ROCE_MSIX;
 
-	return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
-		min_t(u32, roce_msix, num_online_cpus()) : 0);
+	if (BNXT_VF(bp))
+		roce_msix = BNXT_MAX_ROCE_MSIX_VF;
+	else if (bp->port_partition_type)
+		roce_msix = BNXT_MAX_ROCE_MSIX_NPAR_PF;
+
+	/* NQ MSIX vectors should match the number of CPUs plus 1 more for
+	 * the CREQ MSIX, up to the default.
+	 */
+	return min_t(int, roce_msix, num_online_cpus() + 1);
 }
 
 int bnxt_send_msg(struct bnxt_en_dev *edev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 4eafe6ec0abf..4f4914f5c84c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -15,8 +15,10 @@
 
 #define BNXT_MIN_ROCE_CP_RINGS	2
 #define BNXT_MIN_ROCE_STAT_CTXS	1
-#define BNXT_MAX_ROCE_MSIX	9
-#define BNXT_MAX_VF_ROCE_MSIX	2
+
+#define BNXT_MAX_ROCE_MSIX_VF		2
+#define BNXT_MAX_ROCE_MSIX_NPAR_PF	5
+#define BNXT_MAX_ROCE_MSIX		64
 
 struct hwrm_async_event_cmpl;
 struct bnxt;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 0ec5f01551f9..378815917741 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6145,9 +6145,7 @@ static int tg3_get_ts_info(struct net_device *dev,
 			   struct kernel_ethtool_ts_info *info)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
-				SOF_TIMESTAMPING_RX_SOFTWARE |
-				SOF_TIMESTAMPING_SOFTWARE;
+	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
 
 	if (tg3_flag(tp, PTP_CAPABLE)) {
 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
@@ -6157,8 +6155,6 @@ static int tg3_get_ts_info(struct net_device *dev,
 
 	if (tp->ptp_clock)
 		info->phc_index = ptp_clock_index(tp->ptp_clock);
-	else
-		info->phc_index = -1;
 
 	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
 			 (1 << HWTSTAMP_TX_ON);
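
The xgbe, bnxt and tg3 hunks above (and several similar ones below) all drop SOF_TIMESTAMPING_RX_SOFTWARE, SOF_TIMESTAMPING_SOFTWARE and the phc_index = -1 fallback. This lines up with the ethtool core pre-initializing struct kernel_ethtool_ts_info, reporting software RX timestamping itself and defaulting phc_index to -1, so drivers only declare what is specific to them. Under that assumption, a PHC-less driver's callback reduces to a sketch like:

	static int my_get_ts_info(struct net_device *dev,
				  struct kernel_ethtool_ts_info *info)
	{
		/* RX software timestamping, SOF_TIMESTAMPING_SOFTWARE and
		 * phc_index = -1 are filled in by the ethtool core.
		 */
		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
		return 0;
	}
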
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 8e1e4b2b2386..f06babec04a0 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3410,8 +3410,6 @@ static int gem_get_ts_info(struct net_device *dev,
 
 	info->so_timestamping =
 		SOF_TIMESTAMPING_TX_SOFTWARE |
-		SOF_TIMESTAMPING_RX_SOFTWARE |
-		SOF_TIMESTAMPING_SOFTWARE |
 		SOF_TIMESTAMPING_TX_HARDWARE |
 		SOF_TIMESTAMPING_RX_HARDWARE |
 		SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -3423,7 +3421,8 @@ static int gem_get_ts_info(struct net_device *dev,
 		(1 << HWTSTAMP_FILTER_NONE) |
 		(1 << HWTSTAMP_FILTER_ALL);
 
-	info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
+	if (bp->ptp_clock)
+		info->phc_index = ptp_clock_index(bp->ptp_clock);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 5835965dbc32..c849e2c871a9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -2496,37 +2496,31 @@ ret_intrmod:
 	return ret;
 }
 
+#ifdef PTP_HARDWARE_TIMESTAMPING
 static int lio_get_ts_info(struct net_device *netdev,
 			   struct kernel_ethtool_ts_info *info)
 {
 	struct lio *lio = GET_LIO(netdev);
 
 	info->so_timestamping =
-#ifdef PTP_HARDWARE_TIMESTAMPING
 		SOF_TIMESTAMPING_TX_HARDWARE |
 		SOF_TIMESTAMPING_RX_HARDWARE |
 		SOF_TIMESTAMPING_RAW_HARDWARE |
-		SOF_TIMESTAMPING_TX_SOFTWARE |
-#endif
-		SOF_TIMESTAMPING_RX_SOFTWARE |
-		SOF_TIMESTAMPING_SOFTWARE;
+		SOF_TIMESTAMPING_TX_SOFTWARE;
 
 	if (lio->ptp_clock)
 		info->phc_index = ptp_clock_index(lio->ptp_clock);
-	else
-		info->phc_index = -1;
 
-#ifdef PTP_HARDWARE_TIMESTAMPING
 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
 
 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
-#endif
 
 	return 0;
 }
+#endif
 
 /* Return register dump len. */
 static int lio_get_regs_len(struct net_device *dev)
@@ -3146,7 +3140,9 @@ static const struct ethtool_ops lio_ethtool_ops = {
 	.set_coalesce		= lio_set_intr_coalesce,
 	.get_priv_flags		= lio_get_priv_flags,
 	.set_priv_flags		= lio_set_priv_flags,
+#ifdef PTP_HARDWARE_TIMESTAMPING
 	.get_ts_info		= lio_get_ts_info,
+#endif
 };
 
 static const struct ethtool_ops lio_vf_ethtool_ops = {
@@ -3169,7 +3165,9 @@ static const struct ethtool_ops lio_vf_ethtool_ops = {
 	.set_coalesce		= lio_set_intr_coalesce,
 	.get_priv_flags		= lio_get_priv_flags,
 	.set_priv_flags		= lio_set_priv_flags,
+#ifdef PTP_HARDWARE_TIMESTAMPING
 	.get_ts_info		= lio_get_ts_info,
+#endif
 };
 
 void liquidio_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 6a04d2530176..d0ff0c170b1a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -844,8 +844,6 @@ static int nicvf_get_ts_info(struct net_device *netdev,
 		return ethtool_op_get_ts_info(netdev, info);
 
 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
-				SOF_TIMESTAMPING_RX_SOFTWARE |
-				SOF_TIMESTAMPING_SOFTWARE |
 				SOF_TIMESTAMPING_TX_HARDWARE |
 				SOF_TIMESTAMPING_RX_HARDWARE |
 				SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f2f1055880b2..31685ee304c6 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -601,9 +601,7 @@ static int enic_set_rxfh(struct net_device *netdev,
 static int enic_get_ts_info(struct net_device *netdev,
 			    struct kernel_ethtool_ts_info *info)
 {
-	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
-				SOF_TIMESTAMPING_RX_SOFTWARE |
-				SOF_TIMESTAMPING_SOFTWARE;
+	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2baef59f741d..ecb1703ea150 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -754,6 +754,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
 
 	err = of_get_ethdev_address(np, dev);
+	if (err == -EPROBE_DEFER)
+		goto err_grp_init;
 	if (err) {
 		eth_hw_addr_random(dev);
 		dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index 7f081e6e8c87..ba83dbf4ed22 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -1042,12 +1042,9 @@ static int fun_set_rxfh(struct net_device *netdev,
 static int fun_get_ts_info(struct net_device *netdev,
 			   struct kernel_ethtool_ts_info *info)
 {
-	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
-				SOF_TIMESTAMPING_RX_HARDWARE |
+	info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
 				SOF_TIMESTAMPING_TX_SOFTWARE |
-				SOF_TIMESTAMPING_SOFTWARE |
 				SOF_TIMESTAMPING_RAW_HARDWARE;
-	info->phc_index = -1;
 	info->tx_types = BIT(HWTSTAMP_TX_OFF);
 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
 	return 0;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index a19d098f2e2b..6ace55837172 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -418,8 +418,8 @@ do_retry:
 
 static void emac_hash_mc(struct emac_instance *dev)
 {
+	u32 __iomem *gaht_base = emac_gaht_base(dev);
 	const int regs = EMAC_XAHT_REGS(dev);
-	u32 *gaht_base = emac_gaht_base(dev);
 	u32 gaht_temp[EMAC_XAHT_MAX_REGS];
 	struct netdev_hw_addr *ha;
 	int i;
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 295516b07662..d8664bd65e1f 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -400,7 +400,7 @@ static inline int emac_has_feature(struct emac_instance *dev,
 	((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >>	\
 	 ((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1)))
 
-static inline u32 *emac_xaht_base(struct emac_instance *dev)
+static inline u32 __iomem *emac_xaht_base(struct emac_instance *dev)
 {
 	struct emac_regs __iomem *p = dev->emacp;
 	int offset;
@@ -413,10 +413,10 @@ static inline u32 *emac_xaht_base(struct emac_instance *dev)
 	else
 		offset = offsetof(struct emac_regs, u0.emac4.iaht1);
 
-	return (u32 *)((ptrdiff_t)p + offset);
+	return (u32 __iomem *)((__force ptrdiff_t)p + offset);
 }
 
-static inline u32 *emac_gaht_base(struct emac_instance *dev)
+static inline u32 __iomem *emac_gaht_base(struct emac_instance *dev)
 {
 	/* GAHT registers always come after an identical number of
 	 * IAHT registers.
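
The emac hunks above exist so sparse can catch direct dereferences of MMIO pointers: once the hash-table base carries the __iomem annotation, it has to be read and written through accessors rather than plain pointer dereference. emac itself uses the powerpc in_be32()/out_be32() helpers; a generic sketch with the portable accessors ("new_bits" and the index are placeholders):

	u32 __iomem *gaht_base = emac_gaht_base(dev);
	u32 val;

	val = ioread32(gaht_base + i);		/* never a plain *gaht_base */
	iowrite32(val | new_bits, gaht_base + i);
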
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index b4f6fa4ba13d..3307d551f431 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -33,6 +33,8 @@ ice-y := ice_main.o	\
 	 ice_idc.o	\
 	 devlink/devlink.o	\
 	 devlink/devlink_port.o	\
+	 ice_sf_eth.o	\
+	 ice_sf_vsi_vlan_ops.o	\
 	 ice_ddp.o	\
 	 ice_fw_update.o	\
 	 ice_lag.o	\
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 810a901d7afd..415445cefdb2 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -6,9 +6,11 @@
 #include "ice.h"
 #include "ice_lib.h"
 #include "devlink.h"
+#include "devlink_port.h"
 #include "ice_eswitch.h"
 #include "ice_fw_update.h"
 #include "ice_dcb_lib.h"
+#include "ice_sf_eth.h"
 
 /* context for devlink info version reporting */
 struct ice_info_ctx {
@@ -744,6 +746,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
 				 struct ice_sched_node *tc_node, struct ice_pf *pf)
 {
 	struct devlink_rate *rate_node = NULL;
+	struct ice_dynamic_port *sf;
 	struct ice_vf *vf;
 	int i;
 
@@ -755,6 +758,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
 		/* create root node */
 		rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
 	} else if (node->vsi_handle &&
+		   pf->vsi[node->vsi_handle]->type == ICE_VSI_VF &&
 		   pf->vsi[node->vsi_handle]->vf) {
 		vf = pf->vsi[node->vsi_handle]->vf;
 		if (!vf->devlink_port.devlink_rate)
@@ -763,6 +767,16 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
 			 */
 			devl_rate_leaf_create(&vf->devlink_port, node,
 					      node->parent->rate_node);
+	} else if (node->vsi_handle &&
+		   pf->vsi[node->vsi_handle]->type == ICE_VSI_SF &&
+		   pf->vsi[node->vsi_handle]->sf) {
+		sf = pf->vsi[node->vsi_handle]->sf;
+		if (!sf->devlink_port.devlink_rate)
+			/* leaf nodes don't have children
+			 * so we don't set rate_node
+			 */
+			devl_rate_leaf_create(&sf->devlink_port, node,
+					      node->parent->rate_node);
 	} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
 		   node->parent->rate_node) {
 		rate_node = devl_rate_node_create(devlink, node, node->name,
@@ -1277,8 +1291,12 @@ static const struct devlink_ops ice_devlink_ops = {
 
 	.rate_leaf_parent_set = ice_devlink_set_parent,
 	.rate_node_parent_set = ice_devlink_set_parent,
+
+	.port_new = ice_devlink_port_new,
 };
 
+static const struct devlink_ops ice_sf_devlink_ops;
+
 static int
 ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
 			    struct devlink_param_gset_ctx *ctx)
@@ -1562,6 +1580,34 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
 }
 
 /**
+ * ice_allocate_sf - Allocate devlink and return SF structure pointer
+ * @dev: the device to allocate for
+ * @pf: pointer to the PF structure
+ *
+ * Allocate a devlink instance for SF.
+ *
+ * Return: ice_sf_priv pointer to allocated memory or ERR_PTR in case of error
+ */
+struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf)
+{
+	struct devlink *devlink;
+	int err;
+
+	devlink = devlink_alloc(&ice_sf_devlink_ops, sizeof(struct ice_sf_priv),
+				dev);
+	if (!devlink)
+		return ERR_PTR(-ENOMEM);
+
+	err = devl_nested_devlink_set(priv_to_devlink(pf), devlink);
+	if (err) {
+		devlink_free(devlink);
+		return ERR_PTR(err);
+	}
+
+	return devlink_priv(devlink);
+}
+
+/**
  * ice_devlink_register - Register devlink interface for this PF
  * @pf: the PF to register the devlink for.
  *
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.h b/drivers/net/ethernet/intel/ice/devlink/devlink.h
index d291c0e2e17b..1af3b0763fbb 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.h
@@ -5,6 +5,7 @@
 #define _ICE_DEVLINK_H_
 
 struct ice_pf *ice_allocate_pf(struct device *dev);
+struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf);
 
 void ice_devlink_register(struct ice_pf *pf);
 void ice_devlink_unregister(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
index 62ef8e2fb5f1..928c8bdb6649 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
@@ -5,6 +5,9 @@
 
 #include "ice.h"
 #include "devlink.h"
+#include "devlink_port.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
 
 static int ice_active_port_option = -1;
 
@@ -485,3 +488,506 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf)
 	devl_rate_leaf_destroy(&vf->devlink_port);
 	devl_port_unregister(&vf->devlink_port);
 }
+
+/**
+ * ice_devlink_create_sf_dev_port - Register virtual port for a subfunction
+ * @sf_dev: the subfunction device to create a devlink port for
+ *
+ * Register virtual flavour devlink port for the subfunction auxiliary device
+ * created after activating a dynamically added devlink port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev)
+{
+	struct devlink_port_attrs attrs = {};
+	struct ice_dynamic_port *dyn_port;
+	struct devlink_port *devlink_port;
+	struct devlink *devlink;
+	struct ice_vsi *vsi;
+
+	dyn_port = sf_dev->dyn_port;
+	vsi = dyn_port->vsi;
+
+	devlink_port = &sf_dev->priv->devlink_port;
+
+	attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+
+	devlink_port_attrs_set(devlink_port, &attrs);
+	devlink = priv_to_devlink(sf_dev->priv);
+
+	return devl_port_register(devlink, devlink_port, vsi->idx);
+}
+
+/**
+ * ice_devlink_destroy_sf_dev_port - Destroy virtual port for a subfunction
+ * @sf_dev: the subfunction device to destroy the devlink port for
+ *
+ * Unregisters the virtual port associated with this subfunction.
+ */
+void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev)
+{
+	devl_port_unregister(&sf_dev->priv->devlink_port);
+}
+
+/**
+ * ice_activate_dynamic_port - Activate a dynamic port
+ * @dyn_port: dynamic port instance to activate
+ * @extack: extack for reporting error messages
+ *
+ * Activate the dynamic port based on its flavour.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_activate_dynamic_port(struct ice_dynamic_port *dyn_port,
+			  struct netlink_ext_ack *extack)
+{
+	int err;
+
+	if (dyn_port->active)
+		return 0;
+
+	err = ice_sf_eth_activate(dyn_port, extack);
+	if (err)
+		return err;
+
+	dyn_port->active = true;
+
+	return 0;
+}
+
+/**
+ * ice_deactivate_dynamic_port - Deactivate a dynamic port
+ * @dyn_port: dynamic port instance to deactivate
+ *
+ * Undo activation of a dynamic port.
+ */
+static void ice_deactivate_dynamic_port(struct ice_dynamic_port *dyn_port)
+{
+	if (!dyn_port->active)
+		return;
+
+	ice_sf_eth_deactivate(dyn_port);
+	dyn_port->active = false;
+}
+
+/**
+ * ice_dealloc_dynamic_port - Deallocate and remove a dynamic port
+ * @dyn_port: dynamic port instance to deallocate
+ *
+ * Free resources associated with a dynamically added devlink port. Will
+ * deactivate the port if it's currently active.
+ */
+static void ice_dealloc_dynamic_port(struct ice_dynamic_port *dyn_port)
+{
+	struct devlink_port *devlink_port = &dyn_port->devlink_port;
+	struct ice_pf *pf = dyn_port->pf;
+
+	ice_deactivate_dynamic_port(dyn_port);
+
+	xa_erase(&pf->sf_nums, devlink_port->attrs.pci_sf.sf);
+	ice_eswitch_detach_sf(pf, dyn_port);
+	ice_vsi_free(dyn_port->vsi);
+	xa_erase(&pf->dyn_ports, dyn_port->vsi->idx);
+	kfree(dyn_port);
+}
+
+/**
+ * ice_dealloc_all_dynamic_ports - Deallocate all dynamic devlink ports
+ * @pf: pointer to the pf structure
+ */
+void ice_dealloc_all_dynamic_ports(struct ice_pf *pf)
+{
+	struct ice_dynamic_port *dyn_port;
+	unsigned long index;
+
+	xa_for_each(&pf->dyn_ports, index, dyn_port)
+		ice_dealloc_dynamic_port(dyn_port);
+}
+
+/**
+ * ice_devlink_port_new_check_attr - Check that new port attributes are valid
+ * @pf: pointer to the PF structure
+ * @new_attr: the attributes for the new port
+ * @extack: extack for reporting error messages
+ *
+ * Check that the attributes for the new port are valid before continuing to
+ * allocate the devlink port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_new_check_attr(struct ice_pf *pf,
+				const struct devlink_port_new_attrs *new_attr,
+				struct netlink_ext_ack *extack)
+{
+	if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
+		NL_SET_ERR_MSG_MOD(extack, "Flavour other than pcisf is not supported");
+		return -EOPNOTSUPP;
+	}
+
+	if (new_attr->controller_valid) {
+		NL_SET_ERR_MSG_MOD(extack, "Setting controller is not supported");
+		return -EOPNOTSUPP;
+	}
+
+	if (new_attr->port_index_valid) {
+		NL_SET_ERR_MSG_MOD(extack, "Driver does not support user defined port index assignment");
+		return -EOPNOTSUPP;
+	}
+
+	if (new_attr->pfnum != pf->hw.pf_id) {
+		NL_SET_ERR_MSG_MOD(extack, "Incorrect pfnum supplied");
+		return -EINVAL;
+	}
+
+	if (!pci_msix_can_alloc_dyn(pf->pdev)) {
+		NL_SET_ERR_MSG_MOD(extack, "Dynamic MSI-X interrupt allocation is not supported");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+/**
+ * ice_devlink_port_del - devlink handler for port delete
+ * @devlink: pointer to devlink
+ * @port: devlink port to be deleted
+ * @extack: pointer to extack
+ *
+ * Deletes devlink port and deallocates all resources associated with the
+ * created subfunction.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_del(struct devlink *devlink, struct devlink_port *port,
+		     struct netlink_ext_ack *extack)
+{
+	struct ice_dynamic_port *dyn_port;
+
+	dyn_port = ice_devlink_port_to_dyn(port);
+	ice_dealloc_dynamic_port(dyn_port);
+
+	return 0;
+}
+
+/**
+ * ice_devlink_port_fn_hw_addr_set - devlink handler for mac address set
+ * @port: pointer to devlink port
+ * @hw_addr: hw address to set
+ * @hw_addr_len: hw address length
+ * @extack: extack for reporting error messages
+ *
+ * Sets mac address for the port, verifies arguments and copies address
+ * to the subfunction structure.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_hw_addr_set(struct devlink_port *port, const u8 *hw_addr,
+				int hw_addr_len,
+				struct netlink_ext_ack *extack)
+{
+	struct ice_dynamic_port *dyn_port;
+
+	dyn_port = ice_devlink_port_to_dyn(port);
+
+	if (dyn_port->attached) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Ethernet address can be changed only in detached state");
+		return -EBUSY;
+	}
+
+	if (hw_addr_len != ETH_ALEN || !is_valid_ether_addr(hw_addr)) {
+		NL_SET_ERR_MSG_MOD(extack, "Invalid ethernet address");
+		return -EADDRNOTAVAIL;
+	}
+
+	ether_addr_copy(dyn_port->hw_addr, hw_addr);
+
+	return 0;
+}
+
+/**
+ * ice_devlink_port_fn_hw_addr_get - devlink handler for mac address get
+ * @port: pointer to devlink port
+ * @hw_addr: hw address to get
+ * @hw_addr_len: hw address length
+ * @extack: extack for reporting error messages
+ *
+ * Returns mac address for the port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_hw_addr_get(struct devlink_port *port, u8 *hw_addr,
+				int *hw_addr_len,
+				struct netlink_ext_ack *extack)
+{
+	struct ice_dynamic_port *dyn_port;
+
+	dyn_port = ice_devlink_port_to_dyn(port);
+
+	ether_addr_copy(hw_addr, dyn_port->hw_addr);
+	*hw_addr_len = ETH_ALEN;
+
+	return 0;
+}
+
+/**
+ * ice_devlink_port_fn_state_set - devlink handler for port state set
+ * @port: pointer to devlink port
+ * @state: state to set
+ * @extack: extack for reporting error messages
+ *
+ * Activates or deactivates the port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_state_set(struct devlink_port *port,
+			      enum devlink_port_fn_state state,
+			      struct netlink_ext_ack *extack)
+{
+	struct ice_dynamic_port *dyn_port;
+
+	dyn_port = ice_devlink_port_to_dyn(port);
+
+	switch (state) {
+	case DEVLINK_PORT_FN_STATE_ACTIVE:
+		return ice_activate_dynamic_port(dyn_port, extack);
+
+	case DEVLINK_PORT_FN_STATE_INACTIVE:
+		ice_deactivate_dynamic_port(dyn_port);
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * ice_devlink_port_fn_state_get - devlink handler for port state get
+ * @port: pointer to devlink port
+ * @state: admin configured state of the port
+ * @opstate: current port operational state
+ * @extack: extack for reporting error messages
+ *
+ * Gets port state.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_state_get(struct devlink_port *port,
+			      enum devlink_port_fn_state *state,
+			      enum devlink_port_fn_opstate *opstate,
+			      struct netlink_ext_ack *extack)
+{
+	struct ice_dynamic_port *dyn_port;
+
+	dyn_port = ice_devlink_port_to_dyn(port);
+
+	if (dyn_port->active)
+		*state = DEVLINK_PORT_FN_STATE_ACTIVE;
+	else
+		*state = DEVLINK_PORT_FN_STATE_INACTIVE;
+
+	if (dyn_port->attached)
+		*opstate = DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+	else
+		*opstate = DEVLINK_PORT_FN_OPSTATE_DETACHED;
+
+	return 0;
+}
+
+static const struct devlink_port_ops ice_devlink_port_sf_ops = {
+	.port_del = ice_devlink_port_del,
+	.port_fn_hw_addr_get = ice_devlink_port_fn_hw_addr_get,
+	.port_fn_hw_addr_set = ice_devlink_port_fn_hw_addr_set,
+	.port_fn_state_get = ice_devlink_port_fn_state_get,
+	.port_fn_state_set = ice_devlink_port_fn_state_set,
+};
+
+/**
+ * ice_reserve_sf_num - Reserve a subfunction number for this port
+ * @pf: pointer to the pf structure
+ * @new_attr: devlink port attributes requested
+ * @extack: extack for reporting error messages
+ * @sfnum: on success, the sf number reserved
+ *
+ * Reserve a subfunction number for this port. Only called for
+ * DEVLINK_PORT_FLAVOUR_PCI_SF ports.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_reserve_sf_num(struct ice_pf *pf,
+		   const struct devlink_port_new_attrs *new_attr,
+		   struct netlink_ext_ack *extack, u32 *sfnum)
+{
+	int err;
+
+	/* If user didn't request an explicit number, pick one */
+	if (!new_attr->sfnum_valid)
+		return xa_alloc(&pf->sf_nums, sfnum, NULL, xa_limit_32b,
+				GFP_KERNEL);
+
+	/* Otherwise, check and use the number provided */
+	err = xa_insert(&pf->sf_nums, new_attr->sfnum, NULL, GFP_KERNEL);
+	if (err) {
+		if (err == -EBUSY)
+			NL_SET_ERR_MSG_MOD(extack, "Subfunction with given sfnum already exists");
+		return err;
+	}
+
+	*sfnum = new_attr->sfnum;
+
+	return 0;
+}
+
+/**
+ * ice_devlink_create_sf_port - Register PCI subfunction devlink port
+ * @dyn_port: the dynamic port instance structure for this subfunction
+ *
+ * Register PCI subfunction flavour devlink port for a dynamically added
+ * subfunction port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port)
+{
+	struct devlink_port_attrs attrs = {};
+	struct devlink_port *devlink_port;
+	struct devlink *devlink;
+	struct ice_vsi *vsi;
+	struct ice_pf *pf;
+
+	vsi = dyn_port->vsi;
+	pf = dyn_port->pf;
+
+	devlink_port = &dyn_port->devlink_port;
+
+	attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_SF;
+	attrs.pci_sf.pf = pf->hw.pf_id;
+	attrs.pci_sf.sf = dyn_port->sfnum;
+
+	devlink_port_attrs_set(devlink_port, &attrs);
+	devlink = priv_to_devlink(pf);
+
+	return devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+					   &ice_devlink_port_sf_ops);
+}
+
+/**
+ * ice_devlink_destroy_sf_port - Destroy the devlink_port for this SF
+ * @dyn_port: the dynamic port instance structure for this subfunction
+ *
+ * Unregisters the devlink_port structure associated with this SF.
+ */
+void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port)
+{
+	devl_rate_leaf_destroy(&dyn_port->devlink_port);
+	devl_port_unregister(&dyn_port->devlink_port);
+}
+
+/**
+ * ice_alloc_dynamic_port - Allocate new dynamic port
+ * @pf: pointer to the pf structure
+ * @new_attr: devlink port attributes requested
+ * @extack: extack for reporting error messages
+ * @devlink_port: on success, points to the newly created devlink port
+ *
+ * Allocate a new dynamic port instance and prepare it for configuration
+ * with devlink.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_alloc_dynamic_port(struct ice_pf *pf,
+		       const struct devlink_port_new_attrs *new_attr,
+		       struct netlink_ext_ack *extack,
+		       struct devlink_port **devlink_port)
+{
+	struct ice_dynamic_port *dyn_port;
+	struct ice_vsi *vsi;
+	u32 sfnum;
+	int err;
+
+	err = ice_reserve_sf_num(pf, new_attr, extack, &sfnum);
+	if (err)
+		return err;
+
+	dyn_port = kzalloc(sizeof(*dyn_port), GFP_KERNEL);
+	if (!dyn_port) {
+		err = -ENOMEM;
+		goto unroll_reserve_sf_num;
+	}
+
+	vsi = ice_vsi_alloc(pf);
+	if (!vsi) {
+		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VSI");
+		err = -ENOMEM;
+		goto unroll_dyn_port_alloc;
+	}
+
+	dyn_port->vsi = vsi;
+	dyn_port->pf = pf;
+	dyn_port->sfnum = sfnum;
+	eth_random_addr(dyn_port->hw_addr);
+
+	err = xa_insert(&pf->dyn_ports, vsi->idx, dyn_port, GFP_KERNEL);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Port index reservation failed");
+		goto unroll_vsi_alloc;
+	}
+
+	err = ice_eswitch_attach_sf(pf, dyn_port);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to attach SF to eswitch");
+		goto unroll_xa_insert;
+	}
+
+	*devlink_port = &dyn_port->devlink_port;
+
+	return 0;
+
+unroll_xa_insert:
+	xa_erase(&pf->dyn_ports, vsi->idx);
+unroll_vsi_alloc:
+	ice_vsi_free(vsi);
+unroll_dyn_port_alloc:
+	kfree(dyn_port);
+unroll_reserve_sf_num:
+	xa_erase(&pf->sf_nums, sfnum);
+
+	return err;
+}
+
+/**
+ * ice_devlink_port_new - devlink handler for the new port
+ * @devlink: pointer to devlink
+ * @new_attr: pointer to the port new attributes
+ * @extack: extack for reporting error messages
+ * @devlink_port: pointer to a new port
+ *
+ * Creates a new devlink port, checks the new port attributes, rejects any
+ * unsupported parameters and allocates a new subfunction for that port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int
+ice_devlink_port_new(struct devlink *devlink,
+		     const struct devlink_port_new_attrs *new_attr,
+		     struct netlink_ext_ack *extack,
+		     struct devlink_port **devlink_port)
+{
+	struct ice_pf *pf = devlink_priv(devlink);
+	int err;
+
+	err = ice_devlink_port_new_check_attr(pf, new_attr, extack);
+	if (err)
+		return err;
+
+	return ice_alloc_dynamic_port(pf, new_attr, extack, devlink_port);
+}
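
ice_alloc_dynamic_port() above is a textbook example of the kernel's reverse-order unwind idiom: every acquisition gets an unroll_* label that releases everything obtained before the failure point, and error paths fall through the labels in reverse order. The shape, reduced to a sketch with hypothetical acquire/release helpers:

	err = acquire_a();
	if (err)
		return err;
	err = acquire_b();
	if (err)
		goto unroll_a;	/* only A exists so far */
	err = acquire_c();
	if (err)
		goto unroll_b;	/* release B, then fall through to A */
	return 0;

unroll_b:
	release_b();
unroll_a:
	release_a();
	return err;
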
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
index 9223bcdb6444..d60efc340945 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
@@ -4,9 +4,55 @@
 #ifndef _DEVLINK_PORT_H_
 #define _DEVLINK_PORT_H_
 
+#include "../ice.h"
+#include "../ice_sf_eth.h"
+
+/**
+ * struct ice_dynamic_port - Track dynamically added devlink port instance
+ * @hw_addr: the HW address for this port
+ * @active: true if the port has been activated
+ * @attached: true if the port is attached
+ * @devlink_port: the associated devlink port structure
+ * @pf: pointer to the PF private structure
+ * @vsi: the VSI associated with this port
+ * @repr_id: the representor ID
+ * @sfnum: the subfunction ID
+ * @sf_dev: pointer to the subfunction device
+ *
+ * An instance of a dynamically added devlink port.
+ */
+struct ice_dynamic_port {
+	u8 hw_addr[ETH_ALEN];
+	u8 active: 1;
+	u8 attached: 1;
+	struct devlink_port devlink_port;
+	struct ice_pf *pf;
+	struct ice_vsi *vsi;
+	unsigned long repr_id;
+	u32 sfnum;
+	/* Flavour-specific implementation data */
+	union {
+		struct ice_sf_dev *sf_dev;
+	};
+};
+
+void ice_dealloc_all_dynamic_ports(struct ice_pf *pf);
+
 int ice_devlink_create_pf_port(struct ice_pf *pf);
 void ice_devlink_destroy_pf_port(struct ice_pf *pf);
 int ice_devlink_create_vf_port(struct ice_vf *vf);
 void ice_devlink_destroy_vf_port(struct ice_vf *vf);
+int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port);
+void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port);
+int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev);
+void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev);
+
+#define ice_devlink_port_to_dyn(port) \
+	container_of(port, struct ice_dynamic_port, devlink_port)
+
+int
+ice_devlink_port_new(struct devlink *devlink,
+		     const struct devlink_port_new_attrs *new_attr,
+		     struct netlink_ext_ack *extack,
+		     struct devlink_port **devlink_port);
 
 #endif /* _DEVLINK_PORT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index ce8b5505b16d..d6f80da30dec 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -451,7 +451,12 @@ struct ice_vsi {
 	struct_group_tagged(ice_vsi_cfg_params, params,
 		struct ice_port_info *port_info; /* back pointer to port_info */
 		struct ice_channel *ch; /* VSI's channel structure, may be NULL */
-		struct ice_vf *vf; /* VF associated with this VSI, may be NULL */
+		union {
+			/* VF associated with this VSI, may be NULL */
+			struct ice_vf *vf;
+			/* SF associated with this VSI, may be NULL */
+			struct ice_dynamic_port *sf;
+		};
 		u32 flags; /* VSI flags used for rebuild and configuration */
 		enum ice_vsi_type type; /* the type of the VSI */
 	);
@@ -652,6 +657,9 @@ struct ice_pf {
 	struct ice_eswitch eswitch;
 	struct ice_esw_br_port *br_port;
 
+	struct xarray dyn_ports;
+	struct xarray sf_nums;
+
 #define ICE_INVALID_AGG_NODE_ID		0
 #define ICE_PF_AGG_NODE_ID_START	1
 #define ICE_MAX_PF_AGG_NODES		32
@@ -918,6 +926,7 @@ int ice_vsi_open(struct ice_vsi *vsi);
 void ice_set_ethtool_ops(struct net_device *netdev);
 void ice_set_ethtool_repr_ops(struct net_device *netdev);
 void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
+void ice_set_ethtool_sf_ops(struct net_device *netdev);
 u16 ice_get_avail_txq_count(struct ice_pf *pf);
 u16 ice_get_avail_rxq_count(struct ice_pf *pf);
 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
@@ -1003,6 +1012,14 @@ void ice_unload(struct ice_pf *pf);
 void ice_adv_lnk_speed_maps_init(void);
 int ice_init_dev(struct ice_pf *pf);
 void ice_deinit_dev(struct ice_pf *pf);
+int ice_change_mtu(struct net_device *netdev, int new_mtu);
+void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+void ice_set_netdev_features(struct net_device *netdev);
+int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
+int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
+void ice_get_stats64(struct net_device *netdev,
+		     struct rtnl_link_stats64 *stats);
 
 /**
  * ice_set_rdma_cap - enable RDMA support
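
The ice_devlink_port_to_dyn() macro added above is the usual container_of() pattern: devlink hands every callback the same struct devlink_port that was embedded in ice_dynamic_port at registration time, so the driver wrapper can be recovered with pointer arithmetic, no lookup table needed:

	/* Recover the driver wrapper from the embedded member. */
	struct ice_dynamic_port *dyn_port =
		container_of(port, struct ice_dynamic_port, devlink_port);
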
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c158749a80e0..4a9a6899fc45 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -325,6 +325,9 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
 		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
 		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
 		break;
+	case ICE_VSI_SF:
+		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+		break;
 	default:
 		return;
 	}
@@ -540,7 +543,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 
 	ring->rx_buf_len = ring->vsi->rx_buf_len;
 
-	if (ring->vsi->type == ICE_VSI_PF) {
+	if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
 		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
 			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
 						 ring->q_index,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a94e7072b570..a7c510832824 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -187,6 +187,7 @@ void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
 		vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
 		break;
 	case ICE_VSI_CHNL:
+	case ICE_VSI_SF:
 		vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
 		vsi->tc_cfg.numtc = 1;
 		break;
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 3cfa071e3718..c0b3e70a7ea3 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -452,11 +452,9 @@ static void ice_eswitch_start_reprs(struct ice_pf *pf)
 	ice_eswitch_start_all_tx_queues(pf);
 }
 
-int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+static int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_repr *repr, unsigned long *id)
 {
-	struct devlink *devlink = priv_to_devlink(pf);
-	struct ice_repr *repr;
 	int err;
 
 	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
@@ -470,13 +468,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
 
 	ice_eswitch_stop_reprs(pf);
 
-	devl_lock(devlink);
-	repr = ice_repr_add_vf(vf);
-	devl_unlock(devlink);
-	if (IS_ERR(repr)) {
-		err = PTR_ERR(repr);
+	err = repr->ops.add(repr);
+	if (err)
 		goto err_create_repr;
-	}
 
 	err = ice_eswitch_setup_repr(pf, repr);
 	if (err)
@@ -486,7 +480,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
 	if (err)
 		goto err_xa_alloc;
 
-	vf->repr_id = repr->id;
+	*id = repr->id;
 
 	ice_eswitch_start_reprs(pf);
 
@@ -495,9 +489,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
 err_xa_alloc:
 	ice_eswitch_release_repr(pf, repr);
 err_setup_repr:
-	devl_lock(devlink);
-	ice_repr_rem_vf(repr);
-	devl_unlock(devlink);
+	repr->ops.rem(repr);
 err_create_repr:
 	if (xa_empty(&pf->eswitch.reprs))
 		ice_eswitch_disable_switchdev(pf);
@@ -506,14 +498,59 @@ err_create_repr:
 	return err;
 }
 
-void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
+/**
+ * ice_eswitch_attach_vf - attach VF to the eswitch
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF structure to be attached
+ *
+ * During attach, a port representor for the VF is created.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
 {
-	struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+	struct ice_repr *repr = ice_repr_create_vf(vf);
 	struct devlink *devlink = priv_to_devlink(pf);
+	int err;
 
-	if (!repr)
-		return;
+	if (IS_ERR(repr))
+		return PTR_ERR(repr);
+
+	devl_lock(devlink);
+	err = ice_eswitch_attach(pf, repr, &vf->repr_id);
+	if (err)
+		ice_repr_destroy(repr);
+	devl_unlock(devlink);
+
+	return err;
+}
+
+/**
+ * ice_eswitch_attach_sf - attach SF to the eswitch
+ * @pf: pointer to PF structure
+ * @sf: pointer to SF structure to be attached
+ *
+ * During attach, a port representor for the SF is created.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
+{
+	struct ice_repr *repr = ice_repr_create_sf(sf);
+	int err;
 
+	if (IS_ERR(repr))
+		return PTR_ERR(repr);
+
+	err = ice_eswitch_attach(pf, repr, &sf->repr_id);
+	if (err)
+		ice_repr_destroy(repr);
+
+	return err;
+}
+
+static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr)
+{
 	ice_eswitch_stop_reprs(pf);
 	xa_erase(&pf->eswitch.reprs, repr->id);
 
@@ -521,10 +558,12 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
 		ice_eswitch_disable_switchdev(pf);
 
 	ice_eswitch_release_repr(pf, repr);
-	devl_lock(devlink);
-	ice_repr_rem_vf(repr);
+	repr->ops.rem(repr);
+	ice_repr_destroy(repr);
 
 	if (xa_empty(&pf->eswitch.reprs)) {
+		struct devlink *devlink = priv_to_devlink(pf);
+
 		/* since all port representors are destroyed, there is
 		 * no point in keeping the nodes
 		 */
@@ -533,10 +572,42 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
 	} else {
 		ice_eswitch_start_reprs(pf);
 	}
+}
+
+/**
+ * ice_eswitch_detach_vf - detach VF from the eswitch
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF structure to be detached
+ */
+void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf)
+{
+	struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+	struct devlink *devlink = priv_to_devlink(pf);
+
+	if (!repr)
+		return;
+
+	devl_lock(devlink);
+	ice_eswitch_detach(pf, repr);
 	devl_unlock(devlink);
 }
 
 /**
+ * ice_eswitch_detach_sf - detach SF from the eswitch
+ * @pf: pointer to PF structure
+ * @sf: pointer to SF structure to be detached
+ */
+void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
+{
+	struct ice_repr *repr = xa_load(&pf->eswitch.reprs, sf->repr_id);
+
+	if (!repr)
+		return;
+
+	ice_eswitch_detach(pf, repr);
+}
+
+/**
 * ice_eswitch_get_target - get netdev based on src_vsi from descriptor
 * @rx_ring: ring used to receive the packet
 * @rx_desc: descriptor used to get src_vsi value
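
The eswitch refactor above folds the VF-only attach path into one shared static helper plus thin per-flavour wrappers: the wrapper builds the right representor, delegates, and records the representor id. The VF wrapper keeps the devlink locking it previously did, while the SF wrapper appears to rely on the devlink instance lock already being held on the port_new path. The shape, reduced to a sketch:

	/* flavour wrapper: build the representor, delegate, store the id */
	repr = ice_repr_create_vf(vf);	/* or ice_repr_create_sf(sf) */
	if (IS_ERR(repr))
		return PTR_ERR(repr);
	err = ice_eswitch_attach(pf, repr, &vf->repr_id);
	if (err)
		ice_repr_destroy(repr);
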
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 78fd39a6935d..20ce32dda69c 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -5,11 +5,13 @@
 #define _ICE_ESWITCH_H_
 
 #include <net/devlink.h>
+#include "devlink/devlink_port.h"
 
 #ifdef CONFIG_ICE_SWITCHDEV
-void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
-int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
+void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf);
+void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf);
+int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf);
+int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf);
 
 int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
 int
@@ -31,10 +33,20 @@ struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
 int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac);
 void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac);
 #else /* CONFIG_ICE_SWITCHDEV */
-static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
+static inline void
+ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf) { }
+
+static inline void
+ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) { }
+
+static inline int
+ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
+{
+	return -EOPNOTSUPP;
+}
 
 static inline int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
 {
 	return -EOPNOTSUPP;
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 793a64d44aa3..d5cc934d1359 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4412,7 +4412,7 @@ ice_repr_get_drvinfo(struct net_device *netdev,
 {
 	struct ice_repr *repr = ice_netdev_to_repr(netdev);
 
-	if (ice_check_vf_ready_for_cfg(repr->vf))
+	if (repr->ops.ready(repr))
 		return;
 
 	__ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
@@ -4424,8 +4424,7 @@ ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 	struct ice_repr *repr = ice_netdev_to_repr(netdev);
 
 	/* for port representors only ETH_SS_STATS is supported */
-	if (ice_check_vf_ready_for_cfg(repr->vf) ||
-	    stringset != ETH_SS_STATS)
+	if (repr->ops.ready(repr) || stringset != ETH_SS_STATS)
 		return;
 
 	__ice_get_strings(netdev, stringset, data, repr->src_vsi);
@@ -4438,7 +4437,7 @@ ice_repr_get_ethtool_stats(struct net_device *netdev,
 {
 	struct ice_repr *repr = ice_netdev_to_repr(netdev);
 
-	if (ice_check_vf_ready_for_cfg(repr->vf))
+	if (repr->ops.ready(repr))
 		return;
 
 	__ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 737c00b02dd0..570425772b63 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -7,6 +7,7 @@
 #include "ice_lib.h"
 #include "ice_fltr.h"
 #include "ice_dcb_lib.h"
+#include "ice_type.h"
 #include "ice_vsi_vlan_ops.h"
 
 /**
@@ -20,6 +21,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
 		return "ICE_VSI_PF";
 	case ICE_VSI_VF:
 		return "ICE_VSI_VF";
+	case ICE_VSI_SF:
+		return "ICE_VSI_SF";
 	case ICE_VSI_CTRL:
 		return "ICE_VSI_CTRL";
 	case ICE_VSI_CHNL:
@@ -135,6 +138,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
 {
 	switch (vsi->type) {
 	case ICE_VSI_PF:
+	case ICE_VSI_SF:
 	case ICE_VSI_CTRL:
 	case ICE_VSI_LB:
 		/* a user could change the values of num_[tr]x_desc using
@@ -201,6 +205,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
 			       max_t(int, vsi->alloc_rxq, vsi->alloc_txq));
 		break;
+	case ICE_VSI_SF:
+		vsi->alloc_txq = 1;
+		vsi->alloc_rxq = 1;
+		vsi->num_q_vectors = 1;
+		vsi->irq_dyn_alloc = true;
+		break;
 	case ICE_VSI_VF:
 		if (vf->num_req_qs)
 			vf->num_vf_qs = vf->num_req_qs;
@@ -423,7 +433,7 @@ err_out:
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 */
-static void ice_vsi_free(struct ice_vsi *vsi)
+void ice_vsi_free(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = NULL;
 	struct device *dev;
@@ -559,6 +569,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
 
 	switch (vsi->type) {
 	case ICE_VSI_PF:
+	case ICE_VSI_SF:
 		/* Setup default MSIX irq handler for VSI */
 		vsi->irq_handler = ice_msix_clean_rings;
 		break;
@@ -595,7 +606,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
+struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
 {
 	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_vsi *vsi = NULL;
@@ -889,6 +900,11 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
 				      max_rss_size);
 		vsi->rss_lut_type = ICE_LUT_PF;
 		break;
+	case ICE_VSI_SF:
+		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
+		vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
+		vsi->rss_lut_type = ICE_LUT_VSI;
+		break;
 	case ICE_VSI_VF:
 		/* VF VSI will get a small RSS table.
 		 * For VSI_LUT, LUT size should be set to 64 bytes.
@@ -1136,6 +1152,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
 		break;
 	case ICE_VSI_VF:
+	case ICE_VSI_SF:
 		/* VF VSI will get a small RSS table which is a VSI LUT type */
 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
 		break;
@@ -1214,6 +1231,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
 	case ICE_VSI_PF:
 		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
 		break;
+	case ICE_VSI_SF:
 	case ICE_VSI_CHNL:
 		ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
 		break;
@@ -2095,6 +2113,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
 	case ICE_VSI_CHNL:
 	case ICE_VSI_LB:
 	case ICE_VSI_PF:
+	case ICE_VSI_SF:
 		max_agg_nodes = ICE_MAX_PF_AGG_NODES;
 		agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
 		agg_node_iter = &pf->pf_agg_node[0];
@@ -2264,6 +2283,7 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
 
 	switch (vsi->type) {
 	case ICE_VSI_CTRL:
+	case ICE_VSI_SF:
 	case ICE_VSI_PF:
 		ret = ice_vsi_alloc_q_vectors(vsi);
 		if (ret)
@@ -2648,7 +2668,8 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
 
 	clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
 
-	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+	if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+			    vsi->type == ICE_VSI_SF)) {
 		if (netif_running(vsi->netdev)) {
 			if (!locked)
 				rtnl_lock();
@@ -2676,7 +2697,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
 
 	set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
 
-	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+	if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+			    vsi->type == ICE_VSI_SF)) {
 		if (netif_running(vsi->netdev)) {
 			if (!locked)
 				rtnl_lock();
@@ -2747,6 +2769,26 @@ void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
 }
 
 /**
+ * ice_napi_add - register NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be registered
+ *
+ * This function is only called in the driver's load path. Registering the NAPI
+ * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
+ * reset/rebuild, etc.)
+ */
+void ice_napi_add(struct ice_vsi *vsi)
+{
+	int v_idx;
+
+	if (!vsi->netdev)
+		return;
+
+	ice_for_each_q_vector(vsi, v_idx)
+		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
+			       ice_napi_poll);
+}
+
+/**
 * ice_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 36d86535695d..1a6cfc8693ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -45,6 +45,7 @@ struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
 
 void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
+void ice_napi_add(struct ice_vsi *vsi);
 
 void ice_vsi_clear_napi_queues(struct ice_vsi *vsi);
 
@@ -59,6 +60,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
 int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
 int ice_vsi_cfg(struct ice_vsi *vsi);
+struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf);
+void ice_vsi_free(struct ice_vsi *vsi);
 
 bool ice_is_reset_in_progress(unsigned long *state);
 int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c7db88b517da..59c0e88153c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -15,6 +15,7 @@
 #include "ice_dcb_nl.h"
 #include "devlink/devlink.h"
 #include "devlink/devlink_port.h"
+#include "ice_sf_eth.h"
 #include "ice_hwmon.h"
 /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
@@ -2974,6 +2975,9 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
 	if (avail < cpus / 2)
 		return -ENOMEM;
 
+	if (vsi->type == ICE_VSI_SF)
+		avail = vsi->alloc_txq;
+
 	vsi->num_xdp_txq = min_t(u16, avail, cpus);
 
 	if (vsi->num_xdp_txq < cpus)
@@ -3089,14 +3093,14 @@ static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
 * @dev: netdevice
 * @xdp: XDP command
 */
-static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 {
 	struct ice_netdev_priv *np = netdev_priv(dev);
 	struct ice_vsi *vsi = np->vsi;
 	int ret;
 
-	if (vsi->type != ICE_VSI_PF) {
-		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
+	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
+		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
 		return -EINVAL;
 	}
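
Moving ice_napi_add() into ice_lib.c (above, with the matching removal from ice_main.c below) lets the subfunction netdev path reuse the same load-path NAPI registration. For orientation, in current kernels netif_napi_add() takes just the netdev, the napi_struct and the poll callback; a generic bring-up sketch with hypothetical "priv" and "my_poll":

	netif_napi_add(ndev, &priv->napi, my_poll);	/* register poll callback */
	napi_enable(&priv->napi);			/* arm it before IRQs fire */
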
- */ -static void ice_napi_add(struct ice_vsi *vsi) -{ - int v_idx; - - if (!vsi->netdev) - return; - - ice_for_each_q_vector(vsi, v_idx) - netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, - ice_napi_poll); -} - -/** * ice_set_ops - set netdev and ethtools ops for the given netdev * @vsi: the VSI associated with the new netdev */ @@ -3608,7 +3592,7 @@ static void ice_set_ops(struct ice_vsi *vsi) * ice_set_netdev_features - set features for the given netdev * @netdev: netdev instance */ -static void ice_set_netdev_features(struct net_device *netdev) +void ice_set_netdev_features(struct net_device *netdev) { struct ice_pf *pf = ice_netdev_to_pf(netdev); bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); @@ -3790,8 +3774,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) * * net_device_ops implementation for adding VLAN IDs */ -static int -ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi_vlan_ops *vlan_ops; @@ -3853,8 +3836,7 @@ finish: * * net_device_ops implementation for removing VLAN IDs */ -static int -ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi_vlan_ops *vlan_ops; @@ -4023,6 +4005,9 @@ static void ice_deinit_pf(struct ice_pf *pf) if (pf->ptp.clock) ptp_clock_unregister(pf->ptp.clock); + + xa_destroy(&pf->dyn_ports); + xa_destroy(&pf->sf_nums); } /** @@ -4116,6 +4101,9 @@ static int ice_init_pf(struct ice_pf *pf) hash_init(pf->vfs.table); ice_mbx_init_snapshot(&pf->hw); + xa_init(&pf->dyn_ports); + xa_init(&pf->sf_nums); + return 0; } @@ -5458,6 +5446,7 @@ static void ice_remove(struct pci_dev *pdev) ice_remove_arfs(pf); devl_lock(priv_to_devlink(pf)); + ice_dealloc_all_dynamic_ports(pf); ice_deinit_devlink(pf); ice_unload(pf); @@ -5946,8 +5935,16 @@ static int __init ice_module_init(void) goto err_dest_lag_wq; } + status = ice_sf_driver_register(); + if (status) { + pr_err("Failed to register SF driver, err %d\n", status); + goto err_sf_driver; + } + return 0; +err_sf_driver: + pci_unregister_driver(&ice_driver); err_dest_lag_wq: destroy_workqueue(ice_lag_wq); ice_debugfs_exit(); @@ -5965,6 +5962,7 @@ module_init(ice_module_init); */ static void __exit ice_module_exit(void) { + ice_sf_driver_unregister(); pci_unregister_driver(&ice_driver); ice_debugfs_exit(); destroy_workqueue(ice_wq); @@ -6766,7 +6764,8 @@ static int ice_up_complete(struct ice_vsi *vsi) if (vsi->port_info && (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && - vsi->netdev && vsi->type == ICE_VSI_PF) { + ((vsi->netdev && (vsi->type == ICE_VSI_PF || + vsi->type == ICE_VSI_SF)))) { ice_print_link_msg(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); @@ -7124,7 +7123,6 @@ void ice_update_pf_stats(struct ice_pf *pf) * @netdev: network interface device structure * @stats: main device statistics structure */ -static void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct ice_netdev_priv *np = netdev_priv(netdev); @@ -7465,7 +7463,7 @@ int ice_vsi_open(struct ice_vsi *vsi) ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); - if (vsi->type == ICE_VSI_PF) { + if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) { /* Notify the stack of the actual queue counts. 
*/ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); if (err) @@ -7801,7 +7799,7 @@ clear_recovery: * * Returns 0 on success, negative on failure */ -static int ice_change_mtu(struct net_device *netdev, int new_mtu) +int ice_change_mtu(struct net_device *netdev, int new_mtu) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -8225,7 +8223,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, * @netdev: network interface device structure * @txqueue: Tx queue */ -static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) +void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_tx_ring *tx_ring = NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index bdda3401e343..00d4a9125dfa 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -59,12 +59,13 @@ static void ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_repr *repr = np->repr; struct ice_eth_stats *eth_stats; struct ice_vsi *vsi; - if (ice_is_vf_disabled(np->repr->vf)) + if (repr->ops.ready(repr)) return; - vsi = np->repr->src_vsi; + vsi = repr->src_vsi; ice_update_vsi_stats(vsi); eth_stats = &vsi->eth_stats; @@ -93,7 +94,7 @@ struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev) } /** - * ice_repr_open - Enable port representor's network interface + * ice_repr_vf_open - Enable port representor's network interface * @netdev: network interface device structure * * The open entry point is called when a port representor's network @@ -102,7 +103,7 @@ struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev) * * Returns 0 on success */ -static int ice_repr_open(struct net_device *netdev) +static int ice_repr_vf_open(struct net_device *netdev) { struct ice_repr *repr = ice_netdev_to_repr(netdev); struct ice_vf *vf; @@ -118,8 +119,16 @@ static int ice_repr_open(struct net_device *netdev) return 0; } +static int ice_repr_sf_open(struct net_device *netdev) +{ + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + + return 0; +} + /** - * ice_repr_stop - Disable port representor's network interface + * ice_repr_vf_stop - Disable port representor's network interface * @netdev: network interface device structure * * The stop entry point is called when a port representor's network @@ -128,7 +137,7 @@ static int ice_repr_open(struct net_device *netdev) * * Returns 0 on success */ -static int ice_repr_stop(struct net_device *netdev) +static int ice_repr_vf_stop(struct net_device *netdev) { struct ice_repr *repr = ice_netdev_to_repr(netdev); struct ice_vf *vf; @@ -144,6 +153,14 @@ static int ice_repr_stop(struct net_device *netdev) return 0; } +static int ice_repr_sf_stop(struct net_device *netdev) +{ + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + return 0; +} + /** * ice_repr_sp_stats64 - get slow path stats for port representor * @dev: network interface device structure @@ -245,10 +262,20 @@ ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type, } } -static const struct net_device_ops ice_repr_netdev_ops = { +static const struct net_device_ops ice_repr_vf_netdev_ops = { + .ndo_get_stats64 = ice_repr_get_stats64, + .ndo_open = ice_repr_vf_open, + .ndo_stop = ice_repr_vf_stop, + .ndo_start_xmit = ice_eswitch_port_start_xmit, + 
.ndo_setup_tc = ice_repr_setup_tc, + .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats, + .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats, +}; + +static const struct net_device_ops ice_repr_sf_netdev_ops = { .ndo_get_stats64 = ice_repr_get_stats64, - .ndo_open = ice_repr_open, - .ndo_stop = ice_repr_stop, + .ndo_open = ice_repr_sf_open, + .ndo_stop = ice_repr_sf_stop, .ndo_start_xmit = ice_eswitch_port_start_xmit, .ndo_setup_tc = ice_repr_setup_tc, .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats, @@ -261,18 +288,20 @@ static const struct net_device_ops ice_repr_netdev_ops = { */ bool ice_is_port_repr_netdev(const struct net_device *netdev) { - return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops); + return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops || + netdev->netdev_ops == &ice_repr_sf_netdev_ops); } /** * ice_repr_reg_netdev - register port representor netdev * @netdev: pointer to port representor netdev + * @ops: new ops for netdev */ static int -ice_repr_reg_netdev(struct net_device *netdev) +ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops) { eth_hw_addr_random(netdev); - netdev->netdev_ops = &ice_repr_netdev_ops; + netdev->netdev_ops = ops; ice_set_ethtool_repr_ops(netdev); netdev->hw_features |= NETIF_F_HW_TC; @@ -283,57 +312,56 @@ ice_repr_reg_netdev(struct net_device *netdev) return register_netdev(netdev); } -static void ice_repr_remove_node(struct devlink_port *devlink_port) +static int ice_repr_ready_vf(struct ice_repr *repr) +{ + return !ice_check_vf_ready_for_cfg(repr->vf); +} + +static int ice_repr_ready_sf(struct ice_repr *repr) { - devl_rate_leaf_destroy(devlink_port); + return !repr->sf->active; } /** - * ice_repr_rem - remove representor from VF + * ice_repr_destroy - remove representor from VF * @repr: pointer to representor structure */ -static void ice_repr_rem(struct ice_repr *repr) +void ice_repr_destroy(struct ice_repr *repr) { free_percpu(repr->stats); free_netdev(repr->netdev); kfree(repr); } -/** - * ice_repr_rem_vf - remove representor from VF - * @repr: pointer to representor structure - */ -void ice_repr_rem_vf(struct ice_repr *repr) +static void ice_repr_rem_vf(struct ice_repr *repr) { - ice_repr_remove_node(&repr->vf->devlink_port); ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac); unregister_netdev(repr->netdev); ice_devlink_destroy_vf_port(repr->vf); ice_virtchnl_set_dflt_ops(repr->vf); - ice_repr_rem(repr); } -static void ice_repr_set_tx_topology(struct ice_pf *pf) +static void ice_repr_rem_sf(struct ice_repr *repr) { - struct devlink *devlink; + unregister_netdev(repr->netdev); + ice_devlink_destroy_sf_port(repr->sf); +} +static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink) +{ /* only export if ADQ and DCB disabled and eswitch enabled*/ if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) || !ice_is_switchdev_running(pf)) return; - devlink = priv_to_devlink(pf); ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf)); } /** - * ice_repr_add - add representor for generic VSI - * @pf: pointer to PF structure + * ice_repr_create - add representor for generic VSI * @src_vsi: pointer to VSI structure of device to represent - * @parent_mac: device MAC address */ -static struct ice_repr * -ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac) +static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi) { struct ice_netdev_priv *np; struct ice_repr *repr; @@ -360,7 +388,10 @@ ice_repr_add(struct ice_pf *pf, struct 
ice_vsi *src_vsi, const u8 *parent_mac) np = netdev_priv(repr->netdev); np->repr = repr; - ether_addr_copy(repr->parent_mac, parent_mac); + repr->netdev->min_mtu = ETH_MIN_MTU; + repr->netdev->max_mtu = ICE_MAX_MTU; + + SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back)); return repr; @@ -371,34 +402,18 @@ err_alloc: return ERR_PTR(err); } -struct ice_repr *ice_repr_add_vf(struct ice_vf *vf) +static int ice_repr_add_vf(struct ice_repr *repr) { - struct ice_repr *repr; - struct ice_vsi *vsi; + struct ice_vf *vf = repr->vf; + struct devlink *devlink; int err; - vsi = ice_get_vf_vsi(vf); - if (!vsi) - return ERR_PTR(-ENOENT); - err = ice_devlink_create_vf_port(vf); if (err) - return ERR_PTR(err); - - repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr); - if (IS_ERR(repr)) { - err = PTR_ERR(repr); - goto err_repr_add; - } - - repr->vf = vf; - - repr->netdev->min_mtu = ETH_MIN_MTU; - repr->netdev->max_mtu = ICE_MAX_MTU; + return err; - SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf)); SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port); - err = ice_repr_reg_netdev(repr->netdev); + err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops); if (err) goto err_netdev; @@ -407,17 +422,97 @@ struct ice_repr *ice_repr_add_vf(struct ice_vf *vf) goto err_cfg_vsi; ice_virtchnl_set_repr_ops(vf); - ice_repr_set_tx_topology(vf->pf); - return repr; + devlink = priv_to_devlink(vf->pf); + ice_repr_set_tx_topology(vf->pf, devlink); + + return 0; err_cfg_vsi: unregister_netdev(repr->netdev); err_netdev: - ice_repr_rem(repr); -err_repr_add: ice_devlink_destroy_vf_port(vf); - return ERR_PTR(err); + return err; +} + +/** + * ice_repr_create_vf - add representor for VF VSI + * @vf: VF to create port representor on + * + * Set correct representor type for VF and functions pointer. + * + * Return: created port representor on success, error otherwise + */ +struct ice_repr *ice_repr_create_vf(struct ice_vf *vf) +{ + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + struct ice_repr *repr; + + if (!vsi) + return ERR_PTR(-EINVAL); + + repr = ice_repr_create(vsi); + if (!repr) + return ERR_PTR(-ENOMEM); + + repr->type = ICE_REPR_TYPE_VF; + repr->vf = vf; + repr->ops.add = ice_repr_add_vf; + repr->ops.rem = ice_repr_rem_vf; + repr->ops.ready = ice_repr_ready_vf; + + ether_addr_copy(repr->parent_mac, vf->hw_lan_addr); + + return repr; +} + +static int ice_repr_add_sf(struct ice_repr *repr) +{ + struct ice_dynamic_port *sf = repr->sf; + int err; + + err = ice_devlink_create_sf_port(sf); + if (err) + return err; + + SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port); + err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops); + if (err) + goto err_netdev; + + ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back)); + + return 0; + +err_netdev: + ice_devlink_destroy_sf_port(sf); + return err; +} + +/** + * ice_repr_create_sf - add representor for SF VSI + * @sf: SF to create port representor on + * + * Set correct representor type for SF and functions pointer. 
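+ * Unlike the VF flavor, the SF representor is backed by an ice_dynamic_port
+ * (the repr->sf union member) rather than an ice_vf, and its ready/add/rem
+ * callbacks operate on that dynamic port.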
+ * + * Return: created port representor on success, error otherwise + */ +struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf) +{ + struct ice_repr *repr = ice_repr_create(sf->vsi); + + if (!repr) + return ERR_PTR(-ENOMEM); + + repr->type = ICE_REPR_TYPE_SF; + repr->sf = sf; + repr->ops.add = ice_repr_add_sf; + repr->ops.rem = ice_repr_rem_sf; + repr->ops.ready = ice_repr_ready_sf; + + ether_addr_copy(repr->parent_mac, sf->hw_addr); + + return repr; } struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id) diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h index 488661b2900b..35bd93165e1e 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.h +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -15,19 +15,35 @@ struct ice_repr_pcpu_stats { u64 tx_drops; }; +enum ice_repr_type { + ICE_REPR_TYPE_VF, + ICE_REPR_TYPE_SF, +}; + struct ice_repr { struct ice_vsi *src_vsi; - struct ice_vf *vf; struct net_device *netdev; struct metadata_dst *dst; struct ice_esw_br_port *br_port; struct ice_repr_pcpu_stats __percpu *stats; u32 id; u8 parent_mac[ETH_ALEN]; + enum ice_repr_type type; + union { + struct ice_vf *vf; + struct ice_dynamic_port *sf; + }; + struct { + int (*add)(struct ice_repr *repr); + void (*rem)(struct ice_repr *repr); + int (*ready)(struct ice_repr *repr); + } ops; }; -struct ice_repr *ice_repr_add_vf(struct ice_vf *vf); -void ice_repr_rem_vf(struct ice_repr *repr); +struct ice_repr *ice_repr_create_vf(struct ice_vf *vf); +struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf); + +void ice_repr_destroy(struct ice_repr *repr); void ice_repr_start_tx_queues(struct ice_repr *repr); void ice_repr_stop_tx_queues(struct ice_repr *repr); diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c new file mode 100644 index 000000000000..d00389c405c4 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Intel Corporation. 
*/ +#include "ice.h" +#include "ice_lib.h" +#include "ice_txrx.h" +#include "ice_fltr.h" +#include "ice_sf_eth.h" +#include "devlink/devlink_port.h" +#include "devlink/devlink.h" + +static const struct net_device_ops ice_sf_netdev_ops = { + .ndo_open = ice_open, + .ndo_stop = ice_stop, + .ndo_start_xmit = ice_start_xmit, + .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, + .ndo_change_mtu = ice_change_mtu, + .ndo_get_stats64 = ice_get_stats64, + .ndo_tx_timeout = ice_tx_timeout, + .ndo_bpf = ice_xdp, + .ndo_xdp_xmit = ice_xdp_xmit, + .ndo_xsk_wakeup = ice_xsk_wakeup, +}; + +/** + * ice_sf_cfg_netdev - Allocate, configure and register a netdev + * @dyn_port: subfunction associated with configured netdev + * @devlink_port: subfunction devlink port to be linked with netdev + * + * Return: 0 on success, negative value on failure + */ +static int ice_sf_cfg_netdev(struct ice_dynamic_port *dyn_port, + struct devlink_port *devlink_port) +{ + struct ice_vsi *vsi = dyn_port->vsi; + struct ice_netdev_priv *np; + struct net_device *netdev; + int err; + + netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, + vsi->alloc_rxq); + if (!netdev) + return -ENOMEM; + + SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev); + set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); + vsi->netdev = netdev; + np = netdev_priv(netdev); + np->vsi = vsi; + + ice_set_netdev_features(netdev); + + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY | + NETDEV_XDP_ACT_RX_SG; + netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD; + + eth_hw_addr_set(netdev, dyn_port->hw_addr); + ether_addr_copy(netdev->perm_addr, dyn_port->hw_addr); + netdev->netdev_ops = &ice_sf_netdev_ops; + SET_NETDEV_DEVLINK_PORT(netdev, devlink_port); + + err = register_netdev(netdev); + if (err) { + free_netdev(netdev); + vsi->netdev = NULL; + return -ENOMEM; + } + set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + return 0; +} + +static void ice_sf_decfg_netdev(struct ice_vsi *vsi) +{ + unregister_netdev(vsi->netdev); + clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); + free_netdev(vsi->netdev); + vsi->netdev = NULL; + clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); +} + +/** + * ice_sf_dev_probe - subfunction driver probe function + * @adev: pointer to the auxiliary device + * @id: pointer to the auxiliary_device id + * + * Configure VSI and netdev resources for the subfunction device. + * + * Return: zero on success or an error code on failure. 
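+ *
+ * The probe sequence is: mark the VSI as ICE_VSI_SF, allocate the SF devlink
+ * instance, then under the devlink instance lock configure the VSI, update
+ * the eswitch representor, create the SF-dev devlink port, register the
+ * netdev, link the dynamic port to the new devlink instance, and add NAPI.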
+ */ +static int ice_sf_dev_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev); + struct ice_dynamic_port *dyn_port = sf_dev->dyn_port; + struct ice_vsi *vsi = dyn_port->vsi; + struct ice_pf *pf = dyn_port->pf; + struct device *dev = &adev->dev; + struct ice_sf_priv *priv; + struct devlink *devlink; + int err; + + vsi->type = ICE_VSI_SF; + vsi->port_info = pf->hw.port_info; + vsi->flags = ICE_VSI_FLAG_INIT; + + priv = ice_allocate_sf(&adev->dev, pf); + if (!priv) { + dev_err(dev, "Subfunction devlink alloc failed"); + return -ENOMEM; + } + + priv->dev = sf_dev; + sf_dev->priv = priv; + devlink = priv_to_devlink(priv); + + devl_lock(devlink); + + err = ice_vsi_cfg(vsi); + if (err) { + dev_err(dev, "Subfunction vsi config failed"); + goto err_free_devlink; + } + vsi->sf = dyn_port; + + ice_eswitch_update_repr(&dyn_port->repr_id, vsi); + + err = ice_devlink_create_sf_dev_port(sf_dev); + if (err) { + dev_err(dev, "Cannot add ice virtual devlink port for subfunction"); + goto err_vsi_decfg; + } + + err = ice_sf_cfg_netdev(dyn_port, &sf_dev->priv->devlink_port); + if (err) { + dev_err(dev, "Subfunction netdev config failed"); + goto err_devlink_destroy; + } + + err = devl_port_fn_devlink_set(&dyn_port->devlink_port, devlink); + if (err) { + dev_err(dev, "Can't link devlink instance to SF devlink port"); + goto err_netdev_decfg; + } + + ice_napi_add(vsi); + + devl_register(devlink); + devl_unlock(devlink); + + dyn_port->attached = true; + + return 0; + +err_netdev_decfg: + ice_sf_decfg_netdev(vsi); +err_devlink_destroy: + ice_devlink_destroy_sf_dev_port(sf_dev); +err_vsi_decfg: + ice_vsi_decfg(vsi); +err_free_devlink: + devl_unlock(devlink); + devlink_free(devlink); + return err; +} + +/** + * ice_sf_dev_remove - subfunction driver remove function + * @adev: pointer to the auxiliary device + * + * Deinitalize VSI and netdev resources for the subfunction device. + */ +static void ice_sf_dev_remove(struct auxiliary_device *adev) +{ + struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev); + struct ice_dynamic_port *dyn_port = sf_dev->dyn_port; + struct ice_vsi *vsi = dyn_port->vsi; + struct devlink *devlink; + + devlink = priv_to_devlink(sf_dev->priv); + devl_lock(devlink); + + ice_vsi_close(vsi); + + ice_sf_decfg_netdev(vsi); + ice_devlink_destroy_sf_dev_port(sf_dev); + devl_unregister(devlink); + devl_unlock(devlink); + devlink_free(devlink); + ice_vsi_decfg(vsi); + + dyn_port->attached = false; +} + +static const struct auxiliary_device_id ice_sf_dev_id_table[] = { + { .name = "ice.sf", }, + { }, +}; + +MODULE_DEVICE_TABLE(auxiliary, ice_sf_dev_id_table); + +static struct auxiliary_driver ice_sf_driver = { + .name = "sf", + .probe = ice_sf_dev_probe, + .remove = ice_sf_dev_remove, + .id_table = ice_sf_dev_id_table +}; + +static DEFINE_XARRAY_ALLOC1(ice_sf_aux_id); + +/** + * ice_sf_driver_register - Register new auxiliary subfunction driver + * + * Return: zero on success or an error code on failure. 
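+ *
+ * Called from ice_module_init(); if registration fails, module init unwinds
+ * by unregistering the PCI driver (see the err_sf_driver label in ice_main.c).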
+ */ +int ice_sf_driver_register(void) +{ + return auxiliary_driver_register(&ice_sf_driver); +} + +/** + * ice_sf_driver_unregister - Unregister new auxiliary subfunction driver + * + */ +void ice_sf_driver_unregister(void) +{ + auxiliary_driver_unregister(&ice_sf_driver); +} + +/** + * ice_sf_dev_release - Release device associated with auxiliary device + * @device: pointer to the device + * + * Since most of the code for subfunction deactivation is handled in + * the remove handler, here just free tracking resources. + */ +static void ice_sf_dev_release(struct device *device) +{ + struct auxiliary_device *adev = to_auxiliary_dev(device); + struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev); + + xa_erase(&ice_sf_aux_id, adev->id); + kfree(sf_dev); +} + +/** + * ice_sf_eth_activate - Activate Ethernet subfunction port + * @dyn_port: the dynamic port instance for this subfunction + * @extack: extack for reporting error messages + * + * Activate the dynamic port as an Ethernet subfunction. Setup the netdev + * resources associated and initialize the auxiliary device. + * + * Return: zero on success or an error code on failure. + */ +int +ice_sf_eth_activate(struct ice_dynamic_port *dyn_port, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = dyn_port->pf; + struct ice_sf_dev *sf_dev; + struct pci_dev *pdev; + int err; + u32 id; + + err = xa_alloc(&ice_sf_aux_id, &id, NULL, xa_limit_32b, + GFP_KERNEL); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF ID"); + return err; + } + + sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL); + if (!sf_dev) { + err = -ENOMEM; + NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF memory"); + goto xa_erase; + } + pdev = pf->pdev; + + sf_dev->dyn_port = dyn_port; + sf_dev->adev.id = id; + sf_dev->adev.name = "sf"; + sf_dev->adev.dev.release = ice_sf_dev_release; + sf_dev->adev.dev.parent = &pdev->dev; + + err = auxiliary_device_init(&sf_dev->adev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to initialize SF device"); + goto sf_dev_free; + } + + err = auxiliary_device_add(&sf_dev->adev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to add SF device"); + goto aux_dev_uninit; + } + + dyn_port->sf_dev = sf_dev; + + return 0; + +aux_dev_uninit: + auxiliary_device_uninit(&sf_dev->adev); +sf_dev_free: + kfree(sf_dev); +xa_erase: + xa_erase(&ice_sf_aux_id, id); + + return err; +} + +/** + * ice_sf_eth_deactivate - Deactivate Ethernet subfunction port + * @dyn_port: the dynamic port instance for this subfunction + * + * Deactivate the Ethernet subfunction, removing its auxiliary device and the + * associated resources. + */ +void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port) +{ + struct ice_sf_dev *sf_dev = dyn_port->sf_dev; + + auxiliary_device_delete(&sf_dev->adev); + auxiliary_device_uninit(&sf_dev->adev); +} diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.h b/drivers/net/ethernet/intel/ice/ice_sf_eth.h new file mode 100644 index 000000000000..c558cad0a183 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2024, Intel Corporation. 
*/ + +#ifndef _ICE_SF_ETH_H_ +#define _ICE_SF_ETH_H_ + +#include <linux/auxiliary_bus.h> +#include "ice.h" + +struct ice_sf_dev { + struct auxiliary_device adev; + struct ice_dynamic_port *dyn_port; + struct ice_sf_priv *priv; +}; + +struct ice_sf_priv { + struct ice_sf_dev *dev; + struct devlink_port devlink_port; +}; + +static inline struct +ice_sf_dev *ice_adev_to_sf_dev(struct auxiliary_device *adev) +{ + return container_of(adev, struct ice_sf_dev, adev); +} + +int ice_sf_driver_register(void); +void ice_sf_driver_unregister(void); + +int ice_sf_eth_activate(struct ice_dynamic_port *dyn_port, + struct netlink_ext_ack *extack); +void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port); +#endif /* _ICE_SF_ETH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c new file mode 100644 index 000000000000..3d7e96721cf9 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023, Intel Corporation. */ + +#include "ice_vsi_vlan_ops.h" +#include "ice_vsi_vlan_lib.h" +#include "ice_vlan_mode.h" +#include "ice.h" +#include "ice_sf_vsi_vlan_ops.h" + +void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi) +{ + struct ice_vsi_vlan_ops *vlan_ops; + + if (ice_is_dvm_ena(&vsi->back->hw)) + vlan_ops = &vsi->outer_vlan_ops; + else + vlan_ops = &vsi->inner_vlan_ops; + + vlan_ops->add_vlan = ice_vsi_add_vlan; + vlan_ops->del_vlan = ice_vsi_del_vlan; +} diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h new file mode 100644 index 000000000000..8c44eafceea0 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023, Intel Corporation. 
*/ + +#ifndef _ICE_SF_VSI_VLAN_OPS_H_ +#define _ICE_SF_VSI_VLAN_OPS_H_ + +#include "ice_vsi_vlan_ops.h" + +struct ice_vsi; + +void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi); + +#endif /* _ICE_SF_VSI_VLAN_OPS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 55ef33208456..e34fe2516ccc 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -175,7 +175,7 @@ void ice_free_vfs(struct ice_pf *pf) ice_for_each_vf(pf, bkt, vf) { mutex_lock(&vf->cfg_lock); - ice_eswitch_detach(pf, vf); + ice_eswitch_detach_vf(pf, vf); ice_dis_vf_qs(vf); if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { @@ -598,7 +598,7 @@ static int ice_start_vfs(struct ice_pf *pf) goto teardown; } - retval = ice_eswitch_attach(pf, vf); + retval = ice_eswitch_attach_vf(pf, vf); if (retval) { dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d", vf->vf_id, retval); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index c9bc3f1add5d..8208055d6e7f 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -2368,7 +2368,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) ICE_TXD_CTX_QW1_CMD_S); ice_tstamp(tx_ring, skb, first, &offload); - if (ice_is_switchdev_running(vsi->back)) + if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF) ice_eswitch_set_target_vsi(skb, &offload); if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index b9e443232335..45768796691f 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -159,6 +159,7 @@ enum ice_vsi_type { ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */ ICE_VSI_CHNL = 4, ICE_VSI_LB = 6, + ICE_VSI_SF = 9, }; struct ice_link_status { diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 5635e9da2212..a69e91f88d81 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -766,7 +766,7 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_for_each_vf(pf, bkt, vf) { mutex_lock(&vf->cfg_lock); - ice_eswitch_detach(pf, vf); + ice_eswitch_detach_vf(pf, vf); vf->driver_caps = 0; ice_vc_set_default_allowlist(vf); @@ -782,7 +782,7 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_vf_rebuild_vsi(vf); ice_vf_post_vsi_rebuild(vf); - ice_eswitch_attach(pf, vf); + ice_eswitch_attach_vf(pf, vf); mutex_unlock(&vf->cfg_lock); } diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c index 7aae7fdcfcdb..8c7a9b41fb63 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c @@ -3,6 +3,7 @@ #include "ice_pf_vsi_vlan_ops.h" #include "ice_vf_vsi_vlan_ops.h" +#include "ice_sf_vsi_vlan_ops.h" #include "ice_lib.h" #include "ice.h" @@ -77,6 +78,9 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi) case ICE_VSI_VF: ice_vf_vsi_init_vlan_ops(vsi); break; + case ICE_VSI_SF: + ice_sf_vsi_init_vlan_ops(vsi); + break; default: dev_dbg(ice_pf_to_dev(vsi->back), "%s does not support VLAN operations\n", ice_vsi_type_str(vsi->type)); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 5dee829bfc47..334ae945d640 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -289,7 +289,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) { int err; - if (vsi->type != ICE_VSI_PF) + if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) return -EINVAL; if (qid >= vsi->netdev->real_num_rx_queues || diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 685335832a93..ea6070180c96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -172,6 +172,16 @@ config MLX5_SW_STEERING help Build support for software-managed steering in the NIC. +config MLX5_HW_STEERING + bool "Mellanox Technologies hardware-managed steering" + depends on MLX5_CORE_EN && MLX5_ESWITCH + default y + help + Build support for Hardware-Managed Flow Steering (HMFS) in the NIC. + HMFS is a new approach to managing steering rules where STEs are + written to ICM by HW (as opposed to SW in software-managed steering), + which allows higher rate of rule insertion. + config MLX5_SF bool "Mellanox Technologies subfunction device support using auxiliary device" depends on MLX5_CORE && MLX5_CORE_EN diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 1289475e7be7..5912f7e614f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -119,6 +119,27 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o steering/dr_action.o steering/fs_dr.o \ steering/dr_definer.o steering/dr_ptrn.o \ steering/dr_arg.o steering/dr_dbg.o lib/smfs.o + +# +# HW Steering +# +mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/mlx5hws_cmd.o \ + steering/hws/mlx5hws_context.o \ + steering/hws/mlx5hws_pat_arg.o \ + steering/hws/mlx5hws_buddy.o \ + steering/hws/mlx5hws_pool.o \ + steering/hws/mlx5hws_table.o \ + steering/hws/mlx5hws_action.o \ + steering/hws/mlx5hws_rule.o \ + steering/hws/mlx5hws_matcher.o \ + steering/hws/mlx5hws_send.o \ + steering/hws/mlx5hws_definer.o \ + steering/hws/mlx5hws_bwc.o \ + steering/hws/mlx5hws_debug.o \ + steering/hws/mlx5hws_vport.o \ + steering/hws/mlx5hws_bwc_complex.o + + # # SF device # diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 78eb6b7097e1..6201647d6156 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -110,7 +110,9 @@ enum fs_flow_table_type { FS_FT_RDMA_RX = 0X7, FS_FT_RDMA_TX = 0X8, FS_FT_PORT_SEL = 0X9, - FS_FT_MAX_TYPE = FS_FT_PORT_SEL, + FS_FT_FDB_RX = 0xa, + FS_FT_FDB_TX = 0xb, + FS_FT_MAX_TYPE = FS_FT_FDB_TX, }; enum fs_flow_table_op_mod { @@ -368,7 +370,9 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node); (type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \ (type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \ (type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \ - (BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\ + (type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \ + (type == FS_FT_FDB_TX) ? 
MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \ + (BUILD_BUG_ON_ZERO(FS_FT_FDB_TX != FS_FT_MAX_TYPE))\ ) #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 8c2a34a0d6be..baefb9a3fa05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -251,9 +251,9 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev, output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level); output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out, - flow_table_context.sw_owner_icm_root_1); + flow_table_context.sws.sw_owner_icm_root_1); output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out, - flow_table_context.sw_owner_icm_root_0); + flow_table_context.sws.sw_owner_icm_root_0); return 0; } @@ -480,15 +480,15 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev, */ if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) { MLX5_SET64(flow_table_context, ft_mdev, - sw_owner_icm_root_0, attr->icm_addr_rx); + sws.sw_owner_icm_root_0, attr->icm_addr_rx); } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) { MLX5_SET64(flow_table_context, ft_mdev, - sw_owner_icm_root_0, attr->icm_addr_tx); + sws.sw_owner_icm_root_0, attr->icm_addr_tx); } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) { MLX5_SET64(flow_table_context, ft_mdev, - sw_owner_icm_root_0, attr->icm_addr_rx); + sws.sw_owner_icm_root_0, attr->icm_addr_rx); MLX5_SET64(flow_table_context, ft_mdev, - sw_owner_icm_root_1, attr->icm_addr_tx); + sws.sw_owner_icm_root_1, attr->icm_addr_tx); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile new file mode 100644 index 000000000000..c78512eed8d7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +subdir-ccflags-y += -I$(src)/.. 
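The fs_core.h hunk above extends the per-type capability lookup with the new
FDB_RX/FDB_TX entries and retargets its BUILD_BUG_ON_ZERO() guard at the new
FS_FT_MAX_TYPE, so adding another fs_flow_table_type without extending the
ternary chain breaks the build. A standalone sketch of the pattern follows
(illustrative names, not the kernel macros; BUILD_BUG_ON_ZERO mirrors
include/linux/build_bug.h and relies on GNU C):

	#include <stdio.h>

	/* Negative unnamed-bitfield width => compile error when e is nonzero. */
	#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))

	enum ft_type { FT_RX, FT_TX, FT_MAX_TYPE = FT_TX };

	/* The chain must end by testing the current last enumerator; adding a
	 * new ft_type without extending the chain makes the guard fire. */
	#define FT_CAP(type, rx_cap, tx_cap)		\
		((type) == FT_RX ? (rx_cap) :		\
		 (type) == FT_TX ? (tx_cap) :		\
		 BUILD_BUG_ON_ZERO(FT_TX != FT_MAX_TYPE))

	int main(void)
	{
		printf("%d\n", FT_CAP(FT_TX, 10, 20)); /* prints 20 */
		return 0;
	}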
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h new file mode 100644 index 000000000000..9a85d4e12f77 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h @@ -0,0 +1,954 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_H_ +#define MLX5HWS_H_ + +struct mlx5hws_context; +struct mlx5hws_table; +struct mlx5hws_matcher; +struct mlx5hws_rule; + +enum mlx5hws_table_type { + MLX5HWS_TABLE_TYPE_FDB, + MLX5HWS_TABLE_TYPE_MAX, +}; + +enum mlx5hws_matcher_resource_mode { + /* Allocate resources based on number of rules with minimal failure probability */ + MLX5HWS_MATCHER_RESOURCE_MODE_RULE, + /* Allocate fixed size hash table based on given column and rows */ + MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE, +}; + +enum mlx5hws_action_type { + MLX5HWS_ACTION_TYP_LAST, + MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2, + MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2, + MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2, + MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3, + MLX5HWS_ACTION_TYP_DROP, + MLX5HWS_ACTION_TYP_MISS, + MLX5HWS_ACTION_TYP_TBL, + MLX5HWS_ACTION_TYP_CTR, + MLX5HWS_ACTION_TYP_TAG, + MLX5HWS_ACTION_TYP_MODIFY_HDR, + MLX5HWS_ACTION_TYP_VPORT, + MLX5HWS_ACTION_TYP_POP_VLAN, + MLX5HWS_ACTION_TYP_PUSH_VLAN, + MLX5HWS_ACTION_TYP_ASO_METER, + MLX5HWS_ACTION_TYP_INSERT_HEADER, + MLX5HWS_ACTION_TYP_REMOVE_HEADER, + MLX5HWS_ACTION_TYP_RANGE, + MLX5HWS_ACTION_TYP_SAMPLER, + MLX5HWS_ACTION_TYP_DEST_ARRAY, + MLX5HWS_ACTION_TYP_MAX, +}; + +enum mlx5hws_action_flags { + MLX5HWS_ACTION_FLAG_HWS_FDB = 1 << 0, + /* Shared action can be used over a few threads, since the + * data is written only once at the creation of the action. 
+ */ + MLX5HWS_ACTION_FLAG_SHARED = 1 << 1, +}; + +enum mlx5hws_action_aso_meter_color { + MLX5HWS_ACTION_ASO_METER_COLOR_RED = 0x0, + MLX5HWS_ACTION_ASO_METER_COLOR_YELLOW = 0x1, + MLX5HWS_ACTION_ASO_METER_COLOR_GREEN = 0x2, + MLX5HWS_ACTION_ASO_METER_COLOR_UNDEFINED = 0x3, +}; + +enum mlx5hws_send_queue_actions { + /* Start executing all pending queued rules */ + MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC = 1 << 0, + /* Start executing all pending queued rules wait till completion */ + MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC = 1 << 1, +}; + +struct mlx5hws_context_attr { + u16 queues; + u16 queue_size; + bool bwc; /* add support for backward compatible API*/ +}; + +struct mlx5hws_table_attr { + enum mlx5hws_table_type type; + u32 level; +}; + +enum mlx5hws_matcher_flow_src { + MLX5HWS_MATCHER_FLOW_SRC_ANY = 0x0, + MLX5HWS_MATCHER_FLOW_SRC_WIRE = 0x1, + MLX5HWS_MATCHER_FLOW_SRC_VPORT = 0x2, +}; + +enum mlx5hws_matcher_insert_mode { + MLX5HWS_MATCHER_INSERT_BY_HASH = 0x0, + MLX5HWS_MATCHER_INSERT_BY_INDEX = 0x1, +}; + +enum mlx5hws_matcher_distribute_mode { + MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH = 0x0, + MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR = 0x1, +}; + +struct mlx5hws_matcher_attr { + /* Processing priority inside table */ + u32 priority; + /* Provide all rules with unique rule_idx in num_log range to reduce locking */ + bool optimize_using_rule_idx; + /* Resource mode and corresponding size */ + enum mlx5hws_matcher_resource_mode mode; + /* Optimize insertion in case packet origin is the same for all rules */ + enum mlx5hws_matcher_flow_src optimize_flow_src; + /* Define the insertion and distribution modes for this matcher */ + enum mlx5hws_matcher_insert_mode insert_mode; + enum mlx5hws_matcher_distribute_mode distribute_mode; + /* Define whether the created matcher supports resizing into a bigger matcher */ + bool resizable; + union { + struct { + u8 sz_row_log; + u8 sz_col_log; + } table; + + struct { + u8 num_log; + } rule; + }; + /* Optional AT attach configuration - Max number of additional AT */ + u8 max_num_of_at_attach; +}; + +struct mlx5hws_rule_attr { + void *user_data; + /* Valid if matcher optimize_using_rule_idx is set or + * if matcher is configured to insert rules by index. + */ + u32 rule_idx; + u32 flow_source; + u16 queue_id; + u32 burst:1; +}; + +/* In actions that take offset, the offset is unique, pointing to a single + * resource and the user should not reuse the same index because data changing + * is not atomic. + */ +struct mlx5hws_rule_action { + struct mlx5hws_action *action; + union { + struct { + u32 value; + } tag; + + struct { + u32 offset; + } counter; + + struct { + u32 offset; + u8 *data; + } modify_header; + + struct { + u32 offset; + u8 hdr_idx; + u8 *data; + } reformat; + + struct { + __be32 vlan_hdr; + } push_vlan; + + struct { + u32 offset; + enum mlx5hws_action_aso_meter_color init_color; + } aso_meter; + }; +}; + +struct mlx5hws_action_reformat_header { + size_t sz; + void *data; +}; + +struct mlx5hws_action_insert_header { + struct mlx5hws_action_reformat_header hdr; + /* PRM start anchor to which header will be inserted */ + u8 anchor; + /* Header insertion offset in bytes, from the start + * anchor to the location where new header will be inserted. + */ + u8 offset; + /* Indicates this header insertion adds encapsulation header to the packet, + * requiring device to update offloaded fields (for example IPv4 total length). 
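+ * A tunnel encapsulation header (e.g. VXLAN) would typically set this,
+ * while a plain, non-encapsulating insertion would leave it clear.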
+ */ + bool encap; +}; + +struct mlx5hws_action_remove_header_attr { + /* PRM start anchor from which header will be removed */ + u8 anchor; + /* Header remove offset in bytes, from the start + * anchor to the location where remove header starts. + */ + u8 offset; + /* Indicates the removed header size in bytes */ + size_t size; +}; + +struct mlx5hws_action_mh_pattern { + /* Byte size of modify actions provided by "data" */ + size_t sz; + /* PRM format modify actions pattern */ + __be64 *data; +}; + +struct mlx5hws_action_dest_attr { + /* Required destination action to forward the packet */ + struct mlx5hws_action *dest; + /* Optional reformat action */ + struct mlx5hws_action *reformat; +}; + +/* Check whether HWS is supported + * + * @param[in] mdev + * The device to check. + * @return true if supported, false otherwise. + */ +static inline bool mlx5hws_is_supported(struct mlx5_core_dev *mdev) +{ + u8 ignore_flow_level_rtc_valid; + u8 wqe_based_flow_table_update; + + wqe_based_flow_table_update = + MLX5_CAP_GEN(mdev, wqe_based_flow_table_update_cap); + ignore_flow_level_rtc_valid = + MLX5_CAP_FLOWTABLE(mdev, + flow_table_properties_nic_receive.ignore_flow_level_rtc_valid); + + return wqe_based_flow_table_update && ignore_flow_level_rtc_valid; +} + +/* Open a context used for direct rule insertion using hardware steering. + * Each context can contain multiple tables of different types. + * + * @param[in] mdev + * The device to be used for HWS. + * @param[in] attr + * Attributes used for context open. + * @return pointer to mlx5hws_context on success NULL otherwise. + */ +struct mlx5hws_context * +mlx5hws_context_open(struct mlx5_core_dev *mdev, + struct mlx5hws_context_attr *attr); + +/* Close a context used for direct hardware steering. + * + * @param[in] ctx + * mlx5hws context to close. + * @return zero on success non zero otherwise. + */ +int mlx5hws_context_close(struct mlx5hws_context *ctx); + +/* Set a peer context, each context can have multiple contexts as peers. + * + * @param[in] ctx + * The context in which the peer_ctx will be peered to it. + * @param[in] peer_ctx + * The peer context. + * @param[in] peer_vhca_id + * The peer context vhca id. + */ +void mlx5hws_context_set_peer(struct mlx5hws_context *ctx, + struct mlx5hws_context *peer_ctx, + u16 peer_vhca_id); + +/* Create a new direct rule table. Each table can contain multiple matchers. + * + * @param[in] ctx + * The context in which the new table will be opened. + * @param[in] attr + * Attributes used for table creation. + * @return pointer to mlx5hws_table on success NULL otherwise. + */ +struct mlx5hws_table * +mlx5hws_table_create(struct mlx5hws_context *ctx, + struct mlx5hws_table_attr *attr); + +/* Destroy direct rule table. + * + * @param[in] tbl + * Table to destroy. + * @return zero on success non zero otherwise. + */ +int mlx5hws_table_destroy(struct mlx5hws_table *tbl); + +/* Get ID of the flow table. + * + * @param[in] tbl + * Table to get ID of. + * @return ID of the table. + */ +u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl); + +/* Set default miss table for mlx5hws_table by using another mlx5hws_table + * Traffic which all table matchers miss will be forwarded to miss table. + * + * @param[in] tbl + * Source table + * @param[in] miss_tbl + * Target (miss) table, or NULL to remove current miss table + * @return zero on success non zero otherwise. 
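+ *
+ * A minimal usage sketch (tbl_a and tbl_b are hypothetical tables):
+ *   mlx5hws_table_set_default_miss(tbl_a, tbl_b);  (chain misses to tbl_b)
+ *   mlx5hws_table_set_default_miss(tbl_a, NULL);   (restore default miss)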
+ */
+int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
+				   struct mlx5hws_table *miss_tbl);
+
+/* Create new match template based on items mask; the match template
+ * will be used for matcher creation.
+ *
+ * @param[in] ctx
+ * The context in which the new template will be created.
+ * @param[in] match_param
+ * Describe the mask based on PRM match parameters
+ * @param[in] match_param_sz
+ * Size of match param buffer
+ * @param[in] match_criteria_enable
+ * Bitmap for each sub-set in match_criteria buffer
+ * @return pointer to mlx5hws_match_template on success NULL otherwise
+ */
+struct mlx5hws_match_template *
+mlx5hws_match_template_create(struct mlx5hws_context *ctx,
+			      u32 *match_param,
+			      u32 match_param_sz,
+			      u8 match_criteria_enable);
+
+/* Destroy match template.
+ *
+ * @param[in] mt
+ * Match template to destroy.
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt);
+
+/* Create new action template based on action_type array; the action template
+ * will be used for matcher creation.
+ *
+ * @param[in] action_type
+ * An array of actions based on the order of actions which will be provided
+ * with rule_actions to mlx5hws_rule_create. The last action is marked
+ * using MLX5HWS_ACTION_TYP_LAST.
+ * @return pointer to mlx5hws_action_template on success NULL otherwise
+ */
+struct mlx5hws_action_template *
+mlx5hws_action_template_create(enum mlx5hws_action_type action_type[]);
+
+/* Destroy action template.
+ *
+ * @param[in] at
+ * Action template to destroy.
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at);
+
+/* Create a new direct rule matcher. Each matcher can contain multiple rules.
+ * Matchers on the table will be processed by priority. Matching fields and
+ * mask are described by the match template. In some cases multiple match
+ * templates can be used on the same matcher.
+ *
+ * @param[in] table
+ * The table in which the new matcher will be opened.
+ * @param[in] mt
+ * Array of match templates to be used on matcher.
+ * @param[in] num_of_mt
+ * Number of match templates in mt array.
+ * @param[in] at
+ * Array of action templates to be used on matcher.
+ * @param[in] num_of_at
+ * Number of action templates in at array.
+ * @param[in] attr
+ * Attributes used for matcher creation.
+ * @return pointer to mlx5hws_matcher on success NULL otherwise.
+ */
+struct mlx5hws_matcher *
+mlx5hws_matcher_create(struct mlx5hws_table *table,
+		       struct mlx5hws_match_template *mt[],
+		       u8 num_of_mt,
+		       struct mlx5hws_action_template *at[],
+		       u8 num_of_at,
+		       struct mlx5hws_matcher_attr *attr);
+
+/* Destroy direct rule matcher.
+ *
+ * @param[in] matcher
+ * Matcher to destroy.
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher);
+
+/* Attach new action template to direct rule matcher.
+ *
+ * @param[in] matcher
+ * Matcher to attach the action template to.
+ * @param[in] at
+ * Action template to be attached to the matcher.
+ * @return zero on success non zero otherwise.
+ */
+int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
+			      struct mlx5hws_action_template *at);
+
+/* Link two matchers and enable moving rules from src matcher to dst matcher.
+ * Both matchers must be in the same table type, must be created with 'resizable'
+ * property, and should have the same characteristics (e.g. same mt, same at).
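+ *
+ * An illustrative resize flow (names hypothetical):
+ *   dst = mlx5hws_matcher_create(tbl, mt, 1, at, 1, &bigger_attr);
+ *   mlx5hws_matcher_resize_set_target(src, dst);
+ *   (move each rule with mlx5hws_matcher_resize_rule_move(src, rule, &attr))
+ *   mlx5hws_matcher_destroy(src);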
+ * + * It is the user's responsibility to make sure that the dst matcher + * was allocated with the appropriate size. + * + * Once the function is completed, the user is: + * - allowed to move rules from src into dst matcher + * - no longer allowed to insert rules to the src matcher + * + * The user is always allowed to insert rules to the dst matcher and + * to delete rules from any matcher. + * + * @param[in] src_matcher + * source matcher for moving rules from + * @param[in] dst_matcher + * destination matcher for moving rules to + * @return zero on successful move, non zero otherwise. + */ +int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher, + struct mlx5hws_matcher *dst_matcher); + +/* Enqueue moving rule operation: moving rule from src matcher to a dst matcher + * + * @param[in] src_matcher + * matcher that the rule belongs to + * @param[in] rule + * the rule to move + * @param[in] attr + * rule attributes + * @return zero on success, non zero otherwise. + */ +int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher, + struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr); + +/* Enqueue create rule operation. + * + * @param[in] matcher + * The matcher in which the new rule will be created. + * @param[in] mt_idx + * Match template index to create the match with. + * @param[in] match_param + * The match parameter PRM buffer used for the value matching. + * @param[in] rule_actions + * Rule action to be executed on match. + * @param[in] at_idx + * Action template index to apply the actions with. + * @param[in] num_of_actions + * Number of rule actions. + * @param[in] attr + * Rule creation attributes. + * @param[in, out] rule_handle + * A valid rule handle. The handle doesn't require any initialization. + * @return zero on successful enqueue non zero otherwise. + */ +int mlx5hws_rule_create(struct mlx5hws_matcher *matcher, + u8 mt_idx, + u32 *match_param, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[], + struct mlx5hws_rule_attr *attr, + struct mlx5hws_rule *rule_handle); + +/* Enqueue destroy rule operation. + * + * @param[in] rule + * The rule destruction to enqueue. + * @param[in] attr + * Rule destruction attributes. + * @return zero on successful enqueue non zero otherwise. + */ +int mlx5hws_rule_destroy(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr); + +/* Enqueue update actions on an existing rule. + * + * @param[in, out] rule_handle + * A valid rule handle to update. + * @param[in] at_idx + * Action template index to update the actions with. + * @param[in] rule_actions + * Rule action to be executed on match. + * @param[in] attr + * Rule update attributes. + * @return zero on successful enqueue non zero otherwise. + */ +int mlx5hws_rule_action_update(struct mlx5hws_rule *rule, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[], + struct mlx5hws_rule_attr *attr); + +/* Get action type. + * + * @param[in] action + * The action to get the type of. + * @return action type. + */ +enum mlx5hws_action_type +mlx5hws_action_get_type(struct mlx5hws_action *action); + +/* Create direct rule drop action. + * + * @param[in] ctx + * The context in which the new action will be created. + * @param[in] flags + * Action creation flags. (enum mlx5hws_action_flags) + * @return pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx, + u32 flags); + +/* Create direct rule default miss action. + * Defaults are RX: Drop TX: Wire. 
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx,
+				   u32 flags);
+
+/* Create direct rule goto table action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] tbl
+ * Destination table.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
+				 struct mlx5hws_table *tbl,
+				 u32 flags);
+
+/* Create direct rule goto table number action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] table_num
+ * Destination table number.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
+				     u32 table_num, u32 flags);
+
+/* Create direct rule range match action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] field
+ * Field to compare the value against.
+ * @param[in] hit_ft
+ * Flow table to go to on hit.
+ * @param[in] miss_ft
+ * Flow table to go to on miss.
+ * @param[in] min
+ * Minimal value of the field to be considered as hit.
+ * @param[in] max
+ * Maximal value of the field to be considered as hit.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
+				       u32 field,
+				       struct mlx5_flow_table *hit_ft,
+				       struct mlx5_flow_table *miss_ft,
+				       u32 min, u32 max, u32 flags);
+
+/* Create direct rule flow sampler action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] sampler_id
+ * Flow sampler object ID.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
+				   u32 sampler_id, u32 flags);
+
+/* Create direct rule goto vport action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] vport_num
+ * Destination vport number.
+ * @param[in] vhca_id_valid
+ * Tells if the vhca_id parameter is valid.
+ * @param[in] vhca_id
+ * VHCA ID of the destination vport.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx,
+				 u16 vport_num,
+				 bool vhca_id_valid,
+				 u16 vhca_id,
+				 u32 flags);
+
+/* Create direct rule TAG action.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5hws_action_flags)
+ * @return pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_tag(struct mlx5hws_context *ctx,
+			  u32 flags);
+
+/* Create direct rule counter action.
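+ * The counter object is allocated separately from the rule; with a bulk of
+ * counters, the per-rule offset in struct mlx5hws_rule_action selects the
+ * entry within the bulk (see the counter member above).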
+ * + * @param[in] ctx + * The context in which the new action will be created. + * @param[in] obj_id + * Direct rule counter object ID. + * @param[in] flags + * Action creation flags. (enum mlx5hws_action_flags) + * @return pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_counter(struct mlx5hws_context *ctx, + u32 obj_id, + u32 flags); + +/* Create direct rule reformat action. + * + * @param[in] ctx + * The context in which the new action will be created. + * @param[in] reformat_type + * Type of reformat prefixed with MLX5HWS_ACTION_TYP_REFORMAT. + * @param[in] num_of_hdrs + * Number of provided headers in "hdrs" array. + * @param[in] hdrs + * Headers array containing header information. + * @param[in] log_bulk_size + * Number of unique values used with this reformat. + * @param[in] flags + * Action creation flags. (enum mlx5hws_action_flags) + * @return pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_reformat(struct mlx5hws_context *ctx, + enum mlx5hws_action_type reformat_type, + u8 num_of_hdrs, + struct mlx5hws_action_reformat_header *hdrs, + u32 log_bulk_size, + u32 flags); + +/* Create direct rule modify header action. + * + * @param[in] ctx + * The context in which the new action will be created. + * @param[in] num_of_patterns + * Number of provided patterns in "patterns" array. + * @param[in] patterns + * Patterns array containing pattern information. + * @param[in] log_bulk_size + * Number of unique values used with this pattern. + * @param[in] flags + * Action creation flags. (enum mlx5hws_action_flags) + * @return pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx, + u8 num_of_patterns, + struct mlx5hws_action_mh_pattern *patterns, + u32 log_bulk_size, + u32 flags); + +/* Create direct rule ASO flow meter action. + * + * @param[in] ctx + * The context in which the new action will be created. + * @param[in] obj_id + * ASO object ID. + * @param[in] return_reg_c + * Copy the ASO object value into this reg_c, after a packet hits a rule with this ASO object. + * @param[in] flags + * Action creation flags. (enum mlx5hws_action_flags) + * @return pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx, + u32 obj_id, + u8 return_reg_c, + u32 flags); + +/* Create direct rule pop vlan action. + * @param[in] ctx + * The context in which the new action will be created. + * @param[in] flags + * Action creation flags. (enum mlx5hws_action_flags) + * @return pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags); + +/* Create direct rule push vlan action. + * @param[in] ctx + * The context in which the new action will be created. + * @param[in] flags + * Action creation flags. (enum mlx5hws_action_flags) + * @return pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags); + +/* Create a dest array action, this action can duplicate packets and forward to + * multiple destinations in the destination list. + * @param[in] ctx + * The context in which the new action will be created. + * @param[in] num_dest + * The number of dests attributes. + * @param[in] dests + * The destination array. 
Each contains a destination action and can have + * additional actions. + * @param[in] ignore_flow_level + * Boolean that says whether to turn on 'ignore_flow_level' for this dest. + * @param[in] flow_source + * Source port of the traffic for this action. + * @param[in] flags + * Action creation flags. (enum mlx5hws_action_flags) + * @return pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, + size_t num_dest, + struct mlx5hws_action_dest_attr *dests, + bool ignore_flow_level, + u32 flow_source, + u32 flags); + +/* Create insert header action. + * + * @param[in] ctx + * The context in which the new action will be created. + * @param[in] num_of_hdrs + * Number of provided headers in "hdrs" array. + * @param[in] hdrs + * Headers array containing header information. + * @param[in] log_bulk_size + * Number of unique values used with this insert header. + * @param[in] flags + * Action creation flags. (enum mlx5hws_action_flags) + * @return pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx, + u8 num_of_hdrs, + struct mlx5hws_action_insert_header *hdrs, + u32 log_bulk_size, + u32 flags); + +/* Create remove header action. + * + * @param[in] ctx + * The context in which the new action will be created. + * @param[in] attr + * attributes: specifies the remove header type, PRM start anchor and + * the PRM end anchor or the PRM start anchor and remove size in bytes. + * @param[in] flags + * Action creation flags. (enum mlx5hws_action_flags) + * @return pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx, + struct mlx5hws_action_remove_header_attr *attr, + u32 flags); + +/* Create direct rule LAST action. + * + * @param[in] ctx + * The context in which the new action will be created. + * @param[in] flags + * Action creation flags. (enum mlx5hws_action_flags) + * @return pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags); + +/* Destroy direct rule action. + * + * @param[in] action + * The action to destroy. + * @return zero on success non zero otherwise. + */ +int mlx5hws_action_destroy(struct mlx5hws_action *action); + +enum mlx5hws_flow_op_status { + MLX5HWS_FLOW_OP_SUCCESS, + MLX5HWS_FLOW_OP_ERROR, +}; + +struct mlx5hws_flow_op_result { + enum mlx5hws_flow_op_status status; + void *user_data; +}; + +/* Poll queue for rule creation and deletion completions. + * + * @param[in] ctx + * The context to which the queue belongs. + * @param[in] queue_id + * The id of the queue to poll. + * @param[in, out] res + * Completion array. + * @param[in] res_nb + * Maximum number of results to return. + * @return negative number on failure, the number of completions otherwise. + */ +int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx, + u16 queue_id, + struct mlx5hws_flow_op_result res[], + u32 res_nb); + +/* Perform an action on the queue. + * + * @param[in] ctx + * The context to which the queue belongs. + * @param[in] queue_id + * The id of the queue to perform the action on. + * @param[in] actions + * Actions to perform on the queue. (enum mlx5hws_send_queue_actions) + * @return zero on success non zero otherwise. 
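+ * + * A minimal sketch (queue 0; hypothetical res array size and + * handle_completion() helper; error handling elided): + * + *	struct mlx5hws_flow_op_result res[32]; + *	int done; + * + *	mlx5hws_send_queue_action(ctx, 0, + *				  MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC); + *	done = mlx5hws_send_queue_poll(ctx, 0, res, 32); + *	if (done > 0 && res[0].status == MLX5HWS_FLOW_OP_SUCCESS) + *		handle_completion(res[0].user_data);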
 + */ +int mlx5hws_send_queue_action(struct mlx5hws_context *ctx, + u16 queue_id, + u32 actions); + +/* Dump HWS info. + * + * @param[in] ctx + * The context from which to dump the info. + * @return zero on success non zero otherwise. + */ +int mlx5hws_debug_dump(struct mlx5hws_context *ctx); + +struct mlx5hws_bwc_matcher; +struct mlx5hws_bwc_rule; + +struct mlx5hws_match_parameters { + size_t match_sz; + u32 *match_buf; /* Device spec format */ +}; + +/* Create a new BWC direct rule matcher. + * This function does the following: + * - creates match template based on flow items + * - creates an empty action template + * - creates a usual mlx5hws_matcher with these mt and at, setting + * its size to minimal + * Notes: + * - table->ctx must have BWC support + * - complex rules are not supported + * + * @param[in] table + * The table in which the new matcher will be opened + * @param[in] priority + * Priority for this BWC matcher + * @param[in] match_criteria_enable + * Bitmask that defines matching criteria + * @param[in] mask + * Match parameters + * @return pointer to mlx5hws_bwc_matcher on success or NULL otherwise. + */ +struct mlx5hws_bwc_matcher * +mlx5hws_bwc_matcher_create(struct mlx5hws_table *table, + u32 priority, + u8 match_criteria_enable, + struct mlx5hws_match_parameters *mask); + +/* Destroy BWC direct rule matcher. + * + * @param[in] bwc_matcher + * Matcher to destroy + * @return zero on success, non zero otherwise + */ +int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher); + +/* Create a new BWC rule. + * Unlike the usual rule creation function, this one is blocking: when the + * function returns, the rule is written to its place (no need to poll). + * This function does the following: + * - finds matching action template based on the provided rule_actions, or + * creates new action template if matching action template doesn't exist + * - updates corresponding BWC matcher stats + * - if needed, the function performs rehash: + * - creates a new matcher based on mt, at, new_sz + * - moves all the existing matcher rules to the new matcher + * - removes the old matcher + * - inserts new rule + * - polls until completion is received + * Notes: + * - matcher->tbl->ctx must have BWC support + * - separate BWC ctx queues are used + * + * @param[in] bwc_matcher + * The BWC matcher in which the new rule will be created. + * @param[in] params + * Match parameters + * @param[in] flow_source + * Flow source for this rule + * @param[in] rule_actions + * Rule action to be executed on match + * @return valid BWC rule handle on success, NULL otherwise + */ +struct mlx5hws_bwc_rule * +mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_match_parameters *params, + u32 flow_source, + struct mlx5hws_rule_action rule_actions[]); + +/* Destroy BWC direct rule. + * + * @param[in] bwc_rule + * Rule to destroy + * @return zero on success, non zero otherwise + */ +int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule); + +/* Update actions on an existing BWC rule. + * + * @param[in] bwc_rule + * Rule to update + * @param[in] rule_actions + * Rule action to update with + * @return zero on successful update, non zero otherwise. 
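+ * + * A minimal BWC lifecycle sketch (a valid table, mask, params, + * match_criteria_enable, rule_actions and new_rule_actions are assumed; + * error handling elided): + * + *	bwc_matcher = mlx5hws_bwc_matcher_create(table, 0, + *						 match_criteria_enable, &mask); + *	bwc_rule = mlx5hws_bwc_rule_create(bwc_matcher, &params, + *					   flow_source, rule_actions); + *	mlx5hws_bwc_rule_action_update(bwc_rule, new_rule_actions); + *	mlx5hws_bwc_rule_destroy(bwc_rule); + *	mlx5hws_bwc_matcher_destroy(bwc_matcher);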
+ */ +int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_rule_action rule_actions[]); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c new file mode 100644 index 000000000000..b27bb4106532 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c @@ -0,0 +1,2604 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +#define MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET 1 + +/* Header removal size limited to 128B (64 words) */ +#define MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE 128 + +/* This is the longest supported action sequence for FDB table: + * DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term. + */ +static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = { + [MLX5HWS_TABLE_TYPE_FDB] = { + BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) | + BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) | + BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2), + BIT(MLX5HWS_ACTION_TYP_POP_VLAN), + BIT(MLX5HWS_ACTION_TYP_POP_VLAN), + BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR), + BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN), + BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN), + BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) | + BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) | + BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3), + BIT(MLX5HWS_ACTION_TYP_CTR), + BIT(MLX5HWS_ACTION_TYP_TAG), + BIT(MLX5HWS_ACTION_TYP_ASO_METER), + BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR), + BIT(MLX5HWS_ACTION_TYP_TBL) | + BIT(MLX5HWS_ACTION_TYP_VPORT) | + BIT(MLX5HWS_ACTION_TYP_DROP) | + BIT(MLX5HWS_ACTION_TYP_SAMPLER) | + BIT(MLX5HWS_ACTION_TYP_RANGE) | + BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY), + BIT(MLX5HWS_ACTION_TYP_LAST), + }, +}; + +static const char * const mlx5hws_action_type_str[] = { + [MLX5HWS_ACTION_TYP_LAST] = "LAST", + [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2] = "TNL_L2_TO_L2", + [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2] = "L2_TO_TNL_L2", + [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2] = "TNL_L3_TO_L2", + [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3] = "L2_TO_TNL_L3", + [MLX5HWS_ACTION_TYP_DROP] = "DROP", + [MLX5HWS_ACTION_TYP_TBL] = "TBL", + [MLX5HWS_ACTION_TYP_CTR] = "CTR", + [MLX5HWS_ACTION_TYP_TAG] = "TAG", + [MLX5HWS_ACTION_TYP_MODIFY_HDR] = "MODIFY_HDR", + [MLX5HWS_ACTION_TYP_VPORT] = "VPORT", + [MLX5HWS_ACTION_TYP_MISS] = "DEFAULT_MISS", + [MLX5HWS_ACTION_TYP_POP_VLAN] = "POP_VLAN", + [MLX5HWS_ACTION_TYP_PUSH_VLAN] = "PUSH_VLAN", + [MLX5HWS_ACTION_TYP_ASO_METER] = "ASO_METER", + [MLX5HWS_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY", + [MLX5HWS_ACTION_TYP_INSERT_HEADER] = "INSERT_HEADER", + [MLX5HWS_ACTION_TYP_REMOVE_HEADER] = "REMOVE_HEADER", + [MLX5HWS_ACTION_TYP_SAMPLER] = "SAMPLER", + [MLX5HWS_ACTION_TYP_RANGE] = "RANGE", +}; + +static_assert(ARRAY_SIZE(mlx5hws_action_type_str) == MLX5HWS_ACTION_TYP_MAX, + "Missing mlx5hws_action_type_str"); + +const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type) +{ + return mlx5hws_action_type_str[action_type]; +} + +enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action) +{ + return action->type; +} + +static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx, + enum mlx5hws_context_shared_stc_type stc_type, + u8 tbl_type) +{ + struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; + struct mlx5hws_action_shared_stc *shared_stc; + int ret; + + mutex_lock(&ctx->ctrl_lock); + if 
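/* a shared STC of this type already exists - take another reference */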
(ctx->common_res[tbl_type].shared_stc[stc_type]) { + ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++; + mutex_unlock(&ctx->ctrl_lock); + return 0; + } + + shared_stc = kzalloc(sizeof(*shared_stc), GFP_KERNEL); + if (!shared_stc) { + ret = -ENOMEM; + goto unlock_and_out; + } + switch (stc_type) { + case MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3: + stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE; + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5; + stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + stc_attr.remove_header.decap = 0; + stc_attr.remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START; + stc_attr.remove_header.end_anchor = MLX5_HEADER_ANCHOR_IPV6_IPV4; + break; + case MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP: + stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS; + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5; + stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + stc_attr.remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START; + stc_attr.remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN; + break; + default: + mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type); + pr_warn("HWS: Invalid stc_type: %d\n", stc_type); + ret = -EINVAL; + goto free_shared_stc; + } + + ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type, + &shared_stc->stc_chunk); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate shared decap l2 STC\n"); + goto free_shared_stc; + } + + ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc; + ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1; + + mutex_unlock(&ctx->ctrl_lock); + + return 0; + +free_shared_stc: + kfree(shared_stc); +unlock_and_out: + mutex_unlock(&ctx->ctrl_lock); + return ret; +} + +static int hws_action_get_shared_stc(struct mlx5hws_action *action, + enum mlx5hws_context_shared_stc_type stc_type) +{ + struct mlx5hws_context *ctx = action->ctx; + int ret; + + if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) { + pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type); + return -EINVAL; + } + + if (unlikely(!(action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB))) { + pr_warn("HWS: Invalid action->flags: %d\n", action->flags); + return -EINVAL; + } + + ret = hws_action_get_shared_stc_nic(ctx, stc_type, MLX5HWS_TABLE_TYPE_FDB); + if (ret) { + mlx5hws_err(ctx, + "Failed to allocate memory for FDB shared STCs (type: %d)\n", + stc_type); + return ret; + } + + return 0; +} + +static void hws_action_put_shared_stc(struct mlx5hws_action *action, + enum mlx5hws_context_shared_stc_type stc_type) +{ + enum mlx5hws_table_type tbl_type = MLX5HWS_TABLE_TYPE_FDB; + struct mlx5hws_action_shared_stc *shared_stc; + struct mlx5hws_context *ctx = action->ctx; + + if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) { + pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type); + return; + } + + mutex_lock(&ctx->ctrl_lock); + if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) { + mutex_unlock(&ctx->ctrl_lock); + return; + } + + shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type]; + + mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk); + kfree(shared_stc); + ctx->common_res[tbl_type].shared_stc[stc_type] = NULL; + mutex_unlock(&ctx->ctrl_lock); +} + +static void hws_action_print_combo(struct mlx5hws_context *ctx, + enum mlx5hws_action_type *user_actions) +{ + mlx5hws_err(ctx, "Invalid action_type sequence"); + while (*user_actions != MLX5HWS_ACTION_TYP_LAST) { + mlx5hws_err(ctx, " %s", mlx5hws_action_type_to_str(*user_actions)); + user_actions++; + } + 
mlx5hws_err(ctx, "\n"); +} + +bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx, + enum mlx5hws_action_type *user_actions, + enum mlx5hws_table_type table_type) +{ + const u32 *order_arr = action_order_arr[table_type]; + u8 order_idx = 0; + u8 user_idx = 0; + bool valid_combo; + + if (table_type >= MLX5HWS_TABLE_TYPE_MAX) { + mlx5hws_err(ctx, "Invalid table_type %d\n", table_type); + return false; + } + + while (order_arr[order_idx] != BIT(MLX5HWS_ACTION_TYP_LAST)) { + /* User action order validated, move to next user action */ + if (BIT(user_actions[user_idx]) & order_arr[order_idx]) + user_idx++; + + /* Iterate to the next supported action in the order */ + order_idx++; + } + + /* Combination is valid if all user actions were processed */ + valid_combo = user_actions[user_idx] == MLX5HWS_ACTION_TYP_LAST; + if (!valid_combo) + hws_action_print_combo(ctx, user_actions); + + return valid_combo; +} + +static bool +hws_action_fixup_stc_attr(struct mlx5hws_context *ctx, + struct mlx5hws_cmd_stc_modify_attr *stc_attr, + struct mlx5hws_cmd_stc_modify_attr *fixup_stc_attr, + enum mlx5hws_table_type table_type, + bool is_mirror) +{ + bool use_fixup = false; + u32 fw_tbl_type; + u32 base_id; + + fw_tbl_type = mlx5hws_table_get_res_fw_ft_type(table_type, is_mirror); + + switch (stc_attr->action_type) { + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE: + if (is_mirror && stc_attr->ste_table.ignore_tx) { + fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; + fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT; + fixup_stc_attr->stc_offset = stc_attr->stc_offset; + use_fixup = true; + break; + } + if (!is_mirror) + base_id = mlx5hws_pool_chunk_get_base_id(stc_attr->ste_table.ste_pool, + &stc_attr->ste_table.ste); + else + base_id = + mlx5hws_pool_chunk_get_base_mirror_id(stc_attr->ste_table.ste_pool, + &stc_attr->ste_table.ste); + + *fixup_stc_attr = *stc_attr; + fixup_stc_attr->ste_table.ste_obj_id = base_id; + use_fixup = true; + break; + + case MLX5_IFC_STC_ACTION_TYPE_TAG: + if (fw_tbl_type == FS_FT_FDB_TX) { + fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_NOP; + fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5; + fixup_stc_attr->stc_offset = stc_attr->stc_offset; + use_fixup = true; + } + break; + + case MLX5_IFC_STC_ACTION_TYPE_ALLOW: + if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) { + fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT; + fixup_stc_attr->action_offset = stc_attr->action_offset; + fixup_stc_attr->stc_offset = stc_attr->stc_offset; + fixup_stc_attr->vport.esw_owner_vhca_id = ctx->caps->vhca_id; + fixup_stc_attr->vport.vport_num = ctx->caps->eswitch_manager_vport_number; + fixup_stc_attr->vport.eswitch_owner_vhca_id_valid = + ctx->caps->merged_eswitch; + use_fixup = true; + } + break; + + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT: + if (stc_attr->vport.vport_num != MLX5_VPORT_UPLINK) + break; + + if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) { + /* The FW doesn't allow going to the wire in TX/RX via JUMP_TO_VPORT */ + fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK; + fixup_stc_attr->action_offset = stc_attr->action_offset; + fixup_stc_attr->stc_offset = stc_attr->stc_offset; + fixup_stc_attr->vport.vport_num = 0; + fixup_stc_attr->vport.esw_owner_vhca_id = stc_attr->vport.esw_owner_vhca_id; + fixup_stc_attr->vport.eswitch_owner_vhca_id_valid = + stc_attr->vport.eswitch_owner_vhca_id_valid; + } + use_fixup = true; + break; + + default: + break; + } + +
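 /* Tell the caller whether fixup_stc_attr must be used instead of stc_attr. */ + return 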
use_fixup; +} + +int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx, + struct mlx5hws_cmd_stc_modify_attr *stc_attr, + u32 table_type, + struct mlx5hws_pool_chunk *stc) +__must_hold(&ctx->ctrl_lock) +{ + struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0}; + struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type]; + struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0}; + bool use_fixup; + u32 obj_0_id; + int ret; + + ret = mlx5hws_pool_chunk_alloc(stc_pool, stc); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate single action STC\n"); + return ret; + } + + stc_attr->stc_offset = stc->offset; + + /* Dynamic reparse not supported, overwrite and use default */ + if (!mlx5hws_context_cap_dynamic_reparse(ctx)) + stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + + obj_0_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc); + + /* Adjust the stc_attr according to table/action limitations */ + use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false); + ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id, + use_fixup ? &fixup_stc_attr : stc_attr); + if (ret) { + mlx5hws_err(ctx, "Failed to modify STC action_type %d tbl_type %d\n", + stc_attr->action_type, table_type); + goto free_chunk; + } + + /* Modify the FDB peer */ + if (table_type == MLX5HWS_TABLE_TYPE_FDB) { + u32 obj_1_id; + + obj_1_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc); + + use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, + &fixup_stc_attr, + table_type, true); + ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_1_id, + use_fixup ? &fixup_stc_attr : stc_attr); + if (ret) { + mlx5hws_err(ctx, + "Failed to modify peer STC action_type %d tbl_type %d\n", + stc_attr->action_type, table_type); + goto clean_obj_0; + } + } + + return 0; + +clean_obj_0: + cleanup_stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; + cleanup_stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT; + cleanup_stc_attr.stc_offset = stc->offset; + mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id, &cleanup_stc_attr); +free_chunk: + mlx5hws_pool_chunk_free(stc_pool, stc); + return ret; +} + +void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx, + u32 table_type, + struct mlx5hws_pool_chunk *stc) +__must_hold(&ctx->ctrl_lock) +{ + struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type]; + struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; + u32 obj_id; + + /* Modify the STC not to point to an object */ + stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT; + stc_attr.stc_offset = stc->offset; + obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc); + mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr); + + if (table_type == MLX5HWS_TABLE_TYPE_FDB) { + obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc); + mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr); + } + + mlx5hws_pool_chunk_free(stc_pool, stc); +} + +static u32 hws_action_get_mh_stc_type(struct mlx5hws_context *ctx, + __be64 pattern) +{ + u8 action_type = MLX5_GET(set_action_in, &pattern, action_type); + + switch (action_type) { + case MLX5_MODIFICATION_TYPE_SET: + return MLX5_IFC_STC_ACTION_TYPE_SET; + case MLX5_MODIFICATION_TYPE_ADD: + return MLX5_IFC_STC_ACTION_TYPE_ADD; + case MLX5_MODIFICATION_TYPE_COPY: + return MLX5_IFC_STC_ACTION_TYPE_COPY; + case MLX5_MODIFICATION_TYPE_ADD_FIELD: + return MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD; + default: + mlx5hws_err(ctx, "Unsupported action type: 0x%x\n", action_type); + return MLX5_IFC_STC_ACTION_TYPE_NOP; + } +} + +
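+/* Translate a single HWS action into its STC modify attributes. */ +static 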
void hws_action_fill_stc_attr(struct mlx5hws_action *action, + u32 obj_id, + struct mlx5hws_cmd_stc_modify_attr *attr) +{ + attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + + switch (action->type) { + case MLX5HWS_ACTION_TYP_TAG: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_TAG; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5; + break; + case MLX5HWS_ACTION_TYP_DROP: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; + attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT; + break; + case MLX5HWS_ACTION_TYP_MISS: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW; + attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT; + break; + case MLX5HWS_ACTION_TYP_CTR: + attr->id = obj_id; + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_COUNTER; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW0; + break; + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: + case MLX5HWS_ACTION_TYP_MODIFY_HDR: + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6; + attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + if (action->modify_header.require_reparse) + attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + + if (action->modify_header.num_of_actions == 1) { + attr->modify_action.data = action->modify_header.single_action; + attr->action_type = hws_action_get_mh_stc_type(action->ctx, + attr->modify_action.data); + + if (attr->action_type == MLX5_IFC_STC_ACTION_TYPE_ADD || + attr->action_type == MLX5_IFC_STC_ACTION_TYPE_SET) + MLX5_SET(set_action_in, &attr->modify_action.data, data, 0); + } else { + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST; + attr->modify_header.arg_id = action->modify_header.arg_id; + attr->modify_header.pattern_id = action->modify_header.pat_id; + } + break; + case MLX5HWS_ACTION_TYP_TBL: + case MLX5HWS_ACTION_TYP_DEST_ARRAY: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT; + attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT; + attr->dest_table_id = obj_id; + break; + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5; + attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + attr->remove_header.decap = 1; + attr->remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START; + attr->remove_header.end_anchor = MLX5_HEADER_ANCHOR_INNER_MAC; + break; + case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: + case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: + case MLX5HWS_ACTION_TYP_INSERT_HEADER: + attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + if (!action->reformat.require_reparse) + attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6; + attr->insert_header.encap = action->reformat.encap; + attr->insert_header.insert_anchor = action->reformat.anchor; + attr->insert_header.arg_id = action->reformat.arg_id; + attr->insert_header.header_size = action->reformat.header_size; + attr->insert_header.insert_offset = action->reformat.offset; + break; + case MLX5HWS_ACTION_TYP_ASO_METER: + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6; + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO; + attr->aso.aso_type = ASO_OPC_MOD_POLICER; + attr->aso.devx_obj_id = obj_id; + attr->aso.return_reg_id = action->aso.return_reg_id; + break; + case MLX5HWS_ACTION_TYP_VPORT: + attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT; + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT; + attr->vport.vport_num = action->vport.vport_num; + attr->vport.esw_owner_vhca_id = 
action->vport.esw_owner_vhca_id; + attr->vport.eswitch_owner_vhca_id_valid = action->vport.esw_owner_vhca_id_valid; + break; + case MLX5HWS_ACTION_TYP_POP_VLAN: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5; + attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + attr->remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START; + attr->remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN / 2; + break; + case MLX5HWS_ACTION_TYP_PUSH_VLAN: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6; + attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + attr->insert_header.encap = 0; + attr->insert_header.is_inline = 1; + attr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START; + attr->insert_header.insert_offset = MLX5HWS_ACTION_HDR_LEN_L2_MACS; + attr->insert_header.header_size = MLX5HWS_ACTION_HDR_LEN_L2_VLAN; + break; + case MLX5HWS_ACTION_TYP_REMOVE_HEADER: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS; + attr->remove_header.decap = 0; /* decap == 0 is the only supported mode */ + attr->remove_words.start_anchor = action->remove_header.anchor; + /* the size is already given in words */ + attr->remove_words.num_of_words = action->remove_header.size; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5; + attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + break; + default: + mlx5hws_err(action->ctx, "Invalid action type %d\n", action->type); + } +} + +static int +hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id) +{ + struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; + struct mlx5hws_context *ctx = action->ctx; + int ret; + + hws_action_fill_stc_attr(action, obj_id, &stc_attr); + + /* Block unsupported parallel obj modify over the same base */ + mutex_lock(&ctx->ctrl_lock); + + /* Allocate STC for FDB */ + if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) { + ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, + MLX5HWS_TABLE_TYPE_FDB, + &action->stc[MLX5HWS_TABLE_TYPE_FDB]); + if (ret) + goto out_err; + } + + mutex_unlock(&ctx->ctrl_lock); + + return 0; + +out_err: + mutex_unlock(&ctx->ctrl_lock); + return ret; +} + +static void +hws_action_destroy_stcs(struct mlx5hws_action *action) +{ + struct mlx5hws_context *ctx = action->ctx; + + /* Block unsupported parallel obj modify over the same base */ + mutex_lock(&ctx->ctrl_lock); + + if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) + mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB, + &action->stc[MLX5HWS_TABLE_TYPE_FDB]); + + mutex_unlock(&ctx->ctrl_lock); +} + +static bool hws_action_is_flag_hws_fdb(u32 flags) +{ + return flags & MLX5HWS_ACTION_FLAG_HWS_FDB; +} + +static bool +hws_action_validate_hws_action(struct mlx5hws_context *ctx, u32 flags) +{ + if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) { + mlx5hws_err(ctx, "Cannot create HWS action since HWS is not supported\n"); + return false; + } + + if ((flags & MLX5HWS_ACTION_FLAG_HWS_FDB) && !ctx->caps->eswitch_manager) { + mlx5hws_err(ctx, "Cannot create HWS action for FDB for non-eswitch-manager\n"); + return false; + } + + return true; +} + +static struct mlx5hws_action * +hws_action_create_generic_bulk(struct mlx5hws_context *ctx, + u32 flags, + enum mlx5hws_action_type action_type, + u8 bulk_sz) +{ + struct mlx5hws_action *action; + int i; + + if (!hws_action_is_flag_hws_fdb(flags)) { + mlx5hws_err(ctx, + "Action (type: %d) flags must specify only HWS FDB\n", action_type); + return NULL; + } + + if 
(!hws_action_validate_hws_action(ctx, flags)) + return NULL; + + action = kcalloc(bulk_sz, sizeof(*action), GFP_KERNEL); + if (!action) + return NULL; + + for (i = 0; i < bulk_sz; i++) { + action[i].ctx = ctx; + action[i].flags = flags; + action[i].type = action_type; + } + + return action; +} + +static struct mlx5hws_action * +hws_action_create_generic(struct mlx5hws_context *ctx, + u32 flags, + enum mlx5hws_action_type action_type) +{ + return hws_action_create_generic_bulk(ctx, flags, action_type, 1); +} + +struct mlx5hws_action * +mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx, + u32 table_id, + u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TBL); + if (!action) + return NULL; + + ret = hws_action_create_stcs(action, table_id); + if (ret) + goto free_action; + + action->dest_obj.obj_id = table_id; + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx, + struct mlx5hws_table *tbl, + u32 flags) +{ + return mlx5hws_action_create_dest_table_num(ctx, tbl->ft_id, flags); +} + +struct mlx5hws_action * +mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx, u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DROP); + if (!action) + return NULL; + + ret = hws_action_create_stcs(action, 0); + if (ret) + goto free_action; + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx, u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_MISS); + if (!action) + return NULL; + + ret = hws_action_create_stcs(action, 0); + if (ret) + goto free_action; + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TAG); + if (!action) + return NULL; + + ret = hws_action_create_stcs(action, 0); + if (ret) + goto free_action; + + return action; + +free_action: + kfree(action); + return NULL; +} + +static struct mlx5hws_action * +hws_action_create_aso(struct mlx5hws_context *ctx, + enum mlx5hws_action_type action_type, + u32 obj_id, + u8 return_reg_id, + u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, action_type); + if (!action) + return NULL; + + action->aso.obj_id = obj_id; + action->aso.return_reg_id = return_reg_id; + + ret = hws_action_create_stcs(action, obj_id); + if (ret) + goto free_action; + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx, + u32 obj_id, + u8 return_reg_id, + u32 flags) +{ + return hws_action_create_aso(ctx, MLX5HWS_ACTION_TYP_ASO_METER, + obj_id, return_reg_id, flags); +} + +struct mlx5hws_action * +mlx5hws_action_create_counter(struct mlx5hws_context *ctx, + u32 obj_id, + u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_CTR); + if (!action) + return NULL; + + ret = hws_action_create_stcs(action, obj_id); + if (ret) + goto free_action; + + return action; + +free_action: 
+ kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx, + u16 vport_num, + bool vhca_id_valid, + u16 vhca_id, + u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + if (!(flags & MLX5HWS_ACTION_FLAG_HWS_FDB)) { + mlx5hws_err(ctx, "Vport action is supported for FDB only\n"); + return NULL; + } + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_VPORT); + if (!action) + return NULL; + + if (!ctx->caps->merged_eswitch && vhca_id_valid && vhca_id != ctx->caps->vhca_id) { + mlx5hws_err(ctx, "Non merged eswitch cannot send to other vhca\n"); + goto free_action; + } + + action->vport.vport_num = vport_num; + action->vport.esw_owner_vhca_id_valid = vhca_id_valid; + + if (vhca_id_valid) + action->vport.esw_owner_vhca_id = vhca_id; + + ret = hws_action_create_stcs(action, 0); + if (ret) { + mlx5hws_err(ctx, "Failed creating stc for vport %d\n", vport_num); + goto free_action; + } + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_PUSH_VLAN); + if (!action) + return NULL; + + ret = hws_action_create_stcs(action, 0); + if (ret) { + mlx5hws_err(ctx, "Failed creating stc for push vlan\n"); + goto free_action; + } + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_POP_VLAN); + if (!action) + return NULL; + + ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP); + if (ret) { + mlx5hws_err(ctx, "Failed to create shared remove stc for pop vlan\n"); + goto free_action; + } + + ret = hws_action_create_stcs(action, 0); + if (ret) { + mlx5hws_err(ctx, "Failed creating stc for pop vlan\n"); + goto free_shared; + } + + return action; + +free_shared: + hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP); +free_action: + kfree(action); + return NULL; +} + +static int +hws_action_handle_insert_with_ptr(struct mlx5hws_action *action, + u8 num_of_hdrs, + struct mlx5hws_action_reformat_header *hdrs, + u32 log_bulk_sz) +{ + size_t max_sz = 0; + u32 arg_id; + int ret, i; + + for (i = 0; i < num_of_hdrs; i++) { + if (hdrs[i].sz % W_SIZE != 0) { + mlx5hws_err(action->ctx, + "Header data size should be in WORD granularity\n"); + return -EINVAL; + } + max_sz = max(hdrs[i].sz, max_sz); + } + + /* Allocate single shared arg object for all headers */ + ret = mlx5hws_arg_create(action->ctx, + hdrs->data, + max_sz, + log_bulk_sz, + action->flags & MLX5HWS_ACTION_FLAG_SHARED, + &arg_id); + if (ret) + return ret; + + for (i = 0; i < num_of_hdrs; i++) { + action[i].reformat.arg_id = arg_id; + action[i].reformat.header_size = hdrs[i].sz; + action[i].reformat.num_of_hdrs = num_of_hdrs; + action[i].reformat.max_hdr_sz = max_sz; + action[i].reformat.require_reparse = true; + + if (action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2 || + action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3) { + action[i].reformat.anchor = MLX5_HEADER_ANCHOR_PACKET_START; + action[i].reformat.offset = 0; + action[i].reformat.encap = 1; + } + + ret = hws_action_create_stcs(&action[i], 0); + if (ret) { + mlx5hws_err(action->ctx, "Failed to create 
stc for reformat\n"); + goto free_stc; + } + } + + return 0; + +free_stc: + while (i--) + hws_action_destroy_stcs(&action[i]); + + mlx5hws_arg_destroy(action->ctx, arg_id); + return ret; +} + +static int +hws_action_handle_l2_to_tunnel_l3(struct mlx5hws_action *action, + u8 num_of_hdrs, + struct mlx5hws_action_reformat_header *hdrs, + u32 log_bulk_sz) +{ + int ret; + + /* The action is remove-l2-header + insert-l3-header */ + ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3); + if (ret) { + mlx5hws_err(action->ctx, "Failed to create remove stc for reformat\n"); + return ret; + } + + /* Reuse the insert with pointer for the L2L3 header */ + ret = hws_action_handle_insert_with_ptr(action, + num_of_hdrs, + hdrs, + log_bulk_sz); + if (ret) + goto put_shared_stc; + + return 0; + +put_shared_stc: + hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3); + return ret; +} + +static void hws_action_prepare_decap_l3_actions(size_t data_sz, + u8 *mh_data, + int *num_of_actions) +{ + int actions; + u32 i; + + /* Remove L2L3 outer headers */ + MLX5_SET(stc_ste_param_remove, mh_data, action_type, + MLX5_MODIFICATION_TYPE_REMOVE); + MLX5_SET(stc_ste_param_remove, mh_data, decap, 0x1); + MLX5_SET(stc_ste_param_remove, mh_data, remove_start_anchor, + MLX5_HEADER_ANCHOR_PACKET_START); + MLX5_SET(stc_ste_param_remove, mh_data, remove_end_anchor, + MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4); + mh_data += MLX5HWS_ACTION_DOUBLE_SIZE; /* Assume every action is 2 dw */ + actions = 1; + + /* Add the new header using inline actions, 4 bytes at a time; the header + * is added in reversed order to the beginning of the packet to avoid + * incorrect parsing by the HW. Since the header is 14B or 18B, an extra + * two bytes are padded and later removed. 
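+ * For example, a 14B L2 header is emitted as four 4B inline inserts + * (16B in total), and the trailing remove-words action then strips the + * extra 2B (one word) from the start of the packet.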
 + */ + for (i = 0; i < data_sz / MLX5HWS_ACTION_INLINE_DATA_SIZE + 1; i++) { + MLX5_SET(stc_ste_param_insert, mh_data, action_type, + MLX5_MODIFICATION_TYPE_INSERT); + MLX5_SET(stc_ste_param_insert, mh_data, inline_data, 0x1); + MLX5_SET(stc_ste_param_insert, mh_data, insert_anchor, + MLX5_HEADER_ANCHOR_PACKET_START); + MLX5_SET(stc_ste_param_insert, mh_data, insert_size, 2); + mh_data += MLX5HWS_ACTION_DOUBLE_SIZE; + actions++; + } + + /* Remove the first 2 extra bytes */ + MLX5_SET(stc_ste_param_remove_words, mh_data, action_type, + MLX5_MODIFICATION_TYPE_REMOVE_WORDS); + MLX5_SET(stc_ste_param_remove_words, mh_data, remove_start_anchor, + MLX5_HEADER_ANCHOR_PACKET_START); + /* The hardware expects the size here in words (2 bytes each) */ + MLX5_SET(stc_ste_param_remove_words, mh_data, remove_size, 1); + actions++; + + *num_of_actions = actions; +} + +static int +hws_action_handle_tunnel_l3_to_l2(struct mlx5hws_action *action, + u8 num_of_hdrs, + struct mlx5hws_action_reformat_header *hdrs, + u32 log_bulk_sz) +{ + u8 mh_data[MLX5HWS_ACTION_REFORMAT_DATA_SIZE] = {0}; + struct mlx5hws_context *ctx = action->ctx; + u32 arg_id, pat_id; + int num_of_actions; + int mh_data_size; + int ret, i; + + for (i = 0; i < num_of_hdrs; i++) { + if (hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2 && + hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN) { + mlx5hws_err(ctx, "Data size is not supported for decap-l3\n"); + return -EINVAL; + } + } + + /* Create a full modify header action list in case the action is shared */ + hws_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions); + if (action->flags & MLX5HWS_ACTION_FLAG_SHARED) + mlx5hws_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions); + + /* All DecapL3 cases require the same max arg size */ + ret = mlx5hws_arg_create_modify_header_arg(ctx, + (__be64 *)mh_data, + num_of_actions, + log_bulk_sz, + action->flags & MLX5HWS_ACTION_FLAG_SHARED, + &arg_id); + if (ret) + return ret; + + for (i = 0; i < num_of_hdrs; i++) { + memset(mh_data, 0, MLX5HWS_ACTION_REFORMAT_DATA_SIZE); + hws_action_prepare_decap_l3_actions(hdrs[i].sz, mh_data, &num_of_actions); + mh_data_size = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE; + + ret = mlx5hws_pat_get_pattern(ctx, (__be64 *)mh_data, mh_data_size, &pat_id); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate pattern for DecapL3\n"); + goto free_stc_and_pat; + } + + action[i].modify_header.max_num_of_actions = num_of_actions; + action[i].modify_header.num_of_actions = num_of_actions; + action[i].modify_header.num_of_patterns = num_of_hdrs; + action[i].modify_header.arg_id = arg_id; + action[i].modify_header.pat_id = pat_id; + action[i].modify_header.require_reparse = + mlx5hws_pat_require_reparse((__be64 *)mh_data, num_of_actions); + + ret = hws_action_create_stcs(&action[i], 0); + if (ret) { + mlx5hws_pat_put_pattern(ctx, pat_id); + goto free_stc_and_pat; + } + } + + return 0; + +free_stc_and_pat: + while (i--) { + hws_action_destroy_stcs(&action[i]); + mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id); + } + + mlx5hws_arg_destroy(action->ctx, arg_id); + return ret; +} + +static int +hws_action_create_reformat_hws(struct mlx5hws_action *action, + u8 num_of_hdrs, + struct mlx5hws_action_reformat_header *hdrs, + u32 bulk_size) +{ + int ret; + + switch (action->type) { + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: + ret = hws_action_create_stcs(action, 0); + break; + case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: + ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs, hdrs, bulk_size); + break; + case 
MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: + ret = hws_action_handle_l2_to_tunnel_l3(action, num_of_hdrs, hdrs, bulk_size); + break; + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: + ret = hws_action_handle_tunnel_l3_to_l2(action, num_of_hdrs, hdrs, bulk_size); + break; + default: + mlx5hws_err(action->ctx, "Invalid HWS reformat action type\n"); + return -EINVAL; + } + + return ret; +} + +struct mlx5hws_action * +mlx5hws_action_create_reformat(struct mlx5hws_context *ctx, + enum mlx5hws_action_type reformat_type, + u8 num_of_hdrs, + struct mlx5hws_action_reformat_header *hdrs, + u32 log_bulk_size, + u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + if (!num_of_hdrs) { + mlx5hws_err(ctx, "Reformat num_of_hdrs cannot be zero\n"); + return NULL; + } + + action = hws_action_create_generic_bulk(ctx, flags, reformat_type, num_of_hdrs); + if (!action) + return NULL; + + if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1)) { + mlx5hws_err(ctx, "Reformat flags don't fit HWS (flags: 0x%x)\n", flags); + goto free_action; + } + + ret = hws_action_create_reformat_hws(action, num_of_hdrs, hdrs, log_bulk_size); + if (ret) { + mlx5hws_err(ctx, "Failed to create HWS reformat action\n"); + goto free_action; + } + + return action; + +free_action: + kfree(action); + return NULL; +} + +static int +hws_action_create_modify_header_hws(struct mlx5hws_action *action, + u8 num_of_patterns, + struct mlx5hws_action_mh_pattern *pattern, + u32 log_bulk_size) +{ + struct mlx5hws_context *ctx = action->ctx; + u16 num_actions, max_mh_actions = 0; + int i, ret, size_in_bytes; + u32 pat_id, arg_id = 0; + __be64 *new_pattern; + size_t pat_max_sz; + + pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE; + size_in_bytes = pat_max_sz * sizeof(__be64); + new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL); + if (!new_pattern) + return -ENOMEM; + + /* Calculate maximum number of mh actions for shared arg allocation */ + for (i = 0; i < num_of_patterns; i++) { + size_t new_num_actions; + size_t cur_num_actions; + u32 nope_location; + + cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE; + + mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions, + pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE, + &new_num_actions, &nope_location, + &new_pattern[i * pat_max_sz]); + + action[i].modify_header.nope_locations = nope_location; + action[i].modify_header.num_of_actions = new_num_actions; + + max_mh_actions = max(max_mh_actions, new_num_actions); + } + + if (mlx5hws_arg_get_arg_log_size(max_mh_actions) >= MLX5HWS_ARG_CHUNK_SIZE_MAX) { + mlx5hws_err(ctx, "Num of actions (%d) bigger than allowed\n", + max_mh_actions); + ret = -EINVAL; + goto free_new_pat; + } + + /* Allocate single shared arg for all patterns based on the max size */ + if (max_mh_actions > 1) { + ret = mlx5hws_arg_create_modify_header_arg(ctx, + pattern->data, + max_mh_actions, + log_bulk_size, + action->flags & + MLX5HWS_ACTION_FLAG_SHARED, + &arg_id); + if (ret) + goto free_new_pat; + } + + for (i = 0; i < num_of_patterns; i++) { + if (!mlx5hws_pat_verify_actions(ctx, pattern[i].data, pattern[i].sz)) { + mlx5hws_err(ctx, "Failed to verify pattern modify actions\n"); + ret = -EINVAL; + goto free_stc_and_pat; + } + num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE; + action[i].modify_header.num_of_patterns = num_of_patterns; + action[i].modify_header.max_num_of_actions = max_mh_actions; + + action[i].modify_header.require_reparse = + mlx5hws_pat_require_reparse(pattern[i].data, num_actions); + + if 
(num_actions == 1) { + pat_id = 0; + /* Optimize single modify action to be used inline */ + action[i].modify_header.single_action = pattern[i].data[0]; + action[i].modify_header.single_action_type = + MLX5_GET(set_action_in, pattern[i].data, action_type); + } else { + /* Multiple modify actions require a pattern */ + if (unlikely(action[i].modify_header.nope_locations)) { + size_t pattern_sz; + + pattern_sz = action[i].modify_header.num_of_actions * + MLX5HWS_MODIFY_ACTION_SIZE; + ret = + mlx5hws_pat_get_pattern(ctx, + &new_pattern[i * pat_max_sz], + pattern_sz, &pat_id); + } else { + ret = mlx5hws_pat_get_pattern(ctx, + pattern[i].data, + pattern[i].sz, + &pat_id); + } + if (ret) { + mlx5hws_err(ctx, + "Failed to allocate pattern for modify header\n"); + goto free_stc_and_pat; + } + + action[i].modify_header.arg_id = arg_id; + action[i].modify_header.pat_id = pat_id; + } + /* Allocate STC for each action representing a header */ + ret = hws_action_create_stcs(&action[i], 0); + if (ret) { + if (pat_id) + mlx5hws_pat_put_pattern(ctx, pat_id); + goto free_stc_and_pat; + } + } + + kfree(new_pattern); + return 0; + +free_stc_and_pat: + while (i--) { + hws_action_destroy_stcs(&action[i]); + if (action[i].modify_header.pat_id) + mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id); + } + + if (arg_id) + mlx5hws_arg_destroy(ctx, arg_id); +free_new_pat: + kfree(new_pattern); + return ret; +} + +struct mlx5hws_action * +mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx, + u8 num_of_patterns, + struct mlx5hws_action_mh_pattern *patterns, + u32 log_bulk_size, + u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + if (!num_of_patterns) { + mlx5hws_err(ctx, "Invalid number of patterns\n"); + return NULL; + } + action = hws_action_create_generic_bulk(ctx, flags, + MLX5HWS_ACTION_TYP_MODIFY_HDR, + num_of_patterns); + if (!action) + return NULL; + + if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_patterns > 1)) { + mlx5hws_err(ctx, "Action cannot be shared with requested pattern or size\n"); + goto free_action; + } + + ret = hws_action_create_modify_header_hws(action, + num_of_patterns, + patterns, + log_bulk_size); + if (ret) + goto free_action; + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, + size_t num_dest, + struct mlx5hws_action_dest_attr *dests, + bool ignore_flow_level, + u32 flow_source, + u32 flags) +{ + struct mlx5hws_cmd_set_fte_dest *dest_list = NULL; + struct mlx5hws_cmd_ft_create_attr ft_attr = {0}; + struct mlx5hws_cmd_set_fte_attr fte_attr = {0}; + struct mlx5hws_cmd_forward_tbl *fw_island; + struct mlx5hws_action *action; + u32 i /*, packet_reformat_id*/; + int ret; + + if (num_dest <= 1) { + mlx5hws_err(ctx, "Action must have multiple dests\n"); + return NULL; + } + + if (flags == (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) { + ft_attr.type = FS_FT_FDB; + ft_attr.level = ctx->caps->fdb_ft.max_level - 1; + } else { + mlx5hws_err(ctx, "Action flags not supported\n"); + return NULL; + } + + dest_list = kcalloc(num_dest, sizeof(*dest_list), GFP_KERNEL); + if (!dest_list) + return NULL; + + for (i = 0; i < num_dest; i++) { + enum mlx5hws_action_type action_type = dests[i].dest->type; + struct mlx5hws_action *reformat_action = dests[i].reformat; + + switch (action_type) { + case MLX5HWS_ACTION_TYP_TBL: + dest_list[i].destination_type = + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest_list[i].destination_id = 
dests[i].dest->dest_obj.obj_id; + fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + fte_attr.ignore_flow_level = ignore_flow_level; + /* ToDo: In SW steering we have a handling of 'go to WIRE' + * destination here by upper layer setting 'is_wire_ft' flag + * if the destination is wire. + * This is because uplink should be last dest in the list. + */ + break; + case MLX5HWS_ACTION_TYP_VPORT: + dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest_list[i].destination_id = dests[i].dest->vport.vport_num; + fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + if (ctx->caps->merged_eswitch) { + dest_list[i].ext_flags |= + MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID; + dest_list[i].esw_owner_vhca_id = + dests[i].dest->vport.esw_owner_vhca_id; + } + break; + default: + mlx5hws_err(ctx, "Unsupported action in dest_array\n"); + goto free_dest_list; + } + + if (reformat_action) { + mlx5hws_err(ctx, "dest_array with reformat action - unsupported\n"); + goto free_dest_list; + } + } + + fte_attr.dests_num = num_dest; + fte_attr.dests = dest_list; + + fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr); + if (!fw_island) + goto free_dest_list; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DEST_ARRAY); + if (!action) + goto destroy_fw_island; + + ret = hws_action_create_stcs(action, fw_island->ft_id); + if (ret) + goto free_action; + + action->dest_array.fw_island = fw_island; + action->dest_array.num_dest = num_dest; + action->dest_array.dest_list = dest_list; + + return action; + +free_action: + kfree(action); +destroy_fw_island: + mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island); +free_dest_list: + for (i = 0; i < num_dest; i++) { + if (dest_list[i].ext_reformat_id) + mlx5hws_cmd_packet_reformat_destroy(ctx->mdev, + dest_list[i].ext_reformat_id); + } + kfree(dest_list); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx, + u8 num_of_hdrs, + struct mlx5hws_action_insert_header *hdrs, + u32 log_bulk_size, + u32 flags) +{ + struct mlx5hws_action_reformat_header *reformat_hdrs; + struct mlx5hws_action *action; + int ret; + int i; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_INSERT_HEADER); + if (!action) + return NULL; + + reformat_hdrs = kcalloc(num_of_hdrs, sizeof(*reformat_hdrs), GFP_KERNEL); + if (!reformat_hdrs) + goto free_action; + + for (i = 0; i < num_of_hdrs; i++) { + if (hdrs[i].offset % W_SIZE != 0) { + mlx5hws_err(ctx, "Header offset should be in WORD granularity\n"); + goto free_reformat_hdrs; + } + + action[i].reformat.anchor = hdrs[i].anchor; + action[i].reformat.encap = hdrs[i].encap; + action[i].reformat.offset = hdrs[i].offset; + + reformat_hdrs[i].sz = hdrs[i].hdr.sz; + reformat_hdrs[i].data = hdrs[i].hdr.data; + } + + ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs, + reformat_hdrs, log_bulk_size); + if (ret) { + mlx5hws_err(ctx, "Failed to create HWS reformat action\n"); + goto free_reformat_hdrs; + } + + kfree(reformat_hdrs); + + return action; + +free_reformat_hdrs: + kfree(reformat_hdrs); +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx, + struct mlx5hws_action_remove_header_attr *attr, + u32 flags) +{ + struct mlx5hws_action *action; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_REMOVE_HEADER); + if (!action) + return NULL; + + /* support only remove anchor with size */ + if (attr->size 
% W_SIZE != 0) { + mlx5hws_err(ctx, + "Invalid size, HW supports header remove in WORD granularity\n"); + goto free_action; + } + + if (attr->size > MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE) { + mlx5hws_err(ctx, "Header removal size limited to %u bytes\n", + MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE); + goto free_action; + } + + action->remove_header.anchor = attr->anchor; + action->remove_header.size = attr->size / W_SIZE; + + if (hws_action_create_stcs(action, 0)) + goto free_action; + + return action; + +free_action: + kfree(action); + return NULL; +} + +static struct mlx5hws_definer * +hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx) +{ + struct mlx5hws_definer *definer; + __be32 *tag; + int ret; + + definer = kzalloc(sizeof(*definer), GFP_KERNEL); + if (!definer) + return NULL; + + definer->dw_selector[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4; + /* Set DW0 tag mask */ + tag = (__force __be32 *)definer->mask.jumbo; + tag[MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0] = htonl(0xffffUL << 16); + + mutex_lock(&ctx->ctrl_lock); + + ret = mlx5hws_definer_get_obj(ctx, definer); + if (ret < 0) { + mutex_unlock(&ctx->ctrl_lock); + kfree(definer); + return NULL; + } + + mutex_unlock(&ctx->ctrl_lock); + definer->obj_id = ret; + + return definer; +} + +static struct mlx5hws_matcher_action_ste * +hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx, + struct mlx5hws_definer *definer, + u32 miss_ft_id) +{ + struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0}; + struct mlx5hws_action_default_stc *default_stc; + struct mlx5hws_matcher_action_ste *table_ste; + struct mlx5hws_pool_attr pool_attr = {0}; + struct mlx5hws_pool *ste_pool, *stc_pool; + struct mlx5hws_pool_chunk *ste; + u32 *rtc_0_id, *rtc_1_id; + u32 obj_id; + int ret; + + /* Check if STE range is supported */ + if (!IS_BIT_SET(ctx->caps->supp_ste_format_gen_wqe, MLX5_IFC_RTC_STE_FORMAT_RANGE)) { + mlx5hws_err(ctx, "Range STE format not supported\n"); + return NULL; + } + + table_ste = kzalloc(sizeof(*table_ste), GFP_KERNEL); + if (!table_ste) + return NULL; + + mutex_lock(&ctx->ctrl_lock); + + pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB; + pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE; + pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL; + pool_attr.alloc_log_sz = 1; + table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr); + if (!table_ste->pool) { + mlx5hws_err(ctx, "Failed to allocate memory for ste pool\n"); + goto free_ste; + } + + /* Allocate RTC */ + rtc_0_id = &table_ste->rtc_0_id; + rtc_1_id = &table_ste->rtc_1_id; + ste_pool = table_ste->pool; + ste = &table_ste->ste; + ste->order = 1; + + rtc_attr.log_size = 0; + rtc_attr.log_depth = 0; + rtc_attr.miss_ft_id = miss_ft_id; + rtc_attr.num_hash_definer = 1; + rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH; + rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH; + rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer; + rtc_attr.fw_gen_wqe = true; + rtc_attr.is_scnd_range = true; + + obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste); + + rtc_attr.pd = ctx->pd_num; + rtc_attr.ste_base = obj_id; + rtc_attr.ste_offset = ste->offset; + rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx); + rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false); + + /* STC is a single resource (obj_id), use any STC for the ID */ + stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB]; + default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc; + obj_id = 
mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit); + rtc_attr.stc_base = obj_id; + + ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id); + if (ret) { + mlx5hws_err(ctx, "Failed to create RTC"); + goto pool_destroy; + } + + /* Create mirror RTC */ + obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste); + rtc_attr.ste_base = obj_id; + rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true); + + obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit); + rtc_attr.stc_base = obj_id; + + ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id); + if (ret) { + mlx5hws_err(ctx, "Failed to create mirror RTC"); + goto destroy_rtc_0; + } + + mutex_unlock(&ctx->ctrl_lock); + + return table_ste; + +destroy_rtc_0: + mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id); +pool_destroy: + mlx5hws_pool_destroy(table_ste->pool); +free_ste: + mutex_unlock(&ctx->ctrl_lock); + kfree(table_ste); + return NULL; +} + +static void +hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx, + struct mlx5hws_matcher_action_ste *table_ste) +{ + mutex_lock(&ctx->ctrl_lock); + + mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_1_id); + mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_0_id); + mlx5hws_pool_destroy(table_ste->pool); + kfree(table_ste); + + mutex_unlock(&ctx->ctrl_lock); +} + +static int +hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx, + struct mlx5hws_matcher_action_ste *table_ste, + struct mlx5hws_action *hit_ft_action, + struct mlx5hws_definer *range_definer, + u32 min, u32 max) +{ + struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0}; + struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0}; + struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0}; + u32 no_use, used_rtc_0_id, used_rtc_1_id, ret; + struct mlx5hws_context_common_res *common_res; + struct mlx5hws_send_ste_attr ste_attr = {0}; + struct mlx5hws_send_engine *queue; + __be32 *wqe_data_arr; + + mutex_lock(&ctx->ctrl_lock); + + /* Get the control queue */ + queue = &ctx->send_queue[ctx->queues - 1]; + if (unlikely(mlx5hws_send_engine_err(queue))) { + ret = -EIO; + goto error; + } + + /* Init default send STE attributes */ + ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE; + ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA; + ste_attr.send_attr.user_data = &no_use; + ste_attr.send_attr.rule = NULL; + ste_attr.send_attr.fence = 1; + ste_attr.send_attr.notify_hw = true; + ste_attr.rtc_0 = table_ste->rtc_0_id; + ste_attr.rtc_1 = table_ste->rtc_1_id; + ste_attr.used_id_rtc_0 = &used_rtc_0_id; + ste_attr.used_id_rtc_1 = &used_rtc_1_id; + + common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB]; + + /* init an empty match STE which will always hit */ + ste_attr.wqe_ctrl = &wqe_ctrl; + ste_attr.wqe_data = &match_wqe_data; + ste_attr.send_attr.match_definer_id = ctx->caps->trivial_match_definer; + + /* Fill WQE control data */ + wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] = + htonl(common_res->default_stc->nop_ctr.offset); + wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = + htonl(common_res->default_stc->nop_dw5.offset); + wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = + htonl(common_res->default_stc->nop_dw6.offset); + wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = + htonl(common_res->default_stc->nop_dw7.offset); + wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |= + 
+	wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
+		htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29);
+	wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
+		htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset);
+
+	wqe_data_arr = (__force __be32 *)&range_wqe_data;
+
+	ste_attr.range_wqe_data = &range_wqe_data;
+	ste_attr.send_attr.len += MLX5HWS_WQE_SZ_GTA_DATA;
+	ste_attr.send_attr.range_definer_id = mlx5hws_definer_get_id(range_definer);
+
+	/* Fill the range matching fields:
+	 * min/max_value_2 correspond to match_dw_0 in the definer;
+	 * min_value_2 is set in DW0 of the STE and max_value_2 in DW1.
+	 */
+	wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW0] = htonl(min << 16);
+	wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW1] = htonl(max << 16);
+
+	/* Send WQEs to FW */
+	mlx5hws_send_stes_fw(ctx, queue, &ste_attr);
+
+	/* Poll for completion */
+	ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
+					MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+	if (ret) {
+		mlx5hws_err(ctx, "Failed to drain control queue\n");
+		goto error;
+	}
+
+	mutex_unlock(&ctx->ctrl_lock);
+
+	return 0;
+
+error:
+	mutex_unlock(&ctx->ctrl_lock);
+	return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
+				       u32 field,
+				       struct mlx5_flow_table *hit_ft,
+				       struct mlx5_flow_table *miss_ft,
+				       u32 min, u32 max, u32 flags)
+{
+	struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+	struct mlx5hws_matcher_action_ste *table_ste;
+	struct mlx5hws_action *hit_ft_action;
+	struct mlx5hws_definer *definer;
+	struct mlx5hws_action *action;
+	u32 miss_ft_id = miss_ft->id;
+	u32 hit_ft_id = hit_ft->id;
+	int ret;
+
+	if (field != MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN ||
+	    min > 0xffff || max > 0xffff) {
+		mlx5hws_err(ctx, "Invalid match range parameters\n");
+		return NULL;
+	}
+
+	action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_RANGE);
+	if (!action)
+		return NULL;
+
+	definer = hws_action_create_dest_match_range_definer(ctx);
+	if (!definer)
+		goto free_action;
+
+	table_ste = hws_action_create_dest_match_range_table(ctx, definer, miss_ft_id);
+	if (!table_ste)
+		goto destroy_definer;
+
+	hit_ft_action = mlx5hws_action_create_dest_table_num(ctx, hit_ft_id, flags);
+	if (!hit_ft_action)
+		goto destroy_table_ste;
+
+	ret = hws_action_create_dest_match_range_fill_table(ctx, table_ste,
+							    hit_ft_action,
+							    definer, min, max);
+	if (ret)
+		goto destroy_hit_ft_action;
+
+	action->range.table_ste = table_ste;
+	action->range.definer = definer;
+	action->range.hit_ft_action = hit_ft_action;
+
+	/* Allocate STC for jumps to STE */
+	mutex_lock(&ctx->ctrl_lock);
+	stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+	stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+	stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+	stc_attr.ste_table.ste = table_ste->ste;
+	stc_attr.ste_table.ste_pool = table_ste->pool;
+	stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+	ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB,
+					      &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+	if (ret)
+		goto error_unlock;
+
+	mutex_unlock(&ctx->ctrl_lock);
+
+	return action;
+
+error_unlock:
+	mutex_unlock(&ctx->ctrl_lock);
+destroy_hit_ft_action:
+	mlx5hws_action_destroy(hit_ft_action);
+destroy_table_ste:
+	hws_action_destroy_dest_match_range_table(ctx, table_ste);
+destroy_definer:
+	mlx5hws_definer_free(ctx, definer);
+free_action:
+	kfree(action);
+	mlx5hws_err(ctx, "Failed to create action dest match range\n");
+	return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_last(struct
mlx5hws_context *ctx, u32 flags) +{ + return hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_LAST); +} + +struct mlx5hws_action * +mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx, + u32 sampler_id, u32 flags) +{ + mlx5hws_err(ctx, "Flow sampler action - unsupported\n"); + return NULL; +} + +static void hws_action_destroy_hws(struct mlx5hws_action *action) +{ + u32 ext_reformat_id; + bool shared_arg; + u32 obj_id; + u32 i; + + switch (action->type) { + case MLX5HWS_ACTION_TYP_MISS: + case MLX5HWS_ACTION_TYP_TAG: + case MLX5HWS_ACTION_TYP_DROP: + case MLX5HWS_ACTION_TYP_CTR: + case MLX5HWS_ACTION_TYP_TBL: + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: + case MLX5HWS_ACTION_TYP_ASO_METER: + case MLX5HWS_ACTION_TYP_PUSH_VLAN: + case MLX5HWS_ACTION_TYP_REMOVE_HEADER: + case MLX5HWS_ACTION_TYP_VPORT: + hws_action_destroy_stcs(action); + break; + case MLX5HWS_ACTION_TYP_POP_VLAN: + hws_action_destroy_stcs(action); + hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP); + break; + case MLX5HWS_ACTION_TYP_DEST_ARRAY: + hws_action_destroy_stcs(action); + mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev, action->dest_array.fw_island); + for (i = 0; i < action->dest_array.num_dest; i++) { + ext_reformat_id = action->dest_array.dest_list[i].ext_reformat_id; + if (ext_reformat_id) + mlx5hws_cmd_packet_reformat_destroy(action->ctx->mdev, + ext_reformat_id); + } + kfree(action->dest_array.dest_list); + break; + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: + case MLX5HWS_ACTION_TYP_MODIFY_HDR: + shared_arg = false; + for (i = 0; i < action->modify_header.num_of_patterns; i++) { + hws_action_destroy_stcs(&action[i]); + if (action[i].modify_header.num_of_actions > 1) { + mlx5hws_pat_put_pattern(action[i].ctx, + action[i].modify_header.pat_id); + /* Save shared arg object to be freed after */ + obj_id = action[i].modify_header.arg_id; + shared_arg = true; + } + } + if (shared_arg) + mlx5hws_arg_destroy(action->ctx, obj_id); + break; + case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: + hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3); + for (i = 0; i < action->reformat.num_of_hdrs; i++) + hws_action_destroy_stcs(&action[i]); + mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id); + break; + case MLX5HWS_ACTION_TYP_INSERT_HEADER: + case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: + for (i = 0; i < action->reformat.num_of_hdrs; i++) + hws_action_destroy_stcs(&action[i]); + mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id); + break; + case MLX5HWS_ACTION_TYP_RANGE: + hws_action_destroy_stcs(action); + hws_action_destroy_dest_match_range_table(action->ctx, action->range.table_ste); + mlx5hws_definer_free(action->ctx, action->range.definer); + mlx5hws_action_destroy(action->range.hit_ft_action); + break; + case MLX5HWS_ACTION_TYP_LAST: + break; + default: + pr_warn("HWS: Invalid action type: %d\n", action->type); + } +} + +int mlx5hws_action_destroy(struct mlx5hws_action *action) +{ + hws_action_destroy_hws(action); + + kfree(action); + return 0; +} + +int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx, u8 tbl_type) +__must_hold(&ctx->ctrl_lock) +{ + struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; + struct mlx5hws_action_default_stc *default_stc; + int ret; + + if (ctx->common_res[tbl_type].default_stc) { + ctx->common_res[tbl_type].default_stc->refcount++; + return 0; + } + + default_stc = kzalloc(sizeof(*default_stc), GFP_KERNEL); + if (!default_stc) + return -ENOMEM; + + stc_attr.action_type = 
MLX5_IFC_STC_ACTION_TYPE_NOP;
+	stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW0;
+	stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+	ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+					      &default_stc->nop_ctr);
+	if (ret) {
+		mlx5hws_err(ctx, "Failed to allocate default counter STC\n");
+		goto free_default_stc;
+	}
+
+	stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+	ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+					      &default_stc->nop_dw5);
+	if (ret) {
+		mlx5hws_err(ctx, "Failed to allocate default NOP DW5 STC\n");
+		goto free_nop_ctr;
+	}
+
+	stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+	ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+					      &default_stc->nop_dw6);
+	if (ret) {
+		mlx5hws_err(ctx, "Failed to allocate default NOP DW6 STC\n");
+		goto free_nop_dw5;
+	}
+
+	stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW7;
+	ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+					      &default_stc->nop_dw7);
+	if (ret) {
+		mlx5hws_err(ctx, "Failed to allocate default NOP DW7 STC\n");
+		goto free_nop_dw6;
+	}
+
+	stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+	stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
+
+	ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+					      &default_stc->default_hit);
+	if (ret) {
+		mlx5hws_err(ctx, "Failed to allocate default allow STC\n");
+		goto free_nop_dw7;
+	}
+
+	ctx->common_res[tbl_type].default_stc = default_stc;
+	ctx->common_res[tbl_type].default_stc->refcount++;
+
+	return 0;
+
+free_nop_dw7:
+	mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+free_nop_dw6:
+	mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+free_nop_dw5:
+	mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+free_nop_ctr:
+	mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+free_default_stc:
+	kfree(default_stc);
+	return ret;
+}
+
+void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
+__must_hold(&ctx->ctrl_lock)
+{
+	struct mlx5hws_action_default_stc *default_stc;
+
+	default_stc = ctx->common_res[tbl_type].default_stc;
+	if (--default_stc->refcount)
+		return;
+
+	mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->default_hit);
+	mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+	mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+	mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+	mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+	kfree(default_stc);
+	ctx->common_res[tbl_type].default_stc = NULL;
+}
+
+static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
+				    u32 arg_idx,
+				    u8 *arg_data,
+				    u16 num_of_actions,
+				    u32 nope_locations)
+{
+	u8 *new_arg_data = NULL;
+	int i, j;
+
+	if (unlikely(nope_locations)) {
+		new_arg_data = kcalloc(num_of_actions,
+				       MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
+		if (unlikely(!new_arg_data))
+			return;
+
+		/* Leave a zeroed (NOP) argument slot at each nope location */
+		for (i = 0, j = 0; i < num_of_actions; i++, j++) {
+			memcpy(&new_arg_data[j * MLX5HWS_MODIFY_ACTION_SIZE],
+			       &arg_data[i * MLX5HWS_MODIFY_ACTION_SIZE],
+			       MLX5HWS_MODIFY_ACTION_SIZE);
+			if (BIT(i) & nope_locations)
+				j++;
+		}
+	}
+
+	mlx5hws_arg_write(queue, NULL, arg_idx,
+			  new_arg_data ? new_arg_data : arg_data,
+			  num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE);
+
+	kfree(new_arg_data);
+}
+
+void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst, u16 num_of_actions)
+{
+	u8 *e_src;
+	int i;
+
+	/* num_of_actions = remove l3l2 + 4/5 inserts + remove extra 2 bytes.
+	 * Copy from the end of src to the start of dst.
+	 * Move to the end; 2 is the leftover from 14B or 18B.
+	 */
+	if (num_of_actions == DECAP_L3_NUM_ACTIONS_W_NO_VLAN)
+		e_src = src + MLX5HWS_ACTION_HDR_LEN_L2;
+	else
+		e_src = src + MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN;
+
+	/* Move dst over the first remove action + zero data */
+	dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+	/* Move dst over the first insert ctrl action */
+	dst += MLX5HWS_ACTION_DOUBLE_SIZE / 2;
+	/* Actions:
+	 * no vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+	 * with vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+	 * The loop is without the last insertion.
+	 */
+	for (i = 0; i < num_of_actions - 3; i++) {
+		e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE;
+		memcpy(dst, e_src, MLX5HWS_ACTION_INLINE_DATA_SIZE); /* data */
+		dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+	}
+	/* Copy the last 2 bytes after a gap of 2 bytes which will be removed */
+	e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+	dst += MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+	memcpy(dst, e_src, 2);
+}
+
+static int
+hws_action_get_shared_stc_offset(struct mlx5hws_context_common_res *common_res,
+				 enum mlx5hws_context_shared_stc_type stc_type)
+{
+	return common_res->shared_stc[stc_type]->stc_chunk.offset;
+}
+
+static struct mlx5hws_actions_wqe_setter *
+hws_action_setter_find_first(struct mlx5hws_actions_wqe_setter *setter,
+			     u8 req_flags)
+{
+	/* Use a new setter if the requested flags are already taken */
+	while (setter->flags & req_flags)
+		setter++;
+
+	/* Use the current setter if the required flags are not yet used */
+	return setter;
+}
+
+static void
+hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply,
+		     enum mlx5hws_action_stc_idx stc_idx,
+		     u8 action_idx)
+{
+	struct mlx5hws_action *action = apply->rule_action[action_idx].action;
+
+	apply->wqe_ctrl->stc_ix[stc_idx] =
+		htonl(action->stc[apply->tbl_type].offset);
+}
+
+static void
+hws_action_setter_push_vlan(struct mlx5hws_actions_apply_data *apply,
+			    struct mlx5hws_actions_wqe_setter *setter)
+{
+	struct mlx5hws_rule_action *rule_action;
+
+	rule_action = &apply->rule_action[setter->idx_double];
+	apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+	apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = rule_action->push_vlan.vlan_hdr;
+
+	hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
+	apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+}
+
+static void
+hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
+				struct mlx5hws_actions_wqe_setter *setter)
+{
+	struct mlx5hws_rule_action *rule_action;
+	struct mlx5hws_action *action;
+	u32 arg_sz, arg_idx;
+	u8 *single_action;
+	__be32 stc_idx;
+
+	rule_action = &apply->rule_action[setter->idx_double];
+	action = rule_action->action;
+
+	stc_idx = htonl(action->stc[apply->tbl_type].offset);
+	apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+	apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+	apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+
+	if (action->modify_header.num_of_actions == 1) {
+		if (action->modify_header.single_action_type ==
+		    MLX5_MODIFICATION_TYPE_COPY ||
+		    action->modify_header.single_action_type ==
+		    MLX5_MODIFICATION_TYPE_ADD_FIELD) {
+			apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
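+			/* COPY and ADD_FIELD single actions are fully
+			 * described by their STC; no inline argument data
+			 * is needed, so DW7 is left zero.
+			 */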
+			return;
+		}
+
+		if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
+			single_action = (u8 *)&action->modify_header.single_action;
+		else
+			single_action = rule_action->modify_header.data;
+
+		apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] =
+			*(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
+	} else {
+		/* The rule offset is scaled by the argument size these actions need */
+		arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
+		arg_idx = rule_action->modify_header.offset * arg_sz;
+
+		apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+		if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+			apply->require_dep = 1;
+			hws_action_modify_write(apply->queue,
+						action->modify_header.arg_id + arg_idx,
+						rule_action->modify_header.data,
+						action->modify_header.num_of_actions,
+						action->modify_header.nope_locations);
+		}
+	}
+}
+
+static void
+hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply,
+			     struct mlx5hws_actions_wqe_setter *setter)
+{
+	struct mlx5hws_rule_action *rule_action;
+	struct mlx5hws_action *action;
+	u32 arg_idx, arg_sz;
+	__be32 stc_idx;
+
+	rule_action = &apply->rule_action[setter->idx_double];
+	action = rule_action->action + rule_action->reformat.hdr_idx;
+
+	/* The rule offset is scaled by the argument size for the max header size */
+	arg_sz = mlx5hws_arg_data_size_to_arg_size(action->reformat.max_hdr_sz);
+	arg_idx = rule_action->reformat.offset * arg_sz;
+
+	apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+	apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+	stc_idx = htonl(action->stc[apply->tbl_type].offset);
+	apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+	apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+	if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+		apply->require_dep = 1;
+		mlx5hws_arg_write(apply->queue, NULL,
+				  action->reformat.arg_id + arg_idx,
+				  rule_action->reformat.data,
+				  action->reformat.header_size);
+	}
+}
+
+static void
+hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply,
+			       struct mlx5hws_actions_wqe_setter *setter)
+{
+	struct mlx5hws_rule_action *rule_action;
+	struct mlx5hws_action *action;
+	u32 arg_sz, arg_idx;
+	__be32 stc_idx;
+
+	rule_action = &apply->rule_action[setter->idx_double];
+	action = rule_action->action + rule_action->reformat.hdr_idx;
+
+	/* The rule offset is scaled by the argument size for the number of actions */
+	arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
+	arg_idx = rule_action->reformat.offset * arg_sz;
+
+	apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+	apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+	stc_idx = htonl(action->stc[apply->tbl_type].offset);
+	apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+	apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+	if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+		apply->require_dep = 1;
+		mlx5hws_arg_decapl3_write(apply->queue,
+					  action->modify_header.arg_id + arg_idx,
+					  rule_action->reformat.data,
+					  action->modify_header.num_of_actions);
+	}
+}
+
+static void
+hws_action_setter_aso(struct mlx5hws_actions_apply_data *apply,
+		      struct mlx5hws_actions_wqe_setter *setter)
+{
+	struct mlx5hws_rule_action *rule_action;
+	u32 exe_aso_ctrl;
+	u32 offset;
+
+	rule_action = &apply->rule_action[setter->idx_double];
+
+	switch (rule_action->action->type) {
+	case MLX5HWS_ACTION_TYP_ASO_METER:
+		/* exe_aso_ctrl format:
+		 * [STC only and reserved bits 29b][init_color 2b][meter_id 1b]
+		 */
+		offset =
rule_action->aso_meter.offset / MLX5_ASO_METER_NUM_PER_OBJ; + exe_aso_ctrl = rule_action->aso_meter.offset % MLX5_ASO_METER_NUM_PER_OBJ; + exe_aso_ctrl |= rule_action->aso_meter.init_color << + MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET; + break; + default: + mlx5hws_err(rule_action->action->ctx, + "Unsupported ASO action type: %d\n", rule_action->action->type); + return; + } + + /* aso_object_offset format: [24B] */ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = htonl(offset); + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(exe_aso_ctrl); + + hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double); + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0; +} + +static void +hws_action_setter_tag(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + struct mlx5hws_rule_action *rule_action; + + rule_action = &apply->rule_action[setter->idx_single]; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = htonl(rule_action->tag.value); + hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single); +} + +static void +hws_action_setter_ctrl_ctr(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + struct mlx5hws_rule_action *rule_action; + + rule_action = &apply->rule_action[setter->idx_ctr]; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = htonl(rule_action->counter.offset); + hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_CTRL, setter->idx_ctr); +} + +static void +hws_action_setter_single(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0; + hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single); +} + +static void +hws_action_setter_single_double_pop(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = + htonl(hws_action_get_shared_stc_offset(apply->common_res, + MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP)); +} + +static void +hws_action_setter_hit(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0; + hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit); +} + +static void +hws_action_setter_default_hit(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] = + htonl(apply->common_res->default_stc->default_hit.offset); +} + +static void +hws_action_setter_hit_next_action(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = htonl(apply->next_direct_idx << 6); + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] = htonl(apply->jump_to_action_stc); +} + +static void +hws_action_setter_common_decap(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = + htonl(hws_action_get_shared_stc_offset(apply->common_res, + MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3)); +} + +static void +hws_action_setter_range(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + /* Always jump to index zero */ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0; + 
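+	/* The hit STC of a range action was created as
+	 * MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE (see
+	 * mlx5hws_action_create_dest_match_range()), so this hit jumps
+	 * into the single-entry range STE table.
+	 */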
+	hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
+}
+
+int mlx5hws_action_template_process(struct mlx5hws_action_template *at)
+{
+	struct mlx5hws_actions_wqe_setter *start_setter = at->setters + 1;
+	enum mlx5hws_action_type *action_type = at->action_type_arr;
+	struct mlx5hws_actions_wqe_setter *setter = at->setters;
+	struct mlx5hws_actions_wqe_setter *pop_setter = NULL;
+	struct mlx5hws_actions_wqe_setter *last_setter;
+	int i;
+
+	/* Note: Given action combination must be valid */
+
+	/* Check if actions were already processed */
+	if (at->num_of_action_stes)
+		return 0;
+
+	for (i = 0; i < MLX5HWS_ACTION_MAX_STE; i++)
+		setter[i].set_hit = &hws_action_setter_hit_next_action;
+
+	/* The same action template setters can be used with jumbo or match
+	 * STE; to support both cases we reserve the first setter for cases
+	 * with jumbo STE to allow jump to the first action STE.
+	 * This extra setter can be reduced in some cases on rule creation.
+	 */
+	setter = start_setter;
+	last_setter = start_setter;
+
+	for (i = 0; i < at->num_actions; i++) {
+		switch (action_type[i]) {
+		case MLX5HWS_ACTION_TYP_DROP:
+		case MLX5HWS_ACTION_TYP_TBL:
+		case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+		case MLX5HWS_ACTION_TYP_VPORT:
+		case MLX5HWS_ACTION_TYP_MISS:
+			/* Hit action */
+			last_setter->flags |= ASF_HIT;
+			last_setter->set_hit = &hws_action_setter_hit;
+			last_setter->idx_hit = i;
+			break;
+
+		case MLX5HWS_ACTION_TYP_RANGE:
+			last_setter->flags |= ASF_HIT;
+			last_setter->set_hit = &hws_action_setter_range;
+			last_setter->idx_hit = i;
+			break;
+
+		case MLX5HWS_ACTION_TYP_POP_VLAN:
+			/* Single remove header to header */
+			if (pop_setter) {
+				/* We have 2 pops, use the shared */
+				pop_setter->set_single = &hws_action_setter_single_double_pop;
+				break;
+			}
+			setter = hws_action_setter_find_first(last_setter,
+							      ASF_SINGLE1 | ASF_MODIFY |
+							      ASF_INSERT);
+			setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+			setter->set_single = &hws_action_setter_single;
+			setter->idx_single = i;
+			pop_setter = setter;
+			break;
+
+		case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+			/* Double insert inline */
+			setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+			setter->flags |= ASF_DOUBLE | ASF_INSERT;
+			setter->set_double = &hws_action_setter_push_vlan;
+			setter->idx_double = i;
+			break;
+
+		case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+			/* Double modify header list */
+			setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+			setter->flags |= ASF_DOUBLE | ASF_MODIFY;
+			setter->set_double = &hws_action_setter_modify_header;
+			setter->idx_double = i;
+			break;
+
+		case MLX5HWS_ACTION_TYP_ASO_METER:
+			/* Double ASO action */
+			setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE);
+			setter->flags |= ASF_DOUBLE;
+			setter->set_double = &hws_action_setter_aso;
+			setter->idx_double = i;
+			break;
+
+		case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+		case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+			/* Single remove header to header */
+			setter = hws_action_setter_find_first(last_setter,
+							      ASF_SINGLE1 | ASF_MODIFY);
+			setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+			setter->set_single = &hws_action_setter_single;
+			setter->idx_single = i;
+			break;
+
+		case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+		case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+			/* Double insert header with pointer */
+			setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+			setter->flags |= ASF_DOUBLE | ASF_INSERT;
+			setter->set_double = &hws_action_setter_insert_ptr;
+			setter->idx_double = i;
+			break;
+
+		case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+			/* Single remove + Double insert header with pointer */
+			setter = hws_action_setter_find_first(last_setter,
+							      ASF_SINGLE1 | ASF_DOUBLE);
+			setter->flags |= ASF_SINGLE1 | ASF_DOUBLE;
+			setter->set_double = &hws_action_setter_insert_ptr;
+			setter->idx_double = i;
+			setter->set_single = &hws_action_setter_common_decap;
+			setter->idx_single = i;
+			break;
+
+		case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+			/* Double modify header list with remove and push inline */
+			setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+			setter->flags |= ASF_DOUBLE | ASF_MODIFY | ASF_INSERT;
+			setter->set_double = &hws_action_setter_tnl_l3_to_l2;
+			setter->idx_double = i;
+			break;
+
+		case MLX5HWS_ACTION_TYP_TAG:
+			/* Single TAG action, search for any room from the start */
+			setter = hws_action_setter_find_first(start_setter, ASF_SINGLE1);
+			setter->flags |= ASF_SINGLE1;
+			setter->set_single = &hws_action_setter_tag;
+			setter->idx_single = i;
+			break;
+
+		case MLX5HWS_ACTION_TYP_CTR:
+			/* Control counter action.
+			 * TODO: Currently the counter is executed first.
+			 * Support is needed for a single action counter
+			 * that is executed last, e.g. Decap + CTR.
+			 */
+			setter = hws_action_setter_find_first(start_setter, ASF_CTR);
+			setter->flags |= ASF_CTR;
+			setter->set_ctr = &hws_action_setter_ctrl_ctr;
+			setter->idx_ctr = i;
+			break;
+		default:
+			pr_warn("HWS: Invalid action type in processing action template: action_type[%d]=%d\n",
+				i, action_type[i]);
+			return -EOPNOTSUPP;
+		}
+
+		last_setter = max(setter, last_setter);
+	}
+
+	/* Set default hit on the last STE if no hit action provided */
+	if (!(last_setter->flags & ASF_HIT))
+		last_setter->set_hit = &hws_action_setter_default_hit;
+
+	at->num_of_action_stes = last_setter - start_setter + 1;
+
+	/* Check if action template doesn't require any action DWs */
+	at->only_term = (at->num_of_action_stes == 1) &&
+			!(last_setter->flags & ~(ASF_CTR | ASF_HIT));
+
+	return 0;
+}
+
+struct mlx5hws_action_template *
+mlx5hws_action_template_create(enum mlx5hws_action_type action_type[])
+{
+	struct mlx5hws_action_template *at;
+	u8 num_actions = 0;
+	int i;
+
+	at = kzalloc(sizeof(*at), GFP_KERNEL);
+	if (!at)
+		return NULL;
+
+	while (action_type[num_actions++] != MLX5HWS_ACTION_TYP_LAST)
+		;
+
+	at->num_actions = num_actions - 1;
+	at->action_type_arr = kcalloc(num_actions, sizeof(*action_type), GFP_KERNEL);
+	if (!at->action_type_arr)
+		goto free_at;
+
+	for (i = 0; i < num_actions; i++)
+		at->action_type_arr[i] = action_type[i];
+
+	return at;
+
+free_at:
+	kfree(at);
+	return NULL;
+}
+
+int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at)
+{
+	kfree(at->action_type_arr);
+	kfree(at);
+	return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h
new file mode 100644
index 000000000000..bf5c1b241006
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_ACTION_H_
+#define MLX5HWS_ACTION_H_
+
+/* Max number of STEs needed for a rule (including match) */
+#define MLX5HWS_ACTION_MAX_STE 20
+
+/* Max number of internal subactions of ipv6_ext */
+#define MLX5HWS_ACTION_IPV6_EXT_MAX_SA 4
+
+enum mlx5hws_action_stc_idx {
+	MLX5HWS_ACTION_STC_IDX_CTRL = 0,
+	MLX5HWS_ACTION_STC_IDX_HIT = 1,
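+	/* DW5..DW7 below are the up to three action slots of one STE:
+	 * a single action takes one slot, a double takes two (DW6 + DW7).
+	 */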
+	MLX5HWS_ACTION_STC_IDX_DW5 = 2,
+	MLX5HWS_ACTION_STC_IDX_DW6 = 3,
+	MLX5HWS_ACTION_STC_IDX_DW7 = 4,
+	MLX5HWS_ACTION_STC_IDX_MAX = 5,
+	/* STC Jumbo STE combo: CTR, Hit */
+	MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE = 1,
+	/* STC combo1: CTR, SINGLE, DOUBLE, Hit */
+	MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 = 3,
+	/* STC combo2: CTR, 3 x SINGLE, Hit */
+	MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 = 4,
+	/* STC combo3: CTR, TRIPLE, Hit */
+	MLX5HWS_ACTION_STC_IDX_LAST_COMBO3 = 2,
+};
+
+enum mlx5hws_action_offset {
+	MLX5HWS_ACTION_OFFSET_DW0 = 0,
+	MLX5HWS_ACTION_OFFSET_DW5 = 5,
+	MLX5HWS_ACTION_OFFSET_DW6 = 6,
+	MLX5HWS_ACTION_OFFSET_DW7 = 7,
+	MLX5HWS_ACTION_OFFSET_HIT = 3,
+	MLX5HWS_ACTION_OFFSET_HIT_LSB = 4,
+};
+
+enum {
+	MLX5HWS_ACTION_DOUBLE_SIZE = 8,
+	MLX5HWS_ACTION_INLINE_DATA_SIZE = 4,
+	MLX5HWS_ACTION_HDR_LEN_L2_MACS = 12,
+	MLX5HWS_ACTION_HDR_LEN_L2_VLAN = 4,
+	MLX5HWS_ACTION_HDR_LEN_L2_ETHER = 2,
+	MLX5HWS_ACTION_HDR_LEN_L2 = (MLX5HWS_ACTION_HDR_LEN_L2_MACS +
+				     MLX5HWS_ACTION_HDR_LEN_L2_ETHER),
+	MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN = (MLX5HWS_ACTION_HDR_LEN_L2 +
+					    MLX5HWS_ACTION_HDR_LEN_L2_VLAN),
+	MLX5HWS_ACTION_REFORMAT_DATA_SIZE = 64,
+	DECAP_L3_NUM_ACTIONS_W_NO_VLAN = 6,
+	DECAP_L3_NUM_ACTIONS_W_VLAN = 7,
+};
+
+enum mlx5hws_action_setter_flag {
+	ASF_SINGLE1 = 1 << 0,
+	ASF_SINGLE2 = 1 << 1,
+	ASF_SINGLE3 = 1 << 2,
+	ASF_DOUBLE = ASF_SINGLE2 | ASF_SINGLE3,
+	ASF_TRIPLE = ASF_SINGLE1 | ASF_DOUBLE,
+	ASF_INSERT = 1 << 3,
+	ASF_REMOVE = 1 << 4,
+	ASF_MODIFY = 1 << 5,
+	ASF_CTR = 1 << 6,
+	ASF_HIT = 1 << 7,
+};
+
+struct mlx5hws_action_default_stc {
+	struct mlx5hws_pool_chunk nop_ctr;
+	struct mlx5hws_pool_chunk nop_dw5;
+	struct mlx5hws_pool_chunk nop_dw6;
+	struct mlx5hws_pool_chunk nop_dw7;
+	struct mlx5hws_pool_chunk default_hit;
+	u32 refcount;
+};
+
+struct mlx5hws_action_shared_stc {
+	struct mlx5hws_pool_chunk stc_chunk;
+	u32 refcount;
+};
+
+struct mlx5hws_actions_apply_data {
+	struct mlx5hws_send_engine *queue;
+	struct mlx5hws_rule_action *rule_action;
+	__be32 *wqe_data;
+	struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+	u32 jump_to_action_stc;
+	struct mlx5hws_context_common_res *common_res;
+	enum mlx5hws_table_type tbl_type;
+	u32 next_direct_idx;
+	u8 require_dep;
+};
+
+struct mlx5hws_actions_wqe_setter;
+
+typedef void (*mlx5hws_action_setter_fp)(struct mlx5hws_actions_apply_data *apply,
+					 struct mlx5hws_actions_wqe_setter *setter);
+
+struct mlx5hws_actions_wqe_setter {
+	mlx5hws_action_setter_fp set_single;
+	mlx5hws_action_setter_fp set_double;
+	mlx5hws_action_setter_fp set_triple;
+	mlx5hws_action_setter_fp set_hit;
+	mlx5hws_action_setter_fp set_ctr;
+	u8 idx_single;
+	u8 idx_double;
+	u8 idx_triple;
+	u8 idx_ctr;
+	u8 idx_hit;
+	u8 stage_idx;
+	u8 flags;
+};
+
+struct mlx5hws_action_template {
+	struct mlx5hws_actions_wqe_setter setters[MLX5HWS_ACTION_MAX_STE];
+	enum mlx5hws_action_type *action_type_arr;
+	u8 num_of_action_stes;
+	u8 num_actions;
+	u8 only_term;
+};
+
+struct mlx5hws_action {
+	u8 type;
+	u8 flags;
+	struct mlx5hws_context *ctx;
+	union {
+		struct {
+			struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX];
+			union {
+				struct {
+					u32 pat_id;
+					u32 arg_id;
+					__be64 single_action;
+					u32 nope_locations;
+					u8 num_of_patterns;
+					u8 single_action_type;
+					u8 num_of_actions;
+					u8 max_num_of_actions;
+					u8 require_reparse;
+				} modify_header;
+				struct {
+					u32 arg_id;
+					u32 header_size;
+					u16 max_hdr_sz;
+					u8 num_of_hdrs;
+					u8 anchor;
+					u8 e_anchor;
+					u8 offset;
+					bool encap;
+					u8 require_reparse;
+				} reformat;
+				struct {
+					u32 obj_id;
+					u8 return_reg_id;
+				} aso;
struct { + u16 vport_num; + u16 esw_owner_vhca_id; + bool esw_owner_vhca_id_valid; + } vport; + struct { + u32 obj_id; + } dest_obj; + struct { + struct mlx5hws_cmd_forward_tbl *fw_island; + size_t num_dest; + struct mlx5hws_cmd_set_fte_dest *dest_list; + } dest_array; + struct { + u8 type; + u8 start_anchor; + u8 end_anchor; + u8 num_of_words; + bool decap; + } insert_hdr; + struct { + /* PRM start anchor from which header will be removed */ + u8 anchor; + /* Header remove offset in bytes, from the start + * anchor to the location where remove header starts. + */ + u8 offset; + /* Indicates the removed header size in bytes */ + size_t size; + } remove_header; + struct { + struct mlx5hws_matcher_action_ste *table_ste; + struct mlx5hws_action *hit_ft_action; + struct mlx5hws_definer *definer; + } range; + }; + }; + + struct ibv_flow_action *flow_action; + u32 obj_id; + struct ibv_qp *qp; + }; +}; + +const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type); + +int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx, + u8 tbl_type); + +void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx, + u8 tbl_type); + +void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst, + u16 num_of_actions); + +int mlx5hws_action_template_process(struct mlx5hws_action_template *at); + +bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx, + enum mlx5hws_action_type *user_actions, + enum mlx5hws_table_type table_type); + +int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx, + struct mlx5hws_cmd_stc_modify_attr *stc_attr, + u32 table_type, + struct mlx5hws_pool_chunk *stc); + +void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx, + u32 table_type, + struct mlx5hws_pool_chunk *stc); + +static inline void +mlx5hws_action_setter_default_single(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = + htonl(apply->common_res->default_stc->nop_dw5.offset); +} + +static inline void +mlx5hws_action_setter_default_double(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = + htonl(apply->common_res->default_stc->nop_dw6.offset); + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = + htonl(apply->common_res->default_stc->nop_dw7.offset); +} + +static inline void +mlx5hws_action_setter_default_ctr(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] = + htonl(apply->common_res->default_stc->nop_ctr.offset); +} + +static inline void +mlx5hws_action_apply_setter(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter, + bool is_jumbo) +{ + u8 num_of_actions; + + /* Set control counter */ + if (setter->set_ctr) + setter->set_ctr(apply, setter); + else + mlx5hws_action_setter_default_ctr(apply, setter); + + if (!is_jumbo) { + if (unlikely(setter->set_triple)) { + /* Set triple on match */ + setter->set_triple(apply, setter); + num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_COMBO3; + } else { + /* Set single and double on match */ + if (setter->set_single) + setter->set_single(apply, setter); + else + mlx5hws_action_setter_default_single(apply, setter); + + if 
(setter->set_double) + setter->set_double(apply, setter); + else + mlx5hws_action_setter_default_double(apply, setter); + + num_of_actions = setter->set_double ? + MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 : + MLX5HWS_ACTION_STC_IDX_LAST_COMBO2; + } + } else { + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0; + num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE; + } + + /* Set next/final hit action */ + setter->set_hit(apply, setter); + + /* Set number of actions */ + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |= + htonl(num_of_actions << 29); +} + +#endif /* MLX5HWS_ACTION_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c new file mode 100644 index 000000000000..e6ed66202a40 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" +#include "mlx5hws_buddy.h" + +static int hws_buddy_init(struct mlx5hws_buddy_mem *buddy, u32 max_order) +{ + int i, s, ret = 0; + + buddy->max_order = max_order; + + buddy->bitmap = kcalloc(buddy->max_order + 1, + sizeof(*buddy->bitmap), + GFP_KERNEL); + if (!buddy->bitmap) + return -ENOMEM; + + buddy->num_free = kcalloc(buddy->max_order + 1, + sizeof(*buddy->num_free), + GFP_KERNEL); + if (!buddy->num_free) { + ret = -ENOMEM; + goto err_out_free_bits; + } + + for (i = 0; i <= (int)buddy->max_order; ++i) { + s = 1 << (buddy->max_order - i); + + buddy->bitmap[i] = bitmap_zalloc(s, GFP_KERNEL); + if (!buddy->bitmap[i]) { + ret = -ENOMEM; + goto err_out_free_num_free; + } + } + + bitmap_set(buddy->bitmap[buddy->max_order], 0, 1); + buddy->num_free[buddy->max_order] = 1; + + return 0; + +err_out_free_num_free: + for (i = 0; i <= (int)buddy->max_order; ++i) + bitmap_free(buddy->bitmap[i]); + + kfree(buddy->num_free); + +err_out_free_bits: + kfree(buddy->bitmap); + return ret; +} + +struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order) +{ + struct mlx5hws_buddy_mem *buddy; + + buddy = kzalloc(sizeof(*buddy), GFP_KERNEL); + if (!buddy) + return NULL; + + if (hws_buddy_init(buddy, max_order)) + goto free_buddy; + + return buddy; + +free_buddy: + kfree(buddy); + return NULL; +} + +void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy) +{ + int i; + + for (i = 0; i <= (int)buddy->max_order; ++i) + bitmap_free(buddy->bitmap[i]); + + kfree(buddy->num_free); + kfree(buddy->bitmap); +} + +static int hws_buddy_find_free_seg(struct mlx5hws_buddy_mem *buddy, + u32 start_order, + u32 *segment, + u32 *order) +{ + unsigned int seg, order_iter, m; + + for (order_iter = start_order; + order_iter <= buddy->max_order; ++order_iter) { + if (!buddy->num_free[order_iter]) + continue; + + m = 1 << (buddy->max_order - order_iter); + seg = find_first_bit(buddy->bitmap[order_iter], m); + + if (WARN(seg >= m, + "ICM Buddy: failed finding free mem for order %d\n", + order_iter)) + return -ENOMEM; + + break; + } + + if (order_iter > buddy->max_order) + return -ENOMEM; + + *segment = seg; + *order = order_iter; + return 0; +} + +int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order) +{ + 
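+	/* Take the smallest free segment of order >= 'order' and split it
+	 * down to the requested order, marking the buddy half free at each
+	 * level. E.g. carving order 0 out of a free order-2 block leaves
+	 * one order-1 and one order-0 buddy marked free.
+	 */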
u32 seg, order_iter, err; + + err = hws_buddy_find_free_seg(buddy, order, &seg, &order_iter); + if (err) + return err; + + bitmap_clear(buddy->bitmap[order_iter], seg, 1); + --buddy->num_free[order_iter]; + + while (order_iter > order) { + --order_iter; + seg <<= 1; + bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1); + ++buddy->num_free[order_iter]; + } + + seg <<= order; + + return seg; +} + +void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order) +{ + seg >>= order; + + while (test_bit(seg ^ 1, buddy->bitmap[order])) { + bitmap_clear(buddy->bitmap[order], seg ^ 1, 1); + --buddy->num_free[order]; + seg >>= 1; + ++order; + } + + bitmap_set(buddy->bitmap[order], seg, 1); + ++buddy->num_free[order]; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h new file mode 100644 index 000000000000..338c44bbedaf --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_BUDDY_H_ +#define MLX5HWS_BUDDY_H_ + +struct mlx5hws_buddy_mem { + unsigned long **bitmap; + unsigned int *num_free; + u32 max_order; +}; + +struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order); + +void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy); + +int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order); + +void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order); + +#endif /* MLX5HWS_BUDDY_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c new file mode 100644 index 000000000000..bd52b05db367 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c @@ -0,0 +1,997 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx) +{ + /* assign random queue */ + return get_random_u8() % mlx5hws_bwc_queues(ctx); +} + +static u16 +hws_bwc_get_burst_th(struct mlx5hws_context *ctx, u16 queue_id) +{ + return min(ctx->send_queue[queue_id].num_entries / 2, + MLX5HWS_BWC_MATCHER_REHASH_BURST_TH); +} + +static struct mutex * +hws_bwc_get_queue_lock(struct mlx5hws_context *ctx, u16 idx) +{ + return &ctx->bwc_send_queue_locks[idx]; +} + +static void hws_bwc_lock_all_queues(struct mlx5hws_context *ctx) +{ + u16 bwc_queues = mlx5hws_bwc_queues(ctx); + struct mutex *queue_lock; /* Protect the queue */ + int i; + + for (i = 0; i < bwc_queues; i++) { + queue_lock = hws_bwc_get_queue_lock(ctx, i); + mutex_lock(queue_lock); + } +} + +static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx) +{ + u16 bwc_queues = mlx5hws_bwc_queues(ctx); + struct mutex *queue_lock; /* Protect the queue */ + int i = bwc_queues; + + while (i--) { + queue_lock = hws_bwc_get_queue_lock(ctx, i); + mutex_unlock(queue_lock); + } +} + +static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr, + u32 priority, + u8 size_log) +{ + memset(attr, 0, sizeof(*attr)); + + attr->priority = priority; + attr->optimize_using_rule_idx = 0; + attr->mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE; + attr->optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY; + attr->insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH; + attr->distribute_mode = 
MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH; + attr->rule.num_log = size_log; + attr->resizable = true; + attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM; +} + +int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_table *table, + u32 priority, + u8 match_criteria_enable, + struct mlx5hws_match_parameters *mask, + enum mlx5hws_action_type action_types[]) +{ + enum mlx5hws_action_type init_action_types[1] = { MLX5HWS_ACTION_TYP_LAST }; + struct mlx5hws_context *ctx = table->ctx; + u16 bwc_queues = mlx5hws_bwc_queues(ctx); + struct mlx5hws_matcher_attr attr = {0}; + int i; + + bwc_matcher->rules = kcalloc(bwc_queues, sizeof(*bwc_matcher->rules), GFP_KERNEL); + if (!bwc_matcher->rules) + goto err; + + for (i = 0; i < bwc_queues; i++) + INIT_LIST_HEAD(&bwc_matcher->rules[i]); + + hws_bwc_matcher_init_attr(&attr, + priority, + MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG); + + bwc_matcher->priority = priority; + bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG; + + /* create dummy action template */ + bwc_matcher->at[0] = + mlx5hws_action_template_create(action_types ? + action_types : init_action_types); + if (!bwc_matcher->at[0]) { + mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n"); + goto free_bwc_matcher_rules; + } + + bwc_matcher->num_of_at = 1; + + bwc_matcher->mt = mlx5hws_match_template_create(ctx, + mask->match_buf, + mask->match_sz, + match_criteria_enable); + if (!bwc_matcher->mt) { + mlx5hws_err(table->ctx, "BWC matcher: failed creating match template\n"); + goto free_at; + } + + bwc_matcher->matcher = mlx5hws_matcher_create(table, + &bwc_matcher->mt, 1, + &bwc_matcher->at[0], + bwc_matcher->num_of_at, + &attr); + if (!bwc_matcher->matcher) { + mlx5hws_err(table->ctx, "BWC matcher: failed creating HWS matcher\n"); + goto free_mt; + } + + return 0; + +free_mt: + mlx5hws_match_template_destroy(bwc_matcher->mt); +free_at: + mlx5hws_action_template_destroy(bwc_matcher->at[0]); +free_bwc_matcher_rules: + kfree(bwc_matcher->rules); +err: + return -EINVAL; +} + +struct mlx5hws_bwc_matcher * +mlx5hws_bwc_matcher_create(struct mlx5hws_table *table, + u32 priority, + u8 match_criteria_enable, + struct mlx5hws_match_parameters *mask) +{ + struct mlx5hws_bwc_matcher *bwc_matcher; + bool is_complex; + int ret; + + if (!mlx5hws_context_bwc_supported(table->ctx)) { + mlx5hws_err(table->ctx, + "BWC matcher: context created w/o BWC API compatibility\n"); + return NULL; + } + + bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL); + if (!bwc_matcher) + return NULL; + + /* Check if the required match params can be all matched + * in single STE, otherwise complex matcher is needed. 
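+	 * (i.e. a mask that a single definer cannot describe makes the
+	 * matcher complex)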
+ */ + + is_complex = mlx5hws_bwc_match_params_is_complex(table->ctx, match_criteria_enable, mask); + if (is_complex) + ret = mlx5hws_bwc_matcher_create_complex(bwc_matcher, + table, + priority, + match_criteria_enable, + mask); + else + ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher, + table, + priority, + match_criteria_enable, + mask, + NULL); + if (ret) + goto free_bwc_matcher; + + return bwc_matcher; + +free_bwc_matcher: + kfree(bwc_matcher); + + return NULL; +} + +int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + int i; + + mlx5hws_matcher_destroy(bwc_matcher->matcher); + bwc_matcher->matcher = NULL; + + for (i = 0; i < bwc_matcher->num_of_at; i++) + mlx5hws_action_template_destroy(bwc_matcher->at[i]); + + mlx5hws_match_template_destroy(bwc_matcher->mt); + kfree(bwc_matcher->rules); + + return 0; +} + +int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + if (bwc_matcher->num_of_rules) + mlx5hws_err(bwc_matcher->matcher->tbl->ctx, + "BWC matcher destroy: matcher still has %d rules\n", + bwc_matcher->num_of_rules); + + mlx5hws_bwc_matcher_destroy_simple(bwc_matcher); + + kfree(bwc_matcher); + return 0; +} + +static int hws_bwc_queue_poll(struct mlx5hws_context *ctx, + u16 queue_id, + u32 *pending_rules, + bool drain) +{ + struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH]; + u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id); + bool got_comp = *pending_rules >= burst_th; + bool queue_full; + int err = 0; + int ret; + int i; + + /* Check if there are any completions at all */ + if (!got_comp && !drain) + return 0; + + queue_full = mlx5hws_send_engine_full(&ctx->send_queue[queue_id]); + while (queue_full || ((got_comp || drain) && *pending_rules)) { + ret = mlx5hws_send_queue_poll(ctx, queue_id, comp, burst_th); + if (unlikely(ret < 0)) { + mlx5hws_err(ctx, "BWC poll error: polling queue %d returned %d\n", + queue_id, ret); + return -EINVAL; + } + + if (ret) { + (*pending_rules) -= ret; + for (i = 0; i < ret; i++) { + if (unlikely(comp[i].status != MLX5HWS_FLOW_OP_SUCCESS)) { + mlx5hws_err(ctx, + "BWC poll error: polling queue %d returned completion with error\n", + queue_id); + err = -EINVAL; + } + } + queue_full = false; + } + + got_comp = !!ret; + } + + return err; +} + +void +mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher, + u16 bwc_queue_idx, + u32 flow_source, + struct mlx5hws_rule_attr *rule_attr) +{ + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + + /* no use of INSERT_BY_INDEX in bwc rule */ + rule_attr->rule_idx = 0; + + /* notify HW at each rule insertion/deletion */ + rule_attr->burst = 0; + + /* We don't need user data, but the API requires it to exist */ + rule_attr->user_data = (void *)0xFACADE; + + rule_attr->queue_id = mlx5hws_bwc_get_queue_id(ctx, bwc_queue_idx); + rule_attr->flow_source = flow_source; +} + +struct mlx5hws_bwc_rule * +mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + struct mlx5hws_bwc_rule *bwc_rule; + + bwc_rule = kzalloc(sizeof(*bwc_rule), GFP_KERNEL); + if (unlikely(!bwc_rule)) + goto out_err; + + bwc_rule->rule = kzalloc(sizeof(*bwc_rule->rule), GFP_KERNEL); + if (unlikely(!bwc_rule->rule)) + goto free_rule; + + bwc_rule->bwc_matcher = bwc_matcher; + return bwc_rule; + +free_rule: + kfree(bwc_rule); +out_err: + return NULL; +} + +void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule) +{ + if (likely(bwc_rule->rule)) + kfree(bwc_rule->rule); + kfree(bwc_rule); +} + +static void 
hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + + bwc_matcher->num_of_rules++; + bwc_rule->bwc_queue_idx = idx; + list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]); +} + +static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + + bwc_matcher->num_of_rules--; + list_del_init(&bwc_rule->list_node); +} + +static int +hws_bwc_rule_destroy_hws_async(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_rule_attr *attr) +{ + return mlx5hws_rule_destroy(bwc_rule->rule, attr); +} + +static int +hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_rule_attr *rule_attr) +{ + struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_flow_op_result completion; + int ret; + + ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr); + if (unlikely(ret)) + return ret; + + do { + ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1); + } while (ret != 1); + + if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS || + (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED && + bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) { + mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n", + completion.status, bwc_rule->rule->status); + return -EINVAL; + } + + return 0; +} + +int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + u16 idx = bwc_rule->bwc_queue_idx; + struct mlx5hws_rule_attr attr; + struct mutex *queue_lock; /* Protect the queue */ + int ret; + + mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &attr); + + queue_lock = hws_bwc_get_queue_lock(ctx, idx); + + mutex_lock(queue_lock); + + ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr); + hws_bwc_rule_list_remove(bwc_rule); + + mutex_unlock(queue_lock); + + return ret; +} + +int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule) +{ + int ret; + + ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule); + + mlx5hws_bwc_rule_free(bwc_rule); + return ret; +} + +static int +hws_bwc_rule_create_async(struct mlx5hws_bwc_rule *bwc_rule, + u32 *match_param, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[], + struct mlx5hws_rule_attr *rule_attr) +{ + return mlx5hws_rule_create(bwc_rule->bwc_matcher->matcher, + 0, /* only one match template supported */ + match_param, + at_idx, + rule_actions, + rule_attr, + bwc_rule->rule); +} + +static int +hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule, + u32 *match_param, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[], + struct mlx5hws_rule_attr *rule_attr) + +{ + struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx; + u32 expected_completions = 1; + int ret; + + ret = hws_bwc_rule_create_async(bwc_rule, match_param, + at_idx, rule_actions, + rule_attr); + if (unlikely(ret)) + return ret; + + ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true); + + return ret; +} + +static int +hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[], + struct mlx5hws_rule_attr *rule_attr) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + u32 expected_completions = 1; + int 
ret; + + ret = mlx5hws_rule_action_update(bwc_rule->rule, + at_idx, + rule_actions, + rule_attr); + if (unlikely(ret)) + return ret; + + ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true); + if (unlikely(ret)) + mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret); + + return ret; +} + +static bool +hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps; + + return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >= + caps->ste_alloc_log_max - 1; +} + +static bool +hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher, + u32 num_of_rules) +{ + if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher))) + return false; + + if (unlikely((num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >= + (1UL << bwc_matcher->size_log))) + return true; + + return false; +} + +static void +hws_bwc_rule_actions_to_action_types(struct mlx5hws_rule_action rule_actions[], + enum mlx5hws_action_type action_types[]) +{ + int i = 0; + + for (i = 0; + rule_actions[i].action && (rule_actions[i].action->type != MLX5HWS_ACTION_TYP_LAST); + i++) { + action_types[i] = (enum mlx5hws_action_type)rule_actions[i].action->type; + } + + action_types[i] = MLX5HWS_ACTION_TYP_LAST; +} + +static int +hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_rule_action rule_actions[]) +{ + enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS]; + + hws_bwc_rule_actions_to_action_types(rule_actions, action_types); + + bwc_matcher->at[bwc_matcher->num_of_at] = + mlx5hws_action_template_create(action_types); + + if (unlikely(!bwc_matcher->at[bwc_matcher->num_of_at])) + return -ENOMEM; + + bwc_matcher->num_of_at++; + return 0; +} + +static int +hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_cmd_query_caps *caps = ctx->caps; + + if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher))) { + mlx5hws_err(ctx, "Can't resize matcher: depth exceeds limit %d\n", + caps->rtc_log_depth_max); + return -ENOMEM; + } + + bwc_matcher->size_log = + min(bwc_matcher->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP, + caps->ste_alloc_log_max - MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH); + + return 0; +} + +static int +hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_rule_action rule_actions[]) +{ + enum mlx5hws_action_type *action_type_arr; + int i, j; + + /* start from index 1 - first action template is a dummy */ + for (i = 1; i < bwc_matcher->num_of_at; i++) { + j = 0; + action_type_arr = bwc_matcher->at[i]->action_type_arr; + + while (rule_actions[j].action && + rule_actions[j].action->type != MLX5HWS_ACTION_TYP_LAST) { + if (action_type_arr[j] != rule_actions[j].action->type) + break; + j++; + } + + if (action_type_arr[j] == MLX5HWS_ACTION_TYP_LAST && + (!rule_actions[j].action || + rule_actions[j].action->type == MLX5HWS_ACTION_TYP_LAST)) + return i; + } + + return -1; +} + +static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + u16 bwc_queues = mlx5hws_bwc_queues(ctx); + struct mlx5hws_bwc_rule **bwc_rules; + struct mlx5hws_rule_attr rule_attr; + u32 *pending_rules; + int i, j, ret = 0; + bool all_done; + u16 burst_th; + + mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr); + + pending_rules = 
kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL); + if (!pending_rules) + return -ENOMEM; + + bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL); + if (!bwc_rules) { + ret = -ENOMEM; + goto free_pending_rules; + } + + for (i = 0; i < bwc_queues; i++) { + if (list_empty(&bwc_matcher->rules[i])) + bwc_rules[i] = NULL; + else + bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i], + struct mlx5hws_bwc_rule, + list_node); + } + + do { + all_done = true; + + for (i = 0; i < bwc_queues; i++) { + rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i); + burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id); + + for (j = 0; j < burst_th && bwc_rules[i]; j++) { + rule_attr.burst = !!((j + 1) % burst_th); + ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher, + bwc_rules[i]->rule, + &rule_attr); + if (unlikely(ret)) { + mlx5hws_err(ctx, + "Moving BWC rule failed during rehash (%d)\n", + ret); + goto free_bwc_rules; + } + + all_done = false; + pending_rules[i]++; + bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node, + &bwc_matcher->rules[i]) ? + NULL : list_next_entry(bwc_rules[i], list_node); + + ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id, + &pending_rules[i], false); + if (unlikely(ret)) + goto free_bwc_rules; + } + } + } while (!all_done); + + /* drain all the bwc queues */ + for (i = 0; i < bwc_queues; i++) { + if (pending_rules[i]) { + u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i); + + mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]); + ret = hws_bwc_queue_poll(ctx, queue_id, + &pending_rules[i], true); + if (unlikely(ret)) + goto free_bwc_rules; + } + } + +free_bwc_rules: + kfree(bwc_rules); +free_pending_rules: + kfree(pending_rules); + + return ret; +} + +static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + return hws_bwc_matcher_move_all_simple(bwc_matcher); +} + +static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_matcher_attr matcher_attr = {0}; + struct mlx5hws_matcher *old_matcher; + struct mlx5hws_matcher *new_matcher; + int ret; + + hws_bwc_matcher_init_attr(&matcher_attr, + bwc_matcher->priority, + bwc_matcher->size_log); + + old_matcher = bwc_matcher->matcher; + new_matcher = mlx5hws_matcher_create(old_matcher->tbl, + &bwc_matcher->mt, 1, + bwc_matcher->at, + bwc_matcher->num_of_at, + &matcher_attr); + if (!new_matcher) { + mlx5hws_err(ctx, "Rehash error: matcher creation failed\n"); + return -ENOMEM; + } + + ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher); + if (ret) { + mlx5hws_err(ctx, "Rehash error: failed setting resize target\n"); + return ret; + } + + ret = hws_bwc_matcher_move_all(bwc_matcher); + if (ret) { + mlx5hws_err(ctx, "Rehash error: moving rules failed\n"); + return -ENOMEM; + } + + bwc_matcher->matcher = new_matcher; + mlx5hws_matcher_destroy(old_matcher); + + return 0; +} + +static int +hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + u32 num_of_rules; + int ret; + + /* If the current matcher size is already at its max size, we can't + * do the rehash. Skip it and try adding the rule again - perhaps + * there was some change. + */ + if (hws_bwc_matcher_size_maxed_out(bwc_matcher)) + return 0; + + /* It is possible that other rule has already performed rehash. + * Need to check again if we really need rehash. + * If the reason for rehash was size, but not any more - skip rehash. 
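+	 * (Another thread may have already grown the matcher while this
+	 * one was waiting on the all-queues locks.)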
+	 */
+	num_of_rules = __atomic_load_n(&bwc_matcher->num_of_rules, __ATOMIC_RELAXED);
+	if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))
+		return 0;
+
+	/* Now that all the checking is done - do the rehash:
+	 * - extend match RTC size
+	 * - create new matcher
+	 * - move all the rules to the new matcher
+	 * - destroy the old matcher
+	 */
+
+	ret = hws_bwc_matcher_extend_size(bwc_matcher);
+	if (ret)
+		return ret;
+
+	return hws_bwc_matcher_move(bwc_matcher);
+}
+
+static int
+hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	/* Rehash by action template doesn't require any additional checking.
+	 * The bwc_matcher already contains the new action template.
+	 * Just do the usual rehash:
+	 * - create new matcher
+	 * - move all the rules to the new matcher
+	 * - destroy the old matcher
+	 */
+	return hws_bwc_matcher_move(bwc_matcher);
+}
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+				   u32 *match_param,
+				   struct mlx5hws_rule_action rule_actions[],
+				   u32 flow_source,
+				   u16 bwc_queue_idx)
+{
+	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+	struct mlx5hws_rule_attr rule_attr;
+	struct mutex *queue_lock; /* Protect the queue */
+	u32 num_of_rules;
+	int ret = 0;
+	int at_idx;
+
+	mlx5hws_bwc_rule_fill_attr(bwc_matcher, bwc_queue_idx, flow_source, &rule_attr);
+
+	queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+
+	mutex_lock(queue_lock);
+
+	/* check if rehash needed due to missing action template */
+	at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+	if (unlikely(at_idx < 0)) {
+		/* we need to extend BWC matcher action templates array */
+		mutex_unlock(queue_lock);
+		hws_bwc_lock_all_queues(ctx);
+
+		ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+		if (unlikely(ret)) {
+			hws_bwc_unlock_all_queues(ctx);
+			return ret;
+		}
+
+		/* action templates array was extended, we need the last idx */
+		at_idx = bwc_matcher->num_of_at - 1;
+
+		ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+						bwc_matcher->at[at_idx]);
+		if (unlikely(ret)) {
+			/* Action template attach failed, possibly due to
+			 * requiring more action STEs.
+			 * Need to attempt creating new matcher with all
+			 * the action templates, including the new one.
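+			 * (attach_at can fail when the new template needs
+			 * more action STEs than this matcher was created
+			 * with; recreating the matcher makes room for all
+			 * the action templates at once.)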
+			 */
+			ret = hws_bwc_matcher_rehash_at(bwc_matcher);
+			if (unlikely(ret)) {
+				mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+				bwc_matcher->at[at_idx] = NULL;
+				bwc_matcher->num_of_at--;
+
+				hws_bwc_unlock_all_queues(ctx);
+
+				mlx5hws_err(ctx,
+					    "BWC rule insertion: rehash AT failed (%d)\n", ret);
+				return ret;
+			}
+		}
+
+		hws_bwc_unlock_all_queues(ctx);
+		mutex_lock(queue_lock);
+	}
+
+	/* check if the number of rules requires rehash */
+	num_of_rules = bwc_matcher->num_of_rules;
+
+	if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
+		mutex_unlock(queue_lock);
+
+		hws_bwc_lock_all_queues(ctx);
+		ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+		hws_bwc_unlock_all_queues(ctx);
+
+		if (ret) {
+			mlx5hws_err(ctx, "BWC rule insertion: rehash size [%d -> %d] failed (%d)\n",
+				    bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+				    bwc_matcher->size_log,
+				    ret);
+			return ret;
+		}
+
+		mutex_lock(queue_lock);
+	}
+
+	ret = hws_bwc_rule_create_sync(bwc_rule,
+				       match_param,
+				       at_idx,
+				       rule_actions,
+				       &rule_attr);
+	if (likely(!ret)) {
+		hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+		mutex_unlock(queue_lock);
+		return 0; /* rule inserted successfully */
+	}
+
+	/* At this point the rule wasn't added.
+	 * It could be because there was collision, or some other problem.
+	 * If we don't dive deeper than the API, the only thing we know is
+	 * that the completion came back with an error status.
+	 * Try rehash by size and insert rule again - last chance.
+	 */
+
+	mutex_unlock(queue_lock);
+
+	hws_bwc_lock_all_queues(ctx);
+	ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+	hws_bwc_unlock_all_queues(ctx);
+
+	if (ret) {
+		mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
+		return ret;
+	}
+
+	/* Rehash done, but we still have that pesky rule to add */
+	mutex_lock(queue_lock);
+
+	ret = hws_bwc_rule_create_sync(bwc_rule,
+				       match_param,
+				       at_idx,
+				       rule_actions,
+				       &rule_attr);
+
+	if (unlikely(ret)) {
+		mutex_unlock(queue_lock);
+		mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
+		return ret;
+	}
+
+	hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+	mutex_unlock(queue_lock);
+
+	return 0;
+}
+
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+			struct mlx5hws_match_parameters *params,
+			u32 flow_source,
+			struct mlx5hws_rule_action rule_actions[])
+{
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+	struct mlx5hws_bwc_rule *bwc_rule;
+	u16 bwc_queue_idx;
+	int ret;
+
+	if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+		mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+		return NULL;
+	}
+
+	bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher);
+	if (unlikely(!bwc_rule))
+		return NULL;
+
+	bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
+
+	ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+					     params->match_buf,
+					     rule_actions,
+					     flow_source,
+					     bwc_queue_idx);
+	if (unlikely(ret)) {
+		mlx5hws_bwc_rule_free(bwc_rule);
+		return NULL;
+	}
+
+	return bwc_rule;
+}
+
+static int
+hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+			   struct mlx5hws_rule_action rule_actions[])
+{
+	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+	struct mlx5hws_rule_attr rule_attr;
+	struct mutex *queue_lock; /* Protect the queue */
+	int at_idx, ret;
+	u16 idx;
+
+	idx = bwc_rule->bwc_queue_idx;
+
+	mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &rule_attr);
+	queue_lock = hws_bwc_get_queue_lock(ctx,
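+	/* The update flow below mirrors the create path above: find_at();
+	 * on a miss take all the queue locks, re-check find_at(), then
+	 * extend_at() and attach_at() (falling back to rehash_at() on
+	 * failure); finally rule_update_sync() under the queue lock.
+	 * Unlike create, a failed update is not retried via rehash_size().
+	 */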
idx); + + mutex_lock(queue_lock); + + /* check if rehash needed due to missing action template */ + at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions); + if (unlikely(at_idx < 0)) { + /* we need to extend BWC matcher action templates array */ + mutex_unlock(queue_lock); + hws_bwc_lock_all_queues(ctx); + + /* check again - perhaps other thread already did extend_at */ + at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions); + if (likely(at_idx < 0)) { + ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions); + if (unlikely(ret)) { + hws_bwc_unlock_all_queues(ctx); + mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)", ret); + return -EINVAL; + } + + /* action templates array was extended, we need the last idx */ + at_idx = bwc_matcher->num_of_at - 1; + + ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher, + bwc_matcher->at[at_idx]); + if (unlikely(ret)) { + /* Action template attach failed, possibly due to + * requiring more action STEs. + * Need to attempt creating new matcher with all + * the action templates, including the new one. + */ + ret = hws_bwc_matcher_rehash_at(bwc_matcher); + if (unlikely(ret)) { + mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]); + bwc_matcher->at[at_idx] = NULL; + bwc_matcher->num_of_at--; + + hws_bwc_unlock_all_queues(ctx); + + mlx5hws_err(ctx, + "BWC rule update: rehash AT failed (%d)\n", + ret); + return ret; + } + } + } + + hws_bwc_unlock_all_queues(ctx); + mutex_lock(queue_lock); + } + + ret = hws_bwc_rule_update_sync(bwc_rule, + at_idx, + rule_actions, + &rule_attr); + mutex_unlock(queue_lock); + + if (unlikely(ret)) + mlx5hws_err(ctx, "BWC rule: update failed (%d)\n", ret); + + return ret; +} + +int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_rule_action rule_actions[]) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + + if (unlikely(!mlx5hws_context_bwc_supported(ctx))) { + mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n"); + return -EINVAL; + } + + return hws_bwc_rule_action_update(bwc_rule, rule_actions); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h new file mode 100644 index 000000000000..4fe8c32d8fbe --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_BWC_H_ +#define MLX5HWS_BWC_H_ + +#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1 +#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1 +#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70 +#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32 +#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255 + +#define MLX5HWS_BWC_MAX_ACTS 16 + +struct mlx5hws_bwc_matcher { + struct mlx5hws_matcher *matcher; + struct mlx5hws_match_template *mt; + struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM]; + u8 num_of_at; + u16 priority; + u8 size_log; + u32 num_of_rules; /* atomically accessed */ + struct list_head *rules; +}; + +struct mlx5hws_bwc_rule { + struct mlx5hws_bwc_matcher *bwc_matcher; + struct mlx5hws_rule *rule; + u16 bwc_queue_idx; + struct list_head list_node; +}; + +int +mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_table *table, + u32 priority, + u8 match_criteria_enable, + struct 
mlx5hws_match_parameters *mask,
+				  enum mlx5hws_action_type action_types[]);
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+				   u32 *match_param,
+				   struct mlx5hws_rule_action rule_actions[],
+				   u32 flow_source,
+				   u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);
+
+void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+				u16 bwc_queue_idx,
+				u32 flow_source,
+				struct mlx5hws_rule_attr *rule_attr);
+
+static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
+{
+	/* Besides the control queue, half of the queues are
+	 * regular HWS queues, and the other half are BWC queues.
+	 */
+	return (ctx->queues - 1) / 2;
+}
+
+static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
+{
+	return idx + mlx5hws_bwc_queues(ctx);
+}
+
+#endif /* MLX5HWS_BWC_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
new file mode 100644
index 000000000000..bb563f50ef09
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+					 u8 match_criteria_enable,
+					 struct mlx5hws_match_parameters *mask)
+{
+	struct mlx5hws_definer match_layout = {0};
+	struct mlx5hws_match_template *mt;
+	bool is_complex = false;
+	int ret;
+
+	if (!match_criteria_enable)
+		return false; /* empty matcher */
+
+	mt = mlx5hws_match_template_create(ctx,
+					   mask->match_buf,
+					   mask->match_sz,
+					   match_criteria_enable);
+	if (!mt) {
+		mlx5hws_err(ctx, "BWC: failed creating match template\n");
+		return false;
+	}
+
+	ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+	if (ret) {
+		/* The only case that we're interested in is E2BIG,
+		 * which means that the match parameters need to be
+		 * split into a complex matcher.
+		 * For all other cases (good or bad) - just return false
+		 * and let the usual match creation path handle it,
+		 * both for good and bad flows.
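+		 * (E2BIG comes from the definer layout step: the mask
+		 * requires more fields than a single definer can select,
+		 * so the matcher would have to be split - hence "complex".)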
+		 */
+		if (ret == -E2BIG) {
+			is_complex = true;
+			mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n");
+		} else {
+			mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+		}
+	}
+
+	mlx5hws_match_template_destroy(mt);
+
+	return is_complex;
+}
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+				       struct mlx5hws_table *table,
+				       u32 priority,
+				       u8 match_criteria_enable,
+				       struct mlx5hws_match_parameters *mask)
+{
+	mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n");
+	return -EOPNOTSUPP;
+}
+
+void
+mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	/* nothing to do here */
+}
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+				    struct mlx5hws_match_parameters *params,
+				    u32 flow_source,
+				    struct mlx5hws_rule_action rule_actions[],
+				    u16 bwc_queue_idx)
+{
+	mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx,
+		    "Complex rule is not supported yet\n");
+	return -EOPNOTSUPP;
+}
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
+{
+	return 0;
+}
+
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+		    "Moving complex rule is not supported yet\n");
+	return -EOPNOTSUPP;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
new file mode 100644
index 000000000000..068ee8118609
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BWC_COMPLEX_H_
+#define MLX5HWS_BWC_COMPLEX_H_
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+					 u8 match_criteria_enable,
+					 struct mlx5hws_match_parameters *mask);
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+				       struct mlx5hws_table *table,
+				       u32 priority,
+				       u8 match_criteria_enable,
+				       struct mlx5hws_match_parameters *mask);
+
+void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+				    struct mlx5hws_match_parameters *params,
+				    u32 flow_source,
+				    struct mlx5hws_rule_action rule_actions[],
+				    u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule);
+
+#endif /* MLX5HWS_BWC_COMPLEX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c
new file mode 100644
index 000000000000..2c7b14172049
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c
@@ -0,0 +1,1300 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static enum mlx5_ifc_flow_destination_type
+hws_cmd_dest_type_to_ifc_dest_type(enum mlx5_flow_destination_type type)
+{
+	switch (type) {
+	case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+		return MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
+	case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+		return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	case MLX5_FLOW_DESTINATION_TYPE_TIR:
+		return MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
+	case
MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER: + return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER; + case MLX5_FLOW_DESTINATION_TYPE_UPLINK: + return MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK; + case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE: + return MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE; + case MLX5_FLOW_DESTINATION_TYPE_NONE: + case MLX5_FLOW_DESTINATION_TYPE_PORT: + case MLX5_FLOW_DESTINATION_TYPE_COUNTER: + case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM: + case MLX5_FLOW_DESTINATION_TYPE_RANGE: + default: + pr_warn("HWS: unknown flow dest type %d\n", type); + return 0; + } +}; + +static int hws_cmd_general_obj_destroy(struct mlx5_core_dev *mdev, + u32 object_type, + u32 object_id) +{ + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; + + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, object_type); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, object_id); + + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ft_create_attr *ft_attr, + u32 *table_id) +{ + u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; + void *ft_ctx; + int ret; + + MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); + MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type); + + ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context); + MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level); + MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid); + MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en); + MLX5_SET(flow_table_context, ft_ctx, decap_en, ft_attr->decap_en); + + ret = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out); + if (ret) + return ret; + + *table_id = MLX5_GET(create_flow_table_out, out, table_id); + + return 0; +} + +int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ft_modify_attr *ft_attr, + u32 table_id) +{ + u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0}; + void *ft_ctx; + + MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE); + MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type); + MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs); + MLX5_SET(modify_flow_table_in, in, table_id, table_id); + + ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context); + + MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action); + MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id); + MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_0, ft_attr->rtc_id_0); + MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_1, ft_attr->rtc_id_1); + + return mlx5_cmd_exec_in(mdev, modify_flow_table, in); +} + +int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev, + u32 table_id, + struct mlx5hws_cmd_ft_query_attr *ft_attr, + u64 *icm_addr_0, u64 *icm_addr_1) +{ + u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0}; + void *ft_ctx; + int ret; + + MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE); + MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type); + MLX5_SET(query_flow_table_in, in, table_id, table_id); + + ret = mlx5_cmd_exec_inout(mdev, query_flow_table, in, out); + if (ret) + return ret; + + ft_ctx = 
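+	/* (Note on the create/modify pair above: HWS tables are created
+	 *  with rtc_valid set, and flow_table_modify() is what later points
+	 *  the table's hws.rtc_id_0/rtc_id_1 fields at the active matcher's
+	 *  rule table contexts.)
+	 */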
MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context); + *icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_0); + *icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_1); + + return ret; +} + +int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev, + u8 fw_ft_type, u32 table_id) +{ + u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0}; + + MLX5_SET(destroy_flow_table_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE); + MLX5_SET(destroy_flow_table_in, in, table_type, fw_ft_type); + MLX5_SET(destroy_flow_table_in, in, table_id, table_id); + + return mlx5_cmd_exec_in(mdev, destroy_flow_table, in); +} + +void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev, + u32 table_id) +{ + hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id); +} + +static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_fg_attr *fg_attr, + u32 *group_id) +{ + u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0}; + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + u32 *in; + int ret; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); + MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type); + MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id); + + ret = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out); + if (ret) + goto out; + + *group_id = MLX5_GET(create_flow_group_out, out, group_id); + +out: + kvfree(in); + return ret; +} + +static int hws_cmd_flow_group_destroy(struct mlx5_core_dev *mdev, + u32 ft_id, u32 fg_id, u8 ft_type) +{ + u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {}; + + MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP); + MLX5_SET(destroy_flow_group_in, in, table_type, ft_type); + MLX5_SET(destroy_flow_group_in, in, table_id, ft_id); + MLX5_SET(destroy_flow_group_in, in, group_id, fg_id); + + return mlx5_cmd_exec_in(mdev, destroy_flow_group, in); +} + +int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id, + u32 group_id, + struct mlx5hws_cmd_set_fte_attr *fte_attr) +{ + u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0}; + void *in_flow_context; + u32 dest_entry_sz; + u32 total_dest_sz; + u32 action_flags; + u8 *in_dests; + u32 inlen; + u32 *in; + int ret; + u32 i; + + dest_entry_sz = fte_attr->extended_dest ? 
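+			/* (the extended format additionally carries a
+			 *  per-destination packet_reformat_id; see the
+			 *  MLX5HWS_CMD_EXT_DEST_REFORMAT handling below) */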
+ MLX5_ST_SZ_BYTES(extended_dest_format) : + MLX5_ST_SZ_BYTES(dest_format); + total_dest_sz = dest_entry_sz * fte_attr->dests_num; + inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE); + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); + MLX5_SET(set_fte_in, in, table_type, table_type); + MLX5_SET(set_fte_in, in, table_id, table_id); + + in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); + MLX5_SET(flow_context, in_flow_context, group_id, group_id); + MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source); + MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest); + MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level); + + action_flags = fte_attr->action_flags; + MLX5_SET(flow_context, in_flow_context, action, action_flags); + + if (action_flags & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { + MLX5_SET(flow_context, in_flow_context, + packet_reformat_id, fte_attr->packet_reformat_id); + } + + if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) { + MLX5_SET(flow_context, in_flow_context, + encrypt_decrypt_type, fte_attr->encrypt_decrypt_type); + MLX5_SET(flow_context, in_flow_context, + encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id); + } + + if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + in_dests = (u8 *)MLX5_ADDR_OF(flow_context, in_flow_context, destination); + + for (i = 0; i < fte_attr->dests_num; i++) { + struct mlx5hws_cmd_set_fte_dest *dest = &fte_attr->dests[i]; + enum mlx5_ifc_flow_destination_type ifc_dest_type = + hws_cmd_dest_type_to_ifc_dest_type(dest->destination_type); + + switch (dest->destination_type) { + case MLX5_FLOW_DESTINATION_TYPE_VPORT: + if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) { + MLX5_SET(dest_format, in_dests, + destination_eswitch_owner_vhca_id_valid, 1); + MLX5_SET(dest_format, in_dests, + destination_eswitch_owner_vhca_id, + dest->esw_owner_vhca_id); + } + fallthrough; + case MLX5_FLOW_DESTINATION_TYPE_TIR: + case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: + MLX5_SET(dest_format, in_dests, destination_type, ifc_dest_type); + MLX5_SET(dest_format, in_dests, destination_id, + dest->destination_id); + if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_REFORMAT) { + MLX5_SET(dest_format, in_dests, packet_reformat, 1); + MLX5_SET(extended_dest_format, in_dests, packet_reformat_id, + dest->ext_reformat_id); + } + break; + default: + ret = -EOPNOTSUPP; + goto out; + } + + in_dests = in_dests + dest_entry_sz; + } + MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num); + } + + ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); + if (ret) + mlx5_core_err(mdev, "Failed creating FLOW_TABLE_ENTRY\n"); + +out: + kfree(in); + return ret; +} + +int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id) +{ + u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {}; + + MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); + MLX5_SET(delete_fte_in, in, table_type, table_type); + MLX5_SET(delete_fte_in, in, table_id, table_id); + + return mlx5_cmd_exec_in(mdev, delete_fte, in); +} + +struct mlx5hws_cmd_forward_tbl * +mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ft_create_attr *ft_attr, + struct mlx5hws_cmd_set_fte_attr *fte_attr) +{ + struct mlx5hws_cmd_fg_attr fg_attr = {0}; + struct mlx5hws_cmd_forward_tbl *tbl; + int 
ret; + + tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); + if (!tbl) + return NULL; + + ret = mlx5hws_cmd_flow_table_create(mdev, ft_attr, &tbl->ft_id); + if (ret) { + mlx5_core_err(mdev, "Failed to create FT\n"); + goto free_tbl; + } + + fg_attr.table_id = tbl->ft_id; + fg_attr.table_type = ft_attr->type; + + ret = hws_cmd_flow_group_create(mdev, &fg_attr, &tbl->fg_id); + if (ret) { + mlx5_core_err(mdev, "Failed to create FG\n"); + goto free_ft; + } + + ret = mlx5hws_cmd_set_fte(mdev, ft_attr->type, + tbl->ft_id, tbl->fg_id, fte_attr); + if (ret) { + mlx5_core_err(mdev, "Failed to create FTE\n"); + goto free_fg; + } + + tbl->type = ft_attr->type; + return tbl; + +free_fg: + hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, ft_attr->type); +free_ft: + mlx5hws_cmd_flow_table_destroy(mdev, ft_attr->type, tbl->ft_id); +free_tbl: + kfree(tbl); + return NULL; +} + +void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_forward_tbl *tbl) +{ + mlx5hws_cmd_delete_fte(mdev, tbl->type, tbl->ft_id); + hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, tbl->type); + mlx5hws_cmd_flow_table_destroy(mdev, tbl->type, tbl->ft_id); + kfree(tbl); +} + +void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx, + u32 fw_ft_type, + enum mlx5hws_table_type type, + struct mlx5hws_cmd_ft_modify_attr *ft_attr) +{ + u32 default_miss_tbl; + + if (type != MLX5HWS_TABLE_TYPE_FDB) + return; + + ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION; + ft_attr->type = fw_ft_type; + ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL; + + default_miss_tbl = ctx->common_res[type].default_miss->ft_id; + if (!default_miss_tbl) { + pr_warn("HWS: no flow table ID for default miss\n"); + return; + } + + ft_attr->table_miss_id = default_miss_tbl; +} + +int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_rtc_create_attr *rtc_attr, + u32 *rtc_id) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_rtc_in)] = {0}; + void *attr; + int ret; + + attr = MLX5_ADDR_OF(create_rtc_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, MLX5_OBJ_TYPE_RTC); + + attr = MLX5_ADDR_OF(create_rtc_in, in, rtc); + MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ? 
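+		 /* (a jumbo first definer produces a match tag that needs
+		  *  the larger 11DW STE format; 8DW suffices otherwise) */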
+ MLX5_IFC_RTC_STE_FORMAT_11DW : + MLX5_IFC_RTC_STE_FORMAT_8DW); + + if (rtc_attr->is_scnd_range) { + MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE); + MLX5_SET(rtc, attr, num_match_ste, 2); + } + + MLX5_SET(rtc, attr, pd, rtc_attr->pd); + MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe); + MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode); + MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode); + MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer); + MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth); + MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size); + MLX5_SET(rtc, attr, table_type, rtc_attr->table_type); + MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer); + MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0); + MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1); + MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base); + MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base); + MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset); + MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id); + MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create RTC\n"); + goto out; + } + + *rtc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +out: + return ret; +} + +void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id) +{ + hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_RTC, rtc_id); +} + +int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_stc_create_attr *stc_attr, + u32 *stc_id) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0}; + void *attr; + int ret; + + attr = MLX5_ADDR_OF(create_stc_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, MLX5_OBJ_TYPE_STC); + MLX5_SET(general_obj_in_cmd_hdr, + attr, op_param.create.log_obj_range, stc_attr->log_obj_range); + + attr = MLX5_ADDR_OF(create_stc_in, in, stc); + MLX5_SET(stc, attr, table_type, stc_attr->table_type); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create STC\n"); + goto out; + } + + *stc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +out: + return ret; +} + +void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id) +{ + hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STC, stc_id); +} + +static int +hws_cmd_stc_modify_set_stc_param(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_stc_modify_attr *stc_attr, + void *stc_param) +{ + switch (stc_attr->action_type) { + case MLX5_IFC_STC_ACTION_TYPE_COUNTER: + MLX5_SET(stc_ste_param_flow_counter, stc_param, flow_counter_id, stc_attr->id); + break; + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR: + MLX5_SET(stc_ste_param_tir, stc_param, tirn, stc_attr->dest_tir_num); + break; + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT: + MLX5_SET(stc_ste_param_table, stc_param, table_id, stc_attr->dest_table_id); + break; + case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST: + MLX5_SET(stc_ste_param_header_modify_list, stc_param, + header_modify_pattern_id, stc_attr->modify_header.pattern_id); + MLX5_SET(stc_ste_param_header_modify_list, stc_param, + header_modify_argument_id, stc_attr->modify_header.arg_id); + break; + case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE: + 
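+		/* Each case here fills stc_param with a different layout;
+		 * the per-action-type union in struct
+		 * mlx5hws_cmd_stc_modify_attr (declared in mlx5hws_cmd.h
+		 * below) mirrors this dispatch.
+		 */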
MLX5_SET(stc_ste_param_remove, stc_param, action_type, + MLX5_MODIFICATION_TYPE_REMOVE); + MLX5_SET(stc_ste_param_remove, stc_param, decap, + stc_attr->remove_header.decap); + MLX5_SET(stc_ste_param_remove, stc_param, remove_start_anchor, + stc_attr->remove_header.start_anchor); + MLX5_SET(stc_ste_param_remove, stc_param, remove_end_anchor, + stc_attr->remove_header.end_anchor); + break; + case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT: + MLX5_SET(stc_ste_param_insert, stc_param, action_type, + MLX5_MODIFICATION_TYPE_INSERT); + MLX5_SET(stc_ste_param_insert, stc_param, encap, + stc_attr->insert_header.encap); + MLX5_SET(stc_ste_param_insert, stc_param, inline_data, + stc_attr->insert_header.is_inline); + MLX5_SET(stc_ste_param_insert, stc_param, insert_anchor, + stc_attr->insert_header.insert_anchor); + /* HW gets the next 2 sizes in words */ + MLX5_SET(stc_ste_param_insert, stc_param, insert_size, + stc_attr->insert_header.header_size / W_SIZE); + MLX5_SET(stc_ste_param_insert, stc_param, insert_offset, + stc_attr->insert_header.insert_offset / W_SIZE); + MLX5_SET(stc_ste_param_insert, stc_param, insert_argument, + stc_attr->insert_header.arg_id); + break; + case MLX5_IFC_STC_ACTION_TYPE_COPY: + case MLX5_IFC_STC_ACTION_TYPE_SET: + case MLX5_IFC_STC_ACTION_TYPE_ADD: + case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD: + *(__be64 *)stc_param = stc_attr->modify_action.data; + break; + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT: + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK: + MLX5_SET(stc_ste_param_vport, stc_param, vport_number, + stc_attr->vport.vport_num); + MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id, + stc_attr->vport.esw_owner_vhca_id); + MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id_valid, + stc_attr->vport.eswitch_owner_vhca_id_valid); + break; + case MLX5_IFC_STC_ACTION_TYPE_DROP: + case MLX5_IFC_STC_ACTION_TYPE_NOP: + case MLX5_IFC_STC_ACTION_TYPE_TAG: + case MLX5_IFC_STC_ACTION_TYPE_ALLOW: + break; + case MLX5_IFC_STC_ACTION_TYPE_ASO: + MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_object_id, + stc_attr->aso.devx_obj_id); + MLX5_SET(stc_ste_param_execute_aso, stc_param, return_reg_id, + stc_attr->aso.return_reg_id); + MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_type, + stc_attr->aso.aso_type); + break; + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE: + MLX5_SET(stc_ste_param_ste_table, stc_param, ste_obj_id, + stc_attr->ste_table.ste_obj_id); + MLX5_SET(stc_ste_param_ste_table, stc_param, match_definer_id, + stc_attr->ste_table.match_definer_id); + MLX5_SET(stc_ste_param_ste_table, stc_param, log_hash_size, + stc_attr->ste_table.log_hash_size); + break; + case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS: + MLX5_SET(stc_ste_param_remove_words, stc_param, action_type, + MLX5_MODIFICATION_TYPE_REMOVE_WORDS); + MLX5_SET(stc_ste_param_remove_words, stc_param, remove_start_anchor, + stc_attr->remove_words.start_anchor); + MLX5_SET(stc_ste_param_remove_words, stc_param, + remove_size, stc_attr->remove_words.num_of_words); + break; + case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION: + MLX5_SET(stc_ste_param_ipsec_encrypt, stc_param, ipsec_object_id, + stc_attr->id); + break; + case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION: + MLX5_SET(stc_ste_param_ipsec_decrypt, stc_param, ipsec_object_id, + stc_attr->id); + break; + case MLX5_IFC_STC_ACTION_TYPE_TRAILER: + MLX5_SET(stc_ste_param_trailer, stc_param, command, + stc_attr->reformat_trailer.op); + MLX5_SET(stc_ste_param_trailer, stc_param, type, + stc_attr->reformat_trailer.type); + 
MLX5_SET(stc_ste_param_trailer, stc_param, length, + stc_attr->reformat_trailer.size); + break; + default: + mlx5_core_err(mdev, "Not supported type %d\n", stc_attr->action_type); + return -EINVAL; + } + return 0; +} + +int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev, + u32 stc_id, + struct mlx5hws_cmd_stc_modify_attr *stc_attr) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0}; + void *stc_param; + void *attr; + int ret; + + attr = MLX5_ADDR_OF(create_stc_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, MLX5_OBJ_TYPE_STC); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, stc_id); + MLX5_SET(general_obj_in_cmd_hdr, in, + op_param.query.obj_offset, stc_attr->stc_offset); + + attr = MLX5_ADDR_OF(create_stc_in, in, stc); + MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset); + MLX5_SET(stc, attr, action_type, stc_attr->action_type); + MLX5_SET(stc, attr, reparse_mode, stc_attr->reparse_mode); + MLX5_SET64(stc, attr, modify_field_select, + MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC); + + /* Set destination TIRN, TAG, FT ID, STE ID */ + stc_param = MLX5_ADDR_OF(stc, attr, stc_param); + ret = hws_cmd_stc_modify_set_stc_param(mdev, stc_attr, stc_param); + if (ret) + return ret; + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) + mlx5_core_err(mdev, "Failed to modify STC FW action_type %d\n", + stc_attr->action_type); + + return ret; +} + +int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev, + u16 log_obj_range, + u32 pd, + u32 *arg_id) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0}; + void *attr; + int ret; + + attr = MLX5_ADDR_OF(create_arg_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, op_param.create.log_obj_range, log_obj_range); + + attr = MLX5_ADDR_OF(create_arg_in, in, arg); + MLX5_SET(arg, attr, access_pd, pd); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create ARG\n"); + goto out; + } + + *arg_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +out: + return ret; +} + +void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev, + u32 arg_id) +{ + hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT, arg_id); +} + +int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev, + u32 pattern_length, + u8 *actions, + u32 *ptrn_id) +{ + u32 in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + int num_of_actions; + u64 *pattern_data; + void *pattern; + void *attr; + int ret; + int i; + + if (pattern_length > MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY) { + mlx5_core_err(mdev, "Pattern length %d exceeds limit %d\n", + pattern_length, MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY); + return -EINVAL; + } + + attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN); + + pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern); + /* Pattern_length is in ddwords */ + MLX5_SET(header_modify_pattern_in, pattern, pattern_length, 
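+		 /* Units, as a worked example (assuming DW_SIZE is 4 bytes):
+		  * pattern_length counts bytes of 8-byte modify-header
+		  * actions, while the PRM field is in double-dwords, so
+		  * e.g. 4 actions = 32 bytes -> 32 / (2 * 4) = 4 DDWs.
+		  */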
pattern_length / (2 * DW_SIZE));
+
+	pattern_data = (u64 *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
+	memcpy(pattern_data, actions, pattern_length);
+
+	num_of_actions = pattern_length / MLX5HWS_MODIFY_ACTION_SIZE;
+	for (i = 0; i < num_of_actions; i++) {
+		int type;
+
+		type = MLX5_GET(set_action_in, &pattern_data[i], action_type);
+		if (type != MLX5_MODIFICATION_TYPE_COPY &&
+		    type != MLX5_MODIFICATION_TYPE_ADD_FIELD)
+			/* Action type copy uses all bytes for control */
+			MLX5_SET(set_action_in, &pattern_data[i], data, 0);
+	}
+
+	ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+	if (ret) {
+		mlx5_core_err(mdev, "Failed to create header_modify_pattern\n");
+		goto out;
+	}
+
+	*ptrn_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+	return ret;
+}
+
+void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
+					       u32 ptrn_id)
+{
+	hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN, ptrn_id);
+}
+
+int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
+			   struct mlx5hws_cmd_ste_create_attr *ste_attr,
+			   u32 *ste_id)
+{
+	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+	u32 in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
+	void *attr;
+	int ret;
+
+	attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
+	MLX5_SET(general_obj_in_cmd_hdr,
+		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr,
+		 attr, obj_type, MLX5_OBJ_TYPE_STE);
+	MLX5_SET(general_obj_in_cmd_hdr,
+		 attr, op_param.create.log_obj_range, ste_attr->log_obj_range);
+
+	attr = MLX5_ADDR_OF(create_ste_in, in, ste);
+	MLX5_SET(ste, attr, table_type, ste_attr->table_type);
+
+	ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+	if (ret) {
+		mlx5_core_err(mdev, "Failed to create STE\n");
+		goto out;
+	}
+
+	*ste_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+	return ret;
+}
+
+void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id)
+{
+	hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STE, ste_id);
+}
+
+int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
+			       struct mlx5hws_cmd_definer_create_attr *def_attr,
+			       u32 *definer_id)
+{
+	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+	u32 in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
+	void *ptr;
+	int ret;
+
+	MLX5_SET(general_obj_in_cmd_hdr,
+		 in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr,
+		 in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+	ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
+	MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);
+
+	MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
+	MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
+	MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
+	MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
+	MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
+	MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
+	MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
+	MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
+	MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);
+
+	MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
+	MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
+	MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
+	MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
+	MLX5_SET(definer, ptr,
format_select_byte4, def_attr->byte_selector[4]); + MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]); + MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]); + MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]); + + ptr = MLX5_ADDR_OF(definer, ptr, match_mask); + memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask)); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create Definer\n"); + goto out; + } + + *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +out: + return ret; +} + +void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev, + u32 definer_id) +{ + hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MATCH_DEFINER, definer_id); +} + +int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_packet_reformat_create_attr *attr, + u32 *reformat_id) +{ + u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0}; + size_t insz, cmd_data_sz, cmd_total_sz; + void *prctx; + void *pdata; + void *in; + int ret; + + cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in); + cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in); + cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data); + insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE); + in = kzalloc(insz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(alloc_packet_reformat_context_in, in, opcode, + MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT); + + prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, + packet_reformat_context); + pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data); + + MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type); + MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0); + MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz); + memcpy(pdata, attr->data, attr->data_sz); + + ret = mlx5_cmd_exec(mdev, in, insz, out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create packet reformat\n"); + goto out; + } + + *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id); +out: + kfree(in); + return ret; +} + +int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev, + u32 reformat_id) +{ + u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0}; + int ret; + + MLX5_SET(dealloc_packet_reformat_in, in, opcode, + MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT); + MLX5_SET(dealloc_packet_reformat_in, in, + packet_reformat_id, reformat_id); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) + mlx5_core_err(mdev, "Failed to destroy packet_reformat\n"); + + return ret; +} + +int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn) +{ + u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0}; + void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); + int ret; + + MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ); + MLX5_SET(modify_sq_in, in, sqn, sqn); + MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) + mlx5_core_err(mdev, "Failed to modify SQ\n"); + + return ret; +} + +int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_allow_other_vhca_access_attr *attr) +{ + 
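+	/* This command pairs with mlx5hws_cmd_alias_obj_create() below:
+	 * the object's owner grants access under a shared access_key, and
+	 * the peer VHCA then creates an alias object presenting the same
+	 * key in order to reference the object remotely.
+	 */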
u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0}; + void *key; + int ret; + + MLX5_SET(allow_other_vhca_access_in, + in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS); + MLX5_SET(allow_other_vhca_access_in, + in, object_type_to_be_accessed, attr->obj_type); + MLX5_SET(allow_other_vhca_access_in, + in, object_id_to_be_accessed, attr->obj_id); + + key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key); + memcpy(key, attr->access_key, sizeof(attr->access_key)); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) + mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n"); + + return ret; +} + +int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_alias_obj_create_attr *alias_attr, + u32 *obj_id) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0}; + void *attr; + void *key; + int ret; + + attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, alias_attr->obj_type); + MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1); + + attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx); + MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id); + MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id); + + key = MLX5_ADDR_OF(alias_context, attr, access_key); + memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key)); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n"); + goto out; + } + + *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +out: + return ret; +} + +int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev, + u16 obj_type, + u32 obj_id) +{ + return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id); +} + +int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_generate_wqe_attr *attr, + struct mlx5_cqe64 *ret_cqe) +{ + u32 out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0}; + u8 status; + void *ptr; + int ret; + + MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE); + MLX5_SET(generate_wqe_in, in, pdn, attr->pdn); + + ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl); + memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl)); + + ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl); + memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl)); + + ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0); + memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0)); + + if (attr->gta_data_1) { + ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1); + memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1)); + } + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to write GTA WQE using FW\n"); + return ret; + } + + status = MLX5_GET(generate_wqe_out, out, status); + if (status) { + mlx5_core_err(mdev, "Invalid FW CQE status %d\n", status); + return -EINVAL; + } + + ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data); + memcpy(ret_cqe, ptr, sizeof(*ret_cqe)); + + return ret; +} + +int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_query_caps *caps) +{ + u32 
in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0}; + u32 out_size; + u32 *out; + int ret; + + out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out); + out = kzalloc(out_size, GFP_KERNEL); + if (!out) + return -ENOMEM; + + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE | HCA_CAP_OPMOD_GET_CUR); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query device caps\n"); + goto out; + } + + caps->wqe_based_update = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.wqe_based_flow_table_update_cap); + + caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.eswitch_manager); + + caps->flex_protocols = MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.flex_parser_protocols); + + if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED) + caps->flex_parser_id_geneve_tlv_option_0 = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.flex_parser_id_geneve_tlv_option_0); + + if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED) + caps->flex_parser_id_mpls_over_gre = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_gre); + + if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED) + caps->flex_parser_id_mpls_over_udp = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_udp_label); + + caps->log_header_modify_argument_granularity = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.log_header_modify_argument_granularity); + + caps->log_header_modify_argument_granularity -= + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.log_header_modify_argument_granularity_offset); + + caps->log_header_modify_argument_max_alloc = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.log_header_modify_argument_max_alloc); + + caps->definer_format_sup = + MLX5_GET64(query_hca_cap_out, out, + capability.cmd_hca_cap.match_definer_format_supported); + + caps->vhca_id = MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.vhca_id); + + caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.sq_ts_format); + + caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.ipsec_offload); + + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 | HCA_CAP_OPMOD_GET_CUR); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query device caps 2\n"); + goto out; + } + + caps->full_dw_jumbo_support = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.format_select_dw_8_6_ext); + + caps->format_select_gtpu_dw_0 = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_0); + + caps->format_select_gtpu_dw_1 = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_1); + + caps->format_select_gtpu_dw_2 = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_2); + + caps->format_select_gtpu_ext_dw_0 = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.format_select_dw_gtpu_first_ext_dw_0); + + caps->supp_type_gen_wqe = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.generate_wqe_type); + + caps->flow_table_hash_type = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.flow_table_hash_type); + + MLX5_SET(query_hca_cap_in, 
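+	/* (The same in/out buffers are reused for every capability page
+	 *  queried below; only op_mod changes between QUERY_HCA_CAP calls.)
+	 */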
in, op_mod, + MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query flow table caps\n"); + goto out; + } + + caps->nic_ft.max_level = + MLX5_GET(query_hca_cap_out, out, + capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level); + + caps->nic_ft.reparse = + MLX5_GET(query_hca_cap_out, out, + capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse); + + caps->nic_ft.ignore_flow_level_rtc_valid = + MLX5_GET(query_hca_cap_out, out, + capability.flow_table_nic_cap.flow_table_properties_nic_receive.ignore_flow_level_rtc_valid); + + caps->flex_parser_ok_bits_supp = + MLX5_GET(query_hca_cap_out, out, + capability.flow_table_nic_cap.flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist); + + if (caps->wqe_based_update) { + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query WQE based FT caps\n"); + goto out; + } + + caps->rtc_reparse_mode = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.rtc_reparse_mode); + + caps->ste_format = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.ste_format); + + caps->rtc_index_mode = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.rtc_index_mode); + + caps->rtc_log_depth_max = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.rtc_log_depth_max); + + caps->ste_alloc_log_max = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.ste_alloc_log_max); + + caps->ste_alloc_log_gran = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.ste_alloc_log_granularity); + + caps->trivial_match_definer = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.trivial_match_definer); + + caps->stc_alloc_log_max = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.stc_alloc_log_max); + + caps->stc_alloc_log_gran = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.stc_alloc_log_granularity); + + caps->rtc_hash_split_table = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.rtc_hash_split_table); + + caps->rtc_linear_lookup_table = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.rtc_linear_lookup_table); + + caps->access_index_mode = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.access_index_mode); + + caps->linear_match_definer = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.linear_match_definer_reg_c3); + + caps->rtc_max_hash_def_gen_wqe = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.rtc_max_num_hash_definer_gen_wqe); + + caps->supp_ste_format_gen_wqe = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.ste_format_gen_wqe); + + caps->fdb_tir_stc = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.fdb_jump_to_tir_stc); + } + + if (caps->eswitch_manager) { + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query flow table esw caps\n"); + goto out; + } + + caps->fdb_ft.max_level = + 
MLX5_GET(query_hca_cap_out, out, + capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level); + + caps->fdb_ft.reparse = + MLX5_GET(query_hca_cap_out, out, + capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse); + + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_SET_HCA_CAP_OP_MOD_ESW | HCA_CAP_OPMOD_GET_CUR); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query eswitch capabilities\n"); + goto out; + } + + if (MLX5_GET(query_hca_cap_out, out, + capability.esw_cap.esw_manager_vport_number_valid)) + caps->eswitch_manager_vport_number = + MLX5_GET(query_hca_cap_out, out, + capability.esw_cap.esw_manager_vport_number); + + caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out, + capability.esw_cap.merged_eswitch); + } + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query device attributes\n"); + goto out; + } + + snprintf(caps->fw_ver, sizeof(caps->fw_ver), "%d.%d.%d", + fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev)); + + caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev); + +out: + kfree(out); + return ret; +} + +int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function, + u16 vport_number, u16 *gvmi) +{ + bool ec_vf_func = other_function ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false; + u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {}; + int out_size; + void *out; + int err; + + out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out); + out = kzalloc(out_size, GFP_KERNEL); + if (!out) + return -ENOMEM; + + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); + MLX5_SET(query_hca_cap_in, in, other_function, other_function); + MLX5_SET(query_hca_cap_in, in, function_id, + mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func)); + MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func); + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | HCA_CAP_OPMOD_GET_CUR); + + err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out); + if (err) { + kfree(out); + return err; + } + + *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id); + + kfree(out); + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h new file mode 100644 index 000000000000..2fbcf4ff571a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h @@ -0,0 +1,361 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_CMD_H_ +#define MLX5HWS_CMD_H_ + +#define WIRE_PORT 0xFFFF + +#define ACCESS_KEY_LEN 32 + +enum mlx5hws_cmd_ext_dest_flags { + MLX5HWS_CMD_EXT_DEST_REFORMAT = 1 << 0, + MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1, +}; + +struct mlx5hws_cmd_set_fte_dest { + u8 destination_type; + u32 destination_id; + enum mlx5hws_cmd_ext_dest_flags ext_flags; + u32 ext_reformat_id; + u16 esw_owner_vhca_id; +}; + +struct mlx5hws_cmd_set_fte_attr { + u32 action_flags; + bool ignore_flow_level; + u8 flow_source; + u8 extended_dest; + u8 encrypt_decrypt_type; + u32 encrypt_decrypt_obj_id; + u32 packet_reformat_id; + u32 dests_num; + struct mlx5hws_cmd_set_fte_dest *dests; +}; + +struct mlx5hws_cmd_ft_create_attr { + u8 type; + u8 level; + bool rtc_valid; + bool decap_en; + bool reformat_en; +}; + +struct mlx5hws_cmd_ft_modify_attr { + u8 type; + u32 rtc_id_0; + u32 rtc_id_1; + 
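+	/* modify_fs is the PRM modify_field_select bitmask that tells FW
+	 * which of these fields to apply - e.g.
+	 * MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION, as set in
+	 * mlx5hws_cmd_set_attr_connect_miss_tbl() above.
+	 */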
u32 table_miss_id; + u8 table_miss_action; + u64 modify_fs; +}; + +struct mlx5hws_cmd_ft_query_attr { + u8 type; +}; + +struct mlx5hws_cmd_fg_attr { + u32 table_id; + u32 table_type; +}; + +struct mlx5hws_cmd_forward_tbl { + u8 type; + u32 ft_id; + u32 fg_id; + u32 refcount; +}; + +struct mlx5hws_cmd_rtc_create_attr { + u32 pd; + u32 stc_base; + u32 ste_base; + u32 ste_offset; + u32 miss_ft_id; + bool fw_gen_wqe; + u8 update_index_mode; + u8 access_index_mode; + u8 num_hash_definer; + u8 log_depth; + u8 log_size; + u8 table_type; + u8 match_definer_0; + u8 match_definer_1; + u8 reparse_mode; + bool is_frst_jumbo; + bool is_scnd_range; +}; + +struct mlx5hws_cmd_alias_obj_create_attr { + u32 obj_id; + u16 vhca_id; + u16 obj_type; + u8 access_key[ACCESS_KEY_LEN]; +}; + +struct mlx5hws_cmd_stc_create_attr { + u8 log_obj_range; + u8 table_type; +}; + +struct mlx5hws_cmd_stc_modify_attr { + u32 stc_offset; + u8 action_offset; + u8 reparse_mode; + enum mlx5_ifc_stc_action_type action_type; + union { + u32 id; /* TIRN, TAG, FT ID, STE ID, CRYPTO */ + struct { + u8 decap; + u16 start_anchor; + u16 end_anchor; + } remove_header; + struct { + u32 arg_id; + u32 pattern_id; + } modify_header; + struct { + __be64 data; + } modify_action; + struct { + u32 arg_id; + u32 header_size; + u8 is_inline; + u8 encap; + u16 insert_anchor; + u16 insert_offset; + } insert_header; + struct { + u8 aso_type; + u32 devx_obj_id; + u8 return_reg_id; + } aso; + struct { + u16 vport_num; + u16 esw_owner_vhca_id; + u8 eswitch_owner_vhca_id_valid; + } vport; + struct { + struct mlx5hws_pool_chunk ste; + struct mlx5hws_pool *ste_pool; + u32 ste_obj_id; /* Internal */ + u32 match_definer_id; + u8 log_hash_size; + bool ignore_tx; + } ste_table; + struct { + u16 start_anchor; + u16 num_of_words; + } remove_words; + struct { + u8 type; + u8 op; + u8 size; + } reformat_trailer; + + u32 dest_table_id; + u32 dest_tir_num; + }; +}; + +struct mlx5hws_cmd_ste_create_attr { + u8 log_obj_range; + u8 table_type; +}; + +struct mlx5hws_cmd_definer_create_attr { + u8 *dw_selector; + u8 *byte_selector; + u8 *match_mask; +}; + +struct mlx5hws_cmd_allow_other_vhca_access_attr { + u16 obj_type; + u32 obj_id; + u8 access_key[ACCESS_KEY_LEN]; +}; + +struct mlx5hws_cmd_packet_reformat_create_attr { + u8 type; + size_t data_sz; + void *data; + u8 reformat_param_0; +}; + +struct mlx5hws_cmd_query_ft_caps { + u8 max_level; + u8 reparse; + u8 ignore_flow_level_rtc_valid; +}; + +struct mlx5hws_cmd_generate_wqe_attr { + u8 *wqe_ctrl; + u8 *gta_ctrl; + u8 *gta_data_0; + u8 *gta_data_1; + u32 pdn; +}; + +struct mlx5hws_cmd_query_caps { + u32 flex_protocols; + u8 wqe_based_update; + u8 rtc_reparse_mode; + u16 ste_format; + u8 rtc_index_mode; + u8 ste_alloc_log_max; + u8 ste_alloc_log_gran; + u8 stc_alloc_log_max; + u8 stc_alloc_log_gran; + u8 rtc_log_depth_max; + u8 format_select_gtpu_dw_0; + u8 format_select_gtpu_dw_1; + u8 flow_table_hash_type; + u8 format_select_gtpu_dw_2; + u8 format_select_gtpu_ext_dw_0; + u8 access_index_mode; + u32 linear_match_definer; + bool full_dw_jumbo_support; + bool rtc_hash_split_table; + bool rtc_linear_lookup_table; + u32 supp_type_gen_wqe; + u8 rtc_max_hash_def_gen_wqe; + u16 supp_ste_format_gen_wqe; + struct mlx5hws_cmd_query_ft_caps nic_ft; + struct mlx5hws_cmd_query_ft_caps fdb_ft; + bool eswitch_manager; + bool merged_eswitch; + u32 eswitch_manager_vport_number; + u8 log_header_modify_argument_granularity; + u8 log_header_modify_argument_max_alloc; + u8 sq_ts_format; + u8 fdb_tir_stc; + u64 definer_format_sup; + u32 
trivial_match_definer; + u32 vhca_id; + u32 shared_vhca_id; + char fw_ver[64]; + bool ipsec_offload; + bool is_ecpf; + u8 flex_parser_ok_bits_supp; + u8 flex_parser_id_geneve_tlv_option_0; + u8 flex_parser_id_mpls_over_gre; + u8 flex_parser_id_mpls_over_udp; +}; + +int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ft_create_attr *ft_attr, + u32 *table_id); + +int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ft_modify_attr *ft_attr, + u32 table_id); + +int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev, + u32 obj_id, + struct mlx5hws_cmd_ft_query_attr *ft_attr, + u64 *icm_addr_0, u64 *icm_addr_1); + +int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev, + u8 fw_ft_type, u32 table_id); + +void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev, + u32 table_id); + +int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_rtc_create_attr *rtc_attr, + u32 *rtc_id); + +void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id); + +int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_stc_create_attr *stc_attr, + u32 *stc_id); + +int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev, + u32 stc_id, + struct mlx5hws_cmd_stc_modify_attr *stc_attr); + +void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id); + +int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_generate_wqe_attr *attr, + struct mlx5_cqe64 *ret_cqe); + +int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ste_create_attr *ste_attr, + u32 *ste_id); + +void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id); + +int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_definer_create_attr *def_attr, + u32 *definer_id); + +void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev, + u32 definer_id); + +int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev, + u16 log_obj_range, + u32 pd, + u32 *arg_id); + +void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev, + u32 arg_id); + +int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev, + u32 pattern_length, + u8 *actions, + u32 *ptrn_id); + +void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev, + u32 ptrn_id); + +int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_packet_reformat_create_attr *attr, + u32 *reformat_id); + +int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev, + u32 reformat_id); + +int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id, + u32 group_id, + struct mlx5hws_cmd_set_fte_attr *fte_attr); + +int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev, + u32 table_type, u32 table_id); + +struct mlx5hws_cmd_forward_tbl * +mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ft_create_attr *ft_attr, + struct mlx5hws_cmd_set_fte_attr *fte_attr); + +void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_forward_tbl *tbl); + +int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_alias_obj_create_attr *alias_attr, + u32 *obj_id); + +int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev, + u16 obj_type, + u32 obj_id); + +int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn); + +int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_query_caps *caps); + +void 
mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx, + u32 fw_ft_type, + enum mlx5hws_table_type type, + struct mlx5hws_cmd_ft_modify_attr *ft_attr); + +int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_allow_other_vhca_access_attr *attr); + +int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function, + u16 vport_number, u16 *gvmi); + +#endif /* MLX5HWS_CMD_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c new file mode 100644 index 000000000000..00e4fdf4a558 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */ + +#include "mlx5hws_internal.h" + +bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx) +{ + return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC); +} + +u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx) +{ + /* Prefer to use dynamic reparse, reparse only specific actions */ + if (mlx5hws_context_cap_dynamic_reparse(ctx)) + return MLX5_IFC_RTC_REPARSE_NEVER; + + /* Otherwise use less efficient static */ + return MLX5_IFC_RTC_REPARSE_ALWAYS; +} + +static int hws_context_pools_init(struct mlx5hws_context *ctx) +{ + struct mlx5hws_pool_attr pool_attr = {0}; + u8 max_log_sz; + int ret; + int i; + + ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache); + if (ret) + return ret; + + ret = mlx5hws_definer_init_cache(&ctx->definer_cache); + if (ret) + goto uninit_pat_cache; + + /* Create an STC pool per FT type */ + pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC; + pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL; + max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max); + pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran); + + for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) { + pool_attr.table_type = i; + ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr); + if (!ctx->stc_pool[i]) { + mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i); + ret = -ENOMEM; + goto free_stc_pools; + } + } + + return 0; + +free_stc_pools: + for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) + if (ctx->stc_pool[i]) + mlx5hws_pool_destroy(ctx->stc_pool[i]); + + mlx5hws_definer_uninit_cache(ctx->definer_cache); +uninit_pat_cache: + mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache); + return ret; +} + +static void hws_context_pools_uninit(struct mlx5hws_context *ctx) +{ + int i; + + for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) { + if (ctx->stc_pool[i]) + mlx5hws_pool_destroy(ctx->stc_pool[i]); + } + + mlx5hws_definer_uninit_cache(ctx->definer_cache); + mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache); +} + +static int hws_context_init_pd(struct mlx5hws_context *ctx) +{ + int ret = 0; + + ret = mlx5_core_alloc_pd(ctx->mdev, &ctx->pd_num); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate PD\n"); + return ret; + } + + ctx->flags |= MLX5HWS_CONTEXT_FLAG_PRIVATE_PD; + + return 0; +} + +static int hws_context_uninit_pd(struct mlx5hws_context *ctx) +{ + if (ctx->flags & MLX5HWS_CONTEXT_FLAG_PRIVATE_PD) + mlx5_core_dealloc_pd(ctx->mdev, ctx->pd_num); + + return 0; +} + +static void hws_context_check_hws_supp(struct mlx5hws_context *ctx) +{ + struct mlx5hws_cmd_query_caps *caps = ctx->caps; + + /* HWS not supported on device / FW */ + if (!caps->wqe_based_update) { + 
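/* wqe_based_update is the umbrella HWS capability: without it rules + * cannot be inserted through WQEs posted to the send queues, so + * MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT stays clear and + * hws_context_init_hws() below silently becomes a no-op. + */ +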
mlx5hws_err(ctx, "Required HWS WQE based insertion cap not supported\n"); + return; + } + + if (!caps->eswitch_manager) { + mlx5hws_err(ctx, "HWS is not supported for non eswitch manager port\n"); + return; + } + + /* Current solution requires all rules to set reparse bit */ + if ((!caps->nic_ft.reparse || + (!caps->fdb_ft.reparse && caps->eswitch_manager)) || + !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) { + mlx5hws_err(ctx, "Required HWS reparse cap not supported\n"); + return; + } + + /* FW/HW must support 8DW STE */ + if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) { + mlx5hws_err(ctx, "Required HWS STE format not supported\n"); + return; + } + + /* Adding rules by hash and by offset are requirements */ + if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) || + !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) { + mlx5hws_err(ctx, "Required HWS RTC update mode not supported\n"); + return; + } + + /* Support for SELECT definer ID is required */ + if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) { + mlx5hws_err(ctx, "Required HWS Dynamic definer not supported\n"); + return; + } + + ctx->flags |= MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT; +} + +static int hws_context_init_hws(struct mlx5hws_context *ctx, + struct mlx5hws_context_attr *attr) +{ + int ret; + + hws_context_check_hws_supp(ctx); + + if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) + return 0; + + ret = hws_context_init_pd(ctx); + if (ret) + return ret; + + ret = hws_context_pools_init(ctx); + if (ret) + goto uninit_pd; + + if (attr->bwc) + ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT; + + ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size); + if (ret) + goto pools_uninit; + + INIT_LIST_HEAD(&ctx->tbl_list); + + return 0; + +pools_uninit: + hws_context_pools_uninit(ctx); +uninit_pd: + hws_context_uninit_pd(ctx); + return ret; +} + +static void hws_context_uninit_hws(struct mlx5hws_context *ctx) +{ + if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) + return; + + mlx5hws_send_queues_close(ctx); + hws_context_pools_uninit(ctx); + hws_context_uninit_pd(ctx); +} + +struct mlx5hws_context *mlx5hws_context_open(struct mlx5_core_dev *mdev, + struct mlx5hws_context_attr *attr) +{ + struct mlx5hws_context *ctx; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + + ctx->mdev = mdev; + + mutex_init(&ctx->ctrl_lock); + xa_init(&ctx->peer_ctx_xa); + + ctx->caps = kzalloc(sizeof(*ctx->caps), GFP_KERNEL); + if (!ctx->caps) + goto free_ctx; + + ret = mlx5hws_cmd_query_caps(mdev, ctx->caps); + if (ret) + goto free_caps; + + ret = mlx5hws_vport_init_vports(ctx); + if (ret) + goto free_caps; + + ret = hws_context_init_hws(ctx, attr); + if (ret) + goto uninit_vports; + + mlx5hws_debug_init_dump(ctx); + + return ctx; + +uninit_vports: + mlx5hws_vport_uninit_vports(ctx); +free_caps: + kfree(ctx->caps); +free_ctx: + xa_destroy(&ctx->peer_ctx_xa); + mutex_destroy(&ctx->ctrl_lock); + kfree(ctx); + return NULL; +} + +int mlx5hws_context_close(struct mlx5hws_context *ctx) +{ + mlx5hws_debug_uninit_dump(ctx); + hws_context_uninit_hws(ctx); + mlx5hws_vport_uninit_vports(ctx); + kfree(ctx->caps); + xa_destroy(&ctx->peer_ctx_xa); + mutex_destroy(&ctx->ctrl_lock); + kfree(ctx); + return 0; +} + +void mlx5hws_context_set_peer(struct mlx5hws_context *ctx, + struct mlx5hws_context *peer_ctx, + u16 peer_vhca_id) +{ + mutex_lock(&ctx->ctrl_lock); + + if (xa_err(xa_store(&ctx->peer_ctx_xa, 
peer_vhca_id, peer_ctx, GFP_KERNEL))) + pr_warn("HWS: failed storing peer vhca ID in peer xarray\n"); + + mutex_unlock(&ctx->ctrl_lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h new file mode 100644 index 000000000000..e5a7ce604334 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_CONTEXT_H_ +#define MLX5HWS_CONTEXT_H_ + +enum mlx5hws_context_flags { + MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0, + MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1, + MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2, +}; + +enum mlx5hws_context_shared_stc_type { + MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3 = 0, + MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP = 1, + MLX5HWS_CONTEXT_SHARED_STC_MAX = 2, +}; + +struct mlx5hws_context_common_res { + struct mlx5hws_action_default_stc *default_stc; + struct mlx5hws_action_shared_stc *shared_stc[MLX5HWS_CONTEXT_SHARED_STC_MAX]; + struct mlx5hws_cmd_forward_tbl *default_miss; +}; + +struct mlx5hws_context_debug_info { + struct dentry *steering_debugfs; + struct dentry *fdb_debugfs; +}; + +struct mlx5hws_context_vports { + u16 esw_manager_gvmi; + u16 uplink_gvmi; + struct xarray vport_gvmi_xa; +}; + +struct mlx5hws_context { + struct mlx5_core_dev *mdev; + struct mlx5hws_cmd_query_caps *caps; + u32 pd_num; + struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX]; + struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX]; + struct mlx5hws_pattern_cache *pattern_cache; + struct mlx5hws_definer_cache *definer_cache; + struct mutex ctrl_lock; /* control lock to protect the whole context */ + enum mlx5hws_context_flags flags; + struct mlx5hws_send_engine *send_queue; + size_t queues; + struct mutex *bwc_send_queue_locks; /* protect BWC queues */ + struct list_head tbl_list; + struct mlx5hws_context_debug_info debug_info; + struct xarray peer_ctx_xa; + struct mlx5hws_context_vports vports; +}; + +static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx) +{ + return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT; +} + +bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx); + +u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx); + +#endif /* MLX5HWS_CONTEXT_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c new file mode 100644 index 000000000000..2b8c5a4e1c4c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c @@ -0,0 +1,480 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include <linux/debugfs.h> +#include <linux/kernel.h> +#include <linux/seq_file.h> +#include <linux/version.h> +#include "mlx5hws_internal.h" + +static int +hws_debug_dump_matcher_template_definer(struct seq_file *f, + void *parent_obj, + struct mlx5hws_definer *definer, + enum mlx5hws_debug_res_type type) +{ + int i; + + if (!definer) + return 0; + + seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,", + type, + HWS_PTR_TO_ID(definer), + HWS_PTR_TO_ID(parent_obj), + definer->obj_id, + definer->type); + + for (i = 0; i < DW_SELECTORS; i++) + seq_printf(f, "0x%x%s", definer->dw_selector[i], + (i == DW_SELECTORS - 1) ? 
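/* '-' separates selector values, ',' terminates the dump field */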
"," : "-"); + + for (i = 0; i < BYTE_SELECTORS; i++) + seq_printf(f, "0x%x%s", definer->byte_selector[i], + (i == BYTE_SELECTORS - 1) ? "," : "-"); + + for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++) + seq_printf(f, "%02x", definer->mask.jumbo[i]); + + seq_puts(f, "\n"); + + return 0; +} + +static int +hws_debug_dump_matcher_match_template(struct seq_file *f, struct mlx5hws_matcher *matcher) +{ + enum mlx5hws_debug_res_type type; + int i, ret; + + for (i = 0; i < matcher->num_of_mt; i++) { + struct mlx5hws_match_template *mt = &matcher->mt[i]; + + seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d\n", + MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE, + HWS_PTR_TO_ID(mt), + HWS_PTR_TO_ID(matcher), + mt->fc_sz, + 0, 0); + + type = MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER; + ret = hws_debug_dump_matcher_template_definer(f, mt, mt->definer, type); + if (ret) + return ret; + } + + return 0; +} + +static int +hws_debug_dump_matcher_action_template(struct seq_file *f, struct mlx5hws_matcher *matcher) +{ + enum mlx5hws_action_type action_type; + int i, j; + + for (i = 0; i < matcher->num_of_at; i++) { + struct mlx5hws_action_template *at = &matcher->at[i]; + + seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d", + MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE, + HWS_PTR_TO_ID(at), + HWS_PTR_TO_ID(matcher), + at->only_term, + at->num_of_action_stes, + at->num_actions); + + for (j = 0; j < at->num_actions; j++) { + action_type = at->action_type_arr[j]; + seq_printf(f, ",%s", mlx5hws_action_type_to_str(action_type)); + } + + seq_puts(f, "\n"); + } + + return 0; +} + +static int +hws_debug_dump_matcher_attr(struct seq_file *f, struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_matcher_attr *attr = &matcher->attr; + + seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d\n", + MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR, + HWS_PTR_TO_ID(matcher), + attr->priority, + attr->mode, + attr->table.sz_row_log, + attr->table.sz_col_log, + attr->optimize_using_rule_idx, + attr->optimize_flow_src, + attr->insert_mode, + attr->distribute_mode); + + return 0; +} + +static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *matcher) +{ + enum mlx5hws_table_type tbl_type = matcher->tbl->type; + struct mlx5hws_cmd_ft_query_attr ft_attr = {0}; + struct mlx5hws_pool_chunk *ste; + struct mlx5hws_pool *ste_pool; + u64 icm_addr_0 = 0; + u64 icm_addr_1 = 0; + u32 ste_0_id = -1; + u32 ste_1_id = -1; + int ret; + + seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,0x%llx", + MLX5HWS_DEBUG_RES_TYPE_MATCHER, + HWS_PTR_TO_ID(matcher), + HWS_PTR_TO_ID(matcher->tbl), + matcher->num_of_mt, + matcher->end_ft_id, + matcher->col_matcher ? 
HWS_PTR_TO_ID(matcher->col_matcher) : 0); + + ste = &matcher->match_ste.ste; + ste_pool = matcher->match_ste.pool; + if (ste_pool) { + ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste); + if (tbl_type == MLX5HWS_TABLE_TYPE_FDB) + ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste); + } + + seq_printf(f, ",%d,%d,%d,%d", + matcher->match_ste.rtc_0_id, + (int)ste_0_id, + matcher->match_ste.rtc_1_id, + (int)ste_1_id); + + ste = &matcher->action_ste[0].ste; + ste_pool = matcher->action_ste[0].pool; + if (ste_pool) { + ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste); + if (tbl_type == MLX5HWS_TABLE_TYPE_FDB) + ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste); + else + ste_1_id = -1; + } else { + ste_0_id = -1; + ste_1_id = -1; + } + + ft_attr.type = matcher->tbl->fw_ft_type; + ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev, + matcher->end_ft_id, + &ft_attr, + &icm_addr_0, + &icm_addr_1); + if (ret) + return ret; + + seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n", + matcher->action_ste[0].rtc_0_id, + (int)ste_0_id, + matcher->action_ste[0].rtc_1_id, + (int)ste_1_id, + 0, + mlx5hws_debug_icm_to_idx(icm_addr_0), + mlx5hws_debug_icm_to_idx(icm_addr_1)); + + ret = hws_debug_dump_matcher_attr(f, matcher); + if (ret) + return ret; + + ret = hws_debug_dump_matcher_match_template(f, matcher); + if (ret) + return ret; + + ret = hws_debug_dump_matcher_action_template(f, matcher); + if (ret) + return ret; + + return 0; +} + +static int hws_debug_dump_table(struct seq_file *f, struct mlx5hws_table *tbl) +{ + struct mlx5hws_cmd_ft_query_attr ft_attr = {0}; + struct mlx5hws_matcher *matcher; + u64 local_icm_addr_0 = 0; + u64 local_icm_addr_1 = 0; + u64 icm_addr_0 = 0; + u64 icm_addr_1 = 0; + int ret; + + seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d,%d,%d", + MLX5HWS_DEBUG_RES_TYPE_TABLE, + HWS_PTR_TO_ID(tbl), + HWS_PTR_TO_ID(tbl->ctx), + tbl->ft_id, + MLX5HWS_TABLE_TYPE_BASE + tbl->type, + tbl->fw_ft_type, + tbl->level, + 0); + + ft_attr.type = tbl->fw_ft_type; + ret = mlx5hws_cmd_flow_table_query(tbl->ctx->mdev, + tbl->ft_id, + &ft_attr, + &icm_addr_0, + &icm_addr_1); + if (ret) + return ret; + + seq_printf(f, ",0x%llx,0x%llx,0x%llx,0x%llx,0x%llx\n", + mlx5hws_debug_icm_to_idx(icm_addr_0), + mlx5hws_debug_icm_to_idx(icm_addr_1), + mlx5hws_debug_icm_to_idx(local_icm_addr_0), + mlx5hws_debug_icm_to_idx(local_icm_addr_1), + HWS_PTR_TO_ID(tbl->default_miss.miss_tbl)); + + list_for_each_entry(matcher, &tbl->matchers_list, list_node) { + ret = hws_debug_dump_matcher(f, matcher); + if (ret) + return ret; + } + + return 0; +} + +static int +hws_debug_dump_context_send_engine(struct seq_file *f, struct mlx5hws_context *ctx) +{ + struct mlx5hws_send_engine *send_queue; + struct mlx5hws_send_ring *send_ring; + struct mlx5hws_send_ring_cq *cq; + struct mlx5hws_send_ring_sq *sq; + int i; + + for (i = 0; i < (int)ctx->queues; i++) { + send_queue = &ctx->send_queue[i]; + seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE, + HWS_PTR_TO_ID(ctx), + i, + send_queue->used_entries, + send_queue->num_entries, + 1, /* one send ring per queue */ + send_queue->num_entries, + send_queue->err, + send_queue->completed.ci, + send_queue->completed.pi, + send_queue->completed.mask); + + send_ring = &send_queue->send_ring; + cq = &send_ring->send_cq; + sq = &send_ring->send_sq; + + seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING, + HWS_PTR_TO_ID(ctx), + 0, /* one send ring 
per send queue */ + i, + cq->mcq.cqn, + 0, + 0, + 0, + 0, + 0, + 0, + cq->mcq.cqe_sz, + sq->sqn, + 0, + 0, + 0); + } + + return 0; +} + +static int hws_debug_dump_context_caps(struct seq_file *f, struct mlx5hws_context *ctx) +{ + struct mlx5hws_cmd_query_caps *caps = ctx->caps; + + seq_printf(f, "%d,0x%llx,%s,%d,%d,%d,%d,", + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS, + HWS_PTR_TO_ID(ctx), + caps->fw_ver, + caps->wqe_based_update, + caps->ste_format, + caps->ste_alloc_log_max, + caps->log_header_modify_argument_max_alloc); + + seq_printf(f, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%s\n", + caps->flex_protocols, + caps->rtc_reparse_mode, + caps->rtc_index_mode, + caps->ste_alloc_log_gran, + caps->stc_alloc_log_max, + caps->stc_alloc_log_gran, + caps->rtc_log_depth_max, + caps->format_select_gtpu_dw_0, + caps->format_select_gtpu_dw_1, + caps->format_select_gtpu_dw_2, + caps->format_select_gtpu_ext_dw_0, + caps->nic_ft.max_level, + caps->nic_ft.reparse, + caps->fdb_ft.max_level, + caps->fdb_ft.reparse, + caps->log_header_modify_argument_granularity, + caps->linear_match_definer, + "regc_3"); + + return 0; +} + +static int hws_debug_dump_context_attr(struct seq_file *f, struct mlx5hws_context *ctx) +{ + seq_printf(f, "%u,0x%llx,%d,%zu,%d,%s,%d,%d\n", + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR, + HWS_PTR_TO_ID(ctx), + ctx->pd_num, + ctx->queues, + ctx->send_queue->num_entries, + "None", /* no shared gvmi */ + ctx->caps->vhca_id, + 0xffff); /* no shared gvmi */ + + return 0; +} + +static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_context *ctx) +{ + struct mlx5_core_dev *dev = ctx->mdev; + int ret; + + seq_printf(f, "%d,0x%llx,%d,%s,%s.KERNEL_%u_%u_%u\n", + MLX5HWS_DEBUG_RES_TYPE_CONTEXT, + HWS_PTR_TO_ID(ctx), + ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT, + pci_name(dev->pdev), + HWS_DEBUG_FORMAT_VERSION, + LINUX_VERSION_MAJOR, + LINUX_VERSION_PATCHLEVEL, + LINUX_VERSION_SUBLEVEL); + + ret = hws_debug_dump_context_attr(f, ctx); + if (ret) + return ret; + + ret = hws_debug_dump_context_caps(f, ctx); + if (ret) + return ret; + + return 0; +} + +static int hws_debug_dump_context_stc_resource(struct seq_file *f, + struct mlx5hws_context *ctx, + u32 tbl_type, + struct mlx5hws_pool_resource *resource) +{ + seq_printf(f, "%d,0x%llx,%u,%u\n", + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC, + HWS_PTR_TO_ID(ctx), + tbl_type, + resource->base_id); + + return 0; +} + +static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx) +{ + struct mlx5hws_pool *stc_pool; + u32 table_type; + int ret; + int i; + + for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) { + stc_pool = ctx->stc_pool[i]; + table_type = MLX5HWS_TABLE_TYPE_BASE + i; + + if (!stc_pool) + continue; + + if (stc_pool->resource[0]) { + ret = hws_debug_dump_context_stc_resource(f, ctx, table_type, + stc_pool->resource[0]); + if (ret) + return ret; + } + + if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) { + ret = hws_debug_dump_context_stc_resource(f, ctx, table_type, + stc_pool->mirror_resource[0]); + if (ret) + return ret; + } + } + + return 0; +} + +static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx) +{ + struct mlx5hws_table *tbl; + int ret; + + ret = hws_debug_dump_context_info(f, ctx); + if (ret) + return ret; + + ret = hws_debug_dump_context_send_engine(f, ctx); + if (ret) + return ret; + + ret = hws_debug_dump_context_stc(f, ctx); + if (ret) + return ret; + + list_for_each_entry(tbl, &ctx->tbl_list, tbl_list_node) { + ret = hws_debug_dump_table(f, tbl); + if 
(ret) + return ret; + } + + return 0; +} + +static int +hws_debug_dump(struct seq_file *f, struct mlx5hws_context *ctx) +{ + int ret; + + if (!f || !ctx) + return -EINVAL; + + mutex_lock(&ctx->ctrl_lock); + ret = hws_debug_dump_context(f, ctx); + mutex_unlock(&ctx->ctrl_lock); + + return ret; +} + +static int hws_dump_show(struct seq_file *file, void *priv) +{ + return hws_debug_dump(file, file->private); +} +DEFINE_SHOW_ATTRIBUTE(hws_dump); + +void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx) +{ + struct mlx5_core_dev *dev = ctx->mdev; + char file_name[128]; + + ctx->debug_info.steering_debugfs = + debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev)); + ctx->debug_info.fdb_debugfs = + debugfs_create_dir("fdb", ctx->debug_info.steering_debugfs); + + sprintf(file_name, "ctx_%p", ctx); + debugfs_create_file(file_name, 0444, ctx->debug_info.fdb_debugfs, + ctx, &hws_dump_fops); +} + +void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx) +{ + debugfs_remove_recursive(ctx->debug_info.steering_debugfs); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h new file mode 100644 index 000000000000..b93a536035d9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_DEBUG_H_ +#define MLX5HWS_DEBUG_H_ + +#define HWS_DEBUG_FORMAT_VERSION "1.0" + +#define HWS_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL) + +enum mlx5hws_debug_res_type { + MLX5HWS_DEBUG_RES_TYPE_CONTEXT = 4000, + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR = 4001, + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS = 4002, + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE = 4003, + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING = 4004, + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC = 4005, + + MLX5HWS_DEBUG_RES_TYPE_TABLE = 4100, + + MLX5HWS_DEBUG_RES_TYPE_MATCHER = 4200, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR = 4201, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE = 4202, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER = 4203, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE = 4204, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207, +}; + +static inline u64 +mlx5hws_debug_icm_to_idx(u64 icm_addr) +{ + return (icm_addr >> 6) & 0xffffffff; +} + +void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx); +void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx); + +#endif /* MLX5HWS_DEBUG_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c new file mode 100644 index 000000000000..3bdb5c90efff --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c @@ -0,0 +1,2146 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +/* Pattern tunnel Layer bits. */ +#define MLX5_FLOW_LAYER_VXLAN BIT(12) +#define MLX5_FLOW_LAYER_VXLAN_GPE BIT(13) +#define MLX5_FLOW_LAYER_GRE BIT(14) +#define MLX5_FLOW_LAYER_MPLS BIT(15) + +/* Pattern tunnel Layer bits (continued). 
*/ +#define MLX5_FLOW_LAYER_IPIP BIT(23) +#define MLX5_FLOW_LAYER_IPV6_ENCAP BIT(24) +#define MLX5_FLOW_LAYER_NVGRE BIT(25) +#define MLX5_FLOW_LAYER_GENEVE BIT(26) + +#define MLX5_FLOW_ITEM_FLEX_TUNNEL BIT_ULL(39) + +/* Tunnel Masks. */ +#define MLX5_FLOW_LAYER_TUNNEL \ + (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \ + MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \ + MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \ + MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \ + MLX5_FLOW_ITEM_FLEX_TUNNEL) + +#define GTP_PDU_SC 0x85 +#define BAD_PORT 0xBAD +#define ETH_TYPE_IPV4_VXLAN 0x0800 +#define ETH_TYPE_IPV6_VXLAN 0x86DD +#define UDP_GTPU_PORT 2152 +#define UDP_PORT_MPLS 6635 +#define UDP_GENEVE_PORT 6081 +#define UDP_ROCEV2_PORT 4791 +#define HWS_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS) + +#define STE_NO_VLAN 0x0 +#define STE_SVLAN 0x1 +#define STE_CVLAN 0x2 +#define STE_NO_L3 0x0 +#define STE_IPV4 0x1 +#define STE_IPV6 0x2 +#define STE_NO_L4 0x0 +#define STE_TCP 0x1 +#define STE_UDP 0x2 +#define STE_ICMP 0x3 +#define STE_ESP 0x3 + +#define IPV4 0x4 +#define IPV6 0x6 + +/* Setter function based on bit offset and mask, for 32bit DW */ +#define _HWS_SET32(p, v, byte_off, bit_off, mask) \ + do { \ + u32 _v = v; \ + *((__be32 *)(p) + ((byte_off) / 4)) = \ + cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + \ + ((byte_off) / 4))) & \ + (~((mask) << (bit_off)))) | \ + (((_v) & (mask)) << \ + (bit_off))); \ + } while (0) + +/* Setter function based on bit offset and mask, for unaligned 32bit DW */ +#define HWS_SET32(p, v, byte_off, bit_off, mask) \ + do { \ + if (unlikely((bit_off) < 0)) { \ + u32 _bit_off = -1 * (bit_off); \ + u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \ + _HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \ + _HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \ + (bit_off) % BITS_IN_DW, second_dw_mask); \ + } else { \ + _HWS_SET32(p, v, byte_off, (bit_off), (mask)); \ + } \ + } while (0) + +/* Getter for up to aligned 32bit DW */ +#define HWS_GET32(p, byte_off, bit_off, mask) \ + ((be32_to_cpu(*((__be32 *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask)) + +#define HWS_CALC_FNAME(field, inner) \ + ((inner) ? MLX5HWS_DEFINER_FNAME_##field##_I : \ + MLX5HWS_DEFINER_FNAME_##field##_O) + +#define HWS_GET_MATCH_PARAM(match_param, hdr) \ + MLX5_GET(fte_match_param, match_param, hdr) + +#define HWS_IS_FLD_SET(match_param, hdr) \ + (!!(HWS_GET_MATCH_PARAM(match_param, hdr))) + +#define HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) ({ \ + BUILD_BUG_ON((sz_in_bits) % 32); \ + u32 sz = sz_in_bits; \ + u32 res = 0; \ + u32 dw_off = __mlx5_dw_off(fte_match_param, hdr); \ + while (!res && sz >= 32) { \ + res = *((match_param) + (dw_off++)); \ + sz -= 32; \ + } \ + res; \ + }) + +#define HWS_IS_FLD_SET_SZ(match_param, hdr, sz_in_bits) \ + (((sz_in_bits) > 32) ? 
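/* Worked example for HWS_SET32 above (illustrative numbers, not taken from a real field): a 6-bit value with bit_off == -2 straddles a DW boundary, so _bit_off == 2 and second_dw_mask == 0x3; the first _HWS_SET32() stores v >> 2 at bit 0 of the DW at byte_off, and the second stores v & 0x3 in the top bits of the DW at byte_off + DW_SIZE. */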
HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) : \ + !!(HWS_GET_MATCH_PARAM(match_param, hdr))) + +#define HWS_GET64_MATCH_PARAM(match_param, hdr) \ + MLX5_GET64(fte_match_param, match_param, hdr) + +#define HWS_IS_FLD64_SET(match_param, hdr) \ + (!!(HWS_GET64_MATCH_PARAM(match_param, hdr))) + +#define HWS_CALC_HDR_SRC(fc, s_hdr) \ + do { \ + (fc)->s_bit_mask = __mlx5_mask(fte_match_param, s_hdr); \ + (fc)->s_bit_off = __mlx5_dw_bit_off(fte_match_param, s_hdr); \ + (fc)->s_byte_off = MLX5_BYTE_OFF(fte_match_param, s_hdr); \ + } while (0) + +#define HWS_CALC_HDR_DST(fc, d_hdr) \ + do { \ + (fc)->bit_mask = __mlx5_mask(definer_hl, d_hdr); \ + (fc)->bit_off = __mlx5_dw_bit_off(definer_hl, d_hdr); \ + (fc)->byte_off = MLX5_BYTE_OFF(definer_hl, d_hdr); \ + } while (0) + +#define HWS_CALC_HDR(fc, s_hdr, d_hdr) \ + do { \ + HWS_CALC_HDR_SRC(fc, s_hdr); \ + HWS_CALC_HDR_DST(fc, d_hdr); \ + (fc)->tag_set = &hws_definer_generic_set; \ + } while (0) + +#define HWS_SET_HDR(fc_arr, match_param, fname, s_hdr, d_hdr) \ + do { \ + if (HWS_IS_FLD_SET(match_param, s_hdr)) \ + HWS_CALC_HDR(&(fc_arr)[MLX5HWS_DEFINER_FNAME_##fname], s_hdr, d_hdr); \ + } while (0) + +struct mlx5hws_definer_sel_ctrl { + u8 allowed_full_dw; /* Full DW selectors cover all offsets */ + u8 allowed_lim_dw; /* Limited DW selectors cover offset < 64 */ + u8 allowed_bytes; /* Bytes selectors, up to offset 255 */ + u8 used_full_dw; + u8 used_lim_dw; + u8 used_bytes; + u8 full_dw_selector[DW_SELECTORS]; + u8 lim_dw_selector[DW_SELECTORS_LIMITED]; + u8 byte_selector[BYTE_SELECTORS]; +}; + +struct mlx5hws_definer_conv_data { + struct mlx5hws_context *ctx; + struct mlx5hws_definer_fc *fc; + /* enum mlx5hws_definer_match_flag */ + u32 match_flags; +}; + +static void +hws_definer_ones_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + HWS_SET32(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_generic_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + /* Can be optimized */ + u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask); + + HWS_SET32(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_outer_vlan_type_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag)) + HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask); + else if (HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) + HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask); + else + HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_inner_vlan_type_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag)) + HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask); + else if (HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) + HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask); + else + HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_second_vlan_type_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag, + bool inner) +{ + u32 second_cvlan_tag = inner ? + HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) : + HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag); + u32 second_svlan_tag = inner ? 
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag) : + HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag); + + if (second_cvlan_tag) + HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask); + else if (second_svlan_tag) + HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask); + else + HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_inner_second_vlan_type_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + hws_definer_second_vlan_type_set(fc, match_param, tag, true); +} + +static void +hws_definer_outer_second_vlan_type_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + hws_definer_second_vlan_type_set(fc, match_param, tag, false); +} + +static void hws_definer_icmp_dw1_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_code); + u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_type); + u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) | + (code << __mlx5_dw_bit_off(header_icmp, code)); + + HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_icmpv6_dw1_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_code); + u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_type); + u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) | + (code << __mlx5_dw_bit_off(header_icmp, code)); + + HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_l3_type_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask); + + if (val == IPV4) + HWS_SET32(tag, STE_IPV4, fc->byte_off, fc->bit_off, fc->bit_mask); + else if (val == IPV6) + HWS_SET32(tag, STE_IPV6, fc->byte_off, fc->bit_off, fc->bit_mask); + else + HWS_SET32(tag, STE_NO_L3, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_set_source_port_gvmi(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag, + struct mlx5hws_context *peer_ctx) +{ + u16 source_port = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port); + u16 vport_gvmi = 0; + int ret; + + ret = mlx5hws_vport_get_gvmi(peer_ctx, source_port, &vport_gvmi); + if (ret) { + HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask); + mlx5hws_err(fc->ctx, "Vport 0x%x is disabled or invalid\n", source_port); + return; + } + + if (vport_gvmi) + HWS_SET32(tag, vport_gvmi, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_set_source_gvmi_vhca_id(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +__must_hold(&fc->ctx->ctrl_lock) +{ + int id = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_eswitch_owner_vhca_id); + struct mlx5hws_context *peer_ctx; + + if (id == fc->ctx->caps->vhca_id) + peer_ctx = fc->ctx; + else + peer_ctx = xa_load(&fc->ctx->peer_ctx_xa, id); + + if (!peer_ctx) { + HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask); + mlx5hws_err(fc->ctx, "Invalid vhca_id provided 0x%x\n", id); + return; + } + + hws_definer_set_source_port_gvmi(fc, match_param, tag, peer_ctx); +} + +static void +hws_definer_set_source_gvmi(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + hws_definer_set_source_port_gvmi(fc, match_param, tag, fc->ctx); 
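+ /* Unlike hws_definer_set_source_gvmi_vhca_id() above, which may + * resolve a peer context from peer_ctx_xa, this variant always + * translates the source port against the local context. + */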
+} + +static struct mlx5hws_definer_fc * +hws_definer_flex_parser_steering_ok_bits_handler(struct mlx5hws_definer_conv_data *cd, + u8 parser_id) +{ + struct mlx5hws_definer_fc *fc; + + switch (parser_id) { + case 0: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser0_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 1: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser1_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 2: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser2_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 3: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser3_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 4: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser4_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 5: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser5_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 6: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser6_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 7: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser7_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + default: + mlx5hws_err(cd->ctx, "Unsupported flex parser steering ok index %u\n", parser_id); + return NULL; + } + + return fc; +} + +static struct mlx5hws_definer_fc * +hws_definer_flex_parser_handler(struct mlx5hws_definer_conv_data *cd, + u8 parser_id) +{ + struct mlx5hws_definer_fc *fc; + + switch (parser_id) { + case 0: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_0); + fc->tag_set = &hws_definer_generic_set; + break; + case 1: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_1); + fc->tag_set = &hws_definer_generic_set; + break; + case 2: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_2); + fc->tag_set = &hws_definer_generic_set; + break; + case 3: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_3); + fc->tag_set = &hws_definer_generic_set; + break; + case 4: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_4); + fc->tag_set = &hws_definer_generic_set; + break; + case 5: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_5); + fc->tag_set = &hws_definer_generic_set; + break; + case 6: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_6); + fc->tag_set = &hws_definer_generic_set; + break; + case 7: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_7); + fc->tag_set = &hws_definer_generic_set; + break; + default: + mlx5hws_err(cd->ctx, "Unsupported flex parser %u\n", parser_id); + return NULL; + } + + return fc; +} + +static struct mlx5hws_definer_fc * +hws_definer_misc4_fields_handler(struct mlx5hws_definer_conv_data *cd, + bool *parser_is_used, + u32 id, + u32 value) +{ + if (id || value) { + if (id >= HWS_NUM_OF_FLEX_PARSERS) { + 
mlx5hws_err(cd->ctx, "Unsupported parser id\n"); + return NULL; + } + + if (parser_is_used[id]) { + mlx5hws_err(cd->ctx, "Parser id have been used\n"); + return NULL; + } + } + + parser_is_used[id] = true; + + return hws_definer_flex_parser_handler(cd, id); +} + +static int +hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd) +{ + u32 flags; + + flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1); + if (flags & (flags - 1)) + goto err_conflict; + + flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2); + + if (flags & (flags - 1)) + goto err_conflict; + + flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP); + if (flags & (flags - 1)) + goto err_conflict; + + flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 | + MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 | + MLX5HWS_DEFINER_MATCH_FLAG_TCP_O | + MLX5HWS_DEFINER_MATCH_FLAG_TCP_I); + if (flags & (flags - 1)) + goto err_conflict; + + return 0; + +err_conflict: + mlx5hws_err(cd->ctx, "Invalid definer fields combination\n"); + return -EINVAL; +} + +static int +hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd, + u32 *match_param) +{ + bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set; + struct mlx5hws_definer_fc *fc = cd->fc; + struct mlx5hws_definer_fc *curr_fc; + u32 *s_ipv6, *d_ipv6; + + if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) || + HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) || + HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) { + mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n"); + return -EINVAL; + } + + /* L2 Check ethertype */ + HWS_SET_HDR(fc, match_param, ETH_TYPE_O, + outer_headers.ethertype, + eth_l2_outer.l3_ethertype); + /* L2 Check SMAC 47_16 */ + HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_O, + outer_headers.smac_47_16, eth_l2_src_outer.smac_47_16); + /* L2 Check SMAC 15_0 */ + HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_O, + outer_headers.smac_15_0, eth_l2_src_outer.smac_15_0); + /* L2 Check DMAC 47_16 */ + HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_O, + outer_headers.dmac_47_16, eth_l2_outer.dmac_47_16); + /* L2 Check DMAC 15_0 */ + HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_O, + outer_headers.dmac_15_0, eth_l2_outer.dmac_15_0); + + /* L2 VLAN */ + HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_O, + outer_headers.first_prio, eth_l2_outer.first_priority); + HWS_SET_HDR(fc, match_param, VLAN_CFI_O, + outer_headers.first_cfi, eth_l2_outer.first_cfi); + HWS_SET_HDR(fc, match_param, VLAN_ID_O, + outer_headers.first_vid, eth_l2_outer.first_vlan_id); + + /* L2 CVLAN and SVLAN */ + if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag) || + HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O]; + HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.first_vlan_qualifier); + curr_fc->tag_set = &hws_definer_outer_vlan_type_set; + curr_fc->tag_mask_set = &hws_definer_ones_set; + } + + /* L3 Check IP header */ + HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O, + outer_headers.ip_protocol, + eth_l3_outer.protocol_next_header); + HWS_SET_HDR(fc, match_param, IP_TTL_O, + outer_headers.ttl_hoplimit, + eth_l3_outer.time_to_live_hop_limit); + + /* L3 Check 
IPv4/IPv6 addresses */ + s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param, + outer_headers.src_ipv4_src_ipv6.ipv6_layout); + d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout); + + /* Assume IPv6 is used if ipv6 bits are set */ + is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2]; + is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2]; + + if (is_s_ipv6) { + /* Handle IPv6 source address */ + HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96, + ipv6_src_outer.ipv6_address_127_96); + HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_O, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64, + ipv6_src_outer.ipv6_address_95_64); + HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_O, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32, + ipv6_src_outer.ipv6_address_63_32); + HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv6_src_outer.ipv6_address_31_0); + } else { + /* Handle IPv4 source address */ + HWS_SET_HDR(fc, match_param, IPV4_SRC_O, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv4_src_dest_outer.source_address); + } + if (is_d_ipv6) { + /* Handle IPv6 destination address */ + HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96, + ipv6_dst_outer.ipv6_address_127_96); + HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_O, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64, + ipv6_dst_outer.ipv6_address_95_64); + HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_O, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32, + ipv6_dst_outer.ipv6_address_63_32); + HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_O, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv6_dst_outer.ipv6_address_31_0); + } else { + /* Handle IPv4 destination address */ + HWS_SET_HDR(fc, match_param, IPV4_DST_O, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv4_src_dest_outer.destination_address); + } + + /* L4 Handle TCP/UDP */ + HWS_SET_HDR(fc, match_param, L4_SPORT_O, + outer_headers.tcp_sport, eth_l4_outer.source_port); + HWS_SET_HDR(fc, match_param, L4_DPORT_O, + outer_headers.tcp_dport, eth_l4_outer.destination_port); + HWS_SET_HDR(fc, match_param, L4_SPORT_O, + outer_headers.udp_sport, eth_l4_outer.source_port); + HWS_SET_HDR(fc, match_param, L4_DPORT_O, + outer_headers.udp_dport, eth_l4_outer.destination_port); + HWS_SET_HDR(fc, match_param, TCP_FLAGS_O, + outer_headers.tcp_flags, eth_l4_outer.tcp_flags); + + /* L3 Handle DSCP, ECN and IHL */ + HWS_SET_HDR(fc, match_param, IP_DSCP_O, + outer_headers.ip_dscp, eth_l3_outer.dscp); + HWS_SET_HDR(fc, match_param, IP_ECN_O, + outer_headers.ip_ecn, eth_l3_outer.ecn); + HWS_SET_HDR(fc, match_param, IPV4_IHL_O, + outer_headers.ipv4_ihl, eth_l3_outer.ihl); + + /* Set IP fragmented bit */ + if (HWS_IS_FLD_SET(match_param, outer_headers.frag)) { + smac_set = HWS_IS_FLD_SET(match_param, outer_headers.smac_15_0) || + HWS_IS_FLD_SET(match_param, outer_headers.smac_47_16); + dmac_set = HWS_IS_FLD_SET(match_param, outer_headers.dmac_15_0) || + HWS_IS_FLD_SET(match_param, outer_headers.dmac_47_16); + if (smac_set == dmac_set) { + HWS_SET_HDR(fc, match_param, IP_FRAG_O, + outer_headers.frag, eth_l4_outer.ip_fragmented); + } else { + HWS_SET_HDR(fc, match_param, IP_FRAG_O, + outer_headers.frag, eth_l2_src_outer.ip_fragmented); + } + } + + /* L3_type set */ + if 
(HWS_IS_FLD_SET(match_param, outer_headers.ip_version)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O]; + HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.l3_type); + curr_fc->tag_set = &hws_definer_l3_type_set; + curr_fc->tag_mask_set = &hws_definer_ones_set; + HWS_CALC_HDR_SRC(curr_fc, outer_headers.ip_version); + } + + return 0; +} + +static int +hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd, + u32 *match_param) +{ + bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set; + struct mlx5hws_definer_fc *fc = cd->fc; + struct mlx5hws_definer_fc *curr_fc; + u32 *s_ipv6, *d_ipv6; + + if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) || + HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) || + HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) { + mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n"); + return -EINVAL; + } + + /* L2 Check ethertype */ + HWS_SET_HDR(fc, match_param, ETH_TYPE_I, + inner_headers.ethertype, + eth_l2_inner.l3_ethertype); + /* L2 Check SMAC 47_16 */ + HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_I, + inner_headers.smac_47_16, eth_l2_src_inner.smac_47_16); + /* L2 Check SMAC 15_0 */ + HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_I, + inner_headers.smac_15_0, eth_l2_src_inner.smac_15_0); + /* L2 Check DMAC 47_16 */ + HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_I, + inner_headers.dmac_47_16, eth_l2_inner.dmac_47_16); + /* L2 Check DMAC 15_0 */ + HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_I, + inner_headers.dmac_15_0, eth_l2_inner.dmac_15_0); + + /* L2 VLAN */ + HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_I, + inner_headers.first_prio, eth_l2_inner.first_priority); + HWS_SET_HDR(fc, match_param, VLAN_CFI_I, + inner_headers.first_cfi, eth_l2_inner.first_cfi); + HWS_SET_HDR(fc, match_param, VLAN_ID_I, + inner_headers.first_vid, eth_l2_inner.first_vlan_id); + + /* L2 CVLAN and SVLAN */ + if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag) || + HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I]; + HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.first_vlan_qualifier); + curr_fc->tag_set = &hws_definer_inner_vlan_type_set; + curr_fc->tag_mask_set = &hws_definer_ones_set; + } + /* L3 Check IP header */ + HWS_SET_HDR(fc, match_param, IP_PROTOCOL_I, + inner_headers.ip_protocol, + eth_l3_inner.protocol_next_header); + HWS_SET_HDR(fc, match_param, IP_VERSION_I, + inner_headers.ip_version, + eth_l3_inner.ip_version); + HWS_SET_HDR(fc, match_param, IP_TTL_I, + inner_headers.ttl_hoplimit, + eth_l3_inner.time_to_live_hop_limit); + + /* L3 Check IPv4/IPv6 addresses */ + s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param, + inner_headers.src_ipv4_src_ipv6.ipv6_layout); + d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param, + inner_headers.dst_ipv4_dst_ipv6.ipv6_layout); + + /* Assume IPv6 is used if ipv6 bits are set */ + is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2]; + is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2]; + + if (is_s_ipv6) { + /* Handle IPv6 source address */ + HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96, + ipv6_src_inner.ipv6_address_127_96); + HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_I, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64, + ipv6_src_inner.ipv6_address_95_64); + HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_I, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32, + ipv6_src_inner.ipv6_address_63_32); + HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I, + 
inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv6_src_inner.ipv6_address_31_0); + } else { + /* Handle IPv4 source address */ + HWS_SET_HDR(fc, match_param, IPV4_SRC_I, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv4_src_dest_inner.source_address); + } + if (is_d_ipv6) { + /* Handle IPv6 destination address */ + HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96, + ipv6_dst_inner.ipv6_address_127_96); + HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_I, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64, + ipv6_dst_inner.ipv6_address_95_64); + HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_I, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32, + ipv6_dst_inner.ipv6_address_63_32); + HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_I, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv6_dst_inner.ipv6_address_31_0); + } else { + /* Handle IPv4 destination address */ + HWS_SET_HDR(fc, match_param, IPV4_DST_I, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv4_src_dest_inner.destination_address); + } + + /* L4 Handle TCP/UDP */ + HWS_SET_HDR(fc, match_param, L4_SPORT_I, + inner_headers.tcp_sport, eth_l4_inner.source_port); + HWS_SET_HDR(fc, match_param, L4_DPORT_I, + inner_headers.tcp_dport, eth_l4_inner.destination_port); + HWS_SET_HDR(fc, match_param, L4_SPORT_I, + inner_headers.udp_sport, eth_l4_inner.source_port); + HWS_SET_HDR(fc, match_param, L4_DPORT_I, + inner_headers.udp_dport, eth_l4_inner.destination_port); + HWS_SET_HDR(fc, match_param, TCP_FLAGS_I, + inner_headers.tcp_flags, eth_l4_inner.tcp_flags); + + /* L3 Handle DSCP, ECN and IHL */ + HWS_SET_HDR(fc, match_param, IP_DSCP_I, + inner_headers.ip_dscp, eth_l3_inner.dscp); + HWS_SET_HDR(fc, match_param, IP_ECN_I, + inner_headers.ip_ecn, eth_l3_inner.ecn); + HWS_SET_HDR(fc, match_param, IPV4_IHL_I, + inner_headers.ipv4_ihl, eth_l3_inner.ihl); + + /* Set IP fragmented bit */ + if (HWS_IS_FLD_SET(match_param, inner_headers.frag)) { + if (HWS_IS_FLD_SET(match_param, misc_parameters.vxlan_vni)) { + HWS_SET_HDR(fc, match_param, IP_FRAG_I, + inner_headers.frag, eth_l2_inner.ip_fragmented); + } else { + smac_set = HWS_IS_FLD_SET(match_param, inner_headers.smac_15_0) || + HWS_IS_FLD_SET(match_param, inner_headers.smac_47_16); + dmac_set = HWS_IS_FLD_SET(match_param, inner_headers.dmac_15_0) || + HWS_IS_FLD_SET(match_param, inner_headers.dmac_47_16); + if (smac_set == dmac_set) { + HWS_SET_HDR(fc, match_param, IP_FRAG_I, + inner_headers.frag, eth_l4_inner.ip_fragmented); + } else { + HWS_SET_HDR(fc, match_param, IP_FRAG_I, + inner_headers.frag, eth_l2_src_inner.ip_fragmented); + } + } + } + + /* L3_type set */ + if (HWS_IS_FLD_SET(match_param, inner_headers.ip_version)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I]; + HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.l3_type); + curr_fc->tag_set = &hws_definer_l3_type_set; + curr_fc->tag_mask_set = &hws_definer_ones_set; + HWS_CALC_HDR_SRC(curr_fc, inner_headers.ip_version); + } + + return 0; +} + +static int +hws_definer_conv_misc(struct mlx5hws_definer_conv_data *cd, + u32 *match_param) +{ + struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps; + struct mlx5hws_definer_fc *fc = cd->fc; + struct mlx5hws_definer_fc *curr_fc; + + if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1, 0x1) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_64, 0xc) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_d8, 0x6) 
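/* any set mask bit the definer cannot represent fails the whole conversion rather than being silently ignored */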
|| + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_e0, 0xc) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_100, 0xc) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_120, 0xa) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_140, 0x8) || + HWS_IS_FLD_SET(match_param, misc_parameters.bth_dst_qp) || + HWS_IS_FLD_SET(match_param, misc_parameters.bth_opcode) || + HWS_IS_FLD_SET(match_param, misc_parameters.inner_esp_spi) || + HWS_IS_FLD_SET(match_param, misc_parameters.outer_esp_spi) || + HWS_IS_FLD_SET(match_param, misc_parameters.source_vhca_port) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1a0, 0x60)) { + mlx5hws_err(cd->ctx, "Unsupported misc parameters set\n"); + return -EINVAL; + } + + /* Check GRE related fields */ + if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_c_present)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_C]; + HWS_CALC_HDR(curr_fc, + misc_parameters.gre_c_present, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_gre, gre_c_present); + curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_c_present); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_k_present)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_K]; + HWS_CALC_HDR(curr_fc, + misc_parameters.gre_k_present, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_gre, gre_k_present); + curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_s_present)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_S]; + HWS_CALC_HDR(curr_fc, + misc_parameters.gre_s_present, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_gre, gre_s_present); + curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_s_present); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_protocol)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]; + HWS_CALC_HDR(curr_fc, + misc_parameters.gre_protocol, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_gre, gre_protocol); + curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_key.key)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY; + HWS_SET_HDR(fc, match_param, GRE_OPT_KEY, + misc_parameters.gre_key.key, tunnel_header.tunnel_header_2); + } + + /* Check GENEVE related fields */ + if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_vni)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_VNI]; + HWS_CALC_HDR(curr_fc, + misc_parameters.geneve_vni, + tunnel_header.tunnel_header_1); + curr_fc->bit_mask = __mlx5_mask(header_geneve, vni); + curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_opt_len)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN]; + HWS_CALC_HDR(curr_fc, + misc_parameters.geneve_opt_len, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_geneve, opt_len); + curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, opt_len); + } + + if (HWS_IS_FLD_SET(match_param, 
misc_parameters.geneve_protocol_type)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_PROTO]; + HWS_CALC_HDR(curr_fc, + misc_parameters.geneve_protocol_type, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_geneve, protocol_type); + curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_oam)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OAM]; + HWS_CALC_HDR(curr_fc, + misc_parameters.geneve_oam, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_geneve, o_flag); + curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, o_flag); + } + + HWS_SET_HDR(fc, match_param, SOURCE_QP, + misc_parameters.source_sqn, source_qp_gvmi.source_qp); + HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_O, + misc_parameters.outer_ipv6_flow_label, eth_l3_outer.flow_label); + HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_I, + misc_parameters.inner_ipv6_flow_label, eth_l3_inner.flow_label); + + /* L2 Second VLAN */ + HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_O, + misc_parameters.outer_second_prio, eth_l2_outer.second_priority); + HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_I, + misc_parameters.inner_second_prio, eth_l2_inner.second_priority); + HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_O, + misc_parameters.outer_second_cfi, eth_l2_outer.second_cfi); + HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_I, + misc_parameters.inner_second_cfi, eth_l2_inner.second_cfi); + HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_O, + misc_parameters.outer_second_vid, eth_l2_outer.second_vlan_id); + HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_I, + misc_parameters.inner_second_vid, eth_l2_inner.second_vlan_id); + + /* L2 Second CVLAN and SVLAN */ + if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag) || + HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O]; + HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.second_vlan_qualifier); + curr_fc->tag_set = &hws_definer_outer_second_vlan_type_set; + curr_fc->tag_mask_set = &hws_definer_ones_set; + } + + if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) || + HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I]; + HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.second_vlan_qualifier); + curr_fc->tag_set = &hws_definer_inner_second_vlan_type_set; + curr_fc->tag_mask_set = &hws_definer_ones_set; + } + + /* VXLAN VNI */ + if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.vxlan_vni)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_VNI]; + HWS_CALC_HDR(curr_fc, misc_parameters.vxlan_vni, tunnel_header.tunnel_header_1); + curr_fc->bit_mask = __mlx5_mask(header_vxlan, vni); + curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni); + } + + /* Flex protocol steering ok bits */ + if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.geneve_tlv_option_0_exist)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE; + + if (!caps->flex_parser_ok_bits_supp) { + mlx5hws_err(cd->ctx, "Unsupported flex_parser_ok_bits_supp capability\n"); + return -EOPNOTSUPP; + } + + curr_fc = hws_definer_flex_parser_steering_ok_bits_handler( + cd, caps->flex_parser_id_geneve_tlv_option_0); + if (!curr_fc) + return -EINVAL; + + 
HWS_CALC_HDR_SRC(fc, misc_parameters.geneve_tlv_option_0_exist); + } + + if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_SOURCE_GVMI]; + HWS_CALC_HDR_DST(curr_fc, source_qp_gvmi.source_gvmi); + curr_fc->tag_mask_set = &hws_definer_ones_set; + curr_fc->tag_set = HWS_IS_FLD_SET(match_param, + misc_parameters.source_eswitch_owner_vhca_id) ? + &hws_definer_set_source_gvmi_vhca_id : + &hws_definer_set_source_gvmi; + } else { + if (HWS_IS_FLD_SET(match_param, misc_parameters.source_eswitch_owner_vhca_id)) { + mlx5hws_err(cd->ctx, + "Unsupported source_eswitch_owner_vhca_id field usage\n"); + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int +hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd, + u32 *match_param) +{ + struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps; + struct mlx5hws_definer_fc *fc = cd->fc; + struct mlx5hws_definer_fc *curr_fc; + + if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) || + HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) || + HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) { + mlx5hws_err(cd->ctx, "Unsupported misc2 parameters set\n"); + return -EINVAL; + } + + HWS_SET_HDR(fc, match_param, MPLS0_O, + misc_parameters_2.outer_first_mpls, mpls_outer.mpls0_label); + HWS_SET_HDR(fc, match_param, MPLS0_I, + misc_parameters_2.inner_first_mpls, mpls_inner.mpls0_label); + HWS_SET_HDR(fc, match_param, REG_0, + misc_parameters_2.metadata_reg_c_0, registers.register_c_0); + HWS_SET_HDR(fc, match_param, REG_1, + misc_parameters_2.metadata_reg_c_1, registers.register_c_1); + HWS_SET_HDR(fc, match_param, REG_2, + misc_parameters_2.metadata_reg_c_2, registers.register_c_2); + HWS_SET_HDR(fc, match_param, REG_3, + misc_parameters_2.metadata_reg_c_3, registers.register_c_3); + HWS_SET_HDR(fc, match_param, REG_4, + misc_parameters_2.metadata_reg_c_4, registers.register_c_4); + HWS_SET_HDR(fc, match_param, REG_5, + misc_parameters_2.metadata_reg_c_5, registers.register_c_5); + HWS_SET_HDR(fc, match_param, REG_6, + misc_parameters_2.metadata_reg_c_6, registers.register_c_6); + HWS_SET_HDR(fc, match_param, REG_7, + misc_parameters_2.metadata_reg_c_7, registers.register_c_7); + HWS_SET_HDR(fc, match_param, REG_A, + misc_parameters_2.metadata_reg_a, metadata.general_purpose); + + if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_gre)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over gre parameters set\n"); + return -EOPNOTSUPP; + } + + curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_gre); + if (!curr_fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_gre); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_udp)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over udp parameters set\n"); + return -EOPNOTSUPP; + } + + curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_udp); + if (!curr_fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, 
misc_parameters_2.outer_first_mpls_over_udp); + } + + return 0; +} + +static int +hws_definer_conv_misc3(struct mlx5hws_definer_conv_data *cd, u32 *match_param) +{ + struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps; + struct mlx5hws_definer_fc *fc = cd->fc; + struct mlx5hws_definer_fc *curr_fc; + bool vxlan_gpe_flex_parser_enabled; + + /* Check reserved and unsupported fields */ + if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_80, 0x8) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_b0, 0x10) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_170, 0x10) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_1e0, 0x20)) { + mlx5hws_err(cd->ctx, "Unsupported misc3 parameters set\n"); + return -EINVAL; + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_seq_num) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_ack_num)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_I; + HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM, + misc_parameters_3.inner_tcp_seq_num, tcp_icmp.tcp_seq); + HWS_SET_HDR(fc, match_param, TCP_ACK_NUM, + misc_parameters_3.inner_tcp_ack_num, tcp_icmp.tcp_ack); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_seq_num) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_ack_num)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_O; + HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM, + misc_parameters_3.outer_tcp_seq_num, tcp_icmp.tcp_seq); + HWS_SET_HDR(fc, match_param, TCP_ACK_NUM, + misc_parameters_3.outer_tcp_ack_num, tcp_icmp.tcp_ack); + } + + vxlan_gpe_flex_parser_enabled = caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED; + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_vni)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE; + + if (!vxlan_gpe_flex_parser_enabled) { + mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n"); + return -EOPNOTSUPP; + } + + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI]; + HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_vni, + tunnel_header.tunnel_header_1); + curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni); + curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_next_protocol)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE; + + if (!vxlan_gpe_flex_parser_enabled) { + mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n"); + return -EOPNOTSUPP; + } + + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO]; + HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_next_protocol, + tunnel_header.tunnel_header_0); + curr_fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol); + curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol); + curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_flags)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE; + + if (!vxlan_gpe_flex_parser_enabled) { + mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n"); + return -EOPNOTSUPP; + } + + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS]; + HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_flags, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags); + curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_header_data) || + HWS_IS_FLD_SET(match_param, 
misc_parameters_3.icmp_type) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported ICMPv4 flex parser\n"); + return -EOPNOTSUPP; + } + + HWS_SET_HDR(fc, match_param, ICMP_DW3, + misc_parameters_3.icmp_header_data, tcp_icmp.icmp_dw3); + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1]; + HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1); + curr_fc->tag_set = &hws_definer_icmp_dw1_set; + } + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_header_data) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported ICMPv6 parser\n"); + return -EOPNOTSUPP; + } + + HWS_SET_HDR(fc, match_param, ICMP_DW3, + misc_parameters_3.icmpv6_header_data, tcp_icmp.icmp_dw3); + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1]; + HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1); + curr_fc->tag_set = &hws_definer_icmpv6_dw1_set; + } + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.geneve_tlv_option_0_data)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE; + + curr_fc = + hws_definer_flex_parser_handler(cd, + caps->flex_parser_id_geneve_tlv_option_0); + if (!curr_fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, misc_parameters_3.geneve_tlv_option_0_data); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_teid)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported GTPU TEID flex parser\n"); + return -EOPNOTSUPP; + } + + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_TEID]; + fc->tag_set = &hws_definer_generic_set; + fc->bit_mask = __mlx5_mask(header_gtp, teid); + fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE; + HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_teid); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_type)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n"); + return -EOPNOTSUPP; + } + + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE]; + fc->tag_set = &hws_definer_generic_set; + fc->bit_mask = __mlx5_mask(header_gtp, msg_type); + fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type); + fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE; + HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_type); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_flags)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n"); + return -EOPNOTSUPP; + } + + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE]; + fc->tag_set = &hws_definer_generic_set; + fc->bit_mask = __mlx5_mask(header_gtp, msg_flags); + fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_flags); + fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE; + 
HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_flags); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_2)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported GTPU DW2 flex parser\n"); + return -EOPNOTSUPP; + } + + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW2]; + curr_fc->tag_set = &hws_definer_generic_set; + curr_fc->bit_mask = -1; + curr_fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE; + HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_2); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_first_ext_dw_0)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported GTPU first EXT DW0 flex parser\n"); + return -EOPNOTSUPP; + } + + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0]; + curr_fc->tag_set = &hws_definer_generic_set; + curr_fc->bit_mask = -1; + curr_fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE; + HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_first_ext_dw_0); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_0)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported GTPU DW0 flex parser\n"); + return -EOPNOTSUPP; + } + + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW0]; + curr_fc->tag_set = &hws_definer_generic_set; + curr_fc->bit_mask = -1; + curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE; + HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_0); + } + + return 0; +} + +static int +hws_definer_conv_misc4(struct mlx5hws_definer_conv_data *cd, + u32 *match_param) +{ + bool parser_is_used[HWS_NUM_OF_FLEX_PARSERS] = {}; + struct mlx5hws_definer_fc *fc; + u32 id, value; + + if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_4.reserved_at_100, 0x100)) { + mlx5hws_err(cd->ctx, "Unsupported misc4 parameters set\n"); + return -EINVAL; + } + + id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_0); + value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_0); + fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value); + if (!fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_0); + + id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_1); + value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_1); + fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value); + if (!fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_1); + + id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_2); + value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_2); + fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value); + if (!fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_2); + + id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_3); + value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_3); + fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value); + if (!fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_3); + + return 0; +} + +static int +hws_definer_conv_misc5(struct 
mlx5hws_definer_conv_data *cd, + u32 *match_param) +{ + struct mlx5hws_definer_fc *fc = cd->fc; + + if (HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_0) || + HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_1) || + HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_2) || + HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_3) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters_5.reserved_at_100, 0x100)) { + mlx5hws_err(cd->ctx, "Unsupported misc5 parameters set\n"); + return -EINVAL; + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_0)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1; + HWS_SET_HDR(fc, match_param, TNL_HDR_0, + misc_parameters_5.tunnel_header_0, tunnel_header.tunnel_header_0); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_1)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1; + HWS_SET_HDR(fc, match_param, TNL_HDR_1, + misc_parameters_5.tunnel_header_1, tunnel_header.tunnel_header_1); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_2)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2; + HWS_SET_HDR(fc, match_param, TNL_HDR_2, + misc_parameters_5.tunnel_header_2, tunnel_header.tunnel_header_2); + } + + HWS_SET_HDR(fc, match_param, TNL_HDR_3, + misc_parameters_5.tunnel_header_3, tunnel_header.tunnel_header_3); + + return 0; +} + +static int hws_definer_get_fc_size(struct mlx5hws_definer_fc *fc) +{ + u32 fc_sz = 0; + int i; + + /* For empty matcher, ZERO_SIZE_PTR is returned */ + if (fc == ZERO_SIZE_PTR) + return 0; + + for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) + if (fc[i].tag_set) + fc_sz++; + return fc_sz; +} + +static struct mlx5hws_definer_fc * +hws_definer_alloc_compressed_fc(struct mlx5hws_definer_fc *fc) +{ + struct mlx5hws_definer_fc *compressed_fc = NULL; + u32 definer_size = hws_definer_get_fc_size(fc); + u32 fc_sz = 0; + int i; + + compressed_fc = kcalloc(definer_size, sizeof(*compressed_fc), GFP_KERNEL); + if (!compressed_fc) + return NULL; + + /* For empty matcher, ZERO_SIZE_PTR is returned */ + if (!definer_size) + return compressed_fc; + + for (i = 0, fc_sz = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) { + if (!fc[i].tag_set) + continue; + + fc[i].fname = i; + memcpy(&compressed_fc[fc_sz++], &fc[i], sizeof(*compressed_fc)); + } + + return compressed_fc; +} + +static void +hws_definer_set_hl(u8 *hl, struct mlx5hws_definer_fc *fc) +{ + int i; + + /* nothing to do for empty matcher */ + if (fc == ZERO_SIZE_PTR) + return; + + for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) { + if (!fc[i].tag_set) + continue; + + HWS_SET32(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask); + } +} + +static struct mlx5hws_definer_fc * +hws_definer_alloc_fc(struct mlx5hws_context *ctx, + size_t len) +{ + struct mlx5hws_definer_fc *fc; + int i; + + fc = kcalloc(len, sizeof(*fc), GFP_KERNEL); + if (!fc) + return NULL; + + for (i = 0; i < len; i++) + fc[i].ctx = ctx; + + return fc; +} + +static int +hws_definer_conv_match_params_to_hl(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt, + u8 *hl) +{ + struct mlx5hws_definer_conv_data cd = {0}; + struct mlx5hws_definer_fc *fc; + int ret; + + fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX); + if (!fc) + return -ENOMEM; + + cd.fc = fc; + cd.ctx = ctx; + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6) { + mlx5hws_err(ctx, "Unsupported match_criteria_enable provided\n"); + ret = -EOPNOTSUPP; + goto err_free_fc; + } + + if 
(mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) { + ret = hws_definer_conv_outer(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) { + ret = hws_definer_conv_inner(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) { + ret = hws_definer_conv_misc(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) { + ret = hws_definer_conv_misc2(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) { + ret = hws_definer_conv_misc3(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) { + ret = hws_definer_conv_misc4(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) { + ret = hws_definer_conv_misc5(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + /* Check there is no conflicted fields set together */ + ret = hws_definer_check_match_flags(&cd); + if (ret) + goto err_free_fc; + + /* Allocate fc array on mt */ + mt->fc = hws_definer_alloc_compressed_fc(fc); + if (!mt->fc) { + mlx5hws_err(ctx, + "Convert match params: failed to set field copy to match template\n"); + ret = -ENOMEM; + goto err_free_fc; + } + mt->fc_sz = hws_definer_get_fc_size(fc); + + /* Fill in headers layout */ + hws_definer_set_hl(hl, fc); + + kfree(fc); + return 0; + +err_free_fc: + kfree(fc); + return ret; +} + +struct mlx5hws_definer_fc * +mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx, + u8 match_criteria_enable, + u32 *match_param, + int *fc_sz) +{ + struct mlx5hws_definer_fc *compressed_fc = NULL; + struct mlx5hws_definer_conv_data cd = {0}; + struct mlx5hws_definer_fc *fc; + int ret; + + fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX); + if (!fc) + return NULL; + + cd.fc = fc; + cd.ctx = ctx; + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) { + ret = hws_definer_conv_outer(&cd, match_param); + if (ret) + goto err_free_fc; + } + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) { + ret = hws_definer_conv_inner(&cd, match_param); + if (ret) + goto err_free_fc; + } + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) { + ret = hws_definer_conv_misc(&cd, match_param); + if (ret) + goto err_free_fc; + } + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) { + ret = hws_definer_conv_misc2(&cd, match_param); + if (ret) + goto err_free_fc; + } + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) { + ret = hws_definer_conv_misc3(&cd, match_param); + if (ret) + goto err_free_fc; + } + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) { + ret = hws_definer_conv_misc4(&cd, match_param); + if (ret) + goto err_free_fc; + } + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) { + ret = hws_definer_conv_misc5(&cd, match_param); + if (ret) + goto err_free_fc; + } + + /* Allocate fc array on mt */ + compressed_fc = hws_definer_alloc_compressed_fc(fc); + if (!compressed_fc) { + mlx5hws_err(ctx, + "Convert to compressed fc: failed to set field copy to match template\n"); + goto err_free_fc; + } + *fc_sz = hws_definer_get_fc_size(fc); + +err_free_fc: + kfree(fc); + return compressed_fc; +} 
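For orientation before the lower-level helpers that follow: a definer compresses the large header layout (hl) into a small match tag by picking up to DW_SELECTORS full dwords and BYTE_SELECTORS single bytes out of it, and hws_definer_find_byte_in_tag() below translates an hl byte offset into its position inside that tag. The standalone C sketch below illustrates only that translation, under simplifying assumptions (no jumbo definers, unused zero-valued selectors never collide with queried offsets); the toy_* names and the main() driver are invented for illustration, while DW_SIZE, DW_SELECTORS and BYTE_SELECTORS mirror the constants this patch adds in mlx5hws_internal.h and mlx5hws_definer.h.

#include <stdio.h>

/* Illustrative sketch only, not part of the patch. Constants mirror the
 * patch; struct and function names here are invented. */
#define DW_SIZE		4
#define DW_SELECTORS	9
#define BYTE_SELECTORS	8

struct toy_definer {
	unsigned char dw_selector[DW_SELECTORS];     /* hl DW indexes to copy */
	unsigned char byte_selector[BYTE_SELECTORS]; /* hl byte offsets to copy */
};

/* Translate a header-layout (hl) byte offset to its byte offset inside the
 * match tag, or return -1 if no selector covers it. DW selector i lands at
 * tag bytes DW_SIZE * (DW_SELECTORS - i - 1) onward; byte selectors occupy
 * the tail of the tag and are scanned from high index to low, matching the
 * mapping in hws_definer_find_byte_in_tag() below. */
static int toy_hl_to_tag(const struct toy_definer *d, unsigned int hl_byte)
{
	int i;

	for (i = 0; i < DW_SELECTORS; i++)
		if (d->dw_selector[i] == hl_byte / DW_SIZE)
			return hl_byte % DW_SIZE + DW_SIZE * (DW_SELECTORS - i - 1);

	for (i = BYTE_SELECTORS - 1; i >= 0; i--)
		if (d->byte_selector[i] == hl_byte)
			return DW_SIZE * DW_SELECTORS + (BYTE_SELECTORS - i - 1);

	return -1;
}

int main(void)
{
	/* DW selector 0 copies hl DW 12 (bytes 48..51) to tag bytes 32..35;
	 * byte selector 0 copies hl byte 7 to the last tag byte (43). */
	struct toy_definer d = { .dw_selector = { 12 }, .byte_selector = { 7 } };

	printf("hl byte 50 -> tag byte %d\n", toy_hl_to_tag(&d, 50)); /* 34 */
	printf("hl byte 7  -> tag byte %d\n", toy_hl_to_tag(&d, 7));  /* 43 */
	return 0;
}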
+ +static int +hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer, + u32 hl_byte_off, + u32 *tag_byte_off) +{ + int i, dw_to_scan; + u8 byte_offset; + + /* Avoid accessing unused DW selectors */ + dw_to_scan = mlx5hws_definer_is_jumbo(definer) ? + DW_SELECTORS : DW_SELECTORS_MATCH; + + /* Add offset since each DW covers multiple BYTEs */ + byte_offset = hl_byte_off % DW_SIZE; + for (i = 0; i < dw_to_scan; i++) { + if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) { + *tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1); + return 0; + } + } + + /* Add offset to skip DWs in definer */ + byte_offset = DW_SIZE * DW_SELECTORS; + /* Iterate in reverse since the code uses bytes from 7 -> 0 */ + for (i = BYTE_SELECTORS; i-- > 0 ;) { + if (definer->byte_selector[i] == hl_byte_off) { + *tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1); + return 0; + } + } + + return -EINVAL; +} + +static int +hws_definer_fc_bind(struct mlx5hws_definer *definer, + struct mlx5hws_definer_fc *fc, + u32 fc_sz) +{ + u32 tag_offset = 0; + int ret, byte_diff; + u32 i; + + for (i = 0; i < fc_sz; i++) { + /* Map header layout byte offset to byte offset in tag */ + ret = hws_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset); + if (ret) + return ret; + + /* Move setter based on the location in the definer */ + byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE; + fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE; + + /* Update offset in headers layout to offset in tag */ + fc->byte_off = tag_offset; + fc++; + } + + return 0; +} + +static bool +hws_definer_best_hl_fit_recu(struct mlx5hws_definer_sel_ctrl *ctrl, + u32 cur_dw, + u32 *data) +{ + u8 bytes_set; + int byte_idx; + bool ret; + int i; + + /* Reached end, nothing left to do */ + if (cur_dw == MLX5_ST_SZ_DW(definer_hl)) + return true; + + /* No data set, can skip to next DW */ + while (!*data) { + cur_dw++; + data++; + + /* Reached end, nothing left to do */ + if (cur_dw == MLX5_ST_SZ_DW(definer_hl)) + return true; + } + + /* Used all DW selectors and Byte selectors, no possible solution */ + if (ctrl->allowed_full_dw == ctrl->used_full_dw && + ctrl->allowed_lim_dw == ctrl->used_lim_dw && + ctrl->allowed_bytes == ctrl->used_bytes) + return false; + + /* Try to use limited DW selectors */ + if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) { + ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw; + + ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1); + if (ret) + return ret; + + ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0; + } + + /* Try to use DW selectors */ + if (ctrl->allowed_full_dw > ctrl->used_full_dw) { + ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw; + + ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1); + if (ret) + return ret; + + ctrl->full_dw_selector[--ctrl->used_full_dw] = 0; + } + + /* No byte selector for offset bigger than 255 */ + if (cur_dw * DW_SIZE > 255) + return false; + + bytes_set = !!(0x000000ff & *data) + + !!(0x0000ff00 & *data) + + !!(0x00ff0000 & *data) + + !!(0xff000000 & *data); + + /* Check if there are enough byte selectors left */ + if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes) + return false; + + /* Try to use Byte selectors */ + for (i = 0; i < DW_SIZE; i++) + if ((0xff000000 >> (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) { + /* Use byte selectors high to low */ + byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1; + ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i; + ctrl->used_bytes++; + } + + ret = 
hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1); + if (ret) + return ret; + + for (i = 0; i < DW_SIZE; i++) + if ((0xff << (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) { + ctrl->used_bytes--; + byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1; + ctrl->byte_selector[byte_idx] = 0; + } + + return false; +} + +static void +hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl, + struct mlx5hws_definer *definer) +{ + memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes); + memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw); + memcpy(definer->dw_selector + ctrl->allowed_full_dw, + ctrl->lim_dw_selector, ctrl->allowed_lim_dw); +} + +static int +hws_definer_find_best_match_fit(struct mlx5hws_context *ctx, + struct mlx5hws_definer *definer, + u8 *hl) +{ + struct mlx5hws_definer_sel_ctrl ctrl = {0}; + bool found; + + /* Try to create a match definer */ + ctrl.allowed_full_dw = DW_SELECTORS_MATCH; + ctrl.allowed_lim_dw = 0; + ctrl.allowed_bytes = BYTE_SELECTORS; + + found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl); + if (found) { + hws_definer_copy_sel_ctrl(&ctrl, definer); + definer->type = MLX5HWS_DEFINER_TYPE_MATCH; + return 0; + } + + /* Try to create a full/limited jumbo definer */ + ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS : + DW_SELECTORS_MATCH; + ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 : + DW_SELECTORS_LIMITED; + ctrl.allowed_bytes = BYTE_SELECTORS; + + found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl); + if (found) { + hws_definer_copy_sel_ctrl(&ctrl, definer); + definer->type = MLX5HWS_DEFINER_TYPE_JUMBO; + return 0; + } + + return E2BIG; +} + +static void +hws_definer_create_tag_mask(u32 *match_param, + struct mlx5hws_definer_fc *fc, + u32 fc_sz, + u8 *tag) +{ + u32 i; + + for (i = 0; i < fc_sz; i++) { + if (fc->tag_mask_set) + fc->tag_mask_set(fc, match_param, tag); + else + fc->tag_set(fc, match_param, tag); + fc++; + } +} + +void mlx5hws_definer_create_tag(u32 *match_param, + struct mlx5hws_definer_fc *fc, + u32 fc_sz, + u8 *tag) +{ + u32 i; + + for (i = 0; i < fc_sz; i++) { + fc->tag_set(fc, match_param, tag); + fc++; + } +} + +int mlx5hws_definer_get_id(struct mlx5hws_definer *definer) +{ + return definer->obj_id; +} + +int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a, + struct mlx5hws_definer *definer_b) +{ + int i; + + /* Future: Optimize by comparing selectors with valid mask only */ + for (i = 0; i < BYTE_SELECTORS; i++) + if (definer_a->byte_selector[i] != definer_b->byte_selector[i]) + return 1; + + for (i = 0; i < DW_SELECTORS; i++) + if (definer_a->dw_selector[i] != definer_b->dw_selector[i]) + return 1; + + for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++) + if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i]) + return 1; + + return 0; +} + +int +mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt, + struct mlx5hws_definer *match_definer) +{ + u8 *match_hl; + int ret; + + /* Union header-layout (hl) is used for creating a single definer + * field layout used with different bitmasks for hash and match. + */ + match_hl = kzalloc(MLX5_ST_SZ_BYTES(definer_hl), GFP_KERNEL); + if (!match_hl) + return -ENOMEM; + + /* Convert all mt items to header layout (hl) + * and allocate the match and range field copy array (fc & fcr). 
+ */ + ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl); + if (ret) { + mlx5hws_err(ctx, "Failed to convert items to header layout\n"); + goto free_fc; + } + + /* Find the match definer layout for header layout match union */ + ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl); + if (ret) { + if (ret == E2BIG) + mlx5hws_dbg(ctx, + "Failed to create match definer from header layout - E2BIG\n"); + else + mlx5hws_err(ctx, + "Failed to create match definer from header layout (%d)\n", + ret); + goto free_fc; + } + + kfree(match_hl); + return 0; + +free_fc: + kfree(mt->fc); + + kfree(match_hl); + return ret; +} + +int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache) +{ + struct mlx5hws_definer_cache *new_cache; + + new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL); + if (!new_cache) + return -ENOMEM; + + INIT_LIST_HEAD(&new_cache->list_head); + *cache = new_cache; + + return 0; +} + +void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache) +{ + kfree(cache); +} + +int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx, + struct mlx5hws_definer *definer) +{ + struct mlx5hws_definer_cache *cache = ctx->definer_cache; + struct mlx5hws_cmd_definer_create_attr def_attr = {0}; + struct mlx5hws_definer_cache_item *cached_definer; + u32 obj_id; + int ret; + + /* Search definer cache for requested definer */ + list_for_each_entry(cached_definer, &cache->list_head, list_node) { + if (mlx5hws_definer_compare(&cached_definer->definer, definer)) + continue; + + /* Reuse definer and set LRU (move to be first in the list) */ + list_del_init(&cached_definer->list_node); + list_add(&cached_definer->list_node, &cache->list_head); + cached_definer->refcount++; + return cached_definer->definer.obj_id; + } + + /* Allocate and create definer based on the bitmask tag */ + def_attr.match_mask = definer->mask.jumbo; + def_attr.dw_selector = definer->dw_selector; + def_attr.byte_selector = definer->byte_selector; + + ret = mlx5hws_cmd_definer_create(ctx->mdev, &def_attr, &obj_id); + if (ret) + return -1; + + cached_definer = kzalloc(sizeof(*cached_definer), GFP_KERNEL); + if (!cached_definer) + goto free_definer_obj; + + memcpy(&cached_definer->definer, definer, sizeof(*definer)); + cached_definer->definer.obj_id = obj_id; + cached_definer->refcount = 1; + list_add(&cached_definer->list_node, &cache->list_head); + + return obj_id; + +free_definer_obj: + mlx5hws_cmd_definer_destroy(ctx->mdev, obj_id); + return -1; +} + +static void +hws_definer_put_obj(struct mlx5hws_context *ctx, u32 obj_id) +{ + struct mlx5hws_definer_cache_item *cached_definer; + + list_for_each_entry(cached_definer, &ctx->definer_cache->list_head, list_node) { + if (cached_definer->definer.obj_id != obj_id) + continue; + + /* Object found */ + if (--cached_definer->refcount) + return; + + list_del_init(&cached_definer->list_node); + mlx5hws_cmd_definer_destroy(ctx->mdev, cached_definer->definer.obj_id); + kfree(cached_definer); + return; + } + + /* Programming error, object must be part of cache */ + pr_warn("HWS: failed putting definer object\n"); +} + +static struct mlx5hws_definer * +hws_definer_alloc(struct mlx5hws_context *ctx, + struct mlx5hws_definer_fc *fc, + int fc_sz, + u32 *match_param, + struct mlx5hws_definer *layout, + bool bind_fc) +{ + struct mlx5hws_definer *definer; + int ret; + + definer = kmemdup(layout, sizeof(*definer), GFP_KERNEL); + if (!definer) + return NULL; + + /* Align field copy array based on given layout */ + if (bind_fc) { + ret = 
hws_definer_fc_bind(definer, fc, fc_sz); + if (ret) { + mlx5hws_err(ctx, "Failed to bind field copy to definer\n"); + goto free_definer; + } + } + + /* Create the tag mask used for definer creation */ + hws_definer_create_tag_mask(match_param, fc, fc_sz, definer->mask.jumbo); + + ret = mlx5hws_definer_get_obj(ctx, definer); + if (ret < 0) + goto free_definer; + + definer->obj_id = ret; + return definer; + +free_definer: + kfree(definer); + return NULL; +} + +void mlx5hws_definer_free(struct mlx5hws_context *ctx, + struct mlx5hws_definer *definer) +{ + hws_definer_put_obj(ctx, definer->obj_id); + kfree(definer); +} + +static int +hws_definer_mt_match_init(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt, + struct mlx5hws_definer *match_layout) +{ + /* Create mandatory match definer */ + mt->definer = hws_definer_alloc(ctx, + mt->fc, + mt->fc_sz, + mt->match_param, + match_layout, + true); + if (!mt->definer) { + mlx5hws_err(ctx, "Failed to create match definer\n"); + return -EINVAL; + } + + return 0; +} + +static void +hws_definer_mt_match_uninit(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt) +{ + mlx5hws_definer_free(ctx, mt->definer); +} + +int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt) +{ + struct mlx5hws_definer match_layout = {0}; + int ret; + + ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout); + if (ret) { + mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n"); + return ret; + } + + /* Calculate definers needed for exact match */ + ret = hws_definer_mt_match_init(ctx, mt, &match_layout); + if (ret) { + mlx5hws_err(ctx, "Failed to init match definers\n"); + goto free_fc; + } + + return 0; + +free_fc: + kfree(mt->fc); + return ret; +} + +void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt) +{ + hws_definer_mt_match_uninit(ctx, mt); + kfree(mt->fc); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h new file mode 100644 index 000000000000..2f6a7df4021c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_DEFINER_H_ +#define MLX5HWS_DEFINER_H_ + +/* Max available selectors */ +#define DW_SELECTORS 9 +#define BYTE_SELECTORS 8 + +/* Selectors based on match TAG */ +#define DW_SELECTORS_MATCH 6 +#define DW_SELECTORS_LIMITED 3 + +/* Selectors based on range TAG */ +#define DW_SELECTORS_RANGE 2 +#define BYTE_SELECTORS_RANGE 8 + +#define HWS_NUM_OF_FLEX_PARSERS 8 + +enum mlx5hws_definer_fname { + MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O, + MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I, + MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O, + MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I, + MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O, + MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I, + MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O, + MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I, + MLX5HWS_DEFINER_FNAME_ETH_TYPE_O, + MLX5HWS_DEFINER_FNAME_ETH_TYPE_I, + MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O, + MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I, + MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O, + MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I, + MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O, + MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I, + MLX5HWS_DEFINER_FNAME_VLAN_CFI_O, + MLX5HWS_DEFINER_FNAME_VLAN_CFI_I, + MLX5HWS_DEFINER_FNAME_VLAN_ID_O, + 
MLX5HWS_DEFINER_FNAME_VLAN_ID_I, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I, + MLX5HWS_DEFINER_FNAME_IPV4_IHL_O, + MLX5HWS_DEFINER_FNAME_IPV4_IHL_I, + MLX5HWS_DEFINER_FNAME_IP_DSCP_O, + MLX5HWS_DEFINER_FNAME_IP_DSCP_I, + MLX5HWS_DEFINER_FNAME_IP_ECN_O, + MLX5HWS_DEFINER_FNAME_IP_ECN_I, + MLX5HWS_DEFINER_FNAME_IP_TTL_O, + MLX5HWS_DEFINER_FNAME_IP_TTL_I, + MLX5HWS_DEFINER_FNAME_IPV4_DST_O, + MLX5HWS_DEFINER_FNAME_IPV4_DST_I, + MLX5HWS_DEFINER_FNAME_IPV4_SRC_O, + MLX5HWS_DEFINER_FNAME_IPV4_SRC_I, + MLX5HWS_DEFINER_FNAME_IP_VERSION_O, + MLX5HWS_DEFINER_FNAME_IP_VERSION_I, + MLX5HWS_DEFINER_FNAME_IP_FRAG_O, + MLX5HWS_DEFINER_FNAME_IP_FRAG_I, + MLX5HWS_DEFINER_FNAME_IP_LEN_O, + MLX5HWS_DEFINER_FNAME_IP_LEN_I, + MLX5HWS_DEFINER_FNAME_IP_TOS_O, + MLX5HWS_DEFINER_FNAME_IP_TOS_I, + MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O, + MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I, + MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O, + MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O, + MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O, + MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O, + MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I, + MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I, + MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I, + MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I, + MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O, + MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I, + MLX5HWS_DEFINER_FNAME_L4_SPORT_O, + MLX5HWS_DEFINER_FNAME_L4_SPORT_I, + MLX5HWS_DEFINER_FNAME_L4_DPORT_O, + MLX5HWS_DEFINER_FNAME_L4_DPORT_I, + MLX5HWS_DEFINER_FNAME_TCP_FLAGS_I, + MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O, + MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM, + MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM, + MLX5HWS_DEFINER_FNAME_GTP_TEID, + MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE, + MLX5HWS_DEFINER_FNAME_GTP_EXT_FLAG, + MLX5HWS_DEFINER_FNAME_GTP_NEXT_EXT_HDR, + MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_PDU, + MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_QFI, + MLX5HWS_DEFINER_FNAME_GTPU_DW0, + MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0, + MLX5HWS_DEFINER_FNAME_GTPU_DW2, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7, + MLX5HWS_DEFINER_FNAME_VPORT_REG_C_0, + MLX5HWS_DEFINER_FNAME_VXLAN_FLAGS, + MLX5HWS_DEFINER_FNAME_VXLAN_VNI, + MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS, + MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0, + MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO, + MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI, + MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD1, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN, + MLX5HWS_DEFINER_FNAME_GENEVE_OAM, + MLX5HWS_DEFINER_FNAME_GENEVE_PROTO, + MLX5HWS_DEFINER_FNAME_GENEVE_VNI, + MLX5HWS_DEFINER_FNAME_SOURCE_QP, + MLX5HWS_DEFINER_FNAME_SOURCE_GVMI, + MLX5HWS_DEFINER_FNAME_REG_0, + MLX5HWS_DEFINER_FNAME_REG_1, + MLX5HWS_DEFINER_FNAME_REG_2, + MLX5HWS_DEFINER_FNAME_REG_3, + 
MLX5HWS_DEFINER_FNAME_REG_4, + MLX5HWS_DEFINER_FNAME_REG_5, + MLX5HWS_DEFINER_FNAME_REG_6, + MLX5HWS_DEFINER_FNAME_REG_7, + MLX5HWS_DEFINER_FNAME_REG_8, + MLX5HWS_DEFINER_FNAME_REG_9, + MLX5HWS_DEFINER_FNAME_REG_10, + MLX5HWS_DEFINER_FNAME_REG_11, + MLX5HWS_DEFINER_FNAME_REG_A, + MLX5HWS_DEFINER_FNAME_REG_B, + MLX5HWS_DEFINER_FNAME_GRE_KEY_PRESENT, + MLX5HWS_DEFINER_FNAME_GRE_C, + MLX5HWS_DEFINER_FNAME_GRE_K, + MLX5HWS_DEFINER_FNAME_GRE_S, + MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL, + MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY, + MLX5HWS_DEFINER_FNAME_GRE_OPT_SEQ, + MLX5HWS_DEFINER_FNAME_GRE_OPT_CHECKSUM, + MLX5HWS_DEFINER_FNAME_INTEGRITY_O, + MLX5HWS_DEFINER_FNAME_INTEGRITY_I, + MLX5HWS_DEFINER_FNAME_ICMP_DW1, + MLX5HWS_DEFINER_FNAME_ICMP_DW2, + MLX5HWS_DEFINER_FNAME_ICMP_DW3, + MLX5HWS_DEFINER_FNAME_IPSEC_SPI, + MLX5HWS_DEFINER_FNAME_IPSEC_SEQUENCE_NUMBER, + MLX5HWS_DEFINER_FNAME_IPSEC_SYNDROME, + MLX5HWS_DEFINER_FNAME_MPLS0_O, + MLX5HWS_DEFINER_FNAME_MPLS1_O, + MLX5HWS_DEFINER_FNAME_MPLS2_O, + MLX5HWS_DEFINER_FNAME_MPLS3_O, + MLX5HWS_DEFINER_FNAME_MPLS4_O, + MLX5HWS_DEFINER_FNAME_MPLS0_I, + MLX5HWS_DEFINER_FNAME_MPLS1_I, + MLX5HWS_DEFINER_FNAME_MPLS2_I, + MLX5HWS_DEFINER_FNAME_MPLS3_I, + MLX5HWS_DEFINER_FNAME_MPLS4_I, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_O, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_O, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_O, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_O, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_O, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_I, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_I, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_I, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_I, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_I, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_0, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_1, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_2, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_3, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_4, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_5, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_6, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_7, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_0, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_1, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_2, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_3, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_4, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_5, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_6, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_7, + MLX5HWS_DEFINER_FNAME_IB_L4_OPCODE, + MLX5HWS_DEFINER_FNAME_IB_L4_QPN, + MLX5HWS_DEFINER_FNAME_IB_L4_A, + MLX5HWS_DEFINER_FNAME_RANDOM_NUM, + MLX5HWS_DEFINER_FNAME_PTYPE_L2_O, + MLX5HWS_DEFINER_FNAME_PTYPE_L2_I, + MLX5HWS_DEFINER_FNAME_PTYPE_L3_O, + MLX5HWS_DEFINER_FNAME_PTYPE_L3_I, + MLX5HWS_DEFINER_FNAME_PTYPE_L4_O, + MLX5HWS_DEFINER_FNAME_PTYPE_L4_I, + MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_O, + MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_I, + MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_O, + MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_I, + MLX5HWS_DEFINER_FNAME_TNL_HDR_0, + MLX5HWS_DEFINER_FNAME_TNL_HDR_1, + MLX5HWS_DEFINER_FNAME_TNL_HDR_2, + MLX5HWS_DEFINER_FNAME_TNL_HDR_3, + MLX5HWS_DEFINER_FNAME_MAX, +}; + +enum mlx5hws_definer_match_criteria { + MLX5HWS_DEFINER_MATCH_CRITERIA_EMPTY = 0, + MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER = 1 << 0, + MLX5HWS_DEFINER_MATCH_CRITERIA_MISC = 1 << 1, + MLX5HWS_DEFINER_MATCH_CRITERIA_INNER = 1 << 2, + MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2 = 1 << 
3, + MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3 = 1 << 4, + MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4 = 1 << 5, + MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5 = 1 << 6, + MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6 = 1 << 7, +}; + +enum mlx5hws_definer_type { + MLX5HWS_DEFINER_TYPE_MATCH, + MLX5HWS_DEFINER_TYPE_JUMBO, +}; + +enum mlx5hws_definer_match_flag { + MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE = 1 << 0, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE = 1 << 1, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU = 1 << 2, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE = 1 << 3, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN = 1 << 4, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1 = 1 << 5, + + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY = 1 << 6, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2 = 1 << 7, + + MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE = 1 << 8, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP = 1 << 9, + + MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 = 1 << 10, + MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 = 1 << 11, + MLX5HWS_DEFINER_MATCH_FLAG_TCP_O = 1 << 12, + MLX5HWS_DEFINER_MATCH_FLAG_TCP_I = 1 << 13, +}; + +struct mlx5hws_definer_fc { + struct mlx5hws_context *ctx; + /* Source */ + u32 s_byte_off; + int s_bit_off; + u32 s_bit_mask; + /* Destination */ + u32 byte_off; + int bit_off; + u32 bit_mask; + enum mlx5hws_definer_fname fname; + void (*tag_set)(struct mlx5hws_definer_fc *fc, + void *mach_param, + u8 *tag); + void (*tag_mask_set)(struct mlx5hws_definer_fc *fc, + void *mach_param, + u8 *tag); +}; + +struct mlx5_ifc_definer_hl_eth_l2_bits { + u8 dmac_47_16[0x20]; + u8 dmac_15_0[0x10]; + u8 l3_ethertype[0x10]; + u8 reserved_at_40[0x1]; + u8 sx_sniffer[0x1]; + u8 functional_lb[0x1]; + u8 ip_fragmented[0x1]; + u8 qp_type[0x2]; + u8 encap_type[0x2]; + u8 port_number[0x2]; + u8 l3_type[0x2]; + u8 l4_type_bwc[0x2]; + u8 first_vlan_qualifier[0x2]; + u8 first_priority[0x3]; + u8 first_cfi[0x1]; + u8 first_vlan_id[0xc]; + u8 l4_type[0x4]; + u8 reserved_at_64[0x2]; + u8 ipsec_layer[0x2]; + u8 l2_type[0x2]; + u8 force_lb[0x1]; + u8 l2_ok[0x1]; + u8 l3_ok[0x1]; + u8 l4_ok[0x1]; + u8 second_vlan_qualifier[0x2]; + u8 second_priority[0x3]; + u8 second_cfi[0x1]; + u8 second_vlan_id[0xc]; +}; + +struct mlx5_ifc_definer_hl_eth_l2_src_bits { + u8 smac_47_16[0x20]; + u8 smac_15_0[0x10]; + u8 loopback_syndrome[0x8]; + u8 l3_type[0x2]; + u8 l4_type_bwc[0x2]; + u8 first_vlan_qualifier[0x2]; + u8 ip_fragmented[0x1]; + u8 functional_lb[0x1]; +}; + +struct mlx5_ifc_definer_hl_ib_l2_bits { + u8 sx_sniffer[0x1]; + u8 force_lb[0x1]; + u8 functional_lb[0x1]; + u8 reserved_at_3[0x3]; + u8 port_number[0x2]; + u8 sl[0x4]; + u8 qp_type[0x2]; + u8 lnh[0x2]; + u8 dlid[0x10]; + u8 vl[0x4]; + u8 lrh_packet_length[0xc]; + u8 slid[0x10]; +}; + +struct mlx5_ifc_definer_hl_eth_l3_bits { + u8 ip_version[0x4]; + u8 ihl[0x4]; + union { + u8 tos[0x8]; + struct { + u8 dscp[0x6]; + u8 ecn[0x2]; + }; + }; + u8 time_to_live_hop_limit[0x8]; + u8 protocol_next_header[0x8]; + u8 identification[0x10]; + union { + u8 ipv4_frag[0x10]; + struct { + u8 flags[0x3]; + u8 fragment_offset[0xd]; + }; + }; + u8 ipv4_total_length[0x10]; + u8 checksum[0x10]; + u8 reserved_at_60[0xc]; + u8 flow_label[0x14]; + u8 packet_length[0x10]; + u8 ipv6_payload_length[0x10]; +}; + +struct mlx5_ifc_definer_hl_eth_l4_bits { + u8 source_port[0x10]; + u8 destination_port[0x10]; + u8 data_offset[0x4]; + u8 l4_ok[0x1]; + u8 l3_ok[0x1]; + u8 ip_fragmented[0x1]; + u8 tcp_ns[0x1]; + union { + u8 tcp_flags[0x8]; + struct { + u8 tcp_cwr[0x1]; + u8 tcp_ece[0x1]; + u8 tcp_urg[0x1]; + u8 tcp_ack[0x1]; + u8 tcp_psh[0x1]; + u8 tcp_rst[0x1]; + u8 
tcp_syn[0x1]; + u8 tcp_fin[0x1]; + }; + }; + u8 first_fragment[0x1]; + u8 reserved_at_31[0xf]; +}; + +struct mlx5_ifc_definer_hl_src_qp_gvmi_bits { + u8 loopback_syndrome[0x8]; + u8 l3_type[0x2]; + u8 l4_type_bwc[0x2]; + u8 first_vlan_qualifier[0x2]; + u8 reserved_at_e[0x1]; + u8 functional_lb[0x1]; + u8 source_gvmi[0x10]; + u8 force_lb[0x1]; + u8 ip_fragmented[0x1]; + u8 source_is_requestor[0x1]; + u8 reserved_at_23[0x5]; + u8 source_qp[0x18]; +}; + +struct mlx5_ifc_definer_hl_ib_l4_bits { + u8 opcode[0x8]; + u8 qp[0x18]; + u8 se[0x1]; + u8 migreq[0x1]; + u8 ackreq[0x1]; + u8 fecn[0x1]; + u8 becn[0x1]; + u8 bth[0x1]; + u8 deth[0x1]; + u8 dcceth[0x1]; + u8 reserved_at_28[0x2]; + u8 pad_count[0x2]; + u8 tver[0x4]; + u8 p_key[0x10]; + u8 reserved_at_40[0x8]; + u8 deth_source_qp[0x18]; +}; + +enum mlx5hws_integrity_ok1_bits { + MLX5HWS_DEFINER_OKS1_FIRST_L4_OK = 24, + MLX5HWS_DEFINER_OKS1_FIRST_L3_OK = 25, + MLX5HWS_DEFINER_OKS1_SECOND_L4_OK = 26, + MLX5HWS_DEFINER_OKS1_SECOND_L3_OK = 27, + MLX5HWS_DEFINER_OKS1_FIRST_L4_CSUM_OK = 28, + MLX5HWS_DEFINER_OKS1_FIRST_IPV4_CSUM_OK = 29, + MLX5HWS_DEFINER_OKS1_SECOND_L4_CSUM_OK = 30, + MLX5HWS_DEFINER_OKS1_SECOND_IPV4_CSUM_OK = 31, +}; + +struct mlx5_ifc_definer_hl_oks1_bits { + union { + u8 oks1_bits[0x20]; + struct { + u8 second_ipv4_checksum_ok[0x1]; + u8 second_l4_checksum_ok[0x1]; + u8 first_ipv4_checksum_ok[0x1]; + u8 first_l4_checksum_ok[0x1]; + u8 second_l3_ok[0x1]; + u8 second_l4_ok[0x1]; + u8 first_l3_ok[0x1]; + u8 first_l4_ok[0x1]; + u8 flex_parser7_steering_ok[0x1]; + u8 flex_parser6_steering_ok[0x1]; + u8 flex_parser5_steering_ok[0x1]; + u8 flex_parser4_steering_ok[0x1]; + u8 flex_parser3_steering_ok[0x1]; + u8 flex_parser2_steering_ok[0x1]; + u8 flex_parser1_steering_ok[0x1]; + u8 flex_parser0_steering_ok[0x1]; + u8 second_ipv6_extension_header_vld[0x1]; + u8 first_ipv6_extension_header_vld[0x1]; + u8 l3_tunneling_ok[0x1]; + u8 l2_tunneling_ok[0x1]; + u8 second_tcp_ok[0x1]; + u8 second_udp_ok[0x1]; + u8 second_ipv4_ok[0x1]; + u8 second_ipv6_ok[0x1]; + u8 second_l2_ok[0x1]; + u8 vxlan_ok[0x1]; + u8 gre_ok[0x1]; + u8 first_tcp_ok[0x1]; + u8 first_udp_ok[0x1]; + u8 first_ipv4_ok[0x1]; + u8 first_ipv6_ok[0x1]; + u8 first_l2_ok[0x1]; + }; + }; +}; + +struct mlx5_ifc_definer_hl_oks2_bits { + u8 reserved_at_0[0xa]; + u8 second_mpls_ok[0x1]; + u8 second_mpls4_s_bit[0x1]; + u8 second_mpls4_qualifier[0x1]; + u8 second_mpls3_s_bit[0x1]; + u8 second_mpls3_qualifier[0x1]; + u8 second_mpls2_s_bit[0x1]; + u8 second_mpls2_qualifier[0x1]; + u8 second_mpls1_s_bit[0x1]; + u8 second_mpls1_qualifier[0x1]; + u8 second_mpls0_s_bit[0x1]; + u8 second_mpls0_qualifier[0x1]; + u8 first_mpls_ok[0x1]; + u8 first_mpls4_s_bit[0x1]; + u8 first_mpls4_qualifier[0x1]; + u8 first_mpls3_s_bit[0x1]; + u8 first_mpls3_qualifier[0x1]; + u8 first_mpls2_s_bit[0x1]; + u8 first_mpls2_qualifier[0x1]; + u8 first_mpls1_s_bit[0x1]; + u8 first_mpls1_qualifier[0x1]; + u8 first_mpls0_s_bit[0x1]; + u8 first_mpls0_qualifier[0x1]; +}; + +struct mlx5_ifc_definer_hl_voq_bits { + u8 reserved_at_0[0x18]; + u8 ecn_ok[0x1]; + u8 congestion[0x1]; + u8 profile[0x2]; + u8 internal_prio[0x4]; +}; + +struct mlx5_ifc_definer_hl_ipv4_src_dst_bits { + u8 source_address[0x20]; + u8 destination_address[0x20]; +}; + +struct mlx5_ifc_definer_hl_random_number_bits { + u8 random_number[0x10]; + u8 reserved[0x10]; +}; + +struct mlx5_ifc_definer_hl_ipv6_addr_bits { + u8 ipv6_address_127_96[0x20]; + u8 ipv6_address_95_64[0x20]; + u8 ipv6_address_63_32[0x20]; + u8 ipv6_address_31_0[0x20]; +}; + +struct 
mlx5_ifc_definer_tcp_icmp_header_bits { + union { + struct { + u8 icmp_dw1[0x20]; + u8 icmp_dw2[0x20]; + u8 icmp_dw3[0x20]; + }; + struct { + u8 tcp_seq[0x20]; + u8 tcp_ack[0x20]; + u8 tcp_win_urg[0x20]; + }; + }; +}; + +struct mlx5_ifc_definer_hl_tunnel_header_bits { + u8 tunnel_header_0[0x20]; + u8 tunnel_header_1[0x20]; + u8 tunnel_header_2[0x20]; + u8 tunnel_header_3[0x20]; +}; + +struct mlx5_ifc_definer_hl_ipsec_bits { + u8 spi[0x20]; + u8 sequence_number[0x20]; + u8 reserved[0x10]; + u8 ipsec_syndrome[0x8]; + u8 next_header[0x8]; +}; + +struct mlx5_ifc_definer_hl_metadata_bits { + u8 metadata_to_cqe[0x20]; + u8 general_purpose[0x20]; + u8 acomulated_hash[0x20]; +}; + +struct mlx5_ifc_definer_hl_flex_parser_bits { + u8 flex_parser_7[0x20]; + u8 flex_parser_6[0x20]; + u8 flex_parser_5[0x20]; + u8 flex_parser_4[0x20]; + u8 flex_parser_3[0x20]; + u8 flex_parser_2[0x20]; + u8 flex_parser_1[0x20]; + u8 flex_parser_0[0x20]; +}; + +struct mlx5_ifc_definer_hl_registers_bits { + u8 register_c_10[0x20]; + u8 register_c_11[0x20]; + u8 register_c_8[0x20]; + u8 register_c_9[0x20]; + u8 register_c_6[0x20]; + u8 register_c_7[0x20]; + u8 register_c_4[0x20]; + u8 register_c_5[0x20]; + u8 register_c_2[0x20]; + u8 register_c_3[0x20]; + u8 register_c_0[0x20]; + u8 register_c_1[0x20]; +}; + +struct mlx5_ifc_definer_hl_mpls_bits { + u8 mpls0_label[0x20]; + u8 mpls1_label[0x20]; + u8 mpls2_label[0x20]; + u8 mpls3_label[0x20]; + u8 mpls4_label[0x20]; +}; + +struct mlx5_ifc_definer_hl_bits { + struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_outer; + struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_inner; + struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_outer; + struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_inner; + struct mlx5_ifc_definer_hl_ib_l2_bits ib_l2; + struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_outer; + struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_inner; + struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_outer; + struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_inner; + struct mlx5_ifc_definer_hl_src_qp_gvmi_bits source_qp_gvmi; + struct mlx5_ifc_definer_hl_ib_l4_bits ib_l4; + struct mlx5_ifc_definer_hl_oks1_bits oks1; + struct mlx5_ifc_definer_hl_oks2_bits oks2; + struct mlx5_ifc_definer_hl_voq_bits voq; + u8 reserved_at_480[0x380]; + struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_outer; + struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_inner; + struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_outer; + struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_inner; + struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_outer; + struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_inner; + u8 unsupported_dest_ib_l3[0x80]; + u8 unsupported_source_ib_l3[0x80]; + u8 unsupported_udp_misc_outer[0x20]; + u8 unsupported_udp_misc_inner[0x20]; + struct mlx5_ifc_definer_tcp_icmp_header_bits tcp_icmp; + struct mlx5_ifc_definer_hl_tunnel_header_bits tunnel_header; + struct mlx5_ifc_definer_hl_mpls_bits mpls_outer; + struct mlx5_ifc_definer_hl_mpls_bits mpls_inner; + u8 unsupported_config_headers_outer[0x80]; + u8 unsupported_config_headers_inner[0x80]; + struct mlx5_ifc_definer_hl_random_number_bits random_number; + struct mlx5_ifc_definer_hl_ipsec_bits ipsec; + struct mlx5_ifc_definer_hl_metadata_bits metadata; + u8 unsupported_utc_timestamp[0x40]; + u8 unsupported_free_running_timestamp[0x40]; + struct mlx5_ifc_definer_hl_flex_parser_bits flex_parser; + struct mlx5_ifc_definer_hl_registers_bits registers; + /* Reserved in case header layout on future HW */ + u8 
unsupported_reserved[0xd40]; +}; + +enum mlx5hws_definer_gtp { + MLX5HWS_DEFINER_GTP_EXT_HDR_BIT = 0x04, +}; + +struct mlx5_ifc_header_gtp_bits { + u8 version[0x3]; + u8 proto_type[0x1]; + u8 reserved1[0x1]; + union { + u8 msg_flags[0x3]; + struct { + u8 ext_hdr_flag[0x1]; + u8 seq_num_flag[0x1]; + u8 pdu_flag[0x1]; + }; + }; + u8 msg_type[0x8]; + u8 msg_len[0x8]; + u8 teid[0x20]; +}; + +struct mlx5_ifc_header_opt_gtp_bits { + u8 seq_num[0x10]; + u8 pdu_num[0x8]; + u8 next_ext_hdr_type[0x8]; +}; + +struct mlx5_ifc_header_gtp_psc_bits { + u8 len[0x8]; + u8 pdu_type[0x4]; + u8 flags[0x4]; + u8 qfi[0x8]; + u8 reserved2[0x8]; +}; + +struct mlx5_ifc_header_ipv6_vtc_bits { + u8 version[0x4]; + union { + u8 tos[0x8]; + struct { + u8 dscp[0x6]; + u8 ecn[0x2]; + }; + }; + u8 flow_label[0x14]; +}; + +struct mlx5_ifc_header_ipv6_routing_ext_bits { + u8 next_hdr[0x8]; + u8 hdr_len[0x8]; + u8 type[0x8]; + u8 segments_left[0x8]; + union { + u8 flags[0x20]; + struct { + u8 last_entry[0x8]; + u8 flag[0x8]; + u8 tag[0x10]; + }; + }; +}; + +struct mlx5_ifc_header_vxlan_bits { + u8 flags[0x8]; + u8 reserved1[0x18]; + u8 vni[0x18]; + u8 reserved2[0x8]; +}; + +struct mlx5_ifc_header_vxlan_gpe_bits { + u8 flags[0x8]; + u8 rsvd0[0x10]; + u8 protocol[0x8]; + u8 vni[0x18]; + u8 rsvd1[0x8]; +}; + +struct mlx5_ifc_header_gre_bits { + union { + u8 c_rsvd0_ver[0x10]; + struct { + u8 gre_c_present[0x1]; + u8 reserved_at_1[0x1]; + u8 gre_k_present[0x1]; + u8 gre_s_present[0x1]; + u8 reserved_at_4[0x9]; + u8 version[0x3]; + }; + }; + u8 gre_protocol[0x10]; + u8 checksum[0x10]; + u8 reserved_at_30[0x10]; +}; + +struct mlx5_ifc_header_geneve_bits { + union { + u8 ver_opt_len_o_c_rsvd[0x10]; + struct { + u8 version[0x2]; + u8 opt_len[0x6]; + u8 o_flag[0x1]; + u8 c_flag[0x1]; + u8 reserved_at_a[0x6]; + }; + }; + u8 protocol_type[0x10]; + u8 vni[0x18]; + u8 reserved_at_38[0x8]; +}; + +struct mlx5_ifc_header_geneve_opt_bits { + u8 class[0x10]; + u8 type[0x8]; + u8 reserved[0x3]; + u8 len[0x5]; +}; + +struct mlx5_ifc_header_icmp_bits { + union { + u8 icmp_dw1[0x20]; + struct { + u8 type[0x8]; + u8 code[0x8]; + u8 cksum[0x10]; + }; + }; + union { + u8 icmp_dw2[0x20]; + struct { + u8 ident[0x10]; + u8 seq_nb[0x10]; + }; + }; +}; + +struct mlx5hws_definer { + enum mlx5hws_definer_type type; + u8 dw_selector[DW_SELECTORS]; + u8 byte_selector[BYTE_SELECTORS]; + struct mlx5hws_rule_match_tag mask; + u32 obj_id; +}; + +struct mlx5hws_definer_cache { + struct list_head list_head; +}; + +struct mlx5hws_definer_cache_item { + struct mlx5hws_definer definer; + u32 refcount; + struct list_head list_node; +}; + +static inline bool +mlx5hws_definer_is_jumbo(struct mlx5hws_definer *definer) +{ + return (definer->type == MLX5HWS_DEFINER_TYPE_JUMBO); +} + +void mlx5hws_definer_create_tag(u32 *match_param, + struct mlx5hws_definer_fc *fc, + u32 fc_sz, + u8 *tag); + +int mlx5hws_definer_get_id(struct mlx5hws_definer *definer); + +int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt); + +void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt); + +int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache); + +void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache); + +int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a, + struct mlx5hws_definer *definer_b); + +int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx, + struct mlx5hws_definer *definer); + +void mlx5hws_definer_free(struct mlx5hws_context *ctx, + struct mlx5hws_definer 
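As a quick sanity check of mlx5_ifc_header_gtp_bits above (3-bit version, 1-bit proto_type, one reserved bit, then the ext/seq/pdu message flags, MSB first), here is a hypothetical standalone decoder for the first byte of a GTP-U header; it only illustrates the field ordering and is not driver code:

#include <stdio.h>

int main(void)
{
	unsigned int b = 0x32;	/* example flags byte of a GTP-U packet */

	/* Field order follows mlx5_ifc_header_gtp_bits above */
	printf("version=%u proto_type=%u ext=%u seq=%u pdu=%u\n",
	       b >> 5,		/* version[0x3]      */
	       (b >> 4) & 1,	/* proto_type[0x1]   */
	       (b >> 2) & 1,	/* ext_hdr_flag[0x1] */
	       (b >> 1) & 1,	/* seq_num_flag[0x1] */
	       b & 1);		/* pdu_flag[0x1]     */
	/* prints: version=1 proto_type=1 ext=0 seq=1 pdu=0 */
	return 0;
}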
*definer); + +int mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt, + struct mlx5hws_definer *match_definer); + +struct mlx5hws_definer_fc * +mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx, + u8 match_criteria_enable, + u32 *match_param, + int *fc_sz); + +#endif /* MLX5HWS_DEFINER_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h new file mode 100644 index 000000000000..5643be1cd5bf --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_INTERNAL_H_ +#define MLX5HWS_INTERNAL_H_ + +#include <linux/mlx5/transobj.h> +#include <linux/mlx5/vport.h> +#include "fs_core.h" +#include "wq.h" +#include "lib/mlx5.h" + +#include "mlx5hws_prm.h" +#include "mlx5hws.h" +#include "mlx5hws_pool.h" +#include "mlx5hws_vport.h" +#include "mlx5hws_context.h" +#include "mlx5hws_table.h" +#include "mlx5hws_send.h" +#include "mlx5hws_rule.h" +#include "mlx5hws_cmd.h" +#include "mlx5hws_action.h" +#include "mlx5hws_definer.h" +#include "mlx5hws_matcher.h" +#include "mlx5hws_debug.h" +#include "mlx5hws_pat_arg.h" +#include "mlx5hws_bwc.h" +#include "mlx5hws_bwc_complex.h" + +#define W_SIZE 2 +#define DW_SIZE 4 +#define BITS_IN_BYTE 8 +#define BITS_IN_DW (BITS_IN_BYTE * DW_SIZE) + +#define IS_BIT_SET(_value, _bit) ((_value) & (1ULL << (_bit))) + +#define mlx5hws_err(ctx, arg...) mlx5_core_err((ctx)->mdev, ##arg) +#define mlx5hws_info(ctx, arg...) mlx5_core_info((ctx)->mdev, ##arg) +#define mlx5hws_dbg(ctx, arg...) 
mlx5_core_dbg((ctx)->mdev, ##arg) + +#define MLX5HWS_TABLE_TYPE_BASE 2 +#define MLX5HWS_ACTION_STE_IDX_ANY 0 + +static inline bool is_mem_zero(const u8 *mem, size_t size) +{ + if (unlikely(!size)) { + pr_warn("HWS: invalid buffer of size 0 in %s\n", __func__); + return true; + } + + return (*mem == 0) && memcmp(mem, mem + 1, size - 1) == 0; +} + +static inline unsigned long align(unsigned long val, unsigned long align) +{ + return (val + align - 1) & ~(align - 1); +} + +#endif /* MLX5HWS_INTERNAL_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c new file mode 100644 index 000000000000..1964261415aa --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c @@ -0,0 +1,1216 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +enum mlx5hws_matcher_rtc_type { + HWS_MATCHER_RTC_TYPE_MATCH, + HWS_MATCHER_RTC_TYPE_STE_ARRAY, + HWS_MATCHER_RTC_TYPE_MAX, +}; + +static const char * const mlx5hws_matcher_rtc_type_str[] = { + [HWS_MATCHER_RTC_TYPE_MATCH] = "MATCH", + [HWS_MATCHER_RTC_TYPE_STE_ARRAY] = "STE_ARRAY", + [HWS_MATCHER_RTC_TYPE_MAX] = "UNKNOWN", +}; + +static const char *hws_matcher_rtc_type_to_str(enum mlx5hws_matcher_rtc_type rtc_type) +{ + if (rtc_type > HWS_MATCHER_RTC_TYPE_MAX) + rtc_type = HWS_MATCHER_RTC_TYPE_MAX; + return mlx5hws_matcher_rtc_type_str[rtc_type]; +} + +static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules) +{ + /* Collision table concatenation is done only for large rule tables */ + return log_num_of_rules > MLX5HWS_MATCHER_ASSURED_RULES_TH; +} + +static u8 hws_matcher_rules_to_tbl_depth(u8 log_num_of_rules) +{ + if (hws_matcher_requires_col_tbl(log_num_of_rules)) + return MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH; + + /* For small rule tables we use a single deep table to assure insertion */ + return min(log_num_of_rules, MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH); +} + +static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher) +{ + mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id); +} + +static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_table *tbl = matcher->tbl; + int ret; + + ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n"); + return ret; + } + return 0; +} + +static int hws_matcher_connect(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_context *ctx = tbl->ctx; + struct mlx5hws_matcher *prev = NULL; + struct mlx5hws_matcher *next = NULL; + struct mlx5hws_matcher *tmp_matcher; + int ret; + + /* Find location in matcher list */ + if (list_empty(&tbl->matchers_list)) { + list_add(&matcher->list_node, &tbl->matchers_list); + goto connect; + } + + list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node) { + if (tmp_matcher->attr.priority > matcher->attr.priority) { + next = tmp_matcher; + break; + } + prev = tmp_matcher; + } + + if (next) + /* insert before next */ + list_add_tail(&matcher->list_node, &next->list_node); + else + /* insert after prev */ + list_add(&matcher->list_node, &prev->list_node); + +connect: + if (next) { + /* Connect to next RTC */ + ret = mlx5hws_table_ft_set_next_rtc(ctx, + matcher->end_ft_id, + tbl->fw_ft_type, + next->match_ste.rtc_0_id, + next->match_ste.rtc_1_id); 
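The list walk in hws_matcher_connect() above keeps tbl->matchers_list sorted by ascending priority: the new matcher is inserted before the first entry with a strictly higher priority value, so equal-priority matchers keep their insertion order. A minimal userspace sketch of the same ordering rule, with plain pointers standing in for the kernel list_head API:

#include <stddef.h>

struct matcher {
	unsigned int priority;	/* lower value = matched earlier */
	struct matcher *next;
};

/* Insert m before the first entry whose priority value is higher,
 * i.e. after every entry with priority <= m->priority, which keeps
 * equal-priority matchers in insertion order.
 */
static void insert_by_priority(struct matcher **head, struct matcher *m)
{
	struct matcher **pos = head;

	while (*pos && (*pos)->priority <= m->priority)
		pos = &(*pos)->next;
	m->next = *pos;
	*pos = m;
}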
+ if (ret) { + mlx5hws_err(ctx, "Failed to connect new matcher to next RTC\n"); + goto remove_from_list; + } + } else { + /* Connect last matcher to the miss table, if it exists */ + ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl); + if (ret) { + mlx5hws_err(ctx, "Failed to connect new matcher to miss_tbl\n"); + goto remove_from_list; + } + } + + /* Connect to previous FT */ + ret = mlx5hws_table_ft_set_next_rtc(ctx, + prev ? prev->end_ft_id : tbl->ft_id, + tbl->fw_ft_type, + matcher->match_ste.rtc_0_id, + matcher->match_ste.rtc_1_id); + if (ret) { + mlx5hws_err(ctx, "Failed to connect new matcher to previous FT\n"); + goto remove_from_list; + } + + /* Reset prev matcher FT default miss (drop refcount) */ + ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev ? prev->end_ft_id : tbl->ft_id); + if (ret) { + mlx5hws_err(ctx, "Failed to reset matcher ft default miss\n"); + goto remove_from_list; + } + + if (!prev) { + /* Update tables whose miss points to this table to use the new first matcher */ + ret = mlx5hws_table_update_connected_miss_tables(tbl); + if (ret) { + mlx5hws_err(ctx, "Fatal error, failed to update connected miss table\n"); + goto remove_from_list; + } + } + + return 0; + +remove_from_list: + list_del_init(&matcher->list_node); + return ret; +} + +static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_matcher *next = NULL, *prev = NULL; + struct mlx5hws_table *tbl = matcher->tbl; + u32 prev_ft_id = tbl->ft_id; + int ret; + + if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) { + prev = list_prev_entry(matcher, list_node); + prev_ft_id = prev->end_ft_id; + } + + if (!list_is_last(&matcher->list_node, &tbl->matchers_list)) + next = list_next_entry(matcher, list_node); + + list_del_init(&matcher->list_node); + + if (next) { + /* Connect previous end FT to next RTC */ + ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx, + prev_ft_id, + tbl->fw_ft_type, + next->match_ste.rtc_0_id, + next->match_ste.rtc_1_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n"); + goto matcher_reconnect; + } + } else { + ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n"); + goto matcher_reconnect; + } + } + + /* Removing the first matcher; update connected miss tables, if any */ + if (prev_ft_id == tbl->ft_id) { + ret = mlx5hws_table_update_connected_miss_tables(tbl); + if (ret) { + mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n"); + goto matcher_reconnect; + } + } + + ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n"); + goto matcher_reconnect; + } + + return 0; + +matcher_reconnect: + if (list_empty(&tbl->matchers_list) || !prev) + list_add(&matcher->list_node, &tbl->matchers_list); + else + /* insert after prev matcher */ + list_add(&matcher->list_node, &prev->list_node); + + return ret; +} + +static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher, + struct mlx5hws_cmd_rtc_create_attr *rtc_attr, + enum mlx5hws_matcher_rtc_type rtc_type, + bool is_mirror) +{ + struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste; + enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src; + bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH; + + if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) || + (flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE
&& is_mirror)) { + /* Optimize FDB RTC */ + rtc_attr->log_size = 0; + rtc_attr->log_depth = 0; + } else { + /* Keep original values */ + rtc_attr->log_size = is_match_rtc ? matcher->attr.table.sz_row_log : ste->order; + rtc_attr->log_depth = is_match_rtc ? matcher->attr.table.sz_col_log : 0; + } +} + +static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher, + enum mlx5hws_matcher_rtc_type rtc_type, + u8 action_ste_selector) +{ + struct mlx5hws_matcher_attr *attr = &matcher->attr; + struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0}; + struct mlx5hws_match_template *mt = matcher->mt; + struct mlx5hws_context *ctx = matcher->tbl->ctx; + struct mlx5hws_action_default_stc *default_stc; + struct mlx5hws_matcher_action_ste *action_ste; + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_pool *ste_pool, *stc_pool; + struct mlx5hws_pool_chunk *ste; + u32 *rtc_0_id, *rtc_1_id; + u32 obj_id; + int ret; + + switch (rtc_type) { + case HWS_MATCHER_RTC_TYPE_MATCH: + rtc_0_id = &matcher->match_ste.rtc_0_id; + rtc_1_id = &matcher->match_ste.rtc_1_id; + ste_pool = matcher->match_ste.pool; + ste = &matcher->match_ste.ste; + ste->order = attr->table.sz_col_log + attr->table.sz_row_log; + + rtc_attr.log_size = attr->table.sz_row_log; + rtc_attr.log_depth = attr->table.sz_col_log; + rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt); + rtc_attr.is_scnd_range = 0; + rtc_attr.miss_ft_id = matcher->end_ft_id; + + if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) { + /* The usual Hash Table */ + rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH; + + /* The first mt is used since all share the same definer */ + rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer); + } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) { + rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET; + rtc_attr.num_hash_definer = 1; + + if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) { + /* Hash Split Table */ + rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH; + rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer); + } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) { + /* Linear Lookup Table */ + rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR; + rtc_attr.match_definer_0 = ctx->caps->linear_match_definer; + } + } + + /* Match pool requires implicit allocation */ + ret = mlx5hws_pool_chunk_alloc(ste_pool, ste); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate STE for %s RTC", + hws_matcher_rtc_type_to_str(rtc_type)); + return ret; + } + break; + + case HWS_MATCHER_RTC_TYPE_STE_ARRAY: + action_ste = &matcher->action_ste[action_ste_selector]; + + rtc_0_id = &action_ste->rtc_0_id; + rtc_1_id = &action_ste->rtc_1_id; + ste_pool = action_ste->pool; + ste = &action_ste->ste; + ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) + + attr->table.sz_row_log; + rtc_attr.log_size = ste->order; + rtc_attr.log_depth = 0; + rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET; + /* The action STEs use the default always hit definer */ + rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer; + rtc_attr.is_frst_jumbo = false; + rtc_attr.miss_ft_id = 0; + break; + + default: + mlx5hws_err(ctx, "HWS Invalid RTC type\n"); + return -EINVAL; + } + + obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste); + + rtc_attr.pd = ctx->pd_num; + rtc_attr.ste_base = obj_id; + rtc_attr.ste_offset = ste->offset; + rtc_attr.reparse_mode = 
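For the STE-array case above, the RTC must hold max_stes action STEs for each of the 2^sz_row_log rule slots, hence ste->order = ilog2(roundup_pow_of_two(max_stes)) + sz_row_log. A small self-contained version of that computation (the kernel's ilog2() and roundup_pow_of_two() re-implemented here purely for illustration):

#include <stdint.h>

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

static uint32_t roundup_pow2(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* e.g. max_stes = 3, sz_row_log = 10: 2 + 10 = 12 -> a 4K-entry RTC */
static unsigned int action_rtc_log_size(uint8_t max_stes, uint8_t sz_row_log)
{
	return ilog2_u32(roundup_pow2(max_stes)) + sz_row_log;
}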
mlx5hws_context_get_reparse_mode(ctx); + rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false); + hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false); + + /* STC is a single resource (obj_id), use any STC for the ID */ + stc_pool = ctx->stc_pool[tbl->type]; + default_stc = ctx->common_res[tbl->type].default_stc; + obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit); + rtc_attr.stc_base = obj_id; + + ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id); + if (ret) { + mlx5hws_err(ctx, "Failed to create matcher RTC of type %s", + hws_matcher_rtc_type_to_str(rtc_type)); + goto free_ste; + } + + if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) { + obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste); + rtc_attr.ste_base = obj_id; + rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true); + + obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit); + rtc_attr.stc_base = obj_id; + hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, true); + + ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id); + if (ret) { + mlx5hws_err(ctx, "Failed to create peer matcher RTC of type %s", + hws_matcher_rtc_type_to_str(rtc_type)); + goto destroy_rtc_0; + } + } + + return 0; + +destroy_rtc_0: + mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id); +free_ste: + if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH) + mlx5hws_pool_chunk_free(ste_pool, ste); + return ret; +} + +static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher, + enum mlx5hws_matcher_rtc_type rtc_type, + u8 action_ste_selector) +{ + struct mlx5hws_matcher_action_ste *action_ste; + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_pool_chunk *ste; + struct mlx5hws_pool *ste_pool; + u32 rtc_0_id, rtc_1_id; + + switch (rtc_type) { + case HWS_MATCHER_RTC_TYPE_MATCH: + rtc_0_id = matcher->match_ste.rtc_0_id; + rtc_1_id = matcher->match_ste.rtc_1_id; + ste_pool = matcher->match_ste.pool; + ste = &matcher->match_ste.ste; + break; + case HWS_MATCHER_RTC_TYPE_STE_ARRAY: + action_ste = &matcher->action_ste[action_ste_selector]; + rtc_0_id = action_ste->rtc_0_id; + rtc_1_id = action_ste->rtc_1_id; + ste_pool = action_ste->pool; + ste = &action_ste->ste; + break; + default: + return; + } + + if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) + mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_1_id); + + mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_0_id); + if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH) + mlx5hws_pool_chunk_free(ste_pool, ste); +} + +static int +hws_matcher_check_attr_sz(struct mlx5hws_cmd_query_caps *caps, + struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_matcher_attr *attr = &matcher->attr; + + if (attr->table.sz_col_log > caps->rtc_log_depth_max) { + mlx5hws_err(matcher->tbl->ctx, "Matcher depth exceeds limit %d\n", + caps->rtc_log_depth_max); + return -EOPNOTSUPP; + } + + if (attr->table.sz_col_log + attr->table.sz_row_log > caps->ste_alloc_log_max) { + mlx5hws_err(matcher->tbl->ctx, "Total matcher size exceeds limit %d\n", + caps->ste_alloc_log_max); + return -EOPNOTSUPP; + } + + if (attr->table.sz_col_log + attr->table.sz_row_log < caps->ste_alloc_log_gran) { + mlx5hws_err(matcher->tbl->ctx, "Total matcher size below limit %d\n", + caps->ste_alloc_log_gran); + return -EOPNOTSUPP; + } + + return 0; +} + +static void hws_matcher_set_pool_attr(struct mlx5hws_pool_attr *attr, + struct mlx5hws_matcher *matcher) +{ + switch (matcher->attr.optimize_flow_src) { + case MLX5HWS_MATCHER_FLOW_SRC_VPORT: + 
attr->opt_type = MLX5HWS_POOL_OPTIMIZE_ORIG; + break; + case MLX5HWS_MATCHER_FLOW_SRC_WIRE: + attr->opt_type = MLX5HWS_POOL_OPTIMIZE_MIRROR; + break; + default: + break; + } +} + +static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher, + struct mlx5hws_action_template *at) +{ + struct mlx5hws_context *ctx = matcher->tbl->ctx; + bool valid; + int ret; + + valid = mlx5hws_action_check_combo(ctx, at->action_type_arr, matcher->tbl->type); + if (!valid) { + mlx5hws_err(ctx, "Invalid combination in action template\n"); + return -EINVAL; + } + + /* Process action template to setters */ + ret = mlx5hws_action_template_process(at); + if (ret) { + mlx5hws_err(ctx, "Failed to process action template\n"); + return ret; + } + + return 0; +} + +static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher) +{ + struct mlx5hws_matcher_resize_data *resize_data; + + resize_data = kzalloc(sizeof(*resize_data), GFP_KERNEL); + if (!resize_data) + return -ENOMEM; + + resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes; + + resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc; + resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id; + resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id; + resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ? + src_matcher->action_ste[0].pool : + NULL; + resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc; + resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id; + resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id; + resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ? + src_matcher->action_ste[1].pool : + NULL; + + /* Place the new resized matcher on the dst matcher's list */ + list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data); + + /* Move all the previous resized matchers to the dst matcher's list */ + while (!list_empty(&src_matcher->resize_data)) { + resize_data = list_first_entry(&src_matcher->resize_data, + struct mlx5hws_matcher_resize_data, + list_node); + list_del_init(&resize_data->list_node); + list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data); + } + + return 0; +} + +static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_matcher_resize_data *resize_data; + + if (!mlx5hws_matcher_is_resizable(matcher)) + return; + + while (!list_empty(&matcher->resize_data)) { + resize_data = list_first_entry(&matcher->resize_data, + struct mlx5hws_matcher_resize_data, + list_node); + list_del_init(&resize_data->list_node); + + if (resize_data->max_stes) { + mlx5hws_action_free_single_stc(matcher->tbl->ctx, + matcher->tbl->type, + &resize_data->action_ste[1].stc); + mlx5hws_action_free_single_stc(matcher->tbl->ctx, + matcher->tbl->type, + &resize_data->action_ste[0].stc); + + if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) { + mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, + resize_data->action_ste[1].rtc_1_id); + mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, + resize_data->action_ste[0].rtc_1_id); + } + mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, + resize_data->action_ste[1].rtc_0_id); + mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, + resize_data->action_ste[0].rtc_0_id); + if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) { + mlx5hws_pool_destroy(resize_data->action_ste[1].pool); + mlx5hws_pool_destroy(resize_data->action_ste[0].pool); + } + } + + kfree(resize_data); + } 
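hws_matcher_resize_init() and hws_matcher_resize_uninit() above implement a hand-over chain: the RTC/STC resources of a matcher being resized (plus anything it inherited from earlier resizes) are parked on the destination matcher's resize_data list and released only when that matcher is torn down. The transfer amounts to the following, sketched with a singly linked list instead of list_head:

#include <stddef.h>

struct resize_node {
	struct resize_node *next;
	/* parked RTC/STC object ids would live here */
};

/* Move every node from *src onto *dst, as the repeated
 * list_del_init()/list_add() loop in the kernel code does.
 */
static void splice_resize_data(struct resize_node **dst,
			       struct resize_node **src)
{
	while (*src) {
		struct resize_node *n = *src;

		*src = n->next;
		n->next = *dst;
		*dst = n;
	}
}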
+} + +static int +hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector) +{ + struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; + struct mlx5hws_matcher_action_ste *action_ste; + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_pool_attr pool_attr = {0}; + struct mlx5hws_context *ctx = tbl->ctx; + int ret; + + action_ste = &matcher->action_ste[action_ste_selector]; + + /* Allocate action STE mempool */ + pool_attr.table_type = tbl->type; + pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE; + pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL; + pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) + + matcher->attr.table.sz_row_log; + hws_matcher_set_pool_attr(&pool_attr, matcher); + action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr); + if (!action_ste->pool) { + mlx5hws_err(ctx, "Failed to create action ste pool\n"); + return -EINVAL; + } + + /* Allocate action RTC */ + ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector); + if (ret) { + mlx5hws_err(ctx, "Failed to create action RTC\n"); + goto free_ste_pool; + } + + /* Allocate STC for jumps to STE */ + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT; + stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE; + stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + stc_attr.ste_table.ste = action_ste->ste; + stc_attr.ste_table.ste_pool = action_ste->pool; + stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer; + + ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl->type, + &action_ste->stc); + if (ret) { + mlx5hws_err(ctx, "Failed to create action jump to table STC\n"); + goto free_rtc; + } + + return 0; + +free_rtc: + hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector); +free_ste_pool: + mlx5hws_pool_destroy(action_ste->pool); + return ret; +} + +static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector) +{ + struct mlx5hws_matcher_action_ste *action_ste; + struct mlx5hws_table *tbl = matcher->tbl; + + action_ste = &matcher->action_ste[action_ste_selector]; + + if (!action_ste->max_stes || + matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION || + mlx5hws_matcher_is_in_resize(matcher)) + return; + + mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc); + hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector); + mlx5hws_pool_destroy(action_ste->pool); +} + +static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher) +{ + bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt); + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_context *ctx = tbl->ctx; + u32 required_stes; + u8 max_stes = 0; + int i, ret; + + if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION) + return 0; + + for (i = 0; i < matcher->num_of_at; i++) { + struct mlx5hws_action_template *at = &matcher->at[i]; + + ret = hws_matcher_check_and_process_at(matcher, at); + if (ret) { + mlx5hws_err(ctx, "Invalid at %d", i); + return ret; + } + + required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term); + max_stes = max(max_stes, required_stes); + + /* Future: Optimize reparse */ + } + + /* There are no additional STEs required for matcher */ + if (!max_stes) + return 0; + + matcher->action_ste[0].max_stes = max_stes; + matcher->action_ste[1].max_stes = max_stes; + + ret = hws_matcher_bind_at_idx(matcher, 0); + if (ret) + return ret; + + ret = hws_matcher_bind_at_idx(matcher, 1); + if (ret) + goto 
free_at_0; + + return 0; + +free_at_0: + hws_matcher_unbind_at_idx(matcher, 0); + return ret; +} + +static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher) +{ + hws_matcher_unbind_at_idx(matcher, 1); + hws_matcher_unbind_at_idx(matcher, 0); +} + +static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_context *ctx = matcher->tbl->ctx; + struct mlx5hws_pool_attr pool_attr = {0}; + int ret; + + /* Calculate match, range and hash definers */ + if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) { + ret = mlx5hws_definer_mt_init(ctx, matcher->mt); + if (ret) { + if (ret == -E2BIG) + mlx5hws_err(ctx, "Failed to set matcher templates with match definers\n"); + return ret; + } + } + + /* Create an STE pool per matcher */ + pool_attr.table_type = matcher->tbl->type; + pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE; + pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL; + pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log + + matcher->attr.table.sz_row_log; + hws_matcher_set_pool_attr(&pool_attr, matcher); + + matcher->match_ste.pool = mlx5hws_pool_create(ctx, &pool_attr); + if (!matcher->match_ste.pool) { + mlx5hws_err(ctx, "Failed to allocate matcher STE pool\n"); + ret = -EOPNOTSUPP; + goto uninit_match_definer; + } + + return 0; + +uninit_match_definer: + if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) + mlx5hws_definer_mt_uninit(ctx, matcher->mt); + return ret; +} + +static void hws_matcher_unbind_mt(struct mlx5hws_matcher *matcher) +{ + mlx5hws_pool_destroy(matcher->match_ste.pool); + if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) + mlx5hws_definer_mt_uninit(matcher->tbl->ctx, matcher->mt); +} + +static int +hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps, + struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_matcher_attr *attr = &matcher->attr; + struct mlx5hws_context *ctx = matcher->tbl->ctx; + + switch (attr->insert_mode) { + case MLX5HWS_MATCHER_INSERT_BY_HASH: + if (matcher->attr.distribute_mode != MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) { + mlx5hws_err(ctx, "Invalid matcher distribute mode\n"); + return -EOPNOTSUPP; + } + break; + + case MLX5HWS_MATCHER_INSERT_BY_INDEX: + if (attr->table.sz_col_log) { + mlx5hws_err(ctx, "Matcher with INSERT_BY_INDEX supports only Nx1 table size\n"); + return -EOPNOTSUPP; + } + + if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) { + /* Hash Split Table */ + if (!caps->rtc_hash_split_table) { + mlx5hws_err(ctx, "FW doesn't support insert by index and hash distribute\n"); + return -EOPNOTSUPP; + } + } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) { + /* Linear Lookup Table */ + if (!caps->rtc_linear_lookup_table || + !IS_BIT_SET(caps->access_index_mode, + MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR)) { + mlx5hws_err(ctx, "FW doesn't support insert by index and linear distribute\n"); + return -EOPNOTSUPP; + } + + if (attr->table.sz_row_log > MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX) { + mlx5hws_err(ctx, "Matcher with linear distribute: rows exceed limit %d\n", + MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX); + return -EOPNOTSUPP; + } + } else { + mlx5hws_err(ctx, "Matcher has unsupported distribute mode\n"); + return -EOPNOTSUPP; + } + break; + + default: + mlx5hws_err(ctx, "Matcher has unsupported insert mode\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int +hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps, + struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_matcher_attr *attr = &matcher->attr; + + if
(hws_matcher_validate_insert_mode(caps, matcher)) + return -EOPNOTSUPP; + + if (matcher->tbl->type != MLX5HWS_TABLE_TYPE_FDB && attr->optimize_flow_src) { + mlx5hws_err(matcher->tbl->ctx, "NIC domain doesn't support flow_src\n"); + return -EOPNOTSUPP; + } + + /* Convert number of rules to the required depth */ + if (attr->mode == MLX5HWS_MATCHER_RESOURCE_MODE_RULE && + attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) + attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log); + + matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0; + + return hws_matcher_check_attr_sz(caps, matcher); +} + +static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher) +{ + int ret; + + /* Select and create the definers for current matcher */ + ret = hws_matcher_bind_mt(matcher); + if (ret) + return ret; + + /* Calculate and verify action combination */ + ret = hws_matcher_bind_at(matcher); + if (ret) + goto unbind_mt; + + /* Create matcher end flow table anchor */ + ret = hws_matcher_create_end_ft(matcher); + if (ret) + goto unbind_at; + + /* Allocate the RTC for the new matcher */ + ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0); + if (ret) + goto destroy_end_ft; + + /* Connect the matcher to the matcher list */ + ret = hws_matcher_connect(matcher); + if (ret) + goto destroy_rtc; + + return 0; + +destroy_rtc: + hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0); +destroy_end_ft: + hws_matcher_destroy_end_ft(matcher); +unbind_at: + hws_matcher_unbind_at(matcher); +unbind_mt: + hws_matcher_unbind_mt(matcher); + return ret; +} + +static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher) +{ + hws_matcher_resize_uninit(matcher); + hws_matcher_disconnect(matcher); + hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0); + hws_matcher_destroy_end_ft(matcher); + hws_matcher_unbind_at(matcher); + hws_matcher_unbind_mt(matcher); +} + +static int +hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_context *ctx = matcher->tbl->ctx; + struct mlx5hws_matcher *col_matcher; + int ret; + + if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE || + matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) + return 0; + + if (!hws_matcher_requires_col_tbl(matcher->attr.rule.num_log)) + return 0; + + col_matcher = kzalloc(sizeof(*matcher), GFP_KERNEL); + if (!col_matcher) + return -ENOMEM; + + INIT_LIST_HEAD(&col_matcher->resize_data); + + col_matcher->tbl = matcher->tbl; + col_matcher->mt = matcher->mt; + col_matcher->at = matcher->at; + col_matcher->num_of_at = matcher->num_of_at; + col_matcher->num_of_mt = matcher->num_of_mt; + col_matcher->attr.priority = matcher->attr.priority; + col_matcher->flags = matcher->flags; + col_matcher->flags |= MLX5HWS_MATCHER_FLAGS_COLLISION; + col_matcher->attr.mode = MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE; + col_matcher->attr.optimize_flow_src = matcher->attr.optimize_flow_src; + col_matcher->attr.table.sz_row_log = matcher->attr.rule.num_log; + col_matcher->attr.table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH; + if (col_matcher->attr.table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO) + col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO; + + col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach; + + ret = hws_matcher_process_attr(ctx->caps, col_matcher); + if (ret) + goto free_col_matcher; + + ret = hws_matcher_create_and_connect(col_matcher); + if (ret) + goto 
free_col_matcher; + + matcher->col_matcher = col_matcher; + + return 0; + +free_col_matcher: + kfree(col_matcher); + mlx5hws_err(ctx, "Failed to create assured collision matcher\n"); + return ret; +} + +static void +hws_matcher_destroy_col_matcher(struct mlx5hws_matcher *matcher) +{ + if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE || + matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) + return; + + if (matcher->col_matcher) { + hws_matcher_destroy_and_disconnect(matcher->col_matcher); + kfree(matcher->col_matcher); + } +} + +static int hws_matcher_init(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_context *ctx = matcher->tbl->ctx; + int ret; + + INIT_LIST_HEAD(&matcher->resize_data); + + mutex_lock(&ctx->ctrl_lock); + + /* Allocate matcher resource and connect to the packet pipe */ + ret = hws_matcher_create_and_connect(matcher); + if (ret) + goto unlock_err; + + /* Create additional matcher for collision handling */ + ret = hws_matcher_create_col_matcher(matcher); + if (ret) + goto destroy_and_disconnect; + mutex_unlock(&ctx->ctrl_lock); + + return 0; + +destroy_and_disconnect: + hws_matcher_destroy_and_disconnect(matcher); +unlock_err: + mutex_unlock(&ctx->ctrl_lock); + return ret; +} + +static int hws_matcher_uninit(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_context *ctx = matcher->tbl->ctx; + + mutex_lock(&ctx->ctrl_lock); + hws_matcher_destroy_col_matcher(matcher); + hws_matcher_destroy_and_disconnect(matcher); + mutex_unlock(&ctx->ctrl_lock); + + return 0; +} + +int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher, + struct mlx5hws_action_template *at) +{ + bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt); + struct mlx5hws_context *ctx = matcher->tbl->ctx; + u32 required_stes; + int ret; + + if (!matcher->attr.max_num_of_at_attach) { + mlx5hws_dbg(ctx, "Num of current at (%d) exceeds allowed value\n", + matcher->num_of_at); + return -EOPNOTSUPP; + } + + ret = hws_matcher_check_and_process_at(matcher, at); + if (ret) + return ret; + + required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term); + if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) { + mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n", + required_stes, + matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes); + return -ENOMEM; + } + + matcher->at[matcher->num_of_at] = *at; + matcher->num_of_at += 1; + matcher->attr.max_num_of_at_attach -= 1; + + if (matcher->col_matcher) + matcher->col_matcher->num_of_at = matcher->num_of_at; + + return 0; +} + +static int +hws_matcher_set_templates(struct mlx5hws_matcher *matcher, + struct mlx5hws_match_template *mt[], + u8 num_of_mt, + struct mlx5hws_action_template *at[], + u8 num_of_at) +{ + struct mlx5hws_context *ctx = matcher->tbl->ctx; + int ret = 0; + int i; + + if (!num_of_mt || !num_of_at) { + mlx5hws_err(ctx, "Number of action/match templates cannot be zero\n"); + return -EOPNOTSUPP; + } + + matcher->mt = kcalloc(num_of_mt, sizeof(*matcher->mt), GFP_KERNEL); + if (!matcher->mt) + return -ENOMEM; + + matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach, + sizeof(*matcher->at), + GFP_KERNEL); + if (!matcher->at) { + mlx5hws_err(ctx, "Failed to allocate action template array\n"); + ret = -ENOMEM; + goto free_mt; + } + + for (i = 0; i < num_of_mt; i++) + matcher->mt[i] = *mt[i]; + + for (i = 0; i < num_of_at; i++) + matcher->at[i] = *at[i]; + + matcher->num_of_mt = num_of_mt; + matcher->num_of_at = num_of_at; + + return 0; + +free_mt:
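The collision-matcher sizing in hws_matcher_create_col_matcher() above deserves a worked example. With the constants from mlx5hws_matcher.h further down (row ratio 5, collision depth 4, main depth 2), a matcher created for 2^20 rules gets a 2^20-row by 2^2-deep main table plus a 2^15-row by 2^4-deep collision table; 2^15/2^20 is about 3% of the main rows, matching the comment in that header. A sketch of the arithmetic:

#include <stdio.h>

#define ASSURED_ROW_RATIO	5	/* log2(100/3) is roughly 5 */
#define ASSURED_COL_TBL_DEPTH	4
#define ASSURED_MAIN_TBL_DEPTH	2

int main(void)
{
	unsigned int num_log = 20;	/* 2^20 rules requested */
	unsigned int col_rows_log = num_log > ASSURED_ROW_RATIO ?
				    num_log - ASSURED_ROW_RATIO : num_log;

	printf("main table:      2^%u rows x 2^%u deep\n",
	       num_log, ASSURED_MAIN_TBL_DEPTH);
	printf("collision table: 2^%u rows x 2^%u deep\n",
	       col_rows_log, ASSURED_COL_TBL_DEPTH);
	return 0;
}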
+ kfree(matcher->mt); + return ret; +} + +static void +hws_matcher_unset_templates(struct mlx5hws_matcher *matcher) +{ + kfree(matcher->at); + kfree(matcher->mt); +} + +struct mlx5hws_matcher * +mlx5hws_matcher_create(struct mlx5hws_table *tbl, + struct mlx5hws_match_template *mt[], + u8 num_of_mt, + struct mlx5hws_action_template *at[], + u8 num_of_at, + struct mlx5hws_matcher_attr *attr) +{ + struct mlx5hws_context *ctx = tbl->ctx; + struct mlx5hws_matcher *matcher; + int ret; + + matcher = kzalloc(sizeof(*matcher), GFP_KERNEL); + if (!matcher) + return NULL; + + matcher->tbl = tbl; + matcher->attr = *attr; + + ret = hws_matcher_process_attr(tbl->ctx->caps, matcher); + if (ret) + goto free_matcher; + + ret = hws_matcher_set_templates(matcher, mt, num_of_mt, at, num_of_at); + if (ret) + goto free_matcher; + + ret = hws_matcher_init(matcher); + if (ret) { + mlx5hws_err(ctx, "Failed to initialise matcher: %d\n", ret); + goto unset_templates; + } + + return matcher; + +unset_templates: + hws_matcher_unset_templates(matcher); +free_matcher: + kfree(matcher); + return NULL; +} + +int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher) +{ + hws_matcher_uninit(matcher); + hws_matcher_unset_templates(matcher); + kfree(matcher); + return 0; +} + +struct mlx5hws_match_template * +mlx5hws_match_template_create(struct mlx5hws_context *ctx, + u32 *match_param, + u32 match_param_sz, + u8 match_criteria_enable) +{ + struct mlx5hws_match_template *mt; + + mt = kzalloc(sizeof(*mt), GFP_KERNEL); + if (!mt) + return NULL; + + mt->match_param = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + if (!mt->match_param) + goto free_template; + + memcpy(mt->match_param, match_param, match_param_sz); + mt->match_criteria_enable = match_criteria_enable; + + return mt; + +free_template: + kfree(mt); + return NULL; +} + +int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt) +{ + kfree(mt->match_param); + kfree(mt); + return 0; +} + +static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher, + struct mlx5hws_matcher *dst_matcher) +{ + struct mlx5hws_context *ctx = src_matcher->tbl->ctx; + int i; + + if (src_matcher->tbl->type != dst_matcher->tbl->type) { + mlx5hws_err(ctx, "Table type mismatch for src/dst matchers\n"); + return -EINVAL; + } + + if (!mlx5hws_matcher_is_resizable(src_matcher) || + !mlx5hws_matcher_is_resizable(dst_matcher)) { + mlx5hws_err(ctx, "Src/dst matcher is not resizable\n"); + return -EINVAL; + } + + if (mlx5hws_matcher_is_insert_by_idx(src_matcher) != + mlx5hws_matcher_is_insert_by_idx(dst_matcher)) { + mlx5hws_err(ctx, "Src/dst matchers insert mode mismatch\n"); + return -EINVAL; + } + + if (mlx5hws_matcher_is_in_resize(src_matcher) || + mlx5hws_matcher_is_in_resize(dst_matcher)) { + mlx5hws_err(ctx, "Src/dst matcher is already in resize\n"); + return -EINVAL; + } + + /* Compare match templates - make sure the definers are equivalent */ + if (src_matcher->num_of_mt != dst_matcher->num_of_mt) { + mlx5hws_err(ctx, "Src/dst matcher match templates mismatch\n"); + return -EINVAL; + } + + if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes > + dst_matcher->action_ste[0].max_stes) { + mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n"); + return -EINVAL; + } + + for (i = 0; i < src_matcher->num_of_mt; i++) { + if (mlx5hws_definer_compare(src_matcher->mt[i].definer, + dst_matcher->mt[i].definer)) { + mlx5hws_err(ctx, "Src/dst matcher definers mismatch\n"); + return -EINVAL; + } + } + + return 0; +} + +int 
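For orientation, here is a hypothetical usage sketch of the template/matcher API above. The attribute values are illustrative, the table and action template are assumed to come from the rest of this series, and note the ordering constraint: the match template is copied by value into the matcher, but its match_param buffer is not duplicated, so the template must outlive the matcher.

/* Sketch only; assumes driver context (u32/u8, errno constants). */
static int example_matcher_lifecycle(struct mlx5hws_table *tbl,
				     u32 *match_param, u32 match_param_sz,
				     u8 criteria,
				     struct mlx5hws_action_template *at)
{
	struct mlx5hws_matcher_attr attr = {
		.priority = 0,
		.mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE,
		.rule.num_log = 12,	/* room for ~4K rules */
	};
	struct mlx5hws_match_template *mt;
	struct mlx5hws_matcher *matcher;

	mt = mlx5hws_match_template_create(tbl->ctx, match_param,
					   match_param_sz, criteria);
	if (!mt)
		return -ENOMEM;

	matcher = mlx5hws_matcher_create(tbl, &mt, 1, &at, 1, &attr);
	if (!matcher) {
		mlx5hws_match_template_destroy(mt);
		return -EINVAL;
	}

	/* ... insert rules ... */

	mlx5hws_matcher_destroy(matcher);
	mlx5hws_match_template_destroy(mt);	/* after the matcher */
	return 0;
}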
mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher, + struct mlx5hws_matcher *dst_matcher) +{ + int ret = 0; + + mutex_lock(&src_matcher->tbl->ctx->ctrl_lock); + + ret = hws_matcher_resize_precheck(src_matcher, dst_matcher); + if (ret) + goto out; + + src_matcher->resize_dst = dst_matcher; + + ret = hws_matcher_resize_init(src_matcher); + if (ret) + src_matcher->resize_dst = NULL; + +out: + mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock); + return ret; +} + +int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher, + struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + struct mlx5hws_context *ctx = src_matcher->tbl->ctx; + + if (unlikely(!mlx5hws_matcher_is_in_resize(src_matcher))) { + mlx5hws_err(ctx, "Matcher is not resizable or not in resize\n"); + return -EINVAL; + } + + if (unlikely(src_matcher != rule->matcher)) { + mlx5hws_err(ctx, "Rule doesn't belong to src matcher\n"); + return -EINVAL; + } + + return mlx5hws_rule_move_hws_add(rule, attr); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h new file mode 100644 index 000000000000..125391d1a114 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_MATCHER_H_ +#define MLX5HWS_MATCHER_H_ + +/* We calculated that concatenating a collision table to the main table with + * 3% of the main table rows will be enough resources for high insertion + * success probability. + * + * The calculation: log2(2^x * 3 / 100) = log2(2^x) + log2(3/100) = x - 5.05 ~ 5 + */ +#define MLX5HWS_MATCHER_ASSURED_ROW_RATIO 5 +/* Threshold to determine if amount of rules require a collision table */ +#define MLX5HWS_MATCHER_ASSURED_RULES_TH 10 +/* Required depth of an assured collision table */ +#define MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH 4 +/* Required depth of the main large table */ +#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2 + +enum mlx5hws_matcher_offset { + MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12, + MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13, +}; + +enum mlx5hws_matcher_flags { + MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2, + MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3, +}; + +struct mlx5hws_match_template { + struct mlx5hws_definer *definer; + struct mlx5hws_definer_fc *fc; + u32 *match_param; + u8 match_criteria_enable; + u16 fc_sz; +}; + +struct mlx5hws_matcher_match_ste { + struct mlx5hws_pool_chunk ste; + u32 rtc_0_id; + u32 rtc_1_id; + struct mlx5hws_pool *pool; +}; + +struct mlx5hws_matcher_action_ste { + struct mlx5hws_pool_chunk ste; + struct mlx5hws_pool_chunk stc; + u32 rtc_0_id; + u32 rtc_1_id; + struct mlx5hws_pool *pool; + u8 max_stes; +}; + +struct mlx5hws_matcher_resize_data_node { + struct mlx5hws_pool_chunk stc; + u32 rtc_0_id; + u32 rtc_1_id; + struct mlx5hws_pool *pool; +}; + +struct mlx5hws_matcher_resize_data { + struct mlx5hws_matcher_resize_data_node action_ste[2]; + u8 max_stes; + struct list_head list_node; +}; + +struct mlx5hws_matcher { + struct mlx5hws_table *tbl; + struct mlx5hws_matcher_attr attr; + struct mlx5hws_match_template *mt; + struct mlx5hws_action_template *at; + u8 num_of_at; + u8 num_of_mt; + /* enum mlx5hws_matcher_flags */ + u8 flags; + u32 end_ft_id; + struct mlx5hws_matcher *col_matcher; + struct mlx5hws_matcher *resize_dst; + struct mlx5hws_matcher_match_ste match_ste; + struct 
mlx5hws_matcher_action_ste action_ste[2]; + struct list_head list_node; + struct list_head resize_data; +}; + +static inline bool +mlx5hws_matcher_mt_is_jumbo(struct mlx5hws_match_template *mt) +{ + return mlx5hws_definer_is_jumbo(mt->definer); +} + +static inline bool mlx5hws_matcher_is_resizable(struct mlx5hws_matcher *matcher) +{ + return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_RESIZABLE); +} + +static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher) +{ + return !!matcher->resize_dst; +} + +static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher) +{ + return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX; +} + +#endif /* MLX5HWS_MATCHER_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c new file mode 100644 index 000000000000..e084a5cbf81f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c @@ -0,0 +1,579 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +enum mlx5hws_arg_chunk_size +mlx5hws_arg_data_size_to_arg_log_size(u16 data_size) +{ + /* Return the roundup of log2(data_size) */ + if (data_size <= MLX5HWS_ARG_DATA_SIZE) + return MLX5HWS_ARG_CHUNK_SIZE_1; + if (data_size <= MLX5HWS_ARG_DATA_SIZE * 2) + return MLX5HWS_ARG_CHUNK_SIZE_2; + if (data_size <= MLX5HWS_ARG_DATA_SIZE * 4) + return MLX5HWS_ARG_CHUNK_SIZE_3; + if (data_size <= MLX5HWS_ARG_DATA_SIZE * 8) + return MLX5HWS_ARG_CHUNK_SIZE_4; + + return MLX5HWS_ARG_CHUNK_SIZE_MAX; +} + +u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size) +{ + return BIT(mlx5hws_arg_data_size_to_arg_log_size(data_size)); +} + +enum mlx5hws_arg_chunk_size +mlx5hws_arg_get_arg_log_size(u16 num_of_actions) +{ + return mlx5hws_arg_data_size_to_arg_log_size(num_of_actions * + MLX5HWS_MODIFY_ACTION_SIZE); +} + +u32 mlx5hws_arg_get_arg_size(u16 num_of_actions) +{ + return BIT(mlx5hws_arg_get_arg_log_size(num_of_actions)); +} + +bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions) +{ + u16 i, field; + u8 action_id; + + for (i = 0; i < num_of_actions; i++) { + action_id = MLX5_GET(set_action_in, &actions[i], action_type); + + switch (action_id) { + case MLX5_MODIFICATION_TYPE_NOP: + field = MLX5_MODI_OUT_NONE; + break; + + case MLX5_MODIFICATION_TYPE_SET: + case MLX5_MODIFICATION_TYPE_ADD: + field = MLX5_GET(set_action_in, &actions[i], field); + break; + + case MLX5_MODIFICATION_TYPE_COPY: + case MLX5_MODIFICATION_TYPE_ADD_FIELD: + field = MLX5_GET(copy_action_in, &actions[i], dst_field); + break; + + default: + /* Insert/Remove/Unknown actions require reparse */ + return true; + } + + /* Below fields can change packet structure require a reparse */ + if (field == MLX5_MODI_OUT_ETHERTYPE || + field == MLX5_MODI_OUT_IPV6_NEXT_HDR) + return true; + } + + return false; +} + +/* Cache and cache element handling */ +int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache) +{ + struct mlx5hws_pattern_cache *new_cache; + + new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL); + if (!new_cache) + return -ENOMEM; + + INIT_LIST_HEAD(&new_cache->ptrn_list); + mutex_init(&new_cache->lock); + + *cache = new_cache; + + return 0; +} + +void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache) +{ + mutex_destroy(&cache->lock); + kfree(cache); +} + +static bool mlx5hws_pat_compare_pattern(int cur_num_of_actions, 
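mlx5hws_arg_data_size_to_arg_log_size() above is a ceil-log2 in units of 64-byte chunks, clamped at four doublings. A compact userspace equivalent, shown only to make the thresholds explicit (up to 64B maps to 0, 128B to 1, 256B to 2, 512B to 3, anything larger to the MAX sentinel):

#include <stdint.h>

#define ARG_DATA_SIZE	64u	/* MLX5HWS_ARG_DATA_SIZE */
#define ARG_LOG_MAX	4u	/* MLX5HWS_ARG_CHUNK_SIZE_MAX */

/* log2 of the number of 64B argument chunks needed for data_size */
static unsigned int arg_log_size(uint16_t data_size)
{
	unsigned int log = 0;

	while (log < ARG_LOG_MAX && data_size > (ARG_DATA_SIZE << log))
		log++;
	return log;
}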
+ __be64 cur_actions[], + int num_of_actions, + __be64 actions[]) +{ + int i; + + if (cur_num_of_actions != num_of_actions) + return false; + + for (i = 0; i < num_of_actions; i++) { + u8 action_id = + MLX5_GET(set_action_in, &actions[i], action_type); + + if (action_id == MLX5_MODIFICATION_TYPE_COPY || + action_id == MLX5_MODIFICATION_TYPE_ADD_FIELD) { + if (actions[i] != cur_actions[i]) + return false; + } else { + /* Compare just the control, not the values */ + if ((__force __be32)actions[i] != + (__force __be32)cur_actions[i]) + return false; + } + } + + return true; +} + +static struct mlx5hws_pattern_cache_item * +mlx5hws_pat_find_cached_pattern(struct mlx5hws_pattern_cache *cache, + u16 num_of_actions, + __be64 *actions) +{ + struct mlx5hws_pattern_cache_item *cached_pat = NULL; + + list_for_each_entry(cached_pat, &cache->ptrn_list, ptrn_list_node) { + if (mlx5hws_pat_compare_pattern(cached_pat->mh_data.num_of_actions, + (__be64 *)cached_pat->mh_data.data, + num_of_actions, + actions)) + return cached_pat; + } + + return NULL; +} + +static struct mlx5hws_pattern_cache_item * +mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache, + u16 num_of_actions, + __be64 *actions) +{ + struct mlx5hws_pattern_cache_item *cached_pattern; + + cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions); + if (cached_pattern) { + /* LRU: move it to be first in the list */ + list_del_init(&cached_pattern->ptrn_list_node); + list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list); + cached_pattern->refcount++; + } + + return cached_pattern; +} + +static struct mlx5hws_pattern_cache_item * +mlx5hws_pat_add_pattern_to_cache(struct mlx5hws_pattern_cache *cache, + u32 pattern_id, + u16 num_of_actions, + __be64 *actions) +{ + struct mlx5hws_pattern_cache_item *cached_pattern; + + cached_pattern = kzalloc(sizeof(*cached_pattern), GFP_KERNEL); + if (!cached_pattern) + return NULL; + + cached_pattern->mh_data.num_of_actions = num_of_actions; + cached_pattern->mh_data.pattern_id = pattern_id; + cached_pattern->mh_data.data = + kmemdup(actions, num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL); + if (!cached_pattern->mh_data.data) + goto free_cached_obj; + + list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list); + cached_pattern->refcount = 1; + + return cached_pattern; + +free_cached_obj: + kfree(cached_pattern); + return NULL; +} + +static struct mlx5hws_pattern_cache_item * +mlx5hws_pat_find_cached_pattern_by_id(struct mlx5hws_pattern_cache *cache, + u32 ptrn_id) +{ + struct mlx5hws_pattern_cache_item *cached_pattern = NULL; + + list_for_each_entry(cached_pattern, &cache->ptrn_list, ptrn_list_node) { + if (cached_pattern->mh_data.pattern_id == ptrn_id) + return cached_pattern; + } + + return NULL; +} + +static void +mlx5hws_pat_remove_pattern(struct mlx5hws_pattern_cache_item *cached_pattern) +{ + list_del_init(&cached_pattern->ptrn_list_node); + + kfree(cached_pattern->mh_data.data); + kfree(cached_pattern); +} + +void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, u32 ptrn_id) +{ + struct mlx5hws_pattern_cache *cache = ctx->pattern_cache; + struct mlx5hws_pattern_cache_item *cached_pattern; + + mutex_lock(&cache->lock); + cached_pattern = mlx5hws_pat_find_cached_pattern_by_id(cache, ptrn_id); + if (!cached_pattern) { + mlx5hws_err(ctx, "Failed to find cached pattern with provided ID\n"); + pr_warn("HWS: pattern ID %d is not found\n", ptrn_id); + goto out; + } + + if (--cached_pattern->refcount) + goto out; + + 
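The cache discipline above combines reference counting with LRU ordering: a hit bumps the refcount and moves the entry to the list head, and mlx5hws_pat_put_pattern() frees the entry and its FW object only when the last reference drops. A standalone sketch of the lookup side, with a singly linked list standing in for list_head:

#include <stddef.h>

struct pat {
	struct pat *next;
	unsigned int refcount;
	int id;
};

/* Find a cached pattern; on a hit, move it to the front (LRU order)
 * and take a reference, as mlx5hws_pat_get_existing_cached_pattern()
 * does above.
 */
static struct pat *pat_get(struct pat **head, int id)
{
	struct pat **pos;

	for (pos = head; *pos; pos = &(*pos)->next) {
		if ((*pos)->id == id) {
			struct pat *p = *pos;

			*pos = p->next;		/* unlink */
			p->next = *head;	/* move to front */
			*head = p;
			p->refcount++;
			return p;
		}
	}
	return NULL;
}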
mlx5hws_pat_remove_pattern(cached_pattern); + mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id); + +out: + mutex_unlock(&cache->lock); +} + +int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx, + __be64 *pattern, size_t pattern_sz, + u32 *pattern_id) +{ + u16 num_of_actions = pattern_sz / MLX5HWS_MODIFY_ACTION_SIZE; + struct mlx5hws_pattern_cache_item *cached_pattern; + u32 ptrn_id = 0; + int ret = 0; + + mutex_lock(&ctx->pattern_cache->lock); + + cached_pattern = mlx5hws_pat_get_existing_cached_pattern(ctx->pattern_cache, + num_of_actions, + pattern); + if (cached_pattern) { + *pattern_id = cached_pattern->mh_data.pattern_id; + goto out_unlock; + } + + ret = mlx5hws_cmd_header_modify_pattern_create(ctx->mdev, + pattern_sz, + (u8 *)pattern, + &ptrn_id); + if (ret) { + mlx5hws_err(ctx, "Failed to create pattern FW object\n"); + goto out_unlock; + } + + cached_pattern = mlx5hws_pat_add_pattern_to_cache(ctx->pattern_cache, + ptrn_id, + num_of_actions, + pattern); + if (!cached_pattern) { + mlx5hws_err(ctx, "Failed to add pattern to cache\n"); + ret = -EINVAL; + goto clean_pattern; + } + + mutex_unlock(&ctx->pattern_cache->lock); + *pattern_id = ptrn_id; + + return ret; + +clean_pattern: + mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, *pattern_id); +out_unlock: + mutex_unlock(&ctx->pattern_cache->lock); + return ret; +} + +static void +mlx5d_arg_init_send_attr(struct mlx5hws_send_engine_post_attr *send_attr, + void *comp_data, + u32 arg_idx) +{ + send_attr->opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + send_attr->opmod = MLX5HWS_WQE_GTA_OPMOD_MOD_ARG; + send_attr->len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA; + send_attr->id = arg_idx; + send_attr->user_data = comp_data; +} + +void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue, + u32 arg_idx, + u8 *arg_data, + u16 num_of_actions) +{ + struct mlx5hws_send_engine_post_attr send_attr = {0}; + struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg = NULL; + struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl = NULL; + struct mlx5hws_send_engine_post_ctrl ctrl; + size_t wqe_len; + + mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx); + + ctrl = mlx5hws_send_engine_post_start(queue); + mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len); + memset(wqe_ctrl, 0, wqe_len); + mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len); + mlx5hws_action_prepare_decap_l3_data(arg_data, (u8 *)wqe_arg, + num_of_actions); + mlx5hws_send_engine_post_end(&ctrl, &send_attr); +} + +void mlx5hws_arg_write(struct mlx5hws_send_engine *queue, + void *comp_data, + u32 arg_idx, + u8 *arg_data, + size_t data_size) +{ + struct mlx5hws_send_engine_post_attr send_attr = {0}; + struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg; + struct mlx5hws_send_engine_post_ctrl ctrl; + struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl; + int i, full_iter, leftover; + size_t wqe_len; + + mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx); + + /* Each WQE can hold 64B of data, it might require multiple iteration */ + full_iter = data_size / MLX5HWS_ARG_DATA_SIZE; + leftover = data_size & (MLX5HWS_ARG_DATA_SIZE - 1); + + for (i = 0; i < full_iter; i++) { + ctrl = mlx5hws_send_engine_post_start(queue); + mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len); + memset(wqe_ctrl, 0, wqe_len); + mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len); + memcpy(wqe_arg, arg_data, wqe_len); + send_attr.id = arg_idx++; + mlx5hws_send_engine_post_end(&ctrl, &send_attr); + + /* Move to next argument data */ + arg_data += 
MLX5HWS_ARG_DATA_SIZE; + } + + if (leftover) { + ctrl = mlx5hws_send_engine_post_start(queue); + mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len); + memset(wqe_ctrl, 0, wqe_len); + mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len); + memcpy(wqe_arg, arg_data, leftover); + send_attr.id = arg_idx; + mlx5hws_send_engine_post_end(&ctrl, &send_attr); + } +} + +int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx, + u32 arg_idx, + u8 *arg_data, + size_t data_size) +{ + struct mlx5hws_send_engine *queue; + int ret; + + mutex_lock(&ctx->ctrl_lock); + + /* Get the control queue */ + queue = &ctx->send_queue[ctx->queues - 1]; + + mlx5hws_arg_write(queue, arg_data, arg_idx, arg_data, data_size); + + mlx5hws_send_engine_flush_queue(queue); + + /* Poll for completion */ + ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1, + MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC); + + if (ret) + mlx5hws_err(ctx, "Failed to drain arg queue\n"); + + mutex_unlock(&ctx->ctrl_lock); + + return ret; +} + +bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx, + u32 arg_size) +{ + if (arg_size < ctx->caps->log_header_modify_argument_granularity || + arg_size > ctx->caps->log_header_modify_argument_max_alloc) { + return false; + } + return true; +} + +int mlx5hws_arg_create(struct mlx5hws_context *ctx, + u8 *data, + size_t data_sz, + u32 log_bulk_sz, + bool write_data, + u32 *arg_id) +{ + u16 single_arg_log_sz; + u16 multi_arg_log_sz; + int ret; + u32 id; + + single_arg_log_sz = mlx5hws_arg_data_size_to_arg_log_size(data_sz); + multi_arg_log_sz = single_arg_log_sz + log_bulk_sz; + + if (single_arg_log_sz >= MLX5HWS_ARG_CHUNK_SIZE_MAX) { + mlx5hws_err(ctx, "Requested single arg %u not supported\n", single_arg_log_sz); + return -EOPNOTSUPP; + } + + if (!mlx5hws_arg_is_valid_arg_request_size(ctx, multi_arg_log_sz)) { + mlx5hws_err(ctx, "Argument log size %d not supported by FW\n", multi_arg_log_sz); + return -EOPNOTSUPP; + } + + /* Alloc bulk of args */ + ret = mlx5hws_cmd_arg_create(ctx->mdev, multi_arg_log_sz, ctx->pd_num, &id); + if (ret) { + mlx5hws_err(ctx, "Failed allocating arg in order: %d\n", multi_arg_log_sz); + return ret; + } + + if (write_data) { + ret = mlx5hws_arg_write_inline_arg_data(ctx, id, + data, data_sz); + if (ret) { + mlx5hws_err(ctx, "Failed writing arg data\n"); + mlx5hws_cmd_arg_destroy(ctx->mdev, id); + return ret; + } + } + + *arg_id = id; + return ret; +} + +void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id) +{ + mlx5hws_cmd_arg_destroy(ctx->mdev, arg_id); +} + +int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx, + __be64 *data, + u8 num_of_actions, + u32 log_bulk_sz, + bool write_data, + u32 *arg_id) +{ + size_t data_sz = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE; + int ret; + + ret = mlx5hws_arg_create(ctx, + (u8 *)data, + data_sz, + log_bulk_sz, + write_data, + arg_id); + if (ret) + mlx5hws_err(ctx, "Failed creating modify header arg\n"); + + return ret; +} + +static int +hws_action_modify_check_field_limitation(u8 action_type, __be64 *pattern) +{ + /* Need to check field limitation here, but for now - return OK */ + return 0; +} + +#define INVALID_FIELD 0xffff + +static void +hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern, + u16 *src_field, u16 *dst_field) +{ + switch (action_type) { + case MLX5_ACTION_TYPE_SET: + case MLX5_ACTION_TYPE_ADD: + *src_field = MLX5_GET(set_action_in, pattern, field); + *dst_field = INVALID_FIELD; + break; + case MLX5_ACTION_TYPE_COPY: + 
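mlx5hws_arg_write() above splits the argument data into full 64-byte WQEs plus one leftover WQE, bumping the argument index per chunk; note that the kernel computes the remainder as data_size & (MLX5HWS_ARG_DATA_SIZE - 1), which is valid because the chunk size is a power of two. A schematic of the same loop, with printf standing in for posting a WQE:

#include <stdio.h>

#define CHUNK 64u	/* MLX5HWS_ARG_DATA_SIZE */

static void write_chunked(size_t arg_idx, size_t data_size)
{
	size_t full = data_size / CHUNK;
	size_t left = data_size & (CHUNK - 1);	/* CHUNK is a power of 2 */
	size_t i;

	for (i = 0; i < full; i++)
		printf("WQE -> arg %zu, 64 bytes\n", arg_idx++);
	if (left)
		printf("WQE -> arg %zu, %zu bytes\n", arg_idx, left);
}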
*src_field = MLX5_GET(copy_action_in, pattern, src_field);
+		*dst_field = MLX5_GET(copy_action_in, pattern, dst_field);
+		break;
+	default:
+		pr_warn("HWS: invalid modify header action type %d\n", action_type);
+		/* Don't leave the output fields uninitialized */
+		*src_field = INVALID_FIELD;
+		*dst_field = INVALID_FIELD;
+	}
+}
+
+bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz)
+{
+	size_t i;
+
+	for (i = 0; i < sz / MLX5HWS_MODIFY_ACTION_SIZE; i++) {
+		u8 action_type =
+			MLX5_GET(set_action_in, &pattern[i], action_type);
+		if (action_type >= MLX5_MODIFICATION_TYPE_MAX) {
+			mlx5hws_err(ctx, "Unsupported action id %d\n", action_type);
+			return false;
+		}
+		if (hws_action_modify_check_field_limitation(action_type, &pattern[i])) {
+			mlx5hws_err(ctx, "Unsupported action number %zu\n", i);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
+			   size_t max_actions, size_t *new_size,
+			   u32 *nope_location, __be64 *new_pat)
+{
+	u16 prev_src_field = 0, prev_dst_field = 0;
+	u16 src_field, dst_field;
+	u8 action_type;
+	size_t i, j;
+
+	*new_size = num_actions;
+	*nope_location = 0;
+
+	if (num_actions == 1)
+		return;
+
+	for (i = 0, j = 0; i < num_actions; i++, j++) {
+		action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
+
+		hws_action_modify_get_target_fields(action_type, &pattern[i],
+						    &src_field, &dst_field);
+		if (i % 2) {
+			if (action_type == MLX5_ACTION_TYPE_COPY &&
+			    (prev_src_field == src_field ||
+			     prev_dst_field == dst_field)) {
+				/* need Nope */
+				*new_size += 1;
+				*nope_location |= BIT(i);
+				memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+				MLX5_SET(set_action_in, &new_pat[j],
+					 action_type,
+					 MLX5_MODIFICATION_TYPE_NOP);
+				j++;
+			} else if (prev_src_field == src_field) {
+				/* need Nope */
+				*new_size += 1;
+				*nope_location |= BIT(i);
+				memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+				MLX5_SET(set_action_in, &new_pat[j],
+					 action_type,
+					 MLX5_MODIFICATION_TYPE_NOP);
+				j++;
+			}
+		}
+		memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
+		/* Check if there is no more space */
+		if (j > max_actions) {
+			*new_size = num_actions;
+			*nope_location = 0;
+			return;
+		}
+
+		prev_src_field = src_field;
+		prev_dst_field = dst_field;
+	}
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h
new file mode 100644
index 000000000000..27ca93385b08
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_PAT_ARG_H_
+#define MLX5HWS_PAT_ARG_H_
+
+/* Modify-header arg pool */
+enum mlx5hws_arg_chunk_size {
+	MLX5HWS_ARG_CHUNK_SIZE_1,
+	/* Keep MIN updated when changing */
+	MLX5HWS_ARG_CHUNK_SIZE_MIN = MLX5HWS_ARG_CHUNK_SIZE_1,
+	MLX5HWS_ARG_CHUNK_SIZE_2,
+	MLX5HWS_ARG_CHUNK_SIZE_3,
+	MLX5HWS_ARG_CHUNK_SIZE_4,
+	MLX5HWS_ARG_CHUNK_SIZE_MAX,
+};
+
+enum {
+	MLX5HWS_MODIFY_ACTION_SIZE = 8,
+	MLX5HWS_ARG_DATA_SIZE = 64,
+};
+
+struct mlx5hws_pattern_cache {
+	struct mutex lock; /* Protect pattern list */
+	struct list_head ptrn_list;
+};
+
+struct mlx5hws_pattern_cache_item {
+	struct {
+		u32 pattern_id;
+		u8 *data;
+		u16 num_of_actions;
+	} mh_data;
+	u32 refcount;
+	struct list_head ptrn_list_node;
+};
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_get_arg_log_size(u16 num_of_actions);
+
+u32 mlx5hws_arg_get_arg_size(u16 num_of_actions);
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_data_size_to_arg_log_size(u16 data_size);
+
+u32 mlx5hws_arg_data_size_to_arg_size(u16
data_size); + +int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache); + +void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache); + +bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz); + +int mlx5hws_arg_create(struct mlx5hws_context *ctx, + u8 *data, + size_t data_sz, + u32 log_bulk_sz, + bool write_data, + u32 *arg_id); + +void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id); + +int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx, + __be64 *data, + u8 num_of_actions, + u32 log_bulk_sz, + bool write_data, + u32 *modify_hdr_arg_id); + +int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx, + __be64 *pattern, + size_t pattern_sz, + u32 *ptrn_id); + +void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, + u32 ptrn_id); + +bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx, + u32 arg_size); + +bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions); + +void mlx5hws_arg_write(struct mlx5hws_send_engine *queue, + void *comp_data, + u32 arg_idx, + u8 *arg_data, + size_t data_size); + +void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue, + u32 arg_idx, + u8 *arg_data, + u16 num_of_actions); + +int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx, + u32 arg_idx, + u8 *arg_data, + size_t data_size); + +void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions, + size_t *new_size, u32 *nope_location, __be64 *new_pat); +#endif /* MLX5HWS_PAT_ARG_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c new file mode 100644 index 000000000000..a8a63e3278be --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c @@ -0,0 +1,640 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" +#include "mlx5hws_buddy.h" + +static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource) +{ + switch (resource->pool->type) { + case MLX5HWS_POOL_TYPE_STE: + mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id); + break; + case MLX5HWS_POOL_TYPE_STC: + mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id); + break; + default: + break; + } + + kfree(resource); +} + +static void hws_pool_resource_free(struct mlx5hws_pool *pool, + int resource_idx) +{ + hws_pool_free_one_resource(pool->resource[resource_idx]); + pool->resource[resource_idx] = NULL; + + if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) { + hws_pool_free_one_resource(pool->mirror_resource[resource_idx]); + pool->mirror_resource[resource_idx] = NULL; + } +} + +static struct mlx5hws_pool_resource * +hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range, + u32 fw_ft_type) +{ + struct mlx5hws_cmd_ste_create_attr ste_attr; + struct mlx5hws_cmd_stc_create_attr stc_attr; + struct mlx5hws_pool_resource *resource; + u32 obj_id = 0; + int ret; + + resource = kzalloc(sizeof(*resource), GFP_KERNEL); + if (!resource) + return NULL; + + switch (pool->type) { + case MLX5HWS_POOL_TYPE_STE: + ste_attr.log_obj_range = log_range; + ste_attr.table_type = fw_ft_type; + ret = mlx5hws_cmd_ste_create(pool->ctx->mdev, &ste_attr, &obj_id); + break; + case MLX5HWS_POOL_TYPE_STC: + stc_attr.log_obj_range = log_range; + stc_attr.table_type = fw_ft_type; + ret = mlx5hws_cmd_stc_create(pool->ctx->mdev, &stc_attr, 
&obj_id);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (ret) {
+		mlx5hws_err(pool->ctx, "Failed to allocate resource objects\n");
+		goto free_resource;
+	}
+
+	resource->pool = pool;
+	resource->range = 1 << log_range;
+	resource->base_id = obj_id;
+
+	return resource;
+
+free_resource:
+	kfree(resource);
+	return NULL;
+}
+
+static int
+hws_pool_resource_alloc(struct mlx5hws_pool *pool, u32 log_range, int idx)
+{
+	struct mlx5hws_pool_resource *resource;
+	u32 fw_ft_type, opt_log_range;
+
+	fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false);
+	opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_range;
+	resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+	if (!resource) {
+		mlx5hws_err(pool->ctx, "Failed allocating resource\n");
+		return -EINVAL;
+	}
+
+	pool->resource[idx] = resource;
+
+	if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+		struct mlx5hws_pool_resource *mirror_resource;
+
+		fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true);
+		opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_range;
+		mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+		if (!mirror_resource) {
+			mlx5hws_err(pool->ctx, "Failed allocating mirrored resource\n");
+			hws_pool_free_one_resource(resource);
+			pool->resource[idx] = NULL;
+			return -EINVAL;
+		}
+		pool->mirror_resource[idx] = mirror_resource;
+	}
+
+	return 0;
+}
+
+static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
+{
+	unsigned long *cur_bmp;
+
+	cur_bmp = bitmap_zalloc(1 << log_range, GFP_KERNEL);
+	if (!cur_bmp)
+		return NULL;
+
+	bitmap_fill(cur_bmp, 1 << log_range);
+
+	return cur_bmp;
+}
+
+static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
+					struct mlx5hws_pool_chunk *chunk)
+{
+	struct mlx5hws_buddy_mem *buddy;
+
+	buddy = pool->db.buddy_manager->buddies[chunk->resource_idx];
+	if (!buddy) {
+		mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx);
+		return;
+	}
+
+	mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
+}
+
+static struct mlx5hws_buddy_mem *
+hws_pool_buddy_get_next_buddy(struct mlx5hws_pool *pool, int idx,
+			      u32 order, bool *is_new_buddy)
+{
+	struct mlx5hws_buddy_mem *buddy;
+	u32 new_buddy_size;
+
+	buddy = pool->db.buddy_manager->buddies[idx];
+	if (buddy)
+		return buddy;
+
+	new_buddy_size = max(pool->alloc_log_sz, order);
+	*is_new_buddy = true;
+	buddy = mlx5hws_buddy_create(new_buddy_size);
+	if (!buddy) {
+		mlx5hws_err(pool->ctx, "Failed to create buddy order: %d index: %d\n",
+			    new_buddy_size, idx);
+		return NULL;
+	}
+
+	if (hws_pool_resource_alloc(pool, new_buddy_size, idx) != 0) {
+		mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+			    pool->type, new_buddy_size, idx);
+		mlx5hws_buddy_cleanup(buddy);
+		return NULL;
+	}
+
+	pool->db.buddy_manager->buddies[idx] = buddy;
+
+	return buddy;
+}
+
+static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
+					int order,
+					u32 *buddy_idx,
+					int *seg)
+{
+	struct mlx5hws_buddy_mem *buddy;
+	bool new_mem = false;
+	int ret = 0;
+	int i;
+
+	*seg = -1;
+
+	/* Find the next free place from the buddy array */
+	while (*seg == -1) {
+		for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+			buddy = hws_pool_buddy_get_next_buddy(pool, i,
+							      order,
+							      &new_mem);
+			if (!buddy) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			*seg = mlx5hws_buddy_alloc_mem(buddy, order);
+			if (*seg != -1)
+				goto found;
+
+			if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
+				mlx5hws_err(pool->ctx,
+					    "Failed to allocate seg for one resource pool\n");
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			if (new_mem) {
+				/* We have a new memory pool, there should be room for us */
+				mlx5hws_err(pool->ctx,
+					    "No memory for order: %d with buddy no: %d\n",
+					    order, i);
+				ret = -ENOMEM;
+				goto out;
+			}
+		}
+	}
+
+found:
+	*buddy_idx = i;
+out:
+	return ret;
+}
+
+static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
+				       struct mlx5hws_pool_chunk *chunk)
+{
+	int ret = 0;
+
+	/* Go over the buddies and find next free slot */
+	ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order,
+					   &chunk->resource_idx,
+					   &chunk->offset);
+	if (ret)
+		mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+			    chunk->order);
+
+	return ret;
+}
+
+static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool)
+{
+	struct mlx5hws_buddy_mem *buddy;
+	int i;
+
+	for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+		buddy = pool->db.buddy_manager->buddies[i];
+		if (buddy) {
+			mlx5hws_buddy_cleanup(buddy);
+			kfree(buddy);
+			pool->db.buddy_manager->buddies[i] = NULL;
+		}
+	}
+
+	kfree(pool->db.buddy_manager);
+}
+
+static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
+{
+	pool->db.buddy_manager = kzalloc(sizeof(*pool->db.buddy_manager), GFP_KERNEL);
+	if (!pool->db.buddy_manager)
+		return -ENOMEM;
+
+	if (pool->flags & MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE) {
+		bool new_buddy;
+
+		if (!hws_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) {
+			mlx5hws_err(pool->ctx,
+				    "Failed allocating memory on create log_sz: %d\n", log_range);
+			kfree(pool->db.buddy_manager);
+			return -ENOMEM;
+		}
+	}
+
+	pool->p_db_uninit = &hws_pool_buddy_db_uninit;
+	pool->p_get_chunk = &hws_pool_buddy_db_get_chunk;
+	pool->p_put_chunk = &hws_pool_buddy_db_put_chunk;
+
+	return 0;
+}
+
+static int hws_pool_create_resource_on_index(struct mlx5hws_pool *pool,
+					     u32 alloc_size, int idx)
+{
+	int ret = hws_pool_resource_alloc(pool, alloc_size, idx);
+
+	if (ret) {
+		mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+			    pool->type, alloc_size, idx);
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct mlx5hws_pool_elements *
+hws_pool_element_create_new_elem(struct mlx5hws_pool *pool, u32 order, int idx)
+{
+	struct mlx5hws_pool_elements *elem;
+	u32 alloc_size;
+
+	alloc_size = pool->alloc_log_sz;
+
+	elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+	if (!elem)
+		return NULL;
+
+	/* Sharing the same resource also means that all the elements are of size 1 */
+	if ((pool->flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS) &&
+	    !(pool->flags & MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) {
+		/* Currently all chunks are of size 1 */
+		elem->bitmap = hws_pool_create_and_init_bitmap(alloc_size - order);
+		if (!elem->bitmap) {
+			mlx5hws_err(pool->ctx,
+				    "Failed to create bitmap type: %d: size %d index: %d\n",
+				    pool->type, alloc_size, idx);
+			goto free_elem;
+		}
+
+		elem->log_size = alloc_size - order;
+	}
+
+	if (hws_pool_create_resource_on_index(pool, alloc_size, idx)) {
+		mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+			    pool->type, alloc_size, idx);
+		goto free_db;
+	}
+
+	pool->db.element_manager->elements[idx] = elem;
+
+	return elem;
+
+free_db:
+	bitmap_free(elem->bitmap);
+free_elem:
+	kfree(elem);
+	return NULL;
+}
+
+static int hws_pool_element_find_seg(struct mlx5hws_pool_elements *elem, int *seg)
+{
+	unsigned int segment, size;
+
+	size = 1 << elem->log_size;
+
+	segment = find_first_bit(elem->bitmap, size);
+	if (segment >= size) {
+		elem->is_full = true;
+		return -ENOMEM;
+	}
+
+	bitmap_clear(elem->bitmap, segment, 1);
+	*seg = segment;
+	return 0;
+}
+
+static int
+hws_pool_onesize_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
+				       u32 *idx, int *seg)
+{
+	struct mlx5hws_pool_elements *elem;
+
+	elem = pool->db.element_manager->elements[0];
+	if (!elem)
+		elem = hws_pool_element_create_new_elem(pool, order, 0);
+	if (!elem)
+		goto err_no_elem;
+
+	if (hws_pool_element_find_seg(elem, seg) != 0) {
+		mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+		return -ENOMEM;
+	}
+
+	*idx = 0;
+	elem->num_of_elements++;
+	return 0;
+
+err_no_elem:
+	mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
+	return -ENOMEM;
+}
+
+static int
+hws_pool_general_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
+				       u32 *idx, int *seg)
+{
+	int ret, i;
+
+	for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+		if (!pool->resource[i]) {
+			ret = hws_pool_create_resource_on_index(pool, order, i);
+			if (ret)
+				goto err_no_res;
+			*idx = i;
+			*seg = 0; /* One memory slot in that element */
+			return 0;
+		}
+	}
+
+	mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+	return -ENOMEM;
+
+err_no_res:
+	mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
+	return -ENOMEM;
+}
+
+static int hws_pool_general_element_db_get_chunk(struct mlx5hws_pool *pool,
+						 struct mlx5hws_pool_chunk *chunk)
+{
+	int ret;
+
+	/* Go over all memory elements and find/allocate free slot */
+	ret = hws_pool_general_element_get_mem_chunk(pool, chunk->order,
+						     &chunk->resource_idx,
+						     &chunk->offset);
+	if (ret)
+		mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+			    chunk->order);
+
+	return ret;
+}
+
+static void hws_pool_general_element_db_put_chunk(struct mlx5hws_pool *pool,
+						  struct mlx5hws_pool_chunk *chunk)
+{
+	if (unlikely(!pool->resource[chunk->resource_idx]))
+		pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+	if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE)
+		hws_pool_resource_free(pool, chunk->resource_idx);
+}
+
+static void hws_pool_general_element_db_uninit(struct mlx5hws_pool *pool)
+{
+	(void)pool;
+}
+
+/* This memory management works as follows:
+ * - At start, no memory is allocated at all.
+ * - When a new chunk request arrives:
+ *     a resource is allocated and handed out.
+ * - When that chunk is freed:
+ *     the resource is freed.
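+ *
+ * Usage sketch (illustrative only; "log_sz" stands for a caller-chosen
+ * log2 size). Every chunk maps 1:1 to a FW resource:
+ *
+ *	struct mlx5hws_pool_chunk chunk = { .order = log_sz };
+ *
+ *	mlx5hws_pool_chunk_alloc(pool, &chunk);
+ *		(creates pool->resource[chunk.resource_idx])
+ *	mlx5hws_pool_chunk_free(pool, &chunk);
+ *		(destroys it when RELEASE_FREE_RESOURCE is set)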
+ */
+static int hws_pool_general_element_db_init(struct mlx5hws_pool *pool)
+{
+	pool->p_db_uninit = &hws_pool_general_element_db_uninit;
+	pool->p_get_chunk = &hws_pool_general_element_db_get_chunk;
+	pool->p_put_chunk = &hws_pool_general_element_db_put_chunk;
+
+	return 0;
+}
+
+static void hws_onesize_element_db_destroy_element(struct mlx5hws_pool *pool,
+						   struct mlx5hws_pool_elements *elem,
+						   struct mlx5hws_pool_chunk *chunk)
+{
+	if (unlikely(!pool->resource[chunk->resource_idx]))
+		pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+	hws_pool_resource_free(pool, chunk->resource_idx);
+	kfree(elem);
+	pool->db.element_manager->elements[chunk->resource_idx] = NULL;
+}
+
+static void hws_onesize_element_db_put_chunk(struct mlx5hws_pool *pool,
+					     struct mlx5hws_pool_chunk *chunk)
+{
+	struct mlx5hws_pool_elements *elem;
+
+	if (unlikely(chunk->resource_idx))
+		pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+	elem = pool->db.element_manager->elements[chunk->resource_idx];
+	if (!elem) {
+		mlx5hws_err(pool->ctx, "No such element (%d)\n", chunk->resource_idx);
+		return;
+	}
+
+	bitmap_set(elem->bitmap, chunk->offset, 1);
+	elem->is_full = false;
+	elem->num_of_elements--;
+
+	if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE &&
+	    !elem->num_of_elements)
+		hws_onesize_element_db_destroy_element(pool, elem, chunk);
+}
+
+static int hws_onesize_element_db_get_chunk(struct mlx5hws_pool *pool,
+					    struct mlx5hws_pool_chunk *chunk)
+{
+	int ret = 0;
+
+	/* Go over all memory elements and find/allocate free slot */
+	ret = hws_pool_onesize_element_get_mem_chunk(pool, chunk->order,
+						     &chunk->resource_idx,
+						     &chunk->offset);
+	if (ret)
+		mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+			    chunk->order);
+
+	return ret;
+}
+
+static void hws_onesize_element_db_uninit(struct mlx5hws_pool *pool)
+{
+	struct mlx5hws_pool_elements *elem;
+	int i;
+
+	for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+		elem = pool->db.element_manager->elements[i];
+		if (elem) {
+			bitmap_free(elem->bitmap);
+			kfree(elem);
+			pool->db.element_manager->elements[i] = NULL;
+		}
+	}
+	kfree(pool->db.element_manager);
+}
+
+/* This memory management works as follows:
+ * - At start, no memory is allocated at all.
+ * - When a new chunk request arrives:
+ *     allocate the first and only slot of the memory/resource;
+ *     once it is exhausted, return an error.
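+ *
+ * For example (per mlx5hws_pool.h below), the STC pool flags
+ * (MLX5HWS_POOL_FLAGS_FOR_STC_POOL) select this DB type: one FW
+ * resource whose fixed-size slots are tracked by the element bitmap.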
+ */
+static int hws_pool_onesize_element_db_init(struct mlx5hws_pool *pool)
+{
+	pool->db.element_manager = kzalloc(sizeof(*pool->db.element_manager), GFP_KERNEL);
+	if (!pool->db.element_manager)
+		return -ENOMEM;
+
+	pool->p_db_uninit = &hws_onesize_element_db_uninit;
+	pool->p_get_chunk = &hws_onesize_element_db_get_chunk;
+	pool->p_put_chunk = &hws_onesize_element_db_put_chunk;
+
+	return 0;
+}
+
+static int hws_pool_db_init(struct mlx5hws_pool *pool,
+			    enum mlx5hws_db_type db_type)
+{
+	int ret;
+
+	if (db_type == MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE)
+		ret = hws_pool_general_element_db_init(pool);
+	else if (db_type == MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE)
+		ret = hws_pool_onesize_element_db_init(pool);
+	else
+		ret = hws_pool_buddy_db_init(pool, pool->alloc_log_sz);
+
+	if (ret) {
+		mlx5hws_err(pool->ctx, "Failed to init db type %d (ret: %d)\n", db_type, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void hws_pool_db_uninit(struct mlx5hws_pool *pool)
+{
+	pool->p_db_uninit(pool);
+}
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+			     struct mlx5hws_pool_chunk *chunk)
+{
+	int ret;
+
+	mutex_lock(&pool->lock);
+	ret = pool->p_get_chunk(pool, chunk);
+	mutex_unlock(&pool->lock);
+
+	return ret;
+}
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+			     struct mlx5hws_pool_chunk *chunk)
+{
+	mutex_lock(&pool->lock);
+	pool->p_put_chunk(pool, chunk);
+	mutex_unlock(&pool->lock);
+}
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_attr)
+{
+	enum mlx5hws_db_type res_db_type;
+	struct mlx5hws_pool *pool;
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return NULL;
+
+	pool->ctx = ctx;
+	pool->type = pool_attr->pool_type;
+	pool->alloc_log_sz = pool_attr->alloc_log_sz;
+	pool->flags = pool_attr->flags;
+	pool->tbl_type = pool_attr->table_type;
+	pool->opt_type = pool_attr->opt_type;
+
+	/* Select the resource DB type according to the pool flags */
+	if (pool->flags == (MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+			    MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK))
+		res_db_type = MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE;
+	else if (pool->flags == (MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+				 MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS))
+		res_db_type = MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE;
+	else
+		res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY;
+
+	if (hws_pool_db_init(pool, res_db_type))
+		goto free_pool;
+
+	mutex_init(&pool->lock);
+
+	return pool;
+
+free_pool:
+	kfree(pool);
+	return NULL;
+}
+
+int mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
+{
+	int i;
+
+	mutex_destroy(&pool->lock);
+
+	for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++)
+		if (pool->resource[i])
+			hws_pool_resource_free(pool, i);
+
+	hws_pool_db_uninit(pool);
+
+	kfree(pool);
+	return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
new file mode 100644
index 000000000000..621298b352b2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_POOL_H_
+#define MLX5HWS_POOL_H_
+
+#define MLX5HWS_POOL_STC_LOG_SZ 15
+
+#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100
+
+enum mlx5hws_pool_type {
+	MLX5HWS_POOL_TYPE_STE,
+	MLX5HWS_POOL_TYPE_STC,
+};
+
+struct mlx5hws_pool_chunk {
+	u32 resource_idx;
+	/* Internal offset, relative to base index */
+	int offset;
+	int order;
+};
+
+struct mlx5hws_pool_resource {
+	struct mlx5hws_pool *pool;
+	u32 base_id;
+	u32 range;
+};
+
+enum mlx5hws_pool_flags {
+	/* Only one resource in this pool */
+	MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
+	MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
+	/* Resources are not shared between chunks */
+	MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
+	/* All objects are the same size */
+	MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
+	/* Managed by buddy allocator */
+	MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
+	/* Allocate pool_type memory on pool creation */
+	MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,
+
+	/* These values should be used by the caller */
+	MLX5HWS_POOL_FLAGS_FOR_STC_POOL =
+		MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+		MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS,
+	MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL =
+		MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+		MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK,
+	MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL =
+		MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+		MLX5HWS_POOL_FLAGS_BUDDY_MANAGED |
+		MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
+};
+
+enum mlx5hws_pool_optimize {
+	MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
+	MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
+	MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
+};
+
+struct mlx5hws_pool_attr {
+	enum mlx5hws_pool_type pool_type;
+	enum mlx5hws_table_type table_type;
+	enum mlx5hws_pool_flags flags;
+	enum mlx5hws_pool_optimize opt_type;
+	/* Allocation size once memory is depleted */
+	size_t alloc_log_sz;
+};
+
+enum mlx5hws_db_type {
+	/* Used for allocating chunks of big memory; each element has its own resource in the FW */
+	MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE,
+	/* One resource only; all the elements are of the same size */
+	MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
+	/* Many resources; the memory is allocated with a buddy mechanism */
+	MLX5HWS_POOL_DB_TYPE_BUDDY,
+};
+
+struct mlx5hws_buddy_manager {
+	struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+struct mlx5hws_pool_elements {
+	u32 num_of_elements;
+	unsigned long *bitmap;
+	u32 log_size;
+	bool is_full;
+};
+
+struct mlx5hws_element_manager {
+	struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+struct mlx5hws_pool_db {
+	enum mlx5hws_db_type type;
+	union {
+		struct mlx5hws_element_manager *element_manager;
+		struct mlx5hws_buddy_manager *buddy_manager;
+	};
+};
+
+typedef int (*mlx5hws_pool_db_get_chunk)(struct mlx5hws_pool *pool,
+					 struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_db_put_chunk)(struct mlx5hws_pool *pool,
+					  struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_uninit_db)(struct mlx5hws_pool *pool);
+
+struct mlx5hws_pool {
+	struct mlx5hws_context *ctx;
+	enum mlx5hws_pool_type type;
+	enum mlx5hws_pool_flags flags;
+	struct mutex lock; /* protect the pool */
+	size_t alloc_log_sz;
+	enum mlx5hws_table_type tbl_type;
+	enum mlx5hws_pool_optimize opt_type;
+	struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+	struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+	/* DB */
+	struct mlx5hws_pool_db db;
+	/* Functions */
+	mlx5hws_pool_uninit_db p_db_uninit;
+	mlx5hws_pool_db_get_chunk p_get_chunk;
+	mlx5hws_pool_db_put_chunk p_put_chunk;
+};
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx,
+		    struct mlx5hws_pool_attr *pool_attr);
+
+int mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+			     struct mlx5hws_pool_chunk *chunk);
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool
*pool, + struct mlx5hws_pool_chunk *chunk); + +static inline u32 +mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) +{ + return pool->resource[chunk->resource_idx]->base_id; +} + +static inline u32 +mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) +{ + return pool->mirror_resource[chunk->resource_idx]->base_id; +} +#endif /* MLX5HWS_POOL_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h new file mode 100644 index 000000000000..de92cecbeb92 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h @@ -0,0 +1,514 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5_PRM_H_ +#define MLX5_PRM_H_ + +#define MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY 512 + +/* Action type of header modification. */ +enum { + MLX5_MODIFICATION_TYPE_SET = 0x1, + MLX5_MODIFICATION_TYPE_ADD = 0x2, + MLX5_MODIFICATION_TYPE_COPY = 0x3, + MLX5_MODIFICATION_TYPE_INSERT = 0x4, + MLX5_MODIFICATION_TYPE_REMOVE = 0x5, + MLX5_MODIFICATION_TYPE_NOP = 0x6, + MLX5_MODIFICATION_TYPE_REMOVE_WORDS = 0x7, + MLX5_MODIFICATION_TYPE_ADD_FIELD = 0x8, + MLX5_MODIFICATION_TYPE_MAX, +}; + +/* The field of packet to be modified. */ +enum mlx5_modification_field { + MLX5_MODI_OUT_NONE = -1, + MLX5_MODI_OUT_SMAC_47_16 = 1, + MLX5_MODI_OUT_SMAC_15_0, + MLX5_MODI_OUT_ETHERTYPE, + MLX5_MODI_OUT_DMAC_47_16, + MLX5_MODI_OUT_DMAC_15_0, + MLX5_MODI_OUT_IP_DSCP, + MLX5_MODI_OUT_TCP_FLAGS, + MLX5_MODI_OUT_TCP_SPORT, + MLX5_MODI_OUT_TCP_DPORT, + MLX5_MODI_OUT_IPV4_TTL, + MLX5_MODI_OUT_UDP_SPORT, + MLX5_MODI_OUT_UDP_DPORT, + MLX5_MODI_OUT_SIPV6_127_96, + MLX5_MODI_OUT_SIPV6_95_64, + MLX5_MODI_OUT_SIPV6_63_32, + MLX5_MODI_OUT_SIPV6_31_0, + MLX5_MODI_OUT_DIPV6_127_96, + MLX5_MODI_OUT_DIPV6_95_64, + MLX5_MODI_OUT_DIPV6_63_32, + MLX5_MODI_OUT_DIPV6_31_0, + MLX5_MODI_OUT_SIPV4, + MLX5_MODI_OUT_DIPV4, + MLX5_MODI_OUT_FIRST_VID, + MLX5_MODI_IN_SMAC_47_16 = 0x31, + MLX5_MODI_IN_SMAC_15_0, + MLX5_MODI_IN_ETHERTYPE, + MLX5_MODI_IN_DMAC_47_16, + MLX5_MODI_IN_DMAC_15_0, + MLX5_MODI_IN_IP_DSCP, + MLX5_MODI_IN_TCP_FLAGS, + MLX5_MODI_IN_TCP_SPORT, + MLX5_MODI_IN_TCP_DPORT, + MLX5_MODI_IN_IPV4_TTL, + MLX5_MODI_IN_UDP_SPORT, + MLX5_MODI_IN_UDP_DPORT, + MLX5_MODI_IN_SIPV6_127_96, + MLX5_MODI_IN_SIPV6_95_64, + MLX5_MODI_IN_SIPV6_63_32, + MLX5_MODI_IN_SIPV6_31_0, + MLX5_MODI_IN_DIPV6_127_96, + MLX5_MODI_IN_DIPV6_95_64, + MLX5_MODI_IN_DIPV6_63_32, + MLX5_MODI_IN_DIPV6_31_0, + MLX5_MODI_IN_SIPV4, + MLX5_MODI_IN_DIPV4, + MLX5_MODI_OUT_IPV6_HOPLIMIT, + MLX5_MODI_IN_IPV6_HOPLIMIT, + MLX5_MODI_META_DATA_REG_A, + MLX5_MODI_META_DATA_REG_B = 0x50, + MLX5_MODI_META_REG_C_0, + MLX5_MODI_META_REG_C_1, + MLX5_MODI_META_REG_C_2, + MLX5_MODI_META_REG_C_3, + MLX5_MODI_META_REG_C_4, + MLX5_MODI_META_REG_C_5, + MLX5_MODI_META_REG_C_6, + MLX5_MODI_META_REG_C_7, + MLX5_MODI_OUT_TCP_SEQ_NUM, + MLX5_MODI_IN_TCP_SEQ_NUM, + MLX5_MODI_OUT_TCP_ACK_NUM, + MLX5_MODI_IN_TCP_ACK_NUM = 0x5C, + MLX5_MODI_GTP_TEID = 0x6E, + MLX5_MODI_OUT_IP_ECN = 0x73, + MLX5_MODI_TUNNEL_HDR_DW_1 = 0x75, + MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76, + MLX5_MODI_HASH_RESULT = 0x81, + MLX5_MODI_IN_MPLS_LABEL_0 = 0x8a, + MLX5_MODI_IN_MPLS_LABEL_1, + MLX5_MODI_IN_MPLS_LABEL_2, + MLX5_MODI_IN_MPLS_LABEL_3, + MLX5_MODI_IN_MPLS_LABEL_4, + MLX5_MODI_OUT_IP_PROTOCOL = 0x4A, + MLX5_MODI_OUT_IPV6_NEXT_HDR = 0x4A, + MLX5_MODI_META_REG_C_8 
= 0x8F, + MLX5_MODI_META_REG_C_9 = 0x90, + MLX5_MODI_META_REG_C_10 = 0x91, + MLX5_MODI_META_REG_C_11 = 0x92, + MLX5_MODI_META_REG_C_12 = 0x93, + MLX5_MODI_META_REG_C_13 = 0x94, + MLX5_MODI_META_REG_C_14 = 0x95, + MLX5_MODI_META_REG_C_15 = 0x96, + MLX5_MODI_OUT_IPV4_TOTAL_LEN = 0x11D, + MLX5_MODI_OUT_IPV6_PAYLOAD_LEN = 0x11E, + MLX5_MODI_OUT_IPV4_IHL = 0x11F, + MLX5_MODI_OUT_TCP_DATA_OFFSET = 0x120, + MLX5_MODI_OUT_ESP_SPI = 0x5E, + MLX5_MODI_OUT_ESP_SEQ_NUM = 0x82, + MLX5_MODI_OUT_IPSEC_NEXT_HDR = 0x126, + MLX5_MODI_INVALID = INT_MAX, +}; + +enum { + MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1, + MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE = 0x8 << 1, + MLX5_SET_HCA_CAP_OP_MOD_ESW = 0x9 << 1, + MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE = 0x1B << 1, + MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1, +}; + +enum mlx5_ifc_rtc_update_mode { + MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH = 0x0, + MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET = 0x1, +}; + +enum mlx5_ifc_rtc_access_mode { + MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH = 0x0, + MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR = 0x1, +}; + +enum mlx5_ifc_rtc_ste_format { + MLX5_IFC_RTC_STE_FORMAT_8DW = 0x4, + MLX5_IFC_RTC_STE_FORMAT_11DW = 0x5, + MLX5_IFC_RTC_STE_FORMAT_RANGE = 0x7, +}; + +enum mlx5_ifc_rtc_reparse_mode { + MLX5_IFC_RTC_REPARSE_NEVER = 0x0, + MLX5_IFC_RTC_REPARSE_ALWAYS = 0x1, + MLX5_IFC_RTC_REPARSE_BY_STC = 0x2, +}; + +#define MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX 16 + +struct mlx5_ifc_rtc_bits { + u8 modify_field_select[0x40]; + u8 reserved_at_40[0x40]; + u8 update_index_mode[0x2]; + u8 reparse_mode[0x2]; + u8 num_match_ste[0x4]; + u8 pd[0x18]; + u8 reserved_at_a0[0x9]; + u8 access_index_mode[0x3]; + u8 num_hash_definer[0x4]; + u8 update_method[0x1]; + u8 reserved_at_b1[0x2]; + u8 log_depth[0x5]; + u8 log_hash_size[0x8]; + u8 ste_format_0[0x8]; + u8 table_type[0x8]; + u8 ste_format_1[0x8]; + u8 reserved_at_d8[0x8]; + u8 match_definer_0[0x20]; + u8 stc_id[0x20]; + u8 ste_table_base_id[0x20]; + u8 ste_table_offset[0x20]; + u8 reserved_at_160[0x8]; + u8 miss_flow_table_id[0x18]; + u8 match_definer_1[0x20]; + u8 reserved_at_1a0[0x260]; +}; + +enum mlx5_ifc_stc_action_type { + MLX5_IFC_STC_ACTION_TYPE_NOP = 0x00, + MLX5_IFC_STC_ACTION_TYPE_COPY = 0x05, + MLX5_IFC_STC_ACTION_TYPE_SET = 0x06, + MLX5_IFC_STC_ACTION_TYPE_ADD = 0x07, + MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS = 0x08, + MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE = 0x09, + MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT = 0x0b, + MLX5_IFC_STC_ACTION_TYPE_TAG = 0x0c, + MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST = 0x0e, + MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION = 0x10, + MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION = 0x11, + MLX5_IFC_STC_ACTION_TYPE_ASO = 0x12, + MLX5_IFC_STC_ACTION_TYPE_TRAILER = 0x13, + MLX5_IFC_STC_ACTION_TYPE_COUNTER = 0x14, + MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD = 0x1b, + MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE = 0x80, + MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR = 0x81, + MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT = 0x82, + MLX5_IFC_STC_ACTION_TYPE_DROP = 0x83, + MLX5_IFC_STC_ACTION_TYPE_ALLOW = 0x84, + MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT = 0x85, + MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK = 0x86, +}; + +enum mlx5_ifc_stc_reparse_mode { + MLX5_IFC_STC_REPARSE_IGNORE = 0x0, + MLX5_IFC_STC_REPARSE_NEVER = 0x1, + MLX5_IFC_STC_REPARSE_ALWAYS = 0x2, +}; + +struct mlx5_ifc_stc_ste_param_ste_table_bits { + u8 ste_obj_id[0x20]; + u8 match_definer_id[0x20]; + u8 reserved_at_40[0x3]; + u8 log_hash_size[0x5]; + u8 reserved_at_48[0x38]; +}; + +struct mlx5_ifc_stc_ste_param_tir_bits { + u8 
reserved_at_0[0x8]; + u8 tirn[0x18]; + u8 reserved_at_20[0x60]; +}; + +struct mlx5_ifc_stc_ste_param_table_bits { + u8 reserved_at_0[0x8]; + u8 table_id[0x18]; + u8 reserved_at_20[0x60]; +}; + +struct mlx5_ifc_stc_ste_param_flow_counter_bits { + u8 flow_counter_id[0x20]; +}; + +enum { + MLX5_ASO_CT_NUM_PER_OBJ = 1, + MLX5_ASO_METER_NUM_PER_OBJ = 2, + MLX5_ASO_IPSEC_NUM_PER_OBJ = 1, + MLX5_ASO_FIRST_HIT_NUM_PER_OBJ = 512, +}; + +struct mlx5_ifc_stc_ste_param_execute_aso_bits { + u8 aso_object_id[0x20]; + u8 return_reg_id[0x4]; + u8 aso_type[0x4]; + u8 reserved_at_28[0x18]; +}; + +struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits { + u8 ipsec_object_id[0x20]; +}; + +struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits { + u8 ipsec_object_id[0x20]; +}; + +struct mlx5_ifc_stc_ste_param_trailer_bits { + u8 reserved_at_0[0x8]; + u8 command[0x4]; + u8 reserved_at_c[0x2]; + u8 type[0x2]; + u8 reserved_at_10[0xa]; + u8 length[0x6]; +}; + +struct mlx5_ifc_stc_ste_param_header_modify_list_bits { + u8 header_modify_pattern_id[0x20]; + u8 header_modify_argument_id[0x20]; +}; + +enum mlx5_ifc_header_anchors { + MLX5_HEADER_ANCHOR_PACKET_START = 0x0, + MLX5_HEADER_ANCHOR_MAC = 0x1, + MLX5_HEADER_ANCHOR_FIRST_VLAN_START = 0x2, + MLX5_HEADER_ANCHOR_IPV6_IPV4 = 0x07, + MLX5_HEADER_ANCHOR_ESP = 0x08, + MLX5_HEADER_ANCHOR_TCP_UDP = 0x09, + MLX5_HEADER_ANCHOR_TUNNEL_HEADER = 0x0a, + MLX5_HEADER_ANCHOR_INNER_MAC = 0x13, + MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19, + MLX5_HEADER_ANCHOR_INNER_TCP_UDP = 0x1a, + MLX5_HEADER_ANCHOR_L4_PAYLOAD = 0x1b, + MLX5_HEADER_ANCHOR_INNER_L4_PAYLOAD = 0x1c +}; + +struct mlx5_ifc_stc_ste_param_remove_bits { + u8 action_type[0x4]; + u8 decap[0x1]; + u8 reserved_at_5[0x5]; + u8 remove_start_anchor[0x6]; + u8 reserved_at_10[0x2]; + u8 remove_end_anchor[0x6]; + u8 reserved_at_18[0x8]; +}; + +struct mlx5_ifc_stc_ste_param_remove_words_bits { + u8 action_type[0x4]; + u8 reserved_at_4[0x6]; + u8 remove_start_anchor[0x6]; + u8 reserved_at_10[0x1]; + u8 remove_offset[0x7]; + u8 reserved_at_18[0x2]; + u8 remove_size[0x6]; +}; + +struct mlx5_ifc_stc_ste_param_insert_bits { + u8 action_type[0x4]; + u8 encap[0x1]; + u8 inline_data[0x1]; + u8 reserved_at_6[0x4]; + u8 insert_anchor[0x6]; + u8 reserved_at_10[0x1]; + u8 insert_offset[0x7]; + u8 reserved_at_18[0x1]; + u8 insert_size[0x7]; + u8 insert_argument[0x20]; +}; + +struct mlx5_ifc_stc_ste_param_vport_bits { + u8 eswitch_owner_vhca_id[0x10]; + u8 vport_number[0x10]; + u8 eswitch_owner_vhca_id_valid[0x1]; + u8 reserved_at_21[0x5f]; +}; + +union mlx5_ifc_stc_param_bits { + struct mlx5_ifc_stc_ste_param_ste_table_bits ste_table; + struct mlx5_ifc_stc_ste_param_tir_bits tir; + struct mlx5_ifc_stc_ste_param_table_bits table; + struct mlx5_ifc_stc_ste_param_flow_counter_bits counter; + struct mlx5_ifc_stc_ste_param_header_modify_list_bits modify_header; + struct mlx5_ifc_stc_ste_param_execute_aso_bits aso; + struct mlx5_ifc_stc_ste_param_remove_bits remove_header; + struct mlx5_ifc_stc_ste_param_insert_bits insert_header; + struct mlx5_ifc_set_action_in_bits add; + struct mlx5_ifc_set_action_in_bits set; + struct mlx5_ifc_copy_action_in_bits copy; + struct mlx5_ifc_stc_ste_param_vport_bits vport; + struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits ipsec_encrypt; + struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits ipsec_decrypt; + struct mlx5_ifc_stc_ste_param_trailer_bits trailer; + u8 reserved_at_0[0x80]; +}; + +enum { + MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC = BIT(0), +}; + +struct mlx5_ifc_stc_bits { + u8 modify_field_select[0x40]; + u8 
reserved_at_40[0x46]; + u8 reparse_mode[0x2]; + u8 table_type[0x8]; + u8 ste_action_offset[0x8]; + u8 action_type[0x8]; + u8 reserved_at_a0[0x60]; + union mlx5_ifc_stc_param_bits stc_param; + u8 reserved_at_180[0x280]; +}; + +struct mlx5_ifc_ste_bits { + u8 modify_field_select[0x40]; + u8 reserved_at_40[0x48]; + u8 table_type[0x8]; + u8 reserved_at_90[0x370]; +}; + +struct mlx5_ifc_definer_bits { + u8 modify_field_select[0x40]; + u8 reserved_at_40[0x50]; + u8 format_id[0x10]; + u8 reserved_at_60[0x60]; + u8 format_select_dw3[0x8]; + u8 format_select_dw2[0x8]; + u8 format_select_dw1[0x8]; + u8 format_select_dw0[0x8]; + u8 format_select_dw7[0x8]; + u8 format_select_dw6[0x8]; + u8 format_select_dw5[0x8]; + u8 format_select_dw4[0x8]; + u8 reserved_at_100[0x18]; + u8 format_select_dw8[0x8]; + u8 reserved_at_120[0x20]; + u8 format_select_byte3[0x8]; + u8 format_select_byte2[0x8]; + u8 format_select_byte1[0x8]; + u8 format_select_byte0[0x8]; + u8 format_select_byte7[0x8]; + u8 format_select_byte6[0x8]; + u8 format_select_byte5[0x8]; + u8 format_select_byte4[0x8]; + u8 reserved_at_180[0x40]; + u8 ctrl[0xa0]; + u8 match_mask[0x160]; +}; + +struct mlx5_ifc_arg_bits { + u8 rsvd0[0x88]; + u8 access_pd[0x18]; +}; + +struct mlx5_ifc_header_modify_pattern_in_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x40]; + + u8 pattern_length[0x8]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x60]; + + u8 pattern_data[MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY * 8]; +}; + +struct mlx5_ifc_create_rtc_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_rtc_bits rtc; +}; + +struct mlx5_ifc_create_stc_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_stc_bits stc; +}; + +struct mlx5_ifc_create_ste_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_ste_bits ste; +}; + +struct mlx5_ifc_create_definer_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_definer_bits definer; +}; + +struct mlx5_ifc_create_arg_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_arg_bits arg; +}; + +struct mlx5_ifc_create_header_modify_pattern_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_header_modify_pattern_in_bits pattern; +}; + +struct mlx5_ifc_generate_wqe_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + u8 reserved_at_20[0x10]; + u8 op_mode[0x10]; + u8 reserved_at_40[0x40]; + u8 reserved_at_80[0x8]; + u8 pdn[0x18]; + u8 reserved_at_a0[0x160]; + u8 wqe_ctrl[0x80]; + u8 wqe_gta_ctrl[0x180]; + u8 wqe_gta_data_0[0x200]; + u8 wqe_gta_data_1[0x200]; +}; + +struct mlx5_ifc_generate_wqe_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + u8 syndrome[0x20]; + u8 reserved_at_40[0x1c0]; + u8 cqe_data[0x200]; +}; + +enum mlx5_access_aso_opc_mod { + ASO_OPC_MOD_IPSEC = 0x0, + ASO_OPC_MOD_CONNECTION_TRACKING = 0x1, + ASO_OPC_MOD_POLICER = 0x2, + ASO_OPC_MOD_RACE_AVOIDANCE = 0x3, + ASO_OPC_MOD_FLOW_HIT = 0x4, +}; + +enum { + MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION = BIT(0), + MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID = BIT(1), +}; + +enum { + MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT = 0, + MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1, +}; + +struct mlx5_ifc_alloc_packet_reformat_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 packet_reformat_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_dealloc_packet_reformat_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 
op_mod[0x10]; + + u8 packet_reformat_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_dealloc_packet_reformat_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +#endif /* MLX5_PRM_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c new file mode 100644 index 000000000000..c79ee70edf03 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c @@ -0,0 +1,780 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +static void hws_rule_skip(struct mlx5hws_matcher *matcher, + struct mlx5hws_match_template *mt, + u32 flow_source, + bool *skip_rx, bool *skip_tx) +{ + /* By default FDB rules are added to both RX and TX */ + *skip_rx = false; + *skip_tx = false; + + if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT) { + *skip_rx = true; + } else if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) { + *skip_tx = true; + } else { + /* If no flow source was set for current rule, + * check for flow source in matcher attributes. + */ + if (matcher->attr.optimize_flow_src) { + *skip_tx = + matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE; + *skip_rx = + matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT; + return; + } + } +} + +static void +hws_rule_update_copy_tag(struct mlx5hws_rule *rule, + struct mlx5hws_wqe_gta_data_seg_ste *wqe_data, + bool is_jumbo) +{ + struct mlx5hws_rule_match_tag *tag; + + if (!mlx5hws_matcher_is_resizable(rule->matcher)) { + tag = &rule->tag; + } else { + struct mlx5hws_wqe_gta_data_seg_ste *data_seg = + (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg; + tag = (struct mlx5hws_rule_match_tag *)(void *)data_seg->action; + } + + if (is_jumbo) + memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ); + else + memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ); +} + +static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe, + struct mlx5hws_rule *rule, + struct mlx5hws_match_template *mt, + struct mlx5hws_rule_attr *attr) +{ + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_table *tbl = matcher->tbl; + bool skip_rx, skip_tx; + + dep_wqe->rule = rule; + dep_wqe->user_data = attr->user_data; + dep_wqe->direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ? + attr->rule_idx : 0; + + if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) { + hws_rule_skip(matcher, mt, attr->flow_source, &skip_rx, &skip_tx); + + if (!skip_rx) { + dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id; + dep_wqe->retry_rtc_0 = matcher->col_matcher ? + matcher->col_matcher->match_ste.rtc_0_id : 0; + } else { + dep_wqe->rtc_0 = 0; + dep_wqe->retry_rtc_0 = 0; + } + + if (!skip_tx) { + dep_wqe->rtc_1 = matcher->match_ste.rtc_1_id; + dep_wqe->retry_rtc_1 = matcher->col_matcher ? + matcher->col_matcher->match_ste.rtc_1_id : 0; + } else { + dep_wqe->rtc_1 = 0; + dep_wqe->retry_rtc_1 = 0; + } + } else { + pr_warn("HWS: invalid tbl->type: %d\n", tbl->type); + } +} + +static void hws_rule_move_get_rtc(struct mlx5hws_rule *rule, + struct mlx5hws_send_ste_attr *ste_attr) +{ + struct mlx5hws_matcher *dst_matcher = rule->matcher->resize_dst; + + if (rule->resize_info->rtc_0) { + ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0_id; + ste_attr->retry_rtc_0 = dst_matcher->col_matcher ? 
+ dst_matcher->col_matcher->match_ste.rtc_0_id : 0; + } + if (rule->resize_info->rtc_1) { + ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1_id; + ste_attr->retry_rtc_1 = dst_matcher->col_matcher ? + dst_matcher->col_matcher->match_ste.rtc_1_id : 0; + } +} + +static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue, + struct mlx5hws_rule *rule, + bool err, + void *user_data, + enum mlx5hws_rule_status rule_status_on_succ) +{ + enum mlx5hws_flow_op_status comp_status; + + if (!err) { + comp_status = MLX5HWS_FLOW_OP_SUCCESS; + rule->status = rule_status_on_succ; + } else { + comp_status = MLX5HWS_FLOW_OP_ERROR; + rule->status = MLX5HWS_RULE_STATUS_FAILED; + } + + mlx5hws_send_engine_inc_rule(queue); + mlx5hws_send_engine_gen_comp(queue, user_data, comp_status); +} + +static void +hws_rule_save_resize_info(struct mlx5hws_rule *rule, + struct mlx5hws_send_ste_attr *ste_attr, + bool is_update) +{ + if (!mlx5hws_matcher_is_resizable(rule->matcher)) + return; + + if (likely(!is_update)) { + rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL); + if (unlikely(!rule->resize_info)) { + pr_warn("HWS: resize info isn't allocated for rule\n"); + return; + } + + rule->resize_info->max_stes = + rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes; + rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ? + rule->matcher->action_ste[0].pool : + NULL; + rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ? + rule->matcher->action_ste[1].pool : + NULL; + } + + memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl, + sizeof(rule->resize_info->ctrl_seg)); + memcpy(rule->resize_info->data_seg, ste_attr->wqe_data, + sizeof(rule->resize_info->data_seg)); +} + +void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule) +{ + if (mlx5hws_matcher_is_resizable(rule->matcher) && + rule->resize_info) { + kfree(rule->resize_info); + rule->resize_info = NULL; + } +} + +static void +hws_rule_save_delete_info(struct mlx5hws_rule *rule, + struct mlx5hws_send_ste_attr *ste_attr) +{ + struct mlx5hws_match_template *mt = rule->matcher->mt; + bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt); + + if (mlx5hws_matcher_is_resizable(rule->matcher)) + return; + + if (is_jumbo) + memcpy(&rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5HWS_JUMBO_TAG_SZ); + else + memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5HWS_MATCH_TAG_SZ); +} + +static void +hws_rule_clear_delete_info(struct mlx5hws_rule *rule) +{ + /* nothing to do here */ +} + +static void +hws_rule_load_delete_info(struct mlx5hws_rule *rule, + struct mlx5hws_send_ste_attr *ste_attr) +{ + if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher))) { + ste_attr->wqe_tag = &rule->tag; + } else { + struct mlx5hws_wqe_gta_data_seg_ste *data_seg = + (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg; + struct mlx5hws_rule_match_tag *tag = + (struct mlx5hws_rule_match_tag *)(void *)data_seg->action; + ste_attr->wqe_tag = tag; + } +} + +static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule, + u8 action_ste_selector) +{ + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_matcher_action_ste *action_ste; + struct mlx5hws_pool_chunk ste = {0}; + int ret; + + action_ste = &matcher->action_ste[action_ste_selector]; + ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes)); + ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste); + if (unlikely(ret)) { + mlx5hws_err(matcher->tbl->ctx, + "Failed to allocate STE for rule actions"); + return ret; + 
} + rule->action_ste_idx = ste.offset; + + return 0; +} + +static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule, + u8 action_ste_selector) +{ + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_pool_chunk ste = {0}; + struct mlx5hws_pool *pool; + u8 max_stes; + + if (mlx5hws_matcher_is_resizable(matcher)) { + /* Free the original action pool if rule was resized */ + max_stes = rule->resize_info->max_stes; + pool = rule->resize_info->action_ste_pool[action_ste_selector]; + } else { + max_stes = matcher->action_ste[action_ste_selector].max_stes; + pool = matcher->action_ste[action_ste_selector].pool; + } + + /* This release is safe only when the rule match part was deleted */ + ste.order = ilog2(roundup_pow_of_two(max_stes)); + ste.offset = rule->action_ste_idx; + + mlx5hws_pool_chunk_free(pool, &ste); +} + +static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + int action_ste_idx; + int ret; + + ret = hws_rule_alloc_action_ste_idx(rule, 0); + if (unlikely(ret)) + return ret; + + action_ste_idx = rule->action_ste_idx; + + ret = hws_rule_alloc_action_ste_idx(rule, 1); + if (unlikely(ret)) { + hws_rule_free_action_ste_idx(rule, 0); + return ret; + } + + /* Both pools have to return the same index */ + if (unlikely(rule->action_ste_idx != action_ste_idx)) { + pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n"); + return -EINVAL; + } + + return 0; +} + +void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule) +{ + if (rule->action_ste_idx > -1) { + hws_rule_free_action_ste_idx(rule, 1); + hws_rule_free_action_ste_idx(rule, 0); + } +} + +static void hws_rule_create_init(struct mlx5hws_rule *rule, + struct mlx5hws_send_ste_attr *ste_attr, + struct mlx5hws_actions_apply_data *apply, + bool is_update) +{ + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_context *ctx = tbl->ctx; + + /* Init rule before reuse */ + if (!is_update) { + /* In update we use these rtc's */ + rule->rtc_0 = 0; + rule->rtc_1 = 0; + rule->action_ste_selector = 0; + } else { + rule->action_ste_selector = !rule->action_ste_selector; + } + + rule->pending_wqes = 0; + rule->action_ste_idx = -1; + rule->status = MLX5HWS_RULE_STATUS_CREATING; + + /* Init default send STE attributes */ + ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE; + ste_attr->send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + ste_attr->send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + ste_attr->send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA; + + /* Init default action apply */ + apply->tbl_type = tbl->type; + apply->common_res = &ctx->common_res[tbl->type]; + apply->jump_to_action_stc = matcher->action_ste[0].stc.offset; + apply->require_dep = 0; +} + +static void hws_rule_move_init(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + /* Save the old RTC IDs to be later used in match STE delete */ + rule->resize_info->rtc_0 = rule->rtc_0; + rule->resize_info->rtc_1 = rule->rtc_1; + rule->resize_info->rule_idx = attr->rule_idx; + + rule->rtc_0 = 0; + rule->rtc_1 = 0; + + rule->pending_wqes = 0; + rule->action_ste_idx = -1; + rule->action_ste_selector = 0; + rule->status = MLX5HWS_RULE_STATUS_CREATING; + rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING; +} + +bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule) +{ + return mlx5hws_matcher_is_in_resize(rule->matcher) && + rule->resize_info && + rule->resize_info->state != 
MLX5HWS_RULE_RESIZE_STATE_IDLE; +} + +static int hws_rule_create_hws(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr, + u8 mt_idx, + u32 *match_param, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[]) +{ + struct mlx5hws_action_template *at = &rule->matcher->at[at_idx]; + struct mlx5hws_match_template *mt = &rule->matcher->mt[mt_idx]; + bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt); + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_context *ctx = matcher->tbl->ctx; + struct mlx5hws_send_ste_attr ste_attr = {0}; + struct mlx5hws_send_ring_dep_wqe *dep_wqe; + struct mlx5hws_actions_wqe_setter *setter; + struct mlx5hws_actions_apply_data apply; + struct mlx5hws_send_engine *queue; + u8 total_stes, action_stes; + bool is_update; + int i, ret; + + is_update = !match_param; + + setter = &at->setters[at->num_of_action_stes]; + total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term); + action_stes = total_stes - 1; + + queue = &ctx->send_queue[attr->queue_id]; + if (unlikely(mlx5hws_send_engine_err(queue))) + return -EIO; + + hws_rule_create_init(rule, &ste_attr, &apply, is_update); + + /* Allocate dependent match WQE since rule might have dependent writes. + * The queued dependent WQE can be later aborted or kept as a dependency. + * dep_wqe buffers (ctrl, data) are also reused for all STE writes. + */ + dep_wqe = mlx5hws_send_add_new_dep_wqe(queue); + hws_rule_init_dep_wqe(dep_wqe, rule, mt, attr); + + ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl; + ste_attr.wqe_data = &dep_wqe->wqe_data; + apply.wqe_ctrl = &dep_wqe->wqe_ctrl; + apply.wqe_data = (__force __be32 *)&dep_wqe->wqe_data; + apply.rule_action = rule_actions; + apply.queue = queue; + + if (action_stes) { + /* Allocate action STEs for rules that need more than match STE */ + if (!is_update) { + ret = hws_rule_alloc_action_ste(rule, attr); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate action memory %d", ret); + mlx5hws_send_abort_new_dep_wqe(queue); + return ret; + } + } + /* Skip RX/TX based on the dep_wqe init */ + ste_attr.rtc_0 = dep_wqe->rtc_0 ? + matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0; + ste_attr.rtc_1 = dep_wqe->rtc_1 ? + matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0; + /* Action STEs are written to a specific index last to first */ + ste_attr.direct_index = rule->action_ste_idx + action_stes; + apply.next_direct_idx = ste_attr.direct_index; + } else { + apply.next_direct_idx = 0; + } + + for (i = total_stes; i-- > 0;) { + mlx5hws_action_apply_setter(&apply, setter--, !i && is_jumbo); + + if (i == 0) { + /* Handle last match STE. + * For hash split / linear lookup RTCs, packets reaching any STE + * will always match and perform the specified actions, which + * makes the tag irrelevant. 
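+			 * Concretely, in the branches below: a fresh rule on a
+			 * hash-based matcher builds the tag from match_param via
+			 * mlx5hws_definer_create_tag(), an update copies the
+			 * previously saved tag, and insert-by-index rules skip
+			 * tag generation (it is irrelevant, as noted above).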
+			 */
+			if (likely(!mlx5hws_matcher_is_insert_by_idx(matcher) && !is_update))
+				mlx5hws_definer_create_tag(match_param, mt->fc, mt->fc_sz,
+							   (u8 *)dep_wqe->wqe_data.action);
+			else if (is_update)
+				hws_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);
+
+			/* Rule has dependent WQEs, match dep_wqe is queued */
+			if (action_stes || apply.require_dep)
+				break;
+
+			/* Rule has no dependencies, abort dep_wqe and send WQE now */
+			mlx5hws_send_abort_new_dep_wqe(queue);
+			ste_attr.wqe_tag_is_jumbo = is_jumbo;
+			ste_attr.send_attr.notify_hw = !attr->burst;
+			ste_attr.send_attr.user_data = dep_wqe->user_data;
+			ste_attr.send_attr.rule = dep_wqe->rule;
+			ste_attr.rtc_0 = dep_wqe->rtc_0;
+			ste_attr.rtc_1 = dep_wqe->rtc_1;
+			ste_attr.used_id_rtc_0 = &rule->rtc_0;
+			ste_attr.used_id_rtc_1 = &rule->rtc_1;
+			ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+			ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+			ste_attr.direct_index = dep_wqe->direct_index;
+		} else {
+			apply.next_direct_idx = --ste_attr.direct_index;
+		}
+
+		mlx5hws_send_ste(queue, &ste_attr);
+	}
+
+	/* Backup TAG on the rule for deletion and resize info for
+	 * moving rules to a new matcher, only after insertion.
+	 */
+	if (!is_update)
+		hws_rule_save_delete_info(rule, &ste_attr);
+
+	hws_rule_save_resize_info(rule, &ste_attr, is_update);
+	mlx5hws_send_engine_inc_rule(queue);
+
+	if (!attr->burst)
+		mlx5hws_send_all_dep_wqe(queue);
+
+	return 0;
+}
+
+static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
+					struct mlx5hws_rule_attr *attr)
+{
+	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+	struct mlx5hws_send_engine *queue;
+
+	queue = &ctx->send_queue[attr->queue_id];
+
+	hws_rule_gen_comp(queue, rule, false,
+			  attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
+
+	/* Rule failed, now we can safely release action STEs */
+	mlx5hws_rule_free_action_ste(rule);
+
+	/* Clear complex tag */
+	hws_rule_clear_delete_info(rule);
+
+	/* Clear info that was saved for resizing */
+	mlx5hws_rule_clear_resize_info(rule);
+
+	/* If a rule that was indicated as burst (need to trigger HW) has
+	 * failed insertion, we won't ring the HW, as nothing was written to
+	 * the WQ. In such a case, update the last WQE and ring the HW with
+	 * that work.
+	 */
+	if (attr->burst)
+		return;
+
+	mlx5hws_send_all_dep_wqe(queue);
+	mlx5hws_send_engine_flush_queue(queue);
+}
+
+static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
+				struct mlx5hws_rule_attr *attr)
+{
+	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+	struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+	struct mlx5hws_matcher *matcher = rule->matcher;
+	struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
+	struct mlx5hws_send_ste_attr ste_attr = {0};
+	struct mlx5hws_send_engine *queue;
+
+	queue = &ctx->send_queue[attr->queue_id];
+
+	if (unlikely(mlx5hws_send_engine_err(queue))) {
+		hws_rule_destroy_failed_hws(rule, attr);
+		return 0;
+	}
+
+	/* Rule is not completed yet */
+	if (rule->status == MLX5HWS_RULE_STATUS_CREATING)
+		return -EBUSY;
+
+	/* Rule failed and doesn't require cleanup */
+	if (rule->status == MLX5HWS_RULE_STATUS_FAILED) {
+		hws_rule_destroy_failed_hws(rule, attr);
+		return 0;
+	}
+
+	if (rule->skip_delete) {
+		/* Rule shouldn't be deleted in HW.
+		 * Generate completion as if write succeeded, and we can
+		 * safely release action STEs and clear resize info.
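+		 * No delete WQE is posted on this path; the completion below
+		 * is generated directly, like in the queue-error path handled
+		 * by hws_rule_destroy_failed_hws().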
+ */ + hws_rule_gen_comp(queue, rule, false, + attr->user_data, MLX5HWS_RULE_STATUS_DELETED); + + mlx5hws_rule_free_action_ste(rule); + mlx5hws_rule_clear_resize_info(rule); + return 0; + } + + mlx5hws_send_engine_inc_rule(queue); + + /* Send dependent WQE */ + if (!attr->burst) + mlx5hws_send_all_dep_wqe(queue); + + rule->status = MLX5HWS_RULE_STATUS_DELETING; + + ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA; + + ste_attr.send_attr.rule = rule; + ste_attr.send_attr.notify_hw = !attr->burst; + ste_attr.send_attr.user_data = attr->user_data; + + ste_attr.rtc_0 = rule->rtc_0; + ste_attr.rtc_1 = rule->rtc_1; + ste_attr.used_id_rtc_0 = &rule->rtc_0; + ste_attr.used_id_rtc_1 = &rule->rtc_1; + ste_attr.wqe_ctrl = &wqe_ctrl; + ste_attr.wqe_tag_is_jumbo = is_jumbo; + ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE; + if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher))) + ste_attr.direct_index = attr->rule_idx; + + hws_rule_load_delete_info(rule, &ste_attr); + mlx5hws_send_ste(queue, &ste_attr); + hws_rule_clear_delete_info(rule); + + return 0; +} + +static int hws_rule_enqueue_precheck(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + struct mlx5hws_context *ctx = rule->matcher->tbl->ctx; + + if (unlikely(!attr->user_data)) + return -EINVAL; + + /* Check if there is room in queue */ + if (unlikely(mlx5hws_send_engine_full(&ctx->send_queue[attr->queue_id]))) + return -EBUSY; + + return 0; +} + +static int hws_rule_enqueue_precheck_move(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED)) + return -EINVAL; + + return hws_rule_enqueue_precheck(rule, attr); +} + +static int hws_rule_enqueue_precheck_create(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + if (unlikely(mlx5hws_matcher_is_in_resize(rule->matcher))) + /* Matcher in resize - new rules are not allowed */ + return -EAGAIN; + + return hws_rule_enqueue_precheck(rule, attr); +} + +static int hws_rule_enqueue_precheck_update(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + struct mlx5hws_matcher *matcher = rule->matcher; + + if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher) && + !matcher->attr.optimize_using_rule_idx && + !mlx5hws_matcher_is_insert_by_idx(matcher))) { + return -EOPNOTSUPP; + } + + if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED)) + return -EBUSY; + + return hws_rule_enqueue_precheck_create(rule, attr); +} + +int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule, + void *queue_ptr, + void *user_data) +{ + bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt); + struct mlx5hws_wqe_gta_ctrl_seg empty_wqe_ctrl = {0}; + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_send_engine *queue = queue_ptr; + struct mlx5hws_send_ste_attr ste_attr = {0}; + + mlx5hws_send_all_dep_wqe(queue); + + rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_DELETING; + + ste_attr.send_attr.fence = 0; + ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA; + ste_attr.send_attr.rule = rule; + ste_attr.send_attr.notify_hw = 1; + ste_attr.send_attr.user_data = user_data; + ste_attr.rtc_0 = rule->resize_info->rtc_0; + ste_attr.rtc_1 = rule->resize_info->rtc_1; + ste_attr.used_id_rtc_0 = 
&rule->resize_info->rtc_0; + ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1; + ste_attr.wqe_ctrl = &empty_wqe_ctrl; + ste_attr.wqe_tag_is_jumbo = is_jumbo; + ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE; + + if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher))) + ste_attr.direct_index = rule->resize_info->rule_idx; + + hws_rule_load_delete_info(rule, &ste_attr); + mlx5hws_send_ste(queue, &ste_attr); + + return 0; +} + +int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt); + struct mlx5hws_context *ctx = rule->matcher->tbl->ctx; + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_send_ste_attr ste_attr = {0}; + struct mlx5hws_send_engine *queue; + int ret; + + ret = hws_rule_enqueue_precheck_move(rule, attr); + if (unlikely(ret)) + return ret; + + queue = &ctx->send_queue[attr->queue_id]; + + ret = mlx5hws_send_engine_err(queue); + if (ret) + return ret; + + hws_rule_move_init(rule, attr); + hws_rule_move_get_rtc(rule, &ste_attr); + + ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA; + ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE; + ste_attr.wqe_tag_is_jumbo = is_jumbo; + + ste_attr.send_attr.rule = rule; + ste_attr.send_attr.fence = 0; + ste_attr.send_attr.notify_hw = !attr->burst; + ste_attr.send_attr.user_data = attr->user_data; + + ste_attr.used_id_rtc_0 = &rule->rtc_0; + ste_attr.used_id_rtc_1 = &rule->rtc_1; + ste_attr.wqe_ctrl = (struct mlx5hws_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg; + ste_attr.wqe_data = (struct mlx5hws_wqe_gta_data_seg_ste *)rule->resize_info->data_seg; + ste_attr.direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ? 
+				attr->rule_idx : 0;
+
+	mlx5hws_send_ste(queue, &ste_attr);
+	mlx5hws_send_engine_inc_rule(queue);
+
+	if (!attr->burst)
+		mlx5hws_send_all_dep_wqe(queue);
+
+	return 0;
+}
+
+int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
+			u8 mt_idx,
+			u32 *match_param,
+			u8 at_idx,
+			struct mlx5hws_rule_action rule_actions[],
+			struct mlx5hws_rule_attr *attr,
+			struct mlx5hws_rule *rule_handle)
+{
+	int ret;
+
+	rule_handle->matcher = matcher;
+
+	ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
+	if (unlikely(ret))
+		return ret;
+
+	if (unlikely(mt_idx >= matcher->num_of_mt ||
+		     at_idx >= matcher->num_of_at ||
+		     !match_param)) {
+		pr_warn("HWS: Invalid rule creation parameters (MTs, ATs or match params)\n");
+		return -EINVAL;
+	}
+
+	return hws_rule_create_hws(rule_handle,
+				   attr,
+				   mt_idx,
+				   match_param,
+				   at_idx,
+				   rule_actions);
+}
+
+int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
+			 struct mlx5hws_rule_attr *attr)
+{
+	int ret;
+
+	ret = hws_rule_enqueue_precheck(rule, attr);
+	if (unlikely(ret))
+		return ret;
+
+	return hws_rule_destroy_hws(rule, attr);
+}
+
+int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
+			       u8 at_idx,
+			       struct mlx5hws_rule_action rule_actions[],
+			       struct mlx5hws_rule_attr *attr)
+{
+	int ret;
+
+	ret = hws_rule_enqueue_precheck_update(rule, attr);
+	if (unlikely(ret))
+		return ret;
+
+	return hws_rule_create_hws(rule,
+				   attr,
+				   0,
+				   NULL,
+				   at_idx,
+				   rule_actions);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h
new file mode 100644
index 000000000000..495cdd17e9f3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_RULE_H_
+#define MLX5HWS_RULE_H_
+
+enum {
+	MLX5HWS_STE_CTRL_SZ = 20,
+	MLX5HWS_ACTIONS_SZ = 12,
+	MLX5HWS_MATCH_TAG_SZ = 32,
+	MLX5HWS_JUMBO_TAG_SZ = 44,
+};
+
+enum mlx5hws_rule_status {
+	MLX5HWS_RULE_STATUS_UNKNOWN,
+	MLX5HWS_RULE_STATUS_CREATING,
+	MLX5HWS_RULE_STATUS_CREATED,
+	MLX5HWS_RULE_STATUS_DELETING,
+	MLX5HWS_RULE_STATUS_DELETED,
+	MLX5HWS_RULE_STATUS_FAILING,
+	MLX5HWS_RULE_STATUS_FAILED,
+};
+
+enum mlx5hws_rule_move_state {
+	MLX5HWS_RULE_RESIZE_STATE_IDLE,
+	MLX5HWS_RULE_RESIZE_STATE_WRITING,
+	MLX5HWS_RULE_RESIZE_STATE_DELETING,
+};
+
+enum mlx5hws_rule_jumbo_match_tag_offset {
+	MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0 = 8,
+};
+
+struct mlx5hws_rule_match_tag {
+	union {
+		u8 jumbo[MLX5HWS_JUMBO_TAG_SZ];
+		struct {
+			u8 reserved[MLX5HWS_ACTIONS_SZ];
+			u8 match[MLX5HWS_MATCH_TAG_SZ];
+		};
+	};
+};
+
+struct mlx5hws_rule_resize_info {
+	struct mlx5hws_pool *action_ste_pool[2];
+	u32 rtc_0;
+	u32 rtc_1;
+	u32 rule_idx;
+	u8 state;
+	u8 max_stes;
+	u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */
+	u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */
+};
+
+struct mlx5hws_rule {
+	struct mlx5hws_matcher *matcher;
+	union {
+		struct mlx5hws_rule_match_tag tag;
+		struct mlx5hws_rule_resize_info *resize_info;
+	};
+	u32 rtc_0; /* The RTC into which the STE was inserted */
+	u32 rtc_1; /* The RTC into which the STE was inserted */
+	int action_ste_idx; /* STE array index */
+	u8 status; /* enum mlx5hws_rule_status */
+	u8 action_ste_selector; /* For rule update - which action STE is in use */
+	u8 pending_wqes;
+	bool skip_delete; /*
For complex rules - another rule with same tag + * still exists, so don't actually delete this rule. + */ +}; + +void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule); + +int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule, + void *queue, void *user_data); + +int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr); + +bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule); + +void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule); + +#endif /* MLX5HWS_RULE_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c new file mode 100644 index 000000000000..fb97a15c041a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c @@ -0,0 +1,1209 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" +#include "lib/clock.h" + +enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 }; + +struct mlx5hws_send_ring_dep_wqe * +mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue) +{ + struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq; + unsigned int idx = send_sq->head_dep_idx++ & (queue->num_entries - 1); + + memset(&send_sq->dep_wqe[idx].wqe_data.tag, 0, MLX5HWS_MATCH_TAG_SZ); + + return &send_sq->dep_wqe[idx]; +} + +void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue) +{ + queue->send_ring.send_sq.head_dep_idx--; +} + +void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue) +{ + struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq; + struct mlx5hws_send_ste_attr ste_attr = {0}; + struct mlx5hws_send_ring_dep_wqe *dep_wqe; + + ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA; + ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE; + + /* Fence first from previous depend WQEs */ + ste_attr.send_attr.fence = 1; + + while (send_sq->head_dep_idx != send_sq->tail_dep_idx) { + dep_wqe = &send_sq->dep_wqe[send_sq->tail_dep_idx++ & (queue->num_entries - 1)]; + + /* Notify HW on the last WQE */ + ste_attr.send_attr.notify_hw = (send_sq->tail_dep_idx == send_sq->head_dep_idx); + ste_attr.send_attr.user_data = dep_wqe->user_data; + ste_attr.send_attr.rule = dep_wqe->rule; + + ste_attr.rtc_0 = dep_wqe->rtc_0; + ste_attr.rtc_1 = dep_wqe->rtc_1; + ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0; + ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1; + ste_attr.used_id_rtc_0 = &dep_wqe->rule->rtc_0; + ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1; + ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl; + ste_attr.wqe_data = &dep_wqe->wqe_data; + ste_attr.direct_index = dep_wqe->direct_index; + + mlx5hws_send_ste(queue, &ste_attr); + + /* Fencing is done only on the first WQE */ + ste_attr.send_attr.fence = 0; + } +} + +struct mlx5hws_send_engine_post_ctrl +mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue) +{ + struct mlx5hws_send_engine_post_ctrl ctrl; + + ctrl.queue = queue; + /* Currently only one send ring is supported */ + ctrl.send_ring = &queue->send_ring; + ctrl.num_wqebbs = 0; + + return ctrl; +} + +void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl, + char **buf, size_t *len) +{ + struct mlx5hws_send_ring_sq *send_sq = &ctrl->send_ring->send_sq; + unsigned int idx; + + idx = (send_sq->cur_post + 
ctrl->num_wqebbs) & send_sq->buf_mask; + + /* Note that *buf is a single MLX5_SEND_WQE_BB. It cannot be used + * as buffer of more than one WQE_BB, since the two MLX5_SEND_WQE_BB + * can be on 2 different kernel memory pages. + */ + *buf = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx); + *len = MLX5_SEND_WQE_BB; + + if (!ctrl->num_wqebbs) { + *buf += sizeof(struct mlx5hws_wqe_ctrl_seg); + *len -= sizeof(struct mlx5hws_wqe_ctrl_seg); + } + + ctrl->num_wqebbs++; +} + +static void hws_send_engine_post_ring(struct mlx5hws_send_ring_sq *sq, + struct mlx5hws_wqe_ctrl_seg *doorbell_cseg) +{ + /* ensure wqe is visible to device before updating doorbell record */ + dma_wmb(); + + *sq->wq.db = cpu_to_be32(sq->cur_post); + + /* ensure doorbell record is visible to device before ringing the + * doorbell + */ + wmb(); + + mlx5_write64((__be32 *)doorbell_cseg, sq->uar_map); + + /* Ensure doorbell is written on uar_page before poll_cq */ + WRITE_ONCE(doorbell_cseg, NULL); +} + +static void +hws_send_wqe_set_tag(struct mlx5hws_wqe_gta_data_seg_ste *wqe_data, + struct mlx5hws_rule_match_tag *tag, + bool is_jumbo) +{ + if (is_jumbo) { + /* Clear previous possibly dirty control */ + memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ); + memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ); + } else { + /* Clear previous possibly dirty control and actions */ + memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ + MLX5HWS_ACTIONS_SZ); + memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ); + } +} + +void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl, + struct mlx5hws_send_engine_post_attr *attr) +{ + struct mlx5hws_wqe_ctrl_seg *wqe_ctrl; + struct mlx5hws_send_ring_sq *sq; + unsigned int idx; + u32 flags = 0; + + sq = &ctrl->send_ring->send_sq; + idx = sq->cur_post & sq->buf_mask; + sq->last_idx = idx; + + wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, idx); + + wqe_ctrl->opmod_idx_opcode = + cpu_to_be32((attr->opmod << 24) | + ((sq->cur_post & 0xffff) << 8) | + attr->opcode); + wqe_ctrl->qpn_ds = + cpu_to_be32((attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16 | + sq->sqn << 8); + wqe_ctrl->imm = cpu_to_be32(attr->id); + + flags |= attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0; + flags |= attr->fence ? 
MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE : 0; + wqe_ctrl->flags = cpu_to_be32(flags); + + sq->wr_priv[idx].id = attr->id; + sq->wr_priv[idx].retry_id = attr->retry_id; + + sq->wr_priv[idx].rule = attr->rule; + sq->wr_priv[idx].user_data = attr->user_data; + sq->wr_priv[idx].num_wqebbs = ctrl->num_wqebbs; + + if (attr->rule) { + sq->wr_priv[idx].rule->pending_wqes++; + sq->wr_priv[idx].used_id = attr->used_id; + } + + sq->cur_post += ctrl->num_wqebbs; + + if (attr->notify_hw) + hws_send_engine_post_ring(sq, wqe_ctrl); +} + +static void hws_send_wqe(struct mlx5hws_send_engine *queue, + struct mlx5hws_send_engine_post_attr *send_attr, + struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl, + void *send_wqe_data, + void *send_wqe_tag, + bool is_jumbo, + u8 gta_opcode, + u32 direct_index) +{ + struct mlx5hws_wqe_gta_data_seg_ste *wqe_data; + struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl; + struct mlx5hws_send_engine_post_ctrl ctrl; + size_t wqe_len; + + ctrl = mlx5hws_send_engine_post_start(queue); + mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len); + mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len); + + wqe_ctrl->op_dirix = cpu_to_be32(gta_opcode << 28 | direct_index); + memcpy(wqe_ctrl->stc_ix, send_wqe_ctrl->stc_ix, + sizeof(send_wqe_ctrl->stc_ix)); + + if (send_wqe_data) + memcpy(wqe_data, send_wqe_data, sizeof(*wqe_data)); + else + hws_send_wqe_set_tag(wqe_data, send_wqe_tag, is_jumbo); + + mlx5hws_send_engine_post_end(&ctrl, send_attr); +} + +void mlx5hws_send_ste(struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ste_attr *ste_attr) +{ + struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr; + u8 notify_hw = send_attr->notify_hw; + u8 fence = send_attr->fence; + + if (ste_attr->rtc_1) { + send_attr->id = ste_attr->rtc_1; + send_attr->used_id = ste_attr->used_id_rtc_1; + send_attr->retry_id = ste_attr->retry_rtc_1; + send_attr->fence = fence; + send_attr->notify_hw = notify_hw && !ste_attr->rtc_0; + hws_send_wqe(queue, send_attr, + ste_attr->wqe_ctrl, + ste_attr->wqe_data, + ste_attr->wqe_tag, + ste_attr->wqe_tag_is_jumbo, + ste_attr->gta_opcode, + ste_attr->direct_index); + } + + if (ste_attr->rtc_0) { + send_attr->id = ste_attr->rtc_0; + send_attr->used_id = ste_attr->used_id_rtc_0; + send_attr->retry_id = ste_attr->retry_rtc_0; + send_attr->fence = fence && !ste_attr->rtc_1; + send_attr->notify_hw = notify_hw; + hws_send_wqe(queue, send_attr, + ste_attr->wqe_ctrl, + ste_attr->wqe_data, + ste_attr->wqe_tag, + ste_attr->wqe_tag_is_jumbo, + ste_attr->gta_opcode, + ste_attr->direct_index); + } + + /* Restore to original requested values */ + send_attr->notify_hw = notify_hw; + send_attr->fence = fence; +} + +static void hws_send_engine_retry_post_send(struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ring_priv *priv, + u16 wqe_cnt) +{ + struct mlx5hws_send_engine_post_attr send_attr = {0}; + struct mlx5hws_wqe_gta_data_seg_ste *wqe_data; + struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl; + struct mlx5hws_send_engine_post_ctrl ctrl; + struct mlx5hws_send_ring_sq *send_sq; + unsigned int idx; + size_t wqe_len; + char *p; + + send_attr.rule = priv->rule; + send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + send_attr.len = MLX5_SEND_WQE_BB * 2 - sizeof(struct mlx5hws_wqe_ctrl_seg); + send_attr.notify_hw = 1; + send_attr.fence = 0; + send_attr.user_data = priv->user_data; + send_attr.id = priv->retry_id; + send_attr.used_id = priv->used_id; + + ctrl = mlx5hws_send_engine_post_start(queue); + 
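+	/* Re-post the failed STE write using the retry id (send_attr.id was
+	 * set to priv->retry_id above). Two fresh WQEBBs are reserved below,
+	 * then the GTA ctrl and data segments of the failed WQE are copied
+	 * out of the SQ ring, located via wqe_cnt. For example (illustrative
+	 * values only), with buf_mask = 0xff and wqe_cnt = 0x100, the old
+	 * ctrl WQEBB sits at index 0x100 & 0xff = 0 and the old data WQEBB
+	 * at index 1.
+	 */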
+	mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+	mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
+
+	send_sq = &ctrl.send_ring->send_sq;
+	idx = wqe_cnt & send_sq->buf_mask;
+	p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+	/* Copy old gta ctrl */
+	memcpy(wqe_ctrl, p + sizeof(struct mlx5hws_wqe_ctrl_seg),
+	       MLX5_SEND_WQE_BB - sizeof(struct mlx5hws_wqe_ctrl_seg));
+
+	idx = (wqe_cnt + 1) & send_sq->buf_mask;
+	p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+	/* Copy old gta data */
+	memcpy(wqe_data, p, MLX5_SEND_WQE_BB);
+
+	mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue)
+{
+	struct mlx5hws_send_ring_sq *sq = &queue->send_ring.send_sq;
+	struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
+
+	wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, sq->last_idx);
+	wqe_ctrl->flags |= cpu_to_be32(MLX5_WQE_CTRL_CQ_UPDATE);
+
+	hws_send_engine_post_ring(sq, wqe_ctrl);
+}
+
+static void
+hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
+				   struct mlx5hws_send_ring_priv *priv,
+				   enum mlx5hws_flow_op_status *status)
+{
+	switch (priv->rule->resize_info->state) {
+	case MLX5HWS_RULE_RESIZE_STATE_WRITING:
+		if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+			/* Backup original RTCs */
+			u32 orig_rtc_0 = priv->rule->resize_info->rtc_0;
+			u32 orig_rtc_1 = priv->rule->resize_info->rtc_1;
+
+			/* Delete partially failed move rule using resize_info */
+			priv->rule->resize_info->rtc_0 = priv->rule->rtc_0;
+			priv->rule->resize_info->rtc_1 = priv->rule->rtc_1;
+
+			/* Move rule to original RTC for future delete */
+			priv->rule->rtc_0 = orig_rtc_0;
+			priv->rule->rtc_1 = orig_rtc_1;
+		}
+		/* Clean leftovers */
+		mlx5hws_rule_move_hws_remove(priv->rule, queue, priv->user_data);
+		break;
+
+	case MLX5HWS_RULE_RESIZE_STATE_DELETING:
+		if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+			*status = MLX5HWS_FLOW_OP_ERROR;
+		} else {
+			*status = MLX5HWS_FLOW_OP_SUCCESS;
+			priv->rule->matcher = priv->rule->matcher->resize_dst;
+		}
+		priv->rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_IDLE;
+		priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
+					struct mlx5hws_send_ring_priv *priv,
+					u16 wqe_cnt,
+					enum mlx5hws_flow_op_status *status)
+{
+	priv->rule->pending_wqes--;
+
+	if (*status == MLX5HWS_FLOW_OP_ERROR) {
+		if (priv->retry_id) {
+			hws_send_engine_retry_post_send(queue, priv, wqe_cnt);
+			return;
+		}
+		/* Some part of the rule failed */
+		priv->rule->status = MLX5HWS_RULE_STATUS_FAILING;
+		*priv->used_id = 0;
+	} else {
+		*priv->used_id = priv->id;
+	}
+
+	/* Update rule status for the last completion */
+	if (!priv->rule->pending_wqes) {
+		if (unlikely(mlx5hws_rule_move_in_progress(priv->rule))) {
+			hws_send_engine_update_rule_resize(queue, priv, status);
+			return;
+		}
+
+		if (unlikely(priv->rule->status == MLX5HWS_RULE_STATUS_FAILING)) {
+			/* Rule completely failed and doesn't require cleanup */
+			if (!priv->rule->rtc_0 && !priv->rule->rtc_1)
+				priv->rule->status = MLX5HWS_RULE_STATUS_FAILED;
+
+			*status = MLX5HWS_FLOW_OP_ERROR;
+		} else {
+			/* Increase the status. This only works on a good flow,
+			 * as the enum is arranged this way:
+			 * creating -> created -> deleting -> deleted
+			 */
+			priv->rule->status++;
+			*status = MLX5HWS_FLOW_OP_SUCCESS;
+			/* Rule was deleted, now we can safely release action
+			 * STEs and clear resize info
+			 */
+			if (priv->rule->status ==
MLX5HWS_RULE_STATUS_DELETED) { + mlx5hws_rule_free_action_ste(priv->rule); + mlx5hws_rule_clear_resize_info(priv->rule); + } + } + } +} + +static void hws_send_engine_update(struct mlx5hws_send_engine *queue, + struct mlx5_cqe64 *cqe, + struct mlx5hws_send_ring_priv *priv, + struct mlx5hws_flow_op_result res[], + s64 *i, + u32 res_nb, + u16 wqe_cnt) +{ + enum mlx5hws_flow_op_status status; + + if (!cqe || (likely(be32_to_cpu(cqe->byte_cnt) >> 31 == 0) && + likely(get_cqe_opcode(cqe) == MLX5_CQE_REQ))) { + status = MLX5HWS_FLOW_OP_SUCCESS; + } else { + status = MLX5HWS_FLOW_OP_ERROR; + } + + if (priv->user_data) { + if (priv->rule) { + hws_send_engine_update_rule(queue, priv, wqe_cnt, &status); + /* Completion is provided on the last rule WQE */ + if (priv->rule->pending_wqes) + return; + } + + if (*i < res_nb) { + res[*i].user_data = priv->user_data; + res[*i].status = status; + (*i)++; + mlx5hws_send_engine_dec_rule(queue); + } else { + mlx5hws_send_engine_gen_comp(queue, priv->user_data, status); + } + } +} + +static int mlx5hws_parse_cqe(struct mlx5hws_send_ring_cq *cq, + struct mlx5_cqe64 *cqe64) +{ + if (unlikely(get_cqe_opcode(cqe64) != MLX5_CQE_REQ)) { + struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe64; + + mlx5_core_err(cq->mdev, "Bad OP in HWS SQ CQE: 0x%x\n", get_cqe_opcode(cqe64)); + mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n", err_cqe->vendor_err_synd); + mlx5_core_err(cq->mdev, "syndrome=%x\n", err_cqe->syndrome); + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, + 16, 1, err_cqe, + sizeof(*err_cqe), false); + return CQ_POLL_ERR; + } + + return CQ_OK; +} + +static int mlx5hws_cq_poll_one(struct mlx5hws_send_ring_cq *cq) +{ + struct mlx5_cqe64 *cqe64; + int err; + + cqe64 = mlx5_cqwq_get_cqe(&cq->wq); + if (!cqe64) { + if (unlikely(cq->mdev->state == + MLX5_DEVICE_STATE_INTERNAL_ERROR)) { + mlx5_core_dbg_once(cq->mdev, + "Polling CQ while device is shutting down\n"); + return CQ_POLL_ERR; + } + return CQ_EMPTY; + } + + mlx5_cqwq_pop(&cq->wq); + err = mlx5hws_parse_cqe(cq, cqe64); + mlx5_cqwq_update_db_record(&cq->wq); + + return err; +} + +static void hws_send_engine_poll_cq(struct mlx5hws_send_engine *queue, + struct mlx5hws_flow_op_result res[], + s64 *polled, + u32 res_nb) +{ + struct mlx5hws_send_ring *send_ring = &queue->send_ring; + struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq; + struct mlx5hws_send_ring_sq *sq = &send_ring->send_sq; + struct mlx5hws_send_ring_priv *priv; + struct mlx5_cqe64 *cqe; + u8 cqe_opcode; + u16 wqe_cnt; + + cqe = mlx5_cqwq_get_cqe(&cq->wq); + if (!cqe) + return; + + cqe_opcode = get_cqe_opcode(cqe); + if (cqe_opcode == MLX5_CQE_INVALID) + return; + + if (unlikely(cqe_opcode != MLX5_CQE_REQ)) + queue->err = true; + + wqe_cnt = be16_to_cpu(cqe->wqe_counter) & sq->buf_mask; + + while (cq->poll_wqe != wqe_cnt) { + priv = &sq->wr_priv[cq->poll_wqe]; + hws_send_engine_update(queue, NULL, priv, res, polled, res_nb, 0); + cq->poll_wqe = (cq->poll_wqe + priv->num_wqebbs) & sq->buf_mask; + } + + priv = &sq->wr_priv[wqe_cnt]; + cq->poll_wqe = (wqe_cnt + priv->num_wqebbs) & sq->buf_mask; + hws_send_engine_update(queue, cqe, priv, res, polled, res_nb, wqe_cnt); + mlx5hws_cq_poll_one(cq); +} + +static void hws_send_engine_poll_list(struct mlx5hws_send_engine *queue, + struct mlx5hws_flow_op_result res[], + s64 *polled, + u32 res_nb) +{ + struct mlx5hws_completed_poll *comp = &queue->completed; + + while (comp->ci != comp->pi) { + if (*polled < res_nb) { + res[*polled].status = + comp->entries[comp->ci].status; + 
+			res[*polled].user_data =
+				comp->entries[comp->ci].user_data;
+			(*polled)++;
+			comp->ci = (comp->ci + 1) & comp->mask;
+			mlx5hws_send_engine_dec_rule(queue);
+		} else {
+			return;
+		}
+	}
+}
+
+static int hws_send_engine_poll(struct mlx5hws_send_engine *queue,
+				struct mlx5hws_flow_op_result res[],
+				u32 res_nb)
+{
+	s64 polled = 0;
+
+	hws_send_engine_poll_list(queue, res, &polled, res_nb);
+
+	if (polled >= res_nb)
+		return polled;
+
+	hws_send_engine_poll_cq(queue, res, &polled, res_nb);
+
+	return polled;
+}
+
+int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
+			    u16 queue_id,
+			    struct mlx5hws_flow_op_result res[],
+			    u32 res_nb)
+{
+	return hws_send_engine_poll(&ctx->send_queue[queue_id], res, res_nb);
+}
+
+static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev,
+				  int numa_node,
+				  struct mlx5hws_send_engine *queue,
+				  struct mlx5hws_send_ring_sq *sq,
+				  void *sqc_data)
+{
+	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+	struct mlx5_wq_cyc *wq = &sq->wq;
+	struct mlx5_wq_param param;
+	size_t buf_sz;
+	int err;
+
+	sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+	sq->mdev = mdev;
+
+	param.db_numa_node = numa_node;
+	param.buf_numa_node = numa_node;
+	err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+	if (err)
+		return err;
+	wq->db = &wq->db[MLX5_SND_DBR];
+
+	buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
+	sq->dep_wqe = kcalloc(queue->num_entries, sizeof(*sq->dep_wqe), GFP_KERNEL);
+	if (!sq->dep_wqe) {
+		err = -ENOMEM;
+		goto destroy_wq_cyc;
+	}
+
+	sq->wr_priv = kzalloc(sizeof(*sq->wr_priv) * buf_sz, GFP_KERNEL);
+	if (!sq->wr_priv) {
+		err = -ENOMEM;
+		goto free_dep_wqe;
+	}
+
+	sq->buf_mask = (queue->num_entries * MAX_WQES_PER_RULE) - 1;
+
+	return 0;
+
+free_dep_wqe:
+	kfree(sq->dep_wqe);
+destroy_wq_cyc:
+	mlx5_wq_destroy(&sq->wq_ctrl);
+	return err;
+}
+
+static void hws_send_ring_free_sq(struct mlx5hws_send_ring_sq *sq)
+{
+	if (!sq)
+		return;
+	kfree(sq->wr_priv);
+	kfree(sq->dep_wqe);
+	mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
+				   void *sqc_data,
+				   struct mlx5hws_send_engine *queue,
+				   struct mlx5hws_send_ring_sq *sq,
+				   struct mlx5hws_send_ring_cq *cq)
+{
+	void *in, *sqc, *wq;
+	int inlen, err;
+	u8 ts_format;
+
+	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+		sizeof(u64) * sq->wq_ctrl.buf.npages;
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+	wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+	MLX5_SET(sqc, sqc, cqn, cq->mcq.cqn);
+
+	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+	ts_format = mlx5_is_real_time_sq(mdev) ?
MLX5_TIMESTAMP_FORMAT_REAL_TIME : + MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; + MLX5_SET(sqc, sqc, ts_format, ts_format); + + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); + MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index); + MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); + + mlx5_fill_page_frag_array(&sq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + + err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn); + + kvfree(in); + + return err; +} + +static int hws_send_ring_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn) +{ + void *in, *sqc; + int inlen, err; + + inlen = MLX5_ST_SZ_BYTES(modify_sq_in); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST); + sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY); + + err = mlx5_core_modify_sq(mdev, sqn, in); + + kvfree(in); + + return err; +} + +static void hws_send_ring_close_sq(struct mlx5hws_send_ring_sq *sq) +{ + mlx5_core_destroy_sq(sq->mdev, sq->sqn); + mlx5_wq_destroy(&sq->wq_ctrl); + kfree(sq->wr_priv); + kfree(sq->dep_wqe); +} + +static int hws_send_ring_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn, + void *sqc_data, + struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ring_sq *sq, + struct mlx5hws_send_ring_cq *cq) +{ + int err; + + err = hws_send_ring_create_sq(mdev, pdn, sqc_data, queue, sq, cq); + if (err) + return err; + + err = hws_send_ring_set_sq_rdy(mdev, sq->sqn); + if (err) + hws_send_ring_close_sq(sq); + + return err; +} + +static int hws_send_ring_open_sq(struct mlx5hws_context *ctx, + int numa_node, + struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ring_sq *sq, + struct mlx5hws_send_ring_cq *cq) +{ + size_t buf_sz, sq_log_buf_sz; + void *sqc_data, *wq; + int err; + + sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL); + if (!sqc_data) + return -ENOMEM; + + buf_sz = queue->num_entries * MAX_WQES_PER_RULE; + sq_log_buf_sz = ilog2(roundup_pow_of_two(buf_sz)); + + wq = MLX5_ADDR_OF(sqc, sqc_data, wq); + MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); + MLX5_SET(wq, wq, pd, ctx->pd_num); + MLX5_SET(wq, wq, log_wq_sz, sq_log_buf_sz); + + err = hws_send_ring_alloc_sq(ctx->mdev, numa_node, queue, sq, sqc_data); + if (err) + goto err_free_sqc; + + err = hws_send_ring_create_sq_rdy(ctx->mdev, ctx->pd_num, sqc_data, + queue, sq, cq); + if (err) + goto err_free_sq; + + kvfree(sqc_data); + + return 0; +err_free_sq: + hws_send_ring_free_sq(sq); +err_free_sqc: + kvfree(sqc_data); + return err; +} + +static void hws_cq_complete(struct mlx5_core_cq *mcq, + struct mlx5_eqe *eqe) +{ + pr_err("CQ completion CQ: #%u\n", mcq->cqn); +} + +static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev, + int numa_node, + struct mlx5hws_send_engine *queue, + void *cqc_data, + struct mlx5hws_send_ring_cq *cq) +{ + struct mlx5_core_cq *mcq = &cq->mcq; + struct mlx5_wq_param param; + struct mlx5_cqe64 *cqe; + int err; + u32 i; + + param.buf_numa_node = numa_node; + param.db_numa_node = numa_node; + + err = mlx5_cqwq_create(mdev, ¶m, cqc_data, &cq->wq, &cq->wq_ctrl); + if (err) + return err; + + mcq->cqe_sz = 64; + mcq->set_ci_db = cq->wq_ctrl.db.db; + mcq->arm_db = cq->wq_ctrl.db.db + 1; + mcq->comp = hws_cq_complete; + + for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { + cqe = mlx5_cqwq_get_wqe(&cq->wq, i); + cqe->op_own = 0xf1; + } + + cq->mdev = mdev; + + return 0; +} + +static int 
hws_send_ring_create_cq(struct mlx5_core_dev *mdev, + struct mlx5hws_send_engine *queue, + void *cqc_data, + struct mlx5hws_send_ring_cq *cq) +{ + u32 out[MLX5_ST_SZ_DW(create_cq_out)]; + struct mlx5_core_cq *mcq = &cq->mcq; + void *in, *cqc; + int inlen, eqn; + int err; + + err = mlx5_comp_eqn_get(mdev, 0, &eqn); + if (err) + return err; + + inlen = MLX5_ST_SZ_BYTES(create_cq_in) + + sizeof(u64) * cq->wq_ctrl.buf.npages; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); + memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc)); + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); + + MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); + MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); + MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); + + err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out)); + + kvfree(in); + + return err; +} + +static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev, + struct mlx5hws_send_engine *queue, + int numa_node, + struct mlx5hws_send_ring_cq *cq) +{ + void *cqc_data; + int err; + + cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL); + if (!cqc_data) + return -ENOMEM; + + MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index); + MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries); + MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries)); + + err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq); + if (err) + goto err_out; + + err = hws_send_ring_create_cq(mdev, queue, cqc_data, cq); + if (err) + goto err_free_cq; + + kvfree(cqc_data); + + return 0; + +err_free_cq: + mlx5_wq_destroy(&cq->wq_ctrl); +err_out: + kvfree(cqc_data); + return err; +} + +static void hws_send_ring_close_cq(struct mlx5hws_send_ring_cq *cq) +{ + mlx5_core_destroy_cq(cq->mdev, &cq->mcq); + mlx5_wq_destroy(&cq->wq_ctrl); +} + +static void hws_send_ring_close(struct mlx5hws_send_engine *queue) +{ + hws_send_ring_close_sq(&queue->send_ring.send_sq); + hws_send_ring_close_cq(&queue->send_ring.send_cq); +} + +static int mlx5hws_send_ring_open(struct mlx5hws_context *ctx, + struct mlx5hws_send_engine *queue) +{ + int numa_node = dev_to_node(mlx5_core_dma_dev(ctx->mdev)); + struct mlx5hws_send_ring *ring = &queue->send_ring; + int err; + + err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq); + if (err) + return err; + + err = hws_send_ring_open_sq(ctx, numa_node, queue, &ring->send_sq, + &ring->send_cq); + if (err) + goto close_cq; + + return err; + +close_cq: + hws_send_ring_close_cq(&ring->send_cq); + return err; +} + +void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue) +{ + hws_send_ring_close(queue); + kfree(queue->completed.entries); +} + +int mlx5hws_send_queue_open(struct mlx5hws_context *ctx, + struct mlx5hws_send_engine *queue, + u16 queue_size) +{ + int err; + + mutex_init(&queue->lock); + + queue->num_entries = roundup_pow_of_two(queue_size); + queue->used_entries = 0; + + queue->completed.entries = kcalloc(queue->num_entries, + sizeof(queue->completed.entries[0]), + GFP_KERNEL); + if (!queue->completed.entries) + return -ENOMEM; + + queue->completed.pi = 0; + queue->completed.ci = 0; + queue->completed.mask = queue->num_entries - 1; + err = mlx5hws_send_ring_open(ctx, queue); + if (err) + goto free_completed_entries; + + return 0; + +free_completed_entries: + kfree(queue->completed.entries); + return err; +} + +static 
void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues) +{ + while (queues--) + mlx5hws_send_queue_close(&ctx->send_queue[queues]); +} + +static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx) +{ + int bwc_queues = ctx->queues - 1; + int i; + + if (!mlx5hws_context_bwc_supported(ctx)) + return; + + for (i = 0; i < bwc_queues; i++) + mutex_destroy(&ctx->bwc_send_queue_locks[i]); + kfree(ctx->bwc_send_queue_locks); +} + +void mlx5hws_send_queues_close(struct mlx5hws_context *ctx) +{ + hws_send_queues_bwc_locks_destroy(ctx); + __hws_send_queues_close(ctx, ctx->queues); + kfree(ctx->send_queue); +} + +static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx) +{ + /* Number of BWC queues is equal to number of the usual HWS queues */ + int bwc_queues = ctx->queues - 1; + int i; + + if (!mlx5hws_context_bwc_supported(ctx)) + return 0; + + ctx->queues += bwc_queues; + + ctx->bwc_send_queue_locks = kcalloc(bwc_queues, + sizeof(*ctx->bwc_send_queue_locks), + GFP_KERNEL); + + if (!ctx->bwc_send_queue_locks) + return -ENOMEM; + + for (i = 0; i < bwc_queues; i++) + mutex_init(&ctx->bwc_send_queue_locks[i]); + + return 0; +} + +int mlx5hws_send_queues_open(struct mlx5hws_context *ctx, + u16 queues, + u16 queue_size) +{ + int err = 0; + u32 i; + + /* Open one extra queue for control path */ + ctx->queues = queues + 1; + + /* open a separate set of queues and locks for bwc API */ + err = hws_bwc_send_queues_init(ctx); + if (err) + return err; + + ctx->send_queue = kcalloc(ctx->queues, sizeof(*ctx->send_queue), GFP_KERNEL); + if (!ctx->send_queue) { + err = -ENOMEM; + goto free_bwc_locks; + } + + for (i = 0; i < ctx->queues; i++) { + err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size); + if (err) + goto close_send_queues; + } + + return 0; + +close_send_queues: + __hws_send_queues_close(ctx, i); + + kfree(ctx->send_queue); + +free_bwc_locks: + hws_send_queues_bwc_locks_destroy(ctx); + + return err; +} + +int mlx5hws_send_queue_action(struct mlx5hws_context *ctx, + u16 queue_id, + u32 actions) +{ + struct mlx5hws_send_ring_sq *send_sq; + struct mlx5hws_send_engine *queue; + bool wait_comp = false; + s64 polled = 0; + + queue = &ctx->send_queue[queue_id]; + send_sq = &queue->send_ring.send_sq; + + switch (actions) { + case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC: + wait_comp = true; + fallthrough; + case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC: + if (send_sq->head_dep_idx != send_sq->tail_dep_idx) + /* Send dependent WQEs to drain the queue */ + mlx5hws_send_all_dep_wqe(queue); + else + /* Signal on the last posted WQE */ + mlx5hws_send_engine_flush_queue(queue); + + /* Poll queue until empty */ + while (wait_comp && !mlx5hws_send_engine_empty(queue)) + hws_send_engine_poll_cq(queue, NULL, &polled, 0); + + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +hws_send_wqe_fw(struct mlx5_core_dev *mdev, + u32 pd_num, + struct mlx5hws_send_engine_post_attr *send_attr, + struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl, + void *send_wqe_match_data, + void *send_wqe_match_tag, + void *send_wqe_range_data, + void *send_wqe_range_tag, + bool is_jumbo, + u8 gta_opcode) +{ + bool has_range = send_wqe_range_data || send_wqe_range_tag; + bool has_match = send_wqe_match_data || send_wqe_match_tag; + struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data0 = {0}; + struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data1 = {0}; + struct mlx5hws_wqe_gta_ctrl_seg gta_wqe_ctrl = {0}; + struct mlx5hws_cmd_generate_wqe_attr attr = {0}; + struct mlx5hws_wqe_ctrl_seg 
wqe_ctrl = {0};
+	struct mlx5_cqe64 cqe;
+	u32 flags = 0;
+	int ret;
+
+	/* Set WQE control */
+	wqe_ctrl.opmod_idx_opcode = cpu_to_be32((send_attr->opmod << 24) | send_attr->opcode);
+	wqe_ctrl.qpn_ds = cpu_to_be32((send_attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16);
+	flags |= send_attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+	wqe_ctrl.flags = cpu_to_be32(flags);
+	wqe_ctrl.imm = cpu_to_be32(send_attr->id);
+
+	/* Set GTA WQE CTRL */
+	memcpy(gta_wqe_ctrl.stc_ix, send_wqe_ctrl->stc_ix, sizeof(send_wqe_ctrl->stc_ix));
+	gta_wqe_ctrl.op_dirix = cpu_to_be32(gta_opcode << 28);
+
+	/* Set GTA match WQE DATA */
+	if (has_match) {
+		if (send_wqe_match_data)
+			memcpy(&gta_wqe_data0, send_wqe_match_data, sizeof(gta_wqe_data0));
+		else
+			hws_send_wqe_set_tag(&gta_wqe_data0, send_wqe_match_tag, is_jumbo);
+
+		gta_wqe_data0.rsvd1_definer = cpu_to_be32(send_attr->match_definer_id << 8);
+		attr.gta_data_0 = (u8 *)&gta_wqe_data0;
+	}
+
+	/* Set GTA range WQE DATA */
+	if (has_range) {
+		if (send_wqe_range_data)
+			memcpy(&gta_wqe_data1, send_wqe_range_data, sizeof(gta_wqe_data1));
+		else
+			hws_send_wqe_set_tag(&gta_wqe_data1, send_wqe_range_tag, false);
+
+		gta_wqe_data1.rsvd1_definer = cpu_to_be32(send_attr->range_definer_id << 8);
+		attr.gta_data_1 = (u8 *)&gta_wqe_data1;
+	}
+
+	attr.pdn = pd_num;
+	attr.wqe_ctrl = (u8 *)&wqe_ctrl;
+	attr.gta_ctrl = (u8 *)&gta_wqe_ctrl;
+
+send_wqe:
+	ret = mlx5hws_cmd_generate_wqe(mdev, &attr, &cqe);
+	if (ret) {
+		mlx5_core_err(mdev, "Failed to write WQE using command\n");
+		return ret;
+	}
+
+	if ((get_cqe_opcode(&cqe) == MLX5_CQE_REQ) &&
+	    (be32_to_cpu(cqe.byte_cnt) >> 31 == 0)) {
+		*send_attr->used_id = send_attr->id;
+		return 0;
+	}
+
+	/* Retry if rule failed */
+	if (send_attr->retry_id) {
+		wqe_ctrl.imm = cpu_to_be32(send_attr->retry_id);
+		send_attr->id = send_attr->retry_id;
+		send_attr->retry_id = 0;
+		goto send_wqe;
+	}
+
+	return -1;
+}
+
+void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
+			  struct mlx5hws_send_engine *queue,
+			  struct mlx5hws_send_ste_attr *ste_attr)
+{
+	struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+	struct mlx5hws_rule *rule = send_attr->rule;
+	struct mlx5_core_dev *mdev;
+	u16 queue_id;
+	u32 pdn;
+	int ret;
+
+	queue_id = queue - ctx->send_queue;
+	mdev = ctx->mdev;
+	pdn = ctx->pd_num;
+
+	/* Writes that go through FW cannot be HW fenced, therefore drain
+	 * the queue first
+	 */
+	if (send_attr->fence)
+		mlx5hws_send_queue_action(ctx,
+					  queue_id,
+					  MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+	if (ste_attr->rtc_1) {
+		send_attr->id = ste_attr->rtc_1;
+		send_attr->used_id = ste_attr->used_id_rtc_1;
+		send_attr->retry_id = ste_attr->retry_rtc_1;
+		ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+				      ste_attr->wqe_ctrl,
+				      ste_attr->wqe_data,
+				      ste_attr->wqe_tag,
+				      ste_attr->range_wqe_data,
+				      ste_attr->range_wqe_tag,
+				      ste_attr->wqe_tag_is_jumbo,
+				      ste_attr->gta_opcode);
+		if (ret)
+			goto fail_rule;
+	}
+
+	if (ste_attr->rtc_0) {
+		send_attr->id = ste_attr->rtc_0;
+		send_attr->used_id = ste_attr->used_id_rtc_0;
+		send_attr->retry_id = ste_attr->retry_rtc_0;
+		ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+				      ste_attr->wqe_ctrl,
+				      ste_attr->wqe_data,
+				      ste_attr->wqe_tag,
+				      ste_attr->range_wqe_data,
+				      ste_attr->range_wqe_tag,
+				      ste_attr->wqe_tag_is_jumbo,
+				      ste_attr->gta_opcode);
+		if (ret)
+			goto fail_rule;
+	}
+
+	/* Increase the status. This only works on a good flow, as the enum
+	 * is arranged this way: creating -> created -> deleting -> deleted
+	 */
+	if (likely(rule))
+		rule->status++;
+
+	mlx5hws_send_engine_gen_comp(queue,
+				     send_attr->user_data, MLX5HWS_FLOW_OP_SUCCESS);
+
+	return;
+
+fail_rule:
+	if (likely(rule))
+		rule->status = !rule->rtc_0 && !rule->rtc_1 ?
+			MLX5HWS_RULE_STATUS_FAILED : MLX5HWS_RULE_STATUS_FAILING;
+
+	mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_ERROR);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h
new file mode 100644
index 000000000000..b50825d6dc53
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_SEND_H_
+#define MLX5HWS_SEND_H_
+
+/* A single operation requires at least two WQEBBs, which limits a rule
+ * to a maximum of 16 such operations.
+ */
+#define MAX_WQES_PER_RULE 32
+
+enum mlx5hws_wqe_opcode {
+	MLX5HWS_WQE_OPCODE_TBL_ACCESS = 0x2c,
+};
+
+enum mlx5hws_wqe_opmod {
+	MLX5HWS_WQE_OPMOD_GTA_STE = 0,
+	MLX5HWS_WQE_OPMOD_GTA_MOD_ARG = 1,
+};
+
+enum mlx5hws_wqe_gta_opcode {
+	MLX5HWS_WQE_GTA_OP_ACTIVATE = 0,
+	MLX5HWS_WQE_GTA_OP_DEACTIVATE = 1,
+};
+
+enum mlx5hws_wqe_gta_opmod {
+	MLX5HWS_WQE_GTA_OPMOD_STE = 0,
+	MLX5HWS_WQE_GTA_OPMOD_MOD_ARG = 1,
+};
+
+enum mlx5hws_wqe_gta_sz {
+	MLX5HWS_WQE_SZ_GTA_CTRL = 48,
+	MLX5HWS_WQE_SZ_GTA_DATA = 64,
+};
+
+/* WQE Control segment. */
+struct mlx5hws_wqe_ctrl_seg {
+	__be32 opmod_idx_opcode;
+	__be32 qpn_ds;
+	__be32 flags;
+	__be32 imm;
+};
+
+struct mlx5hws_wqe_gta_ctrl_seg {
+	__be32 op_dirix;
+	__be32 stc_ix[5];
+	__be32 rsvd0[6];
+};
+
+struct mlx5hws_wqe_gta_data_seg_ste {
+	__be32 rsvd0_ctr_id;
+	__be32 rsvd1_definer;
+	__be32 rsvd2[3];
+	union {
+		struct {
+			__be32 action[3];
+			__be32 tag[8];
+		};
+		__be32 jumbo[11];
+	};
+};
+
+struct mlx5hws_wqe_gta_data_seg_arg {
+	__be32 action_args[8];
+};
+
+struct mlx5hws_wqe_gta {
+	struct mlx5hws_wqe_gta_ctrl_seg gta_ctrl;
+	union {
+		struct mlx5hws_wqe_gta_data_seg_ste seg_ste;
+		struct mlx5hws_wqe_gta_data_seg_arg seg_arg;
+	};
+};
+
+struct mlx5hws_send_ring_cq {
+	struct mlx5_core_dev *mdev;
+	struct mlx5_cqwq wq;
+	struct mlx5_wq_ctrl wq_ctrl;
+	struct mlx5_core_cq mcq;
+	u16 poll_wqe;
+};
+
+struct mlx5hws_send_ring_priv {
+	struct mlx5hws_rule *rule;
+	void *user_data;
+	u32 num_wqebbs;
+	u32 id;
+	u32 retry_id;
+	u32 *used_id;
+};
+
+struct mlx5hws_send_ring_dep_wqe {
+	struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl;
+	struct mlx5hws_wqe_gta_data_seg_ste wqe_data;
+	struct mlx5hws_rule *rule;
+	u32 rtc_0;
+	u32 rtc_1;
+	u32 retry_rtc_0;
+	u32 retry_rtc_1;
+	u32 direct_index;
+	void *user_data;
+};
+
+struct mlx5hws_send_ring_sq {
+	struct mlx5_core_dev *mdev;
+	u16 cur_post;
+	u16 buf_mask;
+	struct mlx5hws_send_ring_priv *wr_priv;
+	unsigned int last_idx;
+	struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+	unsigned int head_dep_idx;
+	unsigned int tail_dep_idx;
+	u32 sqn;
+	struct mlx5_wq_cyc wq;
+	struct mlx5_wq_ctrl wq_ctrl;
+	void __iomem *uar_map;
+};
+
+struct mlx5hws_send_ring {
+	struct mlx5hws_send_ring_cq send_cq;
+	struct mlx5hws_send_ring_sq send_sq;
+};
+
+struct mlx5hws_completed_poll_entry {
+	void *user_data;
+	enum mlx5hws_flow_op_status status;
+};
+
+struct mlx5hws_completed_poll {
+	struct mlx5hws_completed_poll_entry *entries;
+	u16 ci;
+	u16 pi;
+	u16 mask;
+};
+
+struct mlx5hws_send_engine {
+	struct mlx5hws_send_ring send_ring;
+	struct mlx5_uars_page *uar; /* Uar is shared between rings of a queue */
+	struct mlx5hws_completed_poll
completed; + u16 used_entries; + u16 num_entries; + bool err; + struct mutex lock; /* Protects the send engine */ +}; + +struct mlx5hws_send_engine_post_ctrl { + struct mlx5hws_send_engine *queue; + struct mlx5hws_send_ring *send_ring; + size_t num_wqebbs; +}; + +struct mlx5hws_send_engine_post_attr { + u8 opcode; + u8 opmod; + u8 notify_hw; + u8 fence; + u8 match_definer_id; + u8 range_definer_id; + size_t len; + struct mlx5hws_rule *rule; + u32 id; + u32 retry_id; + u32 *used_id; + void *user_data; +}; + +struct mlx5hws_send_ste_attr { + u32 rtc_0; + u32 rtc_1; + u32 retry_rtc_0; + u32 retry_rtc_1; + u32 *used_id_rtc_0; + u32 *used_id_rtc_1; + bool wqe_tag_is_jumbo; + u8 gta_opcode; + u32 direct_index; + struct mlx5hws_send_engine_post_attr send_attr; + struct mlx5hws_rule_match_tag *wqe_tag; + struct mlx5hws_rule_match_tag *range_wqe_tag; + struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl; + struct mlx5hws_wqe_gta_data_seg_ste *wqe_data; + struct mlx5hws_wqe_gta_data_seg_ste *range_wqe_data; +}; + +struct mlx5hws_send_ring_dep_wqe * +mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue); + +void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue); + +void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue); + +void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue); + +int mlx5hws_send_queue_open(struct mlx5hws_context *ctx, + struct mlx5hws_send_engine *queue, + u16 queue_size); + +void mlx5hws_send_queues_close(struct mlx5hws_context *ctx); + +int mlx5hws_send_queues_open(struct mlx5hws_context *ctx, + u16 queues, + u16 queue_size); + +int mlx5hws_send_queue_action(struct mlx5hws_context *ctx, + u16 queue_id, + u32 actions); + +int mlx5hws_send_test(struct mlx5hws_context *ctx, + u16 queues, + u16 queue_size); + +struct mlx5hws_send_engine_post_ctrl +mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue); + +void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl, + char **buf, size_t *len); + +void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl, + struct mlx5hws_send_engine_post_attr *attr); + +void mlx5hws_send_ste(struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ste_attr *ste_attr); + +void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx, + struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ste_attr *ste_attr); + +void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue); + +static inline bool mlx5hws_send_engine_empty(struct mlx5hws_send_engine *queue) +{ + struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq; + struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq; + + return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe); +} + +static inline bool mlx5hws_send_engine_full(struct mlx5hws_send_engine *queue) +{ + return queue->used_entries >= queue->num_entries; +} + +static inline void mlx5hws_send_engine_inc_rule(struct mlx5hws_send_engine *queue) +{ + queue->used_entries++; +} + +static inline void mlx5hws_send_engine_dec_rule(struct mlx5hws_send_engine *queue) +{ + queue->used_entries--; +} + +static inline void mlx5hws_send_engine_gen_comp(struct mlx5hws_send_engine *queue, + void *user_data, + int comp_status) +{ + struct mlx5hws_completed_poll *comp = &queue->completed; + + comp->entries[comp->pi].status = comp_status; + comp->entries[comp->pi].user_data = user_data; + + comp->pi = (comp->pi + 1) & comp->mask; +} + +static inline bool mlx5hws_send_engine_err(struct mlx5hws_send_engine *queue) +{ + return 
queue->err; +} + +#endif /* MLX5HWS_SEND_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c new file mode 100644 index 000000000000..9dbc3e9da5ea --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c @@ -0,0 +1,493 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl) +{ + return tbl->ft_id; +} + +static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl, + struct mlx5hws_cmd_ft_create_attr *ft_attr) +{ + ft_attr->type = tbl->fw_ft_type; + if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) + ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1; + else + ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1; + ft_attr->rtc_valid = true; +} + +static void hws_table_set_cap_attr(struct mlx5hws_table *tbl, + struct mlx5hws_cmd_ft_create_attr *ft_attr) +{ + /* Enabling reformat_en or decap_en for the first flow table + * must be done when all VFs are down. + * However, HWS doesn't know when it is required to create the first FT. + * On the other hand, HWS doesn't use all these FT capabilities at all + * (the API doesn't even provide a way to specify these flags), so we'll + * just set these caps on all the flow tables. + * If HCA_CAP.fdb_dynamic_tunnel is set, this constraint is N/A. + */ + if (!MLX5_CAP_ESW_FLOWTABLE(tbl->ctx->mdev, fdb_dynamic_tunnel)) { + ft_attr->reformat_en = true; + ft_attr->decap_en = true; + } +} + +static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl) +{ + struct mlx5hws_cmd_ft_create_attr ft_attr = {0}; + struct mlx5hws_cmd_set_fte_attr fte_attr = {0}; + struct mlx5hws_cmd_forward_tbl *default_miss; + struct mlx5hws_cmd_set_fte_dest dest = {0}; + struct mlx5hws_context *ctx = tbl->ctx; + u8 tbl_type = tbl->type; + + if (tbl->type != MLX5HWS_TABLE_TYPE_FDB) + return 0; + + if (ctx->common_res[tbl_type].default_miss) { + ctx->common_res[tbl_type].default_miss->refcount++; + return 0; + } + + ft_attr.type = tbl->fw_ft_type; + ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */ + ft_attr.rtc_valid = false; + + dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.destination_id = ctx->caps->eswitch_manager_vport_number; + + fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + fte_attr.dests_num = 1; + fte_attr.dests = &dest; + + default_miss = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr); + if (!default_miss) { + mlx5hws_err(ctx, "Failed to default miss table type: 0x%x\n", tbl_type); + return -EINVAL; + } + + /* ctx->ctrl_lock must be held here */ + ctx->common_res[tbl_type].default_miss = default_miss; + ctx->common_res[tbl_type].default_miss->refcount++; + + return 0; +} + +/* Called under ctx->ctrl_lock */ +static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl) +{ + struct mlx5hws_cmd_forward_tbl *default_miss; + struct mlx5hws_context *ctx = tbl->ctx; + u8 tbl_type = tbl->type; + + if (tbl->type != MLX5HWS_TABLE_TYPE_FDB) + return; + + default_miss = ctx->common_res[tbl_type].default_miss; + if (--default_miss->refcount) + return; + + mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss); + ctx->common_res[tbl_type].default_miss = NULL; +} + +static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id) +{ + struct mlx5hws_cmd_ft_modify_attr ft_attr = {0}; + int 
ret; + + if (unlikely(tbl->type != MLX5HWS_TABLE_TYPE_FDB)) + pr_warn("HWS: invalid table type %d\n", tbl->type); + + mlx5hws_cmd_set_attr_connect_miss_tbl(tbl->ctx, + tbl->fw_ft_type, + tbl->type, + &ft_attr); + + ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to connect FT to default FDB FT\n"); + return ret; + } + + return 0; +} + +int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev, + struct mlx5hws_table *tbl, + u32 *ft_id) +{ + struct mlx5hws_cmd_ft_create_attr ft_attr = {0}; + int ret; + + hws_table_init_next_ft_attr(tbl, &ft_attr); + hws_table_set_cap_attr(tbl, &ft_attr); + + ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed creating default ft\n"); + return ret; + } + + if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) { + /* Take/create ref over the default miss */ + ret = hws_table_up_default_fdb_miss_tbl(tbl); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to get default fdb miss\n"); + goto free_ft_obj; + } + ret = hws_table_connect_to_default_miss_tbl(tbl, *ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed connecting to default miss tbl\n"); + goto down_miss_tbl; + } + } + + return 0; + +down_miss_tbl: + hws_table_down_default_fdb_miss_tbl(tbl); +free_ft_obj: + mlx5hws_cmd_flow_table_destroy(mdev, ft_attr.type, *ft_id); + return ret; +} + +void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl, + u32 ft_id) +{ + mlx5hws_cmd_flow_table_destroy(tbl->ctx->mdev, tbl->fw_ft_type, ft_id); + hws_table_down_default_fdb_miss_tbl(tbl); +} + +static int hws_table_init_check_hws_support(struct mlx5hws_context *ctx, + struct mlx5hws_table *tbl) +{ + if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) { + mlx5hws_err(ctx, "HWS not supported, cannot create mlx5hws_table\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int hws_table_init(struct mlx5hws_table *tbl) +{ + struct mlx5hws_context *ctx = tbl->ctx; + int ret; + + ret = hws_table_init_check_hws_support(ctx, tbl); + if (ret) + return ret; + + if (mlx5hws_table_get_fw_ft_type(tbl->type, (u8 *)&tbl->fw_ft_type)) { + pr_warn("HWS: invalid table type %d\n", tbl->type); + return -EOPNOTSUPP; + } + + mutex_lock(&ctx->ctrl_lock); + ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to create flow table object\n"); + mutex_unlock(&ctx->ctrl_lock); + return ret; + } + + ret = mlx5hws_action_get_default_stc(ctx, tbl->type); + if (ret) + goto tbl_destroy; + + INIT_LIST_HEAD(&tbl->matchers_list); + INIT_LIST_HEAD(&tbl->default_miss.head); + + mutex_unlock(&ctx->ctrl_lock); + + return 0; + +tbl_destroy: + mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id); + mutex_unlock(&ctx->ctrl_lock); + return ret; +} + +static void hws_table_uninit(struct mlx5hws_table *tbl) +{ + mutex_lock(&tbl->ctx->ctrl_lock); + mlx5hws_action_put_default_stc(tbl->ctx, tbl->type); + mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id); + mutex_unlock(&tbl->ctx->ctrl_lock); +} + +struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx, + struct mlx5hws_table_attr *attr) +{ + struct mlx5hws_table *tbl; + int ret; + + if (attr->type > MLX5HWS_TABLE_TYPE_FDB) { + mlx5hws_err(ctx, "Invalid table type %d\n", attr->type); + return NULL; + } + + tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); + if (!tbl) + return NULL; + + tbl->ctx = ctx; + tbl->type = attr->type; + tbl->level = attr->level; + + ret = hws_table_init(tbl); + if (ret) { + mlx5hws_err(ctx, "Failed to 
initialise table\n"); + goto free_tbl; + } + + mutex_lock(&ctx->ctrl_lock); + list_add(&tbl->tbl_list_node, &ctx->tbl_list); + mutex_unlock(&ctx->ctrl_lock); + + return tbl; + +free_tbl: + kfree(tbl); + return NULL; +} + +int mlx5hws_table_destroy(struct mlx5hws_table *tbl) +{ + struct mlx5hws_context *ctx = tbl->ctx; + int ret; + + mutex_lock(&ctx->ctrl_lock); + if (!list_empty(&tbl->matchers_list)) { + mlx5hws_err(tbl->ctx, "Cannot destroy table containing matchers\n"); + ret = -EBUSY; + goto unlock_err; + } + + if (!list_empty(&tbl->default_miss.head)) { + mlx5hws_err(tbl->ctx, "Cannot destroy table pointed by default miss\n"); + ret = -EBUSY; + goto unlock_err; + } + + list_del_init(&tbl->tbl_list_node); + mutex_unlock(&ctx->ctrl_lock); + + hws_table_uninit(tbl); + kfree(tbl); + + return 0; + +unlock_err: + mutex_unlock(&ctx->ctrl_lock); + return ret; +} + +static u32 hws_table_get_last_ft(struct mlx5hws_table *tbl) +{ + struct mlx5hws_matcher *matcher; + + if (list_empty(&tbl->matchers_list)) + return tbl->ft_id; + + matcher = list_last_entry(&tbl->matchers_list, struct mlx5hws_matcher, list_node); + return matcher->end_ft_id; +} + +int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id) +{ + struct mlx5hws_cmd_ft_modify_attr ft_attr = {0}; + int ret; + + /* Due to FW limitation, resetting the flow table to default action will + * disconnect RTC when ignore_flow_level_rtc_valid is not supported. + */ + if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid) + return 0; + + if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) + return hws_table_connect_to_default_miss_tbl(tbl, ft_id); + + ft_attr.type = tbl->fw_ft_type; + ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION; + ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT; + + ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to set FT default miss action\n"); + return ret; + } + + return 0; +} + +int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx, + u32 ft_id, + u32 fw_ft_type, + u32 rtc_0_id, + u32 rtc_1_id) +{ + struct mlx5hws_cmd_ft_modify_attr ft_attr = {0}; + + ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID; + ft_attr.type = fw_ft_type; + ft_attr.rtc_id_0 = rtc_0_id; + ft_attr.rtc_id_1 = rtc_1_id; + + return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id); +} + +static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx, + u32 ft_id, + u32 fw_ft_type, + u32 next_ft_id) +{ + struct mlx5hws_cmd_ft_modify_attr ft_attr = {0}; + + ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION; + ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL; + ft_attr.type = fw_ft_type; + ft_attr.table_miss_id = next_ft_id; + + return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id); +} + +int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl) +{ + struct mlx5hws_table *src_tbl; + int ret; + + if (list_empty(&dst_tbl->default_miss.head)) + return 0; + + list_for_each_entry(src_tbl, &dst_tbl->default_miss.head, default_miss.next) { + ret = mlx5hws_table_connect_to_miss_table(src_tbl, dst_tbl); + if (ret) { + mlx5hws_err(dst_tbl->ctx, + "Failed to update source miss table, unexpected behavior\n"); + return ret; + } + } + + return 0; +} + +int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl, + struct mlx5hws_table *dst_tbl) +{ + struct mlx5hws_matcher *matcher; + u32 last_ft_id; + int ret; + + last_ft_id = hws_table_get_last_ft(src_tbl); 
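+	/* Three cases below: a dst_tbl with no matchers - chain the src last
+	 * FT to the dst start anchor; a dst_tbl with matchers - point the src
+	 * last FT at the first matcher's RTCs; dst_tbl == NULL - restore the
+	 * default miss behavior. A minimal caller sketch (illustrative only,
+	 * assuming both tables were created with mlx5hws_table_create()):
+	 *
+	 *	err = mlx5hws_table_set_default_miss(src_tbl, dst_tbl);
+	 *	if (err)
+	 *		return err;
+	 */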
+	if (dst_tbl) {
+		if (list_empty(&dst_tbl->matchers_list)) {
+			/* Connect src_tbl last_ft to dst_tbl start anchor */
+			ret = hws_table_ft_set_next_ft(src_tbl->ctx,
+						       last_ft_id,
+						       src_tbl->fw_ft_type,
+						       dst_tbl->ft_id);
+			if (ret)
+				return ret;
+
+			/* Reset last_ft RTC to default RTC */
+			ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+							    last_ft_id,
+							    src_tbl->fw_ft_type,
+							    0, 0);
+			if (ret)
+				return ret;
+		} else {
+			/* Connect src_tbl last_ft to first matcher RTC */
+			matcher = list_first_entry(&dst_tbl->matchers_list,
+						   struct mlx5hws_matcher,
+						   list_node);
+			ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+							    last_ft_id,
+							    src_tbl->fw_ft_type,
+							    matcher->match_ste.rtc_0_id,
+							    matcher->match_ste.rtc_1_id);
+			if (ret)
+				return ret;
+
+			/* Reset next miss FT to default */
+			ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+			if (ret)
+				return ret;
+		}
+	} else {
+		/* Reset next miss FT to default */
+		ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+		if (ret)
+			return ret;
+
+		/* Reset last_ft RTC to default RTC */
+		ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+						    last_ft_id,
+						    src_tbl->fw_ft_type,
+						    0, 0);
+		if (ret)
+			return ret;
+	}
+
+	src_tbl->default_miss.miss_tbl = dst_tbl;
+
+	return 0;
+}
+
+static int hws_table_set_default_miss_not_valid(struct mlx5hws_table *tbl,
+						struct mlx5hws_table *miss_tbl)
+{
+	if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid) {
+		mlx5hws_err(tbl->ctx, "Default miss table is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (miss_tbl && miss_tbl->type != tbl->type) {
+		mlx5hws_err(tbl->ctx, "Invalid arguments\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
+				   struct mlx5hws_table *miss_tbl)
+{
+	struct mlx5hws_context *ctx = tbl->ctx;
+	struct mlx5hws_table *old_miss_tbl;
+	int ret;
+
+	ret = hws_table_set_default_miss_not_valid(tbl, miss_tbl);
+	if (ret)
+		return ret;
+
+	mutex_lock(&ctx->ctrl_lock);
+
+	old_miss_tbl = tbl->default_miss.miss_tbl;
+	ret = mlx5hws_table_connect_to_miss_table(tbl, miss_tbl);
+	if (ret)
+		goto out;
+
+	if (old_miss_tbl)
+		list_del_init(&tbl->default_miss.next);
+
+	old_miss_tbl = tbl->default_miss.miss_tbl;
+	if (old_miss_tbl)
+		list_del_init(&old_miss_tbl->default_miss.head);
+
+	if (miss_tbl)
+		list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head);
+
+	mutex_unlock(&ctx->ctrl_lock);
+	return 0;
+out:
+	mutex_unlock(&ctx->ctrl_lock);
+	return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h
new file mode 100644
index 000000000000..dd50420eec9e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_TABLE_H_
+#define MLX5HWS_TABLE_H_
+
+struct mlx5hws_default_miss {
+	/* My miss table */
+	struct mlx5hws_table *miss_tbl;
+	struct list_head next;
+	/* Tables whose miss points to my table */
+	struct list_head head;
+};
+
+struct mlx5hws_table {
+	struct mlx5hws_context *ctx;
+	u32 ft_id;
+	enum mlx5hws_table_type type;
+	u32 fw_ft_type;
+	u32 level;
+	struct list_head matchers_list;
+	struct list_head tbl_list_node;
+	struct mlx5hws_default_miss default_miss;
+};
+
+static inline
+int mlx5hws_table_get_fw_ft_type(enum mlx5hws_table_type type,
+				 u8 *ret_type)
+{
+	if (type != MLX5HWS_TABLE_TYPE_FDB)
+		return -EOPNOTSUPP;
+
+
*ret_type = FS_FT_FDB; + + return 0; +} + +static inline +u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type, + bool is_mirror) +{ + if (tbl_type == MLX5HWS_TABLE_TYPE_FDB) + return is_mirror ? FS_FT_FDB_TX : FS_FT_FDB_RX; + + return 0; +} + +int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev, + struct mlx5hws_table *tbl, + u32 *ft_id); + +void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl, + u32 ft_id); + +int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl, + struct mlx5hws_table *dst_tbl); + +int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl); + +int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id); + +int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx, + u32 ft_id, + u32 fw_ft_type, + u32 rtc_0_id, + u32 rtc_1_id); + +#endif /* MLX5HWS_TABLE_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c new file mode 100644 index 000000000000..faf42421c43f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx) +{ + int ret; + + if (!ctx->caps->eswitch_manager) + return 0; + + xa_init(&ctx->vports.vport_gvmi_xa); + + /* Set gvmi for eswitch manager and uplink vports only. Rest of the vports + * (vport 0 of other function, VFs and SFs) will be queried dynamically. + */ + + ret = mlx5hws_cmd_query_gvmi(ctx->mdev, false, 0, &ctx->vports.esw_manager_gvmi); + if (ret) + return ret; + + ctx->vports.uplink_gvmi = 0; + return 0; +} + +void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx) +{ + if (ctx->caps->eswitch_manager) + xa_destroy(&ctx->vports.vport_gvmi_xa); +} + +static int hws_vport_add_gvmi(struct mlx5hws_context *ctx, u16 vport) +{ + u16 vport_gvmi; + int ret; + + ret = mlx5hws_cmd_query_gvmi(ctx->mdev, true, vport, &vport_gvmi); + if (ret) + return -EINVAL; + + ret = xa_insert(&ctx->vports.vport_gvmi_xa, vport, + xa_mk_value(vport_gvmi), GFP_KERNEL); + if (ret) + mlx5hws_dbg(ctx, "Couldn't insert new vport gvmi into xarray (%d)\n", ret); + + return ret; +} + +static bool hws_vport_is_esw_mgr_vport(struct mlx5hws_context *ctx, u16 vport) +{ + return ctx->caps->is_ecpf ? 
vport == MLX5_VPORT_ECPF : + vport == MLX5_VPORT_PF; +} + +int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi) +{ + void *entry; + int ret; + + if (!ctx->caps->eswitch_manager) + return -EINVAL; + + if (hws_vport_is_esw_mgr_vport(ctx, vport)) { + *vport_gvmi = ctx->vports.esw_manager_gvmi; + return 0; + } + + if (vport == MLX5_VPORT_UPLINK) { + *vport_gvmi = ctx->vports.uplink_gvmi; + return 0; + } + +load_entry: + entry = xa_load(&ctx->vports.vport_gvmi_xa, vport); + + if (!xa_is_value(entry)) { + ret = hws_vport_add_gvmi(ctx, vport); + if (ret && ret != -EBUSY) + return ret; + goto load_entry; + } + + *vport_gvmi = (u16)xa_to_value(entry); + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h new file mode 100644 index 000000000000..0912fc166b3a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_VPORT_H_ +#define MLX5HWS_VPORT_H_ + +int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx); + +void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx); + +int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi); + +#endif /* MLX5HWS_VPORT_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c index e87049dfd223..ef05ae8f5039 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c @@ -10,6 +10,56 @@ #define FBNIC_SN_STR_LEN 24 +static int fbnic_version_running_put(struct devlink_info_req *req, + struct fbnic_fw_ver *fw_ver, + char *ver_name) +{ + char running_ver[FBNIC_FW_VER_MAX_SIZE]; + int err; + + fbnic_mk_fw_ver_str(fw_ver->version, running_ver); + err = devlink_info_version_running_put(req, ver_name, running_ver); + if (err) + return err; + + if (strlen(fw_ver->commit) > 0) { + char commit_name[FBNIC_SN_STR_LEN]; + + snprintf(commit_name, FBNIC_SN_STR_LEN, "%s.commit", ver_name); + err = devlink_info_version_running_put(req, commit_name, + fw_ver->commit); + if (err) + return err; + } + + return 0; +} + +static int fbnic_version_stored_put(struct devlink_info_req *req, + struct fbnic_fw_ver *fw_ver, + char *ver_name) +{ + char stored_ver[FBNIC_FW_VER_MAX_SIZE]; + int err; + + fbnic_mk_fw_ver_str(fw_ver->version, stored_ver); + err = devlink_info_version_stored_put(req, ver_name, stored_ver); + if (err) + return err; + + if (strlen(fw_ver->commit) > 0) { + char commit_name[FBNIC_SN_STR_LEN]; + + snprintf(commit_name, FBNIC_SN_STR_LEN, "%s.commit", ver_name); + err = devlink_info_version_stored_put(req, commit_name, + fw_ver->commit); + if (err) + return err; + } + + return 0; +} + static int fbnic_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) @@ -17,6 +67,31 @@ static int fbnic_devlink_info_get(struct devlink *devlink, struct fbnic_dev *fbd = devlink_priv(devlink); int err; + err = fbnic_version_running_put(req, &fbd->fw_cap.running.mgmt, + DEVLINK_INFO_VERSION_GENERIC_FW); + if (err) + return err; + + err = fbnic_version_running_put(req, &fbd->fw_cap.running.bootloader, + DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER); + if (err) + return err; + + err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.mgmt, + DEVLINK_INFO_VERSION_GENERIC_FW); + if 
(err) + return err; + + err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.bootloader, + DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER); + if (err) + return err; + + err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.undi, + DEVLINK_INFO_VERSION_GENERIC_FW_UNDI); + if (err) + return err; + if (fbd->dsn) { unsigned char serial[FBNIC_SN_STR_LEN]; u8 dsn[8]; diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index ce2435987fb6..2e3eb37a45cd 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -46,12 +46,13 @@ config LAN743X tristate "LAN743x support" depends on PCI depends on PTP_1588_CLOCK_OPTIONAL - select PHYLIB select FIXED_PHY select CRC16 select CRC32 + select PHYLINK help - Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip + Support for the Microchip LAN743x and PCI11x1x families of PCI + Express Ethernet devices To compile this driver as a module, choose M here. The module will be called lan743x. diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 0f1c0edec460..1a1cbd034eda 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -1054,61 +1054,55 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev, struct ethtool_keee *eee) { struct lan743x_adapter *adapter = netdev_priv(netdev); - struct phy_device *phydev = netdev->phydev; - u32 buf; - int ret; - - if (!phydev) - return -EIO; - if (!phydev->drv) { - netif_err(adapter, drv, adapter->netdev, - "Missing PHY Driver\n"); - return -EIO; - } - ret = phy_ethtool_get_eee(phydev, eee); - if (ret < 0) - return ret; + eee->tx_lpi_timer = lan743x_csr_read(adapter, + MAC_EEE_TX_LPI_REQ_DLY_CNT); - buf = lan743x_csr_read(adapter, MAC_CR); - if (buf & MAC_CR_EEE_EN_) { - /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */ - buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT); - eee->tx_lpi_timer = buf; - } else { - eee->tx_lpi_timer = 0; - } - - return 0; + return phylink_ethtool_get_eee(adapter->phylink, eee); } static int lan743x_ethtool_set_eee(struct net_device *netdev, struct ethtool_keee *eee) { - struct lan743x_adapter *adapter; - struct phy_device *phydev; - u32 buf = 0; + struct lan743x_adapter *adapter = netdev_priv(netdev); + u32 tx_lpi_timer; - if (!netdev) - return -EINVAL; - adapter = netdev_priv(netdev); - if (!adapter) - return -EINVAL; - phydev = netdev->phydev; - if (!phydev) - return -EIO; - if (!phydev->drv) { - netif_err(adapter, drv, adapter->netdev, - "Missing PHY Driver\n"); - return -EIO; - } + tx_lpi_timer = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT); + if (tx_lpi_timer != eee->tx_lpi_timer) { + u32 mac_cr = lan743x_csr_read(adapter, MAC_CR); + + /* Software should only change this field when Energy Efficient + * Ethernet Enable (EEEEN) is cleared. + * This function will trigger an autonegotiation restart and + * eee will be reenabled during link up if eee was negotiated. 
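+	 * Hence the order below: clear EEEEN first, update the LPI
+	 * timer, then re-enable EEE only if it was previously enabled.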
+ */ + lan743x_mac_eee_enable(adapter, false); + lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, + eee->tx_lpi_timer); - if (eee->eee_enabled) { - buf = (u32)eee->tx_lpi_timer; - lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf); + if (mac_cr & MAC_CR_EEE_EN_) + lan743x_mac_eee_enable(adapter, true); } - return phy_ethtool_set_eee(phydev, eee); + return phylink_ethtool_set_eee(adapter->phylink, eee); +} + +static int +lan743x_ethtool_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + return phylink_ethtool_ksettings_set(adapter->phylink, cmd); +} + +static int +lan743x_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + return phylink_ethtool_ksettings_get(adapter->phylink, cmd); } #ifdef CONFIG_PM @@ -1120,8 +1114,7 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev, wol->supported = 0; wol->wolopts = 0; - if (netdev->phydev) - phy_ethtool_get_wol(netdev->phydev, wol); + phylink_ethtool_get_wol(adapter->phylink, wol); if (wol->supported != adapter->phy_wol_supported) netif_warn(adapter, drv, adapter->netdev, @@ -1162,7 +1155,7 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev, !(adapter->phy_wol_supported & WAKE_MAGICSECURE)) phy_wol.wolopts &= ~WAKE_MAGIC; - ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol); + ret = phylink_ethtool_set_wol(adapter->phylink, wol); if (ret && (ret != -EOPNOTSUPP)) return ret; @@ -1351,44 +1344,16 @@ static void lan743x_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) { struct lan743x_adapter *adapter = netdev_priv(dev); - struct lan743x_phy *phy = &adapter->phy; - if (phy->fc_request_control & FLOW_CTRL_TX) - pause->tx_pause = 1; - if (phy->fc_request_control & FLOW_CTRL_RX) - pause->rx_pause = 1; - pause->autoneg = phy->fc_autoneg; + phylink_ethtool_get_pauseparam(adapter->phylink, pause); } static int lan743x_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) { struct lan743x_adapter *adapter = netdev_priv(dev); - struct phy_device *phydev = dev->phydev; - struct lan743x_phy *phy = &adapter->phy; - - if (!phydev) - return -ENODEV; - - if (!phy_validate_pause(phydev, pause)) - return -EINVAL; - - phy->fc_request_control = 0; - if (pause->rx_pause) - phy->fc_request_control |= FLOW_CTRL_RX; - if (pause->tx_pause) - phy->fc_request_control |= FLOW_CTRL_TX; - - phy->fc_autoneg = pause->autoneg; - - if (pause->autoneg == AUTONEG_DISABLE) - lan743x_mac_flow_ctrl_set_enables(adapter, pause->tx_pause, - pause->rx_pause); - else - phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause); - - return 0; + return phylink_ethtool_set_pauseparam(adapter->phylink, pause); } const struct ethtool_ops lan743x_ethtool_ops = { @@ -1413,8 +1378,8 @@ const struct ethtool_ops lan743x_ethtool_ops = { .get_ts_info = lan743x_ethtool_get_ts_info, .get_eee = lan743x_ethtool_get_eee, .set_eee = lan743x_ethtool_set_eee, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_link_ksettings = lan743x_ethtool_get_link_ksettings, + .set_link_ksettings = lan743x_ethtool_set_link_ksettings, .get_regs_len = lan743x_get_regs_len, .get_regs = lan743x_get_regs, .get_pauseparam = lan743x_get_pauseparam, diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 
e418539565b1..4dc5adcda6a3 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -15,6 +15,7 @@ #include <linux/rtnetlink.h> #include <linux/iopoll.h> #include <linux/crc16.h> +#include <linux/phylink.h> #include "lan743x_main.h" #include "lan743x_ethtool.h" @@ -992,6 +993,42 @@ static int lan743x_sgmii_write(struct lan743x_adapter *adapter, return ret; } +static int lan743x_get_lsd(int speed, int duplex, u8 mss) +{ + int lsd; + + switch (speed) { + case SPEED_2500: + if (mss == MASTER_SLAVE_STATE_SLAVE) + lsd = LINK_2500_SLAVE; + else + lsd = LINK_2500_MASTER; + break; + case SPEED_1000: + if (mss == MASTER_SLAVE_STATE_SLAVE) + lsd = LINK_1000_SLAVE; + else + lsd = LINK_1000_MASTER; + break; + case SPEED_100: + if (duplex == DUPLEX_FULL) + lsd = LINK_100FD; + else + lsd = LINK_100HD; + break; + case SPEED_10: + if (duplex == DUPLEX_FULL) + lsd = LINK_10FD; + else + lsd = LINK_10HD; + break; + default: + lsd = -EINVAL; + } + + return lsd; +} + static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter, u16 baud) { @@ -1041,26 +1078,7 @@ static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter, VR_MII_BAUD_RATE_1P25GBPS); } -static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter, - bool *status) -{ - int ret; - - ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, - VR_MII_GEN2_4_MPLL_CTRL1); - if (ret < 0) - return ret; - - if (ret == VR_MII_MPLL_MULTIPLIER_125 || - ret == VR_MII_MPLL_MULTIPLIER_50) - *status = true; - else - *status = false; - - return 0; -} - -static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter) +static int lan743x_serdes_clock_and_aneg_update(struct lan743x_adapter *adapter) { enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd; int mii_ctrl; @@ -1147,68 +1165,11 @@ static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state) return 0; } -static int lan743x_sgmii_config(struct lan743x_adapter *adapter) +static int lan743x_pcs_power_reset(struct lan743x_adapter *adapter) { - struct net_device *netdev = adapter->netdev; - struct phy_device *phydev = netdev->phydev; - enum lan743x_sgmii_lsd lsd = POWER_DOWN; int mii_ctl; - bool status; int ret; - switch (phydev->speed) { - case SPEED_2500: - if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER) - lsd = LINK_2500_MASTER; - else - lsd = LINK_2500_SLAVE; - break; - case SPEED_1000: - if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER) - lsd = LINK_1000_MASTER; - else - lsd = LINK_1000_SLAVE; - break; - case SPEED_100: - if (phydev->duplex) - lsd = LINK_100FD; - else - lsd = LINK_100HD; - break; - case SPEED_10: - if (phydev->duplex) - lsd = LINK_10FD; - else - lsd = LINK_10HD; - break; - default: - netif_err(adapter, drv, adapter->netdev, - "Invalid speed %d\n", phydev->speed); - return -EINVAL; - } - - adapter->sgmii_lsd = lsd; - ret = lan743x_sgmii_aneg_update(adapter); - if (ret < 0) { - netif_err(adapter, drv, adapter->netdev, - "error %d SGMII cfg failed\n", ret); - return ret; - } - - ret = lan743x_is_sgmii_2_5G_mode(adapter, &status); - if (ret < 0) { - netif_err(adapter, drv, adapter->netdev, - "error %d SGMII get mode failed\n", ret); - return ret; - } - - if (status) - netif_dbg(adapter, drv, adapter->netdev, - "SGMII 2.5G mode enable\n"); - else - netif_dbg(adapter, drv, adapter->netdev, - "SGMII 1G mode enable\n"); - /* SGMII/1000/2500BASE-X PCS power down */ mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR); if (mii_ctl < 0) @@ -1229,11 +1190,7 @@ static int 
lan743x_sgmii_config(struct lan743x_adapter *adapter) if (ret < 0) return ret; - ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP); - if (ret < 0) - return ret; - - return 0; + return lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP); } static void lan743x_mac_set_address(struct lan743x_adapter *adapter, @@ -1389,103 +1346,11 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter) 50000, 1000000); } -static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter, - u16 local_adv, u16 remote_adv) -{ - struct lan743x_phy *phy = &adapter->phy; - u8 cap; - - if (phy->fc_autoneg) - cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv); - else - cap = phy->fc_request_control; - - lan743x_mac_flow_ctrl_set_enables(adapter, - cap & FLOW_CTRL_TX, - cap & FLOW_CTRL_RX); -} - static int lan743x_phy_init(struct lan743x_adapter *adapter) { return lan743x_phy_reset(adapter); } -static void lan743x_phy_link_status_change(struct net_device *netdev) -{ - struct lan743x_adapter *adapter = netdev_priv(netdev); - struct phy_device *phydev = netdev->phydev; - u32 data; - - phy_print_status(phydev); - if (phydev->state == PHY_RUNNING) { - int remote_advertisement = 0; - int local_advertisement = 0; - - data = lan743x_csr_read(adapter, MAC_CR); - - /* set duplex mode */ - if (phydev->duplex) - data |= MAC_CR_DPX_; - else - data &= ~MAC_CR_DPX_; - - /* set bus speed */ - switch (phydev->speed) { - case SPEED_10: - data &= ~MAC_CR_CFG_H_; - data &= ~MAC_CR_CFG_L_; - break; - case SPEED_100: - data &= ~MAC_CR_CFG_H_; - data |= MAC_CR_CFG_L_; - break; - case SPEED_1000: - data |= MAC_CR_CFG_H_; - data &= ~MAC_CR_CFG_L_; - break; - case SPEED_2500: - data |= MAC_CR_CFG_H_; - data |= MAC_CR_CFG_L_; - break; - } - lan743x_csr_write(adapter, MAC_CR, data); - - local_advertisement = - linkmode_adv_to_mii_adv_t(phydev->advertising); - remote_advertisement = - linkmode_adv_to_mii_adv_t(phydev->lp_advertising); - - lan743x_phy_update_flowcontrol(adapter, local_advertisement, - remote_advertisement); - lan743x_ptp_update_latency(adapter, phydev->speed); - if (phydev->interface == PHY_INTERFACE_MODE_SGMII || - phydev->interface == PHY_INTERFACE_MODE_1000BASEX || - phydev->interface == PHY_INTERFACE_MODE_2500BASEX) - lan743x_sgmii_config(adapter); - - data = lan743x_csr_read(adapter, MAC_CR); - if (phydev->enable_tx_lpi) - data |= MAC_CR_EEE_EN_; - else - data &= ~MAC_CR_EEE_EN_; - lan743x_csr_write(adapter, MAC_CR, data); - } -} - -static void lan743x_phy_close(struct lan743x_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct phy_device *phydev = netdev->phydev; - - phy_stop(netdev->phydev); - phy_disconnect(netdev->phydev); - - /* using phydev here as phy_disconnect NULLs netdev->phydev */ - if (phy_is_pseudo_fixed_link(phydev)) - fixed_phy_unregister(phydev); - -} - static void lan743x_phy_interface_select(struct lan743x_adapter *adapter) { u32 id_rev; @@ -1502,65 +1367,9 @@ static void lan743x_phy_interface_select(struct lan743x_adapter *adapter) adapter->phy_interface = PHY_INTERFACE_MODE_MII; else adapter->phy_interface = PHY_INTERFACE_MODE_RGMII; -} - -static int lan743x_phy_open(struct lan743x_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct lan743x_phy *phy = &adapter->phy; - struct fixed_phy_status fphy_status = { - .link = 1, - .speed = SPEED_1000, - .duplex = DUPLEX_FULL, - }; - struct phy_device *phydev; - int ret = -EIO; - - /* try devicetree phy, or fixed link */ - phydev = of_phy_get_and_connect(netdev, 
adapter->pdev->dev.of_node, - lan743x_phy_link_status_change); - - if (!phydev) { - /* try internal phy */ - phydev = phy_find_first(adapter->mdiobus); - if (!phydev) { - if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == - ID_REV_ID_LAN7431_) { - phydev = fixed_phy_register(PHY_POLL, - &fphy_status, NULL); - if (IS_ERR(phydev)) { - netdev_err(netdev, "No PHY/fixed_PHY found\n"); - return PTR_ERR(phydev); - } - } else { - goto return_error; - } - } - - lan743x_phy_interface_select(adapter); - - ret = phy_connect_direct(netdev, phydev, - lan743x_phy_link_status_change, - adapter->phy_interface); - if (ret) - goto return_error; - } - - /* MAC doesn't support 1000T Half */ - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - - /* support both flow controls */ - phy_support_asym_pause(phydev); - phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); - phy->fc_autoneg = phydev->autoneg; - - phy_start(phydev); - phy_start_aneg(phydev); - phy_attached_info(phydev); - return 0; -return_error: - return ret; + netif_dbg(adapter, drv, adapter->netdev, + "selected phy interface: 0x%X\n", adapter->phy_interface); } static void lan743x_rfe_open(struct lan743x_adapter *adapter) @@ -3061,6 +2870,336 @@ return_error: return ret; } +static int lan743x_phylink_sgmii_config(struct lan743x_adapter *adapter) +{ + u32 sgmii_ctl; + int ret; + + ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL, + MASTER_SLAVE_STATE_MASTER); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "error %d link-speed-duplex(LSD) invalid\n", ret); + return ret; + } + + adapter->sgmii_lsd = ret; + netif_dbg(adapter, drv, adapter->netdev, + "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd); + + /* LINK_STATUS_SOURCE from the External PHY via SGMII */ + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + sgmii_ctl &= ~SGMII_CTL_LINK_STATUS_SOURCE_; + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + + ret = lan743x_serdes_clock_and_aneg_update(adapter); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "error %d sgmii aneg update failed\n", ret); + return ret; + } + + return lan743x_pcs_power_reset(adapter); +} + +static int lan743x_phylink_1000basex_config(struct lan743x_adapter *adapter) +{ + u32 sgmii_ctl; + int ret; + + ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL, + MASTER_SLAVE_STATE_MASTER); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "error %d link-speed-duplex(LSD) invalid\n", ret); + return ret; + } + + adapter->sgmii_lsd = ret; + netif_dbg(adapter, drv, adapter->netdev, + "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd); + + /* LINK_STATUS_SOURCE from 1000BASE-X PCS link status */ + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_; + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + + ret = lan743x_serdes_clock_and_aneg_update(adapter); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "error %d 1000basex aneg update failed\n", ret); + return ret; + } + + return lan743x_pcs_power_reset(adapter); +} + +static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter) +{ + u32 sgmii_ctl; + int ret; + + ret = lan743x_get_lsd(SPEED_2500, DUPLEX_FULL, + MASTER_SLAVE_STATE_MASTER); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "error %d link-speed-duplex(LSD) invalid\n", ret); + return ret; + } + + adapter->sgmii_lsd = ret; + netif_dbg(adapter, drv, adapter->netdev, + "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd); + + /* LINK_STATUS_SOURCE from 2500BASE-X PCS link status 
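+	 * (SGMII_CTL_LINK_STATUS_SOURCE_ is set below), as in the
+	 * 1000BASE-X case above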
*/ + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_; + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + + ret = lan743x_serdes_clock_and_aneg_update(adapter); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "error %d 2500basex aneg update failed\n", ret); + return ret; + } + + return lan743x_pcs_power_reset(adapter); +} + +void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable) +{ + u32 mac_cr; + + mac_cr = lan743x_csr_read(adapter, MAC_CR); + if (enable) + mac_cr |= MAC_CR_EEE_EN_; + else + mac_cr &= ~MAC_CR_EEE_EN_; + lan743x_csr_write(adapter, MAC_CR, mac_cr); +} + +static void lan743x_phylink_mac_config(struct phylink_config *config, + unsigned int link_an_mode, + const struct phylink_link_state *state) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret; + + switch (state->interface) { + case PHY_INTERFACE_MODE_2500BASEX: + ret = lan743x_phylink_2500basex_config(adapter); + if (ret < 0) + netif_err(adapter, drv, adapter->netdev, + "2500BASEX config failed. Error %d\n", ret); + else + netif_dbg(adapter, drv, adapter->netdev, + "2500BASEX mode selected and configured\n"); + break; + case PHY_INTERFACE_MODE_1000BASEX: + ret = lan743x_phylink_1000basex_config(adapter); + if (ret < 0) + netif_err(adapter, drv, adapter->netdev, + "1000BASEX config failed. Error %d\n", ret); + else + netif_dbg(adapter, drv, adapter->netdev, + "1000BASEX mode selected and configured\n"); + break; + case PHY_INTERFACE_MODE_SGMII: + ret = lan743x_phylink_sgmii_config(adapter); + if (ret < 0) + netif_err(adapter, drv, adapter->netdev, + "SGMII config failed. Error %d\n", ret); + else + netif_dbg(adapter, drv, adapter->netdev, + "SGMII mode selected and configured\n"); + break; + default: + netif_dbg(adapter, drv, adapter->netdev, + "RGMII/GMII/MII(0x%X) mode enable\n", + state->interface); + break; + } +} + +static void lan743x_phylink_mac_link_down(struct phylink_config *config, + unsigned int link_an_mode, + phy_interface_t interface) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + + netif_tx_stop_all_queues(to_net_dev(config->dev)); + lan743x_mac_eee_enable(adapter, false); +} + +static void lan743x_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + int mac_cr; + u8 cap; + + mac_cr = lan743x_csr_read(adapter, MAC_CR); + /* Pre-initialize register bits. 
+ * Resulting value corresponds to SPEED_10 + */ + mac_cr &= ~(MAC_CR_CFG_H_ | MAC_CR_CFG_L_); + if (speed == SPEED_2500) + mac_cr |= MAC_CR_CFG_H_ | MAC_CR_CFG_L_; + else if (speed == SPEED_1000) + mac_cr |= MAC_CR_CFG_H_; + else if (speed == SPEED_100) + mac_cr |= MAC_CR_CFG_L_; + + lan743x_csr_write(adapter, MAC_CR, mac_cr); + + lan743x_ptp_update_latency(adapter, speed); + + /* Flow Control operation */ + cap = 0; + if (tx_pause) + cap |= FLOW_CTRL_TX; + if (rx_pause) + cap |= FLOW_CTRL_RX; + + lan743x_mac_flow_ctrl_set_enables(adapter, + cap & FLOW_CTRL_TX, + cap & FLOW_CTRL_RX); + + if (phydev) + lan743x_mac_eee_enable(adapter, phydev->enable_tx_lpi); + + netif_tx_wake_all_queues(netdev); +} + +static const struct phylink_mac_ops lan743x_phylink_mac_ops = { + .mac_config = lan743x_phylink_mac_config, + .mac_link_down = lan743x_phylink_mac_link_down, + .mac_link_up = lan743x_phylink_mac_link_up, +}; + +static int lan743x_phylink_create(struct lan743x_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct phylink *pl; + + adapter->phylink_config.dev = &netdev->dev; + adapter->phylink_config.type = PHYLINK_NETDEV; + adapter->phylink_config.mac_managed_pm = false; + + adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | + MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; + + lan743x_phy_interface_select(adapter); + + switch (adapter->phy_interface) { + case PHY_INTERFACE_MODE_SGMII: + __set_bit(PHY_INTERFACE_MODE_SGMII, + adapter->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + adapter->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + adapter->phylink_config.supported_interfaces); + adapter->phylink_config.mac_capabilities |= MAC_2500FD; + break; + case PHY_INTERFACE_MODE_GMII: + __set_bit(PHY_INTERFACE_MODE_GMII, + adapter->phylink_config.supported_interfaces); + break; + case PHY_INTERFACE_MODE_MII: + __set_bit(PHY_INTERFACE_MODE_MII, + adapter->phylink_config.supported_interfaces); + break; + default: + phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces); + } + + pl = phylink_create(&adapter->phylink_config, NULL, + adapter->phy_interface, &lan743x_phylink_mac_ops); + + if (IS_ERR(pl)) { + netdev_err(netdev, "Could not create phylink (%pe)\n", pl); + return PTR_ERR(pl); + } + + adapter->phylink = pl; + netdev_dbg(netdev, "lan743x phylink created"); + + return 0; +} + +static bool lan743x_phy_handle_exists(struct device_node *dn) +{ + dn = of_parse_phandle(dn, "phy-handle", 0); + of_node_put(dn); + return dn != NULL; +} + +static int lan743x_phylink_connect(struct lan743x_adapter *adapter) +{ + struct device_node *dn = adapter->pdev->dev.of_node; + struct net_device *dev = adapter->netdev; + struct phy_device *phydev; + int ret; + + if (dn) + ret = phylink_of_phy_connect(adapter->phylink, dn, 0); + + if (!dn || (ret && !lan743x_phy_handle_exists(dn))) { + phydev = phy_find_first(adapter->mdiobus); + if (phydev) { + /* attach the mac to the phy */ + ret = phylink_connect_phy(adapter->phylink, phydev); + } else if (((adapter->csr.id_rev & ID_REV_ID_MASK_) == + ID_REV_ID_LAN7431_) || adapter->is_pci11x1x) { + struct phylink_link_state state; + unsigned long caps; + + caps = adapter->phylink_config.mac_capabilities; + if (caps & MAC_2500FD) { + state.speed = SPEED_2500; + state.duplex = DUPLEX_FULL; + } else if (caps & MAC_1000FD) { + state.speed = SPEED_1000; + state.duplex = DUPLEX_FULL; + } else { + state.speed = SPEED_UNKNOWN; + state.duplex = DUPLEX_UNKNOWN; + } + + ret = 
phylink_set_fixed_link(adapter->phylink, &state); + if (ret) { + netdev_err(dev, "Could not set fixed link\n"); + return ret; + } + } else { + netdev_err(dev, "no PHY found\n"); + return -ENXIO; + } + } + + if (ret) { + netdev_err(dev, "Could not attach PHY (%d)\n", ret); + return ret; + } + + phylink_start(adapter->phylink); + + return 0; +} + +static void lan743x_phylink_disconnect(struct lan743x_adapter *adapter) +{ + phylink_stop(adapter->phylink); + phylink_disconnect_phy(adapter->phylink); +} + static int lan743x_netdev_close(struct net_device *netdev) { struct lan743x_adapter *adapter = netdev_priv(netdev); @@ -3074,7 +3213,7 @@ static int lan743x_netdev_close(struct net_device *netdev) lan743x_ptp_close(adapter); - lan743x_phy_close(adapter); + lan743x_phylink_disconnect(adapter); lan743x_mac_close(adapter); @@ -3097,13 +3236,13 @@ static int lan743x_netdev_open(struct net_device *netdev) if (ret) goto close_intr; - ret = lan743x_phy_open(adapter); + ret = lan743x_phylink_connect(adapter); if (ret) goto close_mac; ret = lan743x_ptp_open(adapter); if (ret) - goto close_phy; + goto close_mac; lan743x_rfe_open(adapter); @@ -3119,6 +3258,9 @@ static int lan743x_netdev_open(struct net_device *netdev) goto close_tx; } + if (netdev->phydev) + phy_support_eee(netdev->phydev); + #ifdef CONFIG_PM if (adapter->netdev->phydev) { struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; @@ -3143,9 +3285,8 @@ close_rx: lan743x_rx_close(&adapter->rx[index]); } lan743x_ptp_close(adapter); - -close_phy: - lan743x_phy_close(adapter); + if (adapter->phylink) + lan743x_phylink_disconnect(adapter); close_mac: lan743x_mac_close(adapter); @@ -3174,11 +3315,14 @@ static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb, static int lan743x_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { + struct lan743x_adapter *adapter = netdev_priv(netdev); + if (!netif_running(netdev)) return -EINVAL; if (cmd == SIOCSHWTSTAMP) return lan743x_ptp_ioctl(netdev, ifr, cmd); - return phy_mii_ioctl(netdev->phydev, ifr, cmd); + + return phylink_mii_ioctl(adapter->phylink, ifr, cmd); } static void lan743x_netdev_set_multicast(struct net_device *netdev) @@ -3283,10 +3427,17 @@ static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter) mdiobus_unregister(adapter->mdiobus); } +static void lan743x_destroy_phylink(struct lan743x_adapter *adapter) +{ + phylink_destroy(adapter->phylink); + adapter->phylink = NULL; +} + static void lan743x_full_cleanup(struct lan743x_adapter *adapter) { unregister_netdev(adapter->netdev); + lan743x_destroy_phylink(adapter); lan743x_mdiobus_cleanup(adapter); lan743x_hardware_cleanup(adapter); lan743x_pci_cleanup(adapter); @@ -3500,14 +3651,21 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev, NETIF_F_HW_CSUM | NETIF_F_RXCSUM; adapter->netdev->hw_features = adapter->netdev->features; - /* carrier off reporting is important to ethtool even BEFORE open */ - netif_carrier_off(netdev); + ret = lan743x_phylink_create(adapter); + if (ret < 0) { + netif_err(adapter, probe, netdev, + "failed to setup phylink (%d)\n", ret); + goto cleanup_mdiobus; + } ret = register_netdev(adapter->netdev); if (ret < 0) - goto cleanup_mdiobus; + goto cleanup_phylink; return 0; +cleanup_phylink: + lan743x_destroy_phylink(adapter); + cleanup_mdiobus: lan743x_mdiobus_cleanup(adapter); @@ -3763,6 +3921,7 @@ static int lan743x_pm_resume(struct device *dev) MAC_WK_SRC_WK_FR_SAVED_; lan743x_csr_write(adapter, MAC_WK_SRC, data); + rtnl_lock(); /* open netdev when netdev is at running state while 
resume. * For instance, it is true when system wakesup after pm-suspend * However, it is false when system wakes up after suspend GUI menu @@ -3771,6 +3930,7 @@ static int lan743x_pm_resume(struct device *dev) lan743x_netdev_open(netdev); netif_device_attach(netdev); + rtnl_unlock(); return 0; } diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 3b2585a384e2..8ef897c114d3 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -5,6 +5,7 @@ #define _LAN743X_H #include <linux/phy.h> +#include <linux/phylink.h> #include "lan743x_ptp.h" #define DRIVER_AUTHOR "Bryan Whitehead <Bryan.Whitehead@microchip.com>" @@ -1083,6 +1084,8 @@ struct lan743x_adapter { u32 flags; u32 hw_cfg; phy_interface_t phy_interface; + struct phylink *phylink; + struct phylink_config phylink_config; }; #define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel)) @@ -1203,5 +1206,6 @@ void lan743x_hs_syslock_release(struct lan743x_adapter *adapter); void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter, bool tx_enable, bool rx_enable); int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr); +void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable); #endif /* _LAN743X_H */ diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig index f9ebffc04eb8..f663b6e12466 100644 --- a/drivers/net/ethernet/microchip/lan966x/Kconfig +++ b/drivers/net/ethernet/microchip/lan966x/Kconfig @@ -8,6 +8,7 @@ config LAN966X_SWITCH select PHYLINK select PAGE_POOL select VCAP + select FDMA help This driver supports the Lan966x network switch device. diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile index 3b6ac331691d..4cdbe263502c 100644 --- a/drivers/net/ethernet/microchip/lan966x/Makefile +++ b/drivers/net/ethernet/microchip/lan966x/Makefile @@ -20,3 +20,4 @@ lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o # Provide include files ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap +ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c index 3960534ac2ad..502670718104 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c @@ -6,31 +6,55 @@ #include "lan966x_main.h" -static int lan966x_fdma_channel_active(struct lan966x *lan966x) -{ - return lan_rd(lan966x, FDMA_CH_ACTIVE); -} - -static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx, - struct lan966x_db *db) +static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db, + u64 *dataptr) { + struct lan966x *lan966x = (struct lan966x *)fdma->priv; + struct lan966x_rx *rx = &lan966x->rx; struct page *page; page = page_pool_dev_alloc_pages(rx->page_pool); if (unlikely(!page)) - return NULL; + return -ENOMEM; + + rx->page[dcb][db] = page; + *dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM; + + return 0; +} - db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM; +static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db, + u64 *dataptr) +{ + struct lan966x *lan966x = (struct lan966x *)fdma->priv; + + *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr; - return page; + return 0; +} + +static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma 
*fdma, int dcb, int db, + u64 *dataptr) +{ + struct lan966x *lan966x = (struct lan966x *)fdma->priv; + + *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM; + + return 0; +} + +static int lan966x_fdma_channel_active(struct lan966x *lan966x) +{ + return lan_rd(lan966x, FDMA_CH_ACTIVE); } static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx) { + struct fdma *fdma = &rx->fdma; int i, j; - for (i = 0; i < FDMA_DCB_MAX; ++i) { - for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) + for (i = 0; i < fdma->n_dcbs; ++i) { + for (j = 0; j < fdma->n_dbs; ++j) page_pool_put_full_page(rx->page_pool, rx->page[i][j], false); } @@ -38,41 +62,23 @@ static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx) static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx) { + struct fdma *fdma = &rx->fdma; struct page *page; - page = rx->page[rx->dcb_index][rx->db_index]; + page = rx->page[fdma->dcb_index][fdma->db_index]; if (unlikely(!page)) return; page_pool_recycle_direct(rx->page_pool, page); } -static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx, - struct lan966x_rx_dcb *dcb, - u64 nextptr) -{ - struct lan966x_db *db; - int i; - - for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) { - db = &dcb->db[i]; - db->status = FDMA_DCB_STATUS_INTR; - } - - dcb->nextptr = FDMA_DCB_INVALID_DATA; - dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order); - - rx->last_entry->nextptr = nextptr; - rx->last_entry = dcb; -} - static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx) { struct lan966x *lan966x = rx->lan966x; struct page_pool_params pp_params = { .order = rx->page_order, .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, - .pool_size = FDMA_DCB_MAX, + .pool_size = rx->fdma.n_dcbs, .nid = NUMA_NO_NODE, .dev = lan966x->dev, .dma_dir = DMA_FROM_DEVICE, @@ -104,84 +110,41 @@ static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx) static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx) { struct lan966x *lan966x = rx->lan966x; - struct lan966x_rx_dcb *dcb; - struct lan966x_db *db; - struct page *page; - int i, j; - int size; + struct fdma *fdma = &rx->fdma; + int err; if (lan966x_fdma_rx_alloc_page_pool(rx)) return PTR_ERR(rx->page_pool); - /* calculate how many pages are needed to allocate the dcbs */ - size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; - size = ALIGN(size, PAGE_SIZE); - - rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL); - if (!rx->dcbs) - return -ENOMEM; - - rx->last_entry = rx->dcbs; - rx->db_index = 0; - rx->dcb_index = 0; - - /* Now for each dcb allocate the dbs */ - for (i = 0; i < FDMA_DCB_MAX; ++i) { - dcb = &rx->dcbs[i]; - dcb->info = 0; - - /* For each db allocate a page and map it to the DB dataptr. 
*/ - for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) { - db = &dcb->db[j]; - page = lan966x_fdma_rx_alloc_page(rx, db); - if (!page) - return -ENOMEM; - - db->status = 0; - rx->page[i][j] = page; - } + err = fdma_alloc_coherent(lan966x->dev, fdma); + if (err) + return err; - lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i); - } + fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size), + FDMA_DCB_STATUS_INTR); return 0; } -static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx) -{ - rx->dcb_index++; - rx->dcb_index &= FDMA_DCB_MAX - 1; -} - -static void lan966x_fdma_rx_free(struct lan966x_rx *rx) -{ - struct lan966x *lan966x = rx->lan966x; - u32 size; - - /* Now it is possible to do the cleanup of dcb */ - size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; - size = ALIGN(size, PAGE_SIZE); - dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma); -} - static void lan966x_fdma_rx_start(struct lan966x_rx *rx) { struct lan966x *lan966x = rx->lan966x; + struct fdma *fdma = &rx->fdma; u32 mask; /* When activating a channel, first is required to write the first DCB * address and then to activate it */ - lan_wr(lower_32_bits((u64)rx->dma), lan966x, - FDMA_DCB_LLP(rx->channel_id)); - lan_wr(upper_32_bits((u64)rx->dma), lan966x, - FDMA_DCB_LLP1(rx->channel_id)); + lan_wr(lower_32_bits((u64)fdma->dma), lan966x, + FDMA_DCB_LLP(fdma->channel_id)); + lan_wr(upper_32_bits((u64)fdma->dma), lan966x, + FDMA_DCB_LLP1(fdma->channel_id)); - lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) | + lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) | FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | FDMA_CH_CFG_CH_INJ_PORT_SET(0) | FDMA_CH_CFG_CH_MEM_SET(1), - lan966x, FDMA_CH_CFG(rx->channel_id)); + lan966x, FDMA_CH_CFG(fdma->channel_id)); /* Start fdma */ lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), @@ -191,13 +154,13 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx) /* Enable interrupts */ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA); mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask); - mask |= BIT(rx->channel_id); + mask |= BIT(fdma->channel_id); lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask), FDMA_INTR_DB_ENA_INTR_DB_ENA, lan966x, FDMA_INTR_DB_ENA); /* Activate the channel */ - lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)), + lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)), FDMA_CH_ACTIVATE_CH_ACTIVATE, lan966x, FDMA_CH_ACTIVATE); } @@ -205,18 +168,19 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx) static void lan966x_fdma_rx_disable(struct lan966x_rx *rx) { struct lan966x *lan966x = rx->lan966x; + struct fdma *fdma = &rx->fdma; u32 val; /* Disable the channel */ - lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)), + lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)), FDMA_CH_DISABLE_CH_DISABLE, lan966x, FDMA_CH_DISABLE); readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x, - val, !(val & BIT(rx->channel_id)), + val, !(val & BIT(fdma->channel_id)), READL_SLEEP_US, READL_TIMEOUT_US); - lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)), + lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)), FDMA_CH_DB_DISCARD_DB_DISCARD, lan966x, FDMA_CH_DB_DISCARD); } @@ -225,50 +189,27 @@ static void lan966x_fdma_rx_reload(struct lan966x_rx *rx) { struct lan966x *lan966x = rx->lan966x; - lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)), + lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)), FDMA_CH_RELOAD_CH_RELOAD, lan966x, FDMA_CH_RELOAD); } -static void lan966x_fdma_tx_add_dcb(struct 
lan966x_tx *tx, - struct lan966x_tx_dcb *dcb) -{ - dcb->nextptr = FDMA_DCB_INVALID_DATA; - dcb->info = 0; -} - static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx) { struct lan966x *lan966x = tx->lan966x; - struct lan966x_tx_dcb *dcb; - struct lan966x_db *db; - int size; - int i, j; + struct fdma *fdma = &tx->fdma; + int err; - tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf), + tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf), GFP_KERNEL); if (!tx->dcbs_buf) return -ENOMEM; - /* calculate how many pages are needed to allocate the dcbs */ - size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; - size = ALIGN(size, PAGE_SIZE); - tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL); - if (!tx->dcbs) + err = fdma_alloc_coherent(lan966x->dev, fdma); + if (err) goto out; - /* Now for each dcb allocate the db */ - for (i = 0; i < FDMA_DCB_MAX; ++i) { - dcb = &tx->dcbs[i]; - - for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) { - db = &dcb->db[j]; - db->dataptr = 0; - db->status = 0; - } - - lan966x_fdma_tx_add_dcb(tx, dcb); - } + fdma_dcbs_init(fdma, 0, 0); return 0; @@ -280,33 +221,30 @@ out: static void lan966x_fdma_tx_free(struct lan966x_tx *tx) { struct lan966x *lan966x = tx->lan966x; - int size; kfree(tx->dcbs_buf); - - size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; - size = ALIGN(size, PAGE_SIZE); - dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma); + fdma_free_coherent(lan966x->dev, &tx->fdma); } static void lan966x_fdma_tx_activate(struct lan966x_tx *tx) { struct lan966x *lan966x = tx->lan966x; + struct fdma *fdma = &tx->fdma; u32 mask; /* When activating a channel, first is required to write the first DCB * address and then to activate it */ - lan_wr(lower_32_bits((u64)tx->dma), lan966x, - FDMA_DCB_LLP(tx->channel_id)); - lan_wr(upper_32_bits((u64)tx->dma), lan966x, - FDMA_DCB_LLP1(tx->channel_id)); + lan_wr(lower_32_bits((u64)fdma->dma), lan966x, + FDMA_DCB_LLP(fdma->channel_id)); + lan_wr(upper_32_bits((u64)fdma->dma), lan966x, + FDMA_DCB_LLP1(fdma->channel_id)); - lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) | + lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) | FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | FDMA_CH_CFG_CH_INJ_PORT_SET(0) | FDMA_CH_CFG_CH_MEM_SET(1), - lan966x, FDMA_CH_CFG(tx->channel_id)); + lan966x, FDMA_CH_CFG(fdma->channel_id)); /* Start fdma */ lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), @@ -316,13 +254,13 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx) /* Enable interrupts */ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA); mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask); - mask |= BIT(tx->channel_id); + mask |= BIT(fdma->channel_id); lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask), FDMA_INTR_DB_ENA_INTR_DB_ENA, lan966x, FDMA_INTR_DB_ENA); /* Activate the channel */ - lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)), + lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)), FDMA_CH_ACTIVATE_CH_ACTIVATE, lan966x, FDMA_CH_ACTIVATE); } @@ -330,23 +268,23 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx) static void lan966x_fdma_tx_disable(struct lan966x_tx *tx) { struct lan966x *lan966x = tx->lan966x; + struct fdma *fdma = &tx->fdma; u32 val; /* Disable the channel */ - lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)), + lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)), FDMA_CH_DISABLE_CH_DISABLE, lan966x, FDMA_CH_DISABLE); readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x, - val, !(val & BIT(tx->channel_id)), + 
val, !(val & BIT(fdma->channel_id)), READL_SLEEP_US, READL_TIMEOUT_US); - lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)), + lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)), FDMA_CH_DB_DISCARD_DB_DISCARD, lan966x, FDMA_CH_DB_DISCARD); tx->activated = false; - tx->last_in_use = -1; } static void lan966x_fdma_tx_reload(struct lan966x_tx *tx) @@ -354,7 +292,7 @@ static void lan966x_fdma_tx_reload(struct lan966x_tx *tx) struct lan966x *lan966x = tx->lan966x; /* Write the registers to reload the channel */ - lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)), + lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)), FDMA_CH_RELOAD_CH_RELOAD, lan966x, FDMA_CH_RELOAD); } @@ -393,23 +331,24 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight) struct lan966x_tx *tx = &lan966x->tx; struct lan966x_rx *rx = &lan966x->rx; struct lan966x_tx_dcb_buf *dcb_buf; + struct fdma *fdma = &tx->fdma; struct xdp_frame_bulk bq; - struct lan966x_db *db; unsigned long flags; bool clear = false; + struct fdma_db *db; int i; xdp_frame_bulk_init(&bq); spin_lock_irqsave(&lan966x->tx_lock, flags); - for (i = 0; i < FDMA_DCB_MAX; ++i) { + for (i = 0; i < fdma->n_dcbs; ++i) { dcb_buf = &tx->dcbs_buf[i]; if (!dcb_buf->used) continue; - db = &tx->dcbs[i].db[0]; - if (!(db->status & FDMA_DCB_STATUS_DONE)) + db = fdma_db_get(fdma, i, 0); + if (!fdma_db_is_done(db)) continue; dcb_buf->dev->stats.tx_packets++; @@ -449,27 +388,16 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight) spin_unlock_irqrestore(&lan966x->tx_lock, flags); } -static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx) -{ - struct lan966x_db *db; - - /* Check if there is any data */ - db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; - if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE))) - return false; - - return true; -} - static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port) { struct lan966x *lan966x = rx->lan966x; + struct fdma *fdma = &rx->fdma; struct lan966x_port *port; - struct lan966x_db *db; + struct fdma_db *db; struct page *page; - db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; - page = rx->page[rx->dcb_index][rx->db_index]; + db = fdma_db_next_get(fdma); + page = rx->page[fdma->dcb_index][fdma->db_index]; if (unlikely(!page)) return FDMA_ERROR; @@ -494,16 +422,17 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx, u64 src_port) { struct lan966x *lan966x = rx->lan966x; - struct lan966x_db *db; + struct fdma *fdma = &rx->fdma; struct sk_buff *skb; + struct fdma_db *db; struct page *page; u64 timestamp; /* Get the received frame and unmap it */ - db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; - page = rx->page[rx->dcb_index][rx->db_index]; + db = fdma_db_next_get(fdma); + page = rx->page[fdma->dcb_index][fdma->db_index]; - skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order); + skb = build_skb(page_address(page), fdma->db_size); if (unlikely(!skb)) goto free_page; @@ -546,21 +475,19 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight) { struct lan966x *lan966x = container_of(napi, struct lan966x, napi); struct lan966x_rx *rx = &lan966x->rx; - int dcb_reload = rx->dcb_index; - struct lan966x_rx_dcb *old_dcb; - struct lan966x_db *db; + int old_dcb, dcb_reload, counter = 0; + struct fdma *fdma = &rx->fdma; bool redirect = false; struct sk_buff *skb; - struct page *page; - int counter = 0; u64 src_port; - u64 nextptr; + + dcb_reload = fdma->dcb_index; 
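+	/* dcb_reload marks the first DCB to refill; every DCB consumed
+	 * below is handed back to the FDMA ring in the allocate_new loop.
+	 */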
lan966x_fdma_tx_clear_buf(lan966x, weight); /* Get all received skb */ while (counter < weight) { - if (!lan966x_fdma_rx_more_frames(rx)) + if (!fdma_has_frames(fdma)) break; counter++; @@ -570,22 +497,22 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight) break; case FDMA_ERROR: lan966x_fdma_rx_free_page(rx); - lan966x_fdma_rx_advance_dcb(rx); + fdma_dcb_advance(fdma); goto allocate_new; case FDMA_REDIRECT: redirect = true; fallthrough; case FDMA_TX: - lan966x_fdma_rx_advance_dcb(rx); + fdma_dcb_advance(fdma); continue; case FDMA_DROP: lan966x_fdma_rx_free_page(rx); - lan966x_fdma_rx_advance_dcb(rx); + fdma_dcb_advance(fdma); continue; } skb = lan966x_fdma_rx_get_frame(rx, src_port); - lan966x_fdma_rx_advance_dcb(rx); + fdma_dcb_advance(fdma); if (!skb) goto allocate_new; @@ -594,20 +521,14 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight) allocate_new: /* Allocate new pages and map them */ - while (dcb_reload != rx->dcb_index) { - db = &rx->dcbs[dcb_reload].db[rx->db_index]; - page = lan966x_fdma_rx_alloc_page(rx, db); - if (unlikely(!page)) - break; - rx->page[dcb_reload][rx->db_index] = page; - - old_dcb = &rx->dcbs[dcb_reload]; + while (dcb_reload != fdma->dcb_index) { + old_dcb = dcb_reload; dcb_reload++; - dcb_reload &= FDMA_DCB_MAX - 1; + dcb_reload &= fdma->n_dcbs - 1; + + fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size), + FDMA_DCB_STATUS_INTR); - nextptr = rx->dma + ((unsigned long)old_dcb - - (unsigned long)rx->dcbs); - lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr); lan966x_fdma_rx_reload(rx); } @@ -650,56 +571,30 @@ irqreturn_t lan966x_fdma_irq_handler(int irq, void *args) static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx) { struct lan966x_tx_dcb_buf *dcb_buf; + struct fdma *fdma = &tx->fdma; int i; - for (i = 0; i < FDMA_DCB_MAX; ++i) { + for (i = 0; i < fdma->n_dcbs; ++i) { dcb_buf = &tx->dcbs_buf[i]; - if (!dcb_buf->used && i != tx->last_in_use) + if (!dcb_buf->used && + !fdma_is_last(&tx->fdma, &tx->fdma.dcbs[i])) return i; } return -1; } -static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx, - int next_to_use, int len, - dma_addr_t dma_addr) -{ - struct lan966x_tx_dcb *next_dcb; - struct lan966x_db *next_db; - - next_dcb = &tx->dcbs[next_to_use]; - next_dcb->nextptr = FDMA_DCB_INVALID_DATA; - - next_db = &next_dcb->db[0]; - next_db->dataptr = dma_addr; - next_db->status = FDMA_DCB_STATUS_SOF | - FDMA_DCB_STATUS_EOF | - FDMA_DCB_STATUS_INTR | - FDMA_DCB_STATUS_BLOCKO(0) | - FDMA_DCB_STATUS_BLOCKL(len); -} - -static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use) +static void lan966x_fdma_tx_start(struct lan966x_tx *tx) { struct lan966x *lan966x = tx->lan966x; - struct lan966x_tx_dcb *dcb; if (likely(lan966x->tx.activated)) { - /* Connect current dcb to the next db */ - dcb = &tx->dcbs[tx->last_in_use]; - dcb->nextptr = tx->dma + (next_to_use * - sizeof(struct lan966x_tx_dcb)); - lan966x_fdma_tx_reload(tx); } else { /* Because it is first time, then just activate */ lan966x->tx.activated = true; lan966x_fdma_tx_activate(tx); } - - /* Move to next dcb because this last in use */ - tx->last_in_use = next_to_use; } int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len) @@ -752,11 +647,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len) next_dcb_buf->data.xdpf = xdpf; next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES; - - /* Setup next dcb */ - lan966x_fdma_tx_setup_dcb(tx, next_to_use, - xdpf->len + IFH_LEN_BYTES, - dma_addr); } else { page 
= ptr; @@ -773,11 +663,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len) next_dcb_buf->data.page = page; next_dcb_buf->len = len + IFH_LEN_BYTES; - - /* Setup next dcb */ - lan966x_fdma_tx_setup_dcb(tx, next_to_use, - len + IFH_LEN_BYTES, - dma_addr + XDP_PACKET_HEADROOM); } /* Fill up the buffer */ @@ -788,8 +673,19 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len) next_dcb_buf->ptp = false; next_dcb_buf->dev = port->dev; + __fdma_dcb_add(&tx->fdma, + next_to_use, + 0, + FDMA_DCB_STATUS_INTR | + FDMA_DCB_STATUS_SOF | + FDMA_DCB_STATUS_EOF | + FDMA_DCB_STATUS_BLOCKO(0) | + FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len), + &fdma_nextptr_cb, + &lan966x_fdma_xdp_tx_dataptr_cb); + /* Start the transmission */ - lan966x_fdma_tx_start(tx, next_to_use); + lan966x_fdma_tx_start(tx); out: spin_unlock(&lan966x->tx_lock); @@ -847,9 +743,6 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev) goto release; } - /* Setup next dcb */ - lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr); - /* Fill up the buffer */ next_dcb_buf = &tx->dcbs_buf[next_to_use]; next_dcb_buf->use_skb = true; @@ -861,12 +754,21 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev) next_dcb_buf->ptp = false; next_dcb_buf->dev = dev; + fdma_dcb_add(&tx->fdma, + next_to_use, + 0, + FDMA_DCB_STATUS_INTR | + FDMA_DCB_STATUS_SOF | + FDMA_DCB_STATUS_EOF | + FDMA_DCB_STATUS_BLOCKO(0) | + FDMA_DCB_STATUS_BLOCKL(skb->len)); + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) next_dcb_buf->ptp = true; /* Start the transmission */ - lan966x_fdma_tx_start(tx, next_to_use); + lan966x_fdma_tx_start(tx); return NETDEV_TX_OK; @@ -908,14 +810,11 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x) static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) { struct page_pool *page_pool; - dma_addr_t rx_dma; - void *rx_dcbs; - u32 size; + struct fdma fdma_rx_old; int err; /* Store these for later to free them */ - rx_dma = lan966x->rx.dma; - rx_dcbs = lan966x->rx.dcbs; + memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma)); page_pool = lan966x->rx.page_pool; napi_synchronize(&lan966x->napi); @@ -931,9 +830,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) goto restore; lan966x_fdma_rx_start(&lan966x->rx); - size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; - size = ALIGN(size, PAGE_SIZE); - dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma); + fdma_free_coherent(lan966x->dev, &fdma_rx_old); page_pool_destroy(page_pool); @@ -943,8 +840,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) return err; restore: lan966x->rx.page_pool = page_pool; - lan966x->rx.dma = rx_dma; - lan966x->rx.dcbs = rx_dcbs; + memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma)); lan966x_fdma_rx_start(&lan966x->rx); return err; @@ -1034,11 +930,24 @@ int lan966x_fdma_init(struct lan966x *lan966x) return 0; lan966x->rx.lan966x = lan966x; - lan966x->rx.channel_id = FDMA_XTR_CHANNEL; + lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL; + lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX; + lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS; + lan966x->rx.fdma.priv = lan966x; + lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma); + lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order; + lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb; + lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb; lan966x->rx.max_mtu = 
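/*
 * Aside: the FDMA_DCB_STATUS_* word composed above (SOF | EOF | INTR |
 * BLOCKO(0) | BLOCKL(len)) follows the layout of the defines this patch
 * removes from lan966x_main.h further down (block length in bits 15:0,
 * SOF/EOF/INTR/DONE in bits 16..19, block offset in bits 31:20),
 * presumably now provided by the shared fdma_api.h. A self-contained
 * userspace mirror of that packing, with illustrative names:
 */
#include <stdint.h>

#define DCB_STATUS_BLOCKL(x)	((x) & 0xffffu)		/* bits 15:0 */
#define DCB_STATUS_SOF		(1u << 16)
#define DCB_STATUS_EOF		(1u << 17)
#define DCB_STATUS_INTR		(1u << 18)
#define DCB_STATUS_BLOCKO(x)	(((x) << 20) & 0xfff00000u) /* bits 31:20 */

static uint32_t dcb_status_for_frame(uint16_t len)
{
	/* Single-buffer frame: start- and end-of-frame in the same DB,
	 * interrupt on completion, data beginning at offset 0.
	 */
	return DCB_STATUS_SOF | DCB_STATUS_EOF | DCB_STATUS_INTR |
	       DCB_STATUS_BLOCKO(0) | DCB_STATUS_BLOCKL(len);
}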
lan966x_fdma_get_max_frame(lan966x); lan966x->tx.lan966x = lan966x; - lan966x->tx.channel_id = FDMA_INJ_CHANNEL; - lan966x->tx.last_in_use = -1; + lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL; + lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX; + lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS; + lan966x->tx.fdma.priv = lan966x; + lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma); + lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order; + lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb; + lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb; err = lan966x_fdma_rx_alloc(&lan966x->rx); if (err) @@ -1046,7 +955,7 @@ int lan966x_fdma_init(struct lan966x *lan966x) err = lan966x_fdma_tx_alloc(&lan966x->tx); if (err) { - lan966x_fdma_rx_free(&lan966x->rx); + fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma); return err; } @@ -1067,7 +976,7 @@ void lan966x_fdma_deinit(struct lan966x *lan966x) napi_disable(&lan966x->napi); lan966x_fdma_rx_free_pages(&lan966x->rx); - lan966x_fdma_rx_free(&lan966x->rx); + fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma); page_pool_destroy(lan966x->rx.page_pool); lan966x_fdma_tx_free(&lan966x->tx); } diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index f8bebbcf77b2..25cb2f61986f 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -16,6 +16,7 @@ #include <net/switchdev.h> #include <net/xdp.h> +#include <fdma_api.h> #include <vcap_api.h> #include <vcap_api_client.h> @@ -76,15 +77,6 @@ #define FDMA_RX_DCB_MAX_DBS 1 #define FDMA_TX_DCB_MAX_DBS 1 -#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0)) - -#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0)) -#define FDMA_DCB_STATUS_SOF BIT(16) -#define FDMA_DCB_STATUS_EOF BIT(17) -#define FDMA_DCB_STATUS_INTR BIT(18) -#define FDMA_DCB_STATUS_DONE BIT(19) -#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) -#define FDMA_DCB_INVALID_DATA 0x1 #define FDMA_XTR_CHANNEL 6 #define FDMA_INJ_CHANNEL 0 @@ -199,49 +191,14 @@ enum vcap_is1_port_sel_rt { struct lan966x_port; -struct lan966x_db { - u64 dataptr; - u64 status; -}; - -struct lan966x_rx_dcb { - u64 nextptr; - u64 info; - struct lan966x_db db[FDMA_RX_DCB_MAX_DBS]; -}; - -struct lan966x_tx_dcb { - u64 nextptr; - u64 info; - struct lan966x_db db[FDMA_TX_DCB_MAX_DBS]; -}; - struct lan966x_rx { struct lan966x *lan966x; - /* Pointer to the array of hardware dcbs. */ - struct lan966x_rx_dcb *dcbs; - - /* Pointer to the last address in the dcbs. */ - struct lan966x_rx_dcb *last_entry; + struct fdma fdma; /* For each DB, there is a page */ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; - /* Represents the db_index, it can have a value between 0 and - * FDMA_RX_DCB_MAX_DBS, once it reaches the value of FDMA_RX_DCB_MAX_DBS - * it means that the DCB can be reused. - */ - int db_index; - - /* Represents the index in the dcbs. It has a value between 0 and - * FDMA_DCB_MAX - */ - int dcb_index; - - /* Represents the dma address to the dcbs array */ - dma_addr_t dma; - /* Represents the page order that is used to allocate the pages for the * RX buffers. This value is calculated based on max MTU of the devices. 
*/ @@ -252,8 +209,6 @@ struct lan966x_rx { */ u32 max_mtu; - u8 channel_id; - struct page_pool *page_pool; }; @@ -275,18 +230,11 @@ struct lan966x_tx_dcb_buf { struct lan966x_tx { struct lan966x *lan966x; - /* Pointer to the dcb list */ - struct lan966x_tx_dcb *dcbs; - u16 last_in_use; - - /* Represents the DMA address to the first entry of the dcb entries. */ - dma_addr_t dma; + struct fdma fdma; /* Array of dcbs that are given to the HW */ struct lan966x_tx_dcb_buf *dcbs_buf; - u8 channel_id; - bool activated; }; diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c index b3c28260adf8..e172638b0601 100644 --- a/drivers/net/ethernet/mscc/ocelot_ptp.c +++ b/drivers/net/ethernet/mscc/ocelot_ptp.c @@ -582,17 +582,13 @@ EXPORT_SYMBOL(ocelot_hwstamp_set); int ocelot_get_ts_info(struct ocelot *ocelot, int port, struct kernel_ethtool_ts_info *info) { - info->phc_index = ocelot->ptp_clock ? - ptp_clock_index(ocelot->ptp_clock) : -1; - if (info->phc_index == -1) { - info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + if (ocelot->ptp_clock) { + info->phc_index = ptp_clock_index(ocelot->ptp_clock); + } else { + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE; return 0; } info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig index 3f7519e435b8..01fe76786f77 100644 --- a/drivers/net/ethernet/pensando/Kconfig +++ b/drivers/net/ethernet/pensando/Kconfig @@ -23,6 +23,7 @@ config IONIC depends on PTP_1588_CLOCK_OPTIONAL select NET_DEVLINK select DIMLIB + select PAGE_POOL help This enables the support for the Pensando family of Ethernet adapters. 
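/*
 * Aside: "select PAGE_POOL" pulls in the page_pool allocator that the
 * ionic Rx path below is converted to. A minimal kernel-context sketch
 * of the create/alloc/recycle lifecycle; example_rx_pool_roundtrip is a
 * hypothetical helper, not driver code:
 */
#include <linux/err.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

static int example_rx_pool_roundtrip(struct device *dev)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP, /* pool maps pages for us */
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};
	struct page_pool *pool;
	struct page *page;

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	page = page_pool_dev_alloc_pages(pool); /* GFP_ATOMIC under the hood */
	if (page)
		page_pool_put_full_page(pool, page, false);

	page_pool_destroy(pool);
	return 0;
}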
More specific information on this driver can be diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index f2f07bf88545..c8c710cfe70c 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -181,10 +181,7 @@ struct ionic_queue; struct ionic_qcq; #define IONIC_MAX_BUF_LEN ((u16)-1) -#define IONIC_PAGE_SIZE PAGE_SIZE -#define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 2) -#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\ - __GFP_COMP | __GFP_MEMALLOC) +#define IONIC_PAGE_SIZE MIN(PAGE_SIZE, IONIC_MAX_BUF_LEN) #define IONIC_XDP_MAX_LINEAR_MTU (IONIC_PAGE_SIZE - \ (VLAN_ETH_HLEN + \ @@ -238,9 +235,8 @@ struct ionic_queue { unsigned int index; unsigned int num_descs; unsigned int max_sg_elems; + u64 features; - unsigned int type; - unsigned int hw_index; unsigned int hw_type; bool xdp_flush; union { @@ -250,18 +246,23 @@ struct ionic_queue { struct ionic_admin_cmd *adminq; }; union { - void __iomem *cmb_base; - struct ionic_txq_desc __iomem *cmb_txq; - struct ionic_rxq_desc __iomem *cmb_rxq; - }; - union { void *sg_base; struct ionic_txq_sg_desc *txq_sgl; struct ionic_txq_sg_desc_v1 *txq_sgl_v1; struct ionic_rxq_sg_desc *rxq_sgl; }; struct xdp_rxq_info *xdp_rxq_info; + struct bpf_prog *xdp_prog; + struct page_pool *page_pool; struct ionic_queue *partner; + + union { + void __iomem *cmb_base; + struct ionic_txq_desc __iomem *cmb_txq; + struct ionic_rxq_desc __iomem *cmb_rxq; + }; + unsigned int type; + unsigned int hw_index; dma_addr_t base_pa; dma_addr_t cmb_base_pa; dma_addr_t sg_base_pa; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 86774d9922d8..40496587b2b3 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -13,6 +13,7 @@ #include <linux/cpumask.h> #include <linux/crash_dump.h> #include <linux/vmalloc.h> +#include <net/page_pool/helpers.h> #include "ionic.h" #include "ionic_bus.h" @@ -46,8 +47,9 @@ static int ionic_start_queues(struct ionic_lif *lif); static void ionic_stop_queues(struct ionic_lif *lif); static void ionic_lif_queue_identify(struct ionic_lif *lif); -static int ionic_xdp_queues_config(struct ionic_lif *lif); -static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q); +static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif); +static void ionic_unregister_rxq_info(struct ionic_queue *q); +static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id); static void ionic_dim_work(struct work_struct *work) { @@ -380,6 +382,7 @@ static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) if (!(qcq->flags & IONIC_QCQ_F_INITED)) return; + ionic_unregister_rxq_info(&qcq->q); if (qcq->flags & IONIC_QCQ_F_INTR) { ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, IONIC_INTR_MASK_SET); @@ -437,9 +440,10 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) qcq->sg_base_pa = 0; } - ionic_xdp_unregister_rxq_info(&qcq->q); - ionic_qcq_intr_free(lif, qcq); + page_pool_destroy(qcq->q.page_pool); + qcq->q.page_pool = NULL; + ionic_qcq_intr_free(lif, qcq); vfree(qcq->q.info); qcq->q.info = NULL; } @@ -553,7 +557,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, unsigned int cq_desc_size, unsigned int sg_desc_size, unsigned int desc_info_size, - unsigned int pid, struct ionic_qcq **qcq) + unsigned int pid, struct bpf_prog *xdp_prog, + struct ionic_qcq 
**qcq) { struct ionic_dev *idev = &lif->ionic->idev; struct device *dev = lif->ionic->dev; @@ -579,6 +584,31 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, goto err_out_free_qcq; } + if (type == IONIC_QTYPE_RXQ) { + struct page_pool_params pp_params = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .order = 0, + .pool_size = num_descs, + .nid = NUMA_NO_NODE, + .dev = lif->ionic->dev, + .napi = &new->napi, + .dma_dir = DMA_FROM_DEVICE, + .max_len = PAGE_SIZE, + .netdev = lif->netdev, + }; + + if (xdp_prog) + pp_params.dma_dir = DMA_BIDIRECTIONAL; + + new->q.page_pool = page_pool_create(&pp_params); + if (IS_ERR(new->q.page_pool)) { + netdev_err(lif->netdev, "Cannot create page_pool\n"); + err = PTR_ERR(new->q.page_pool); + new->q.page_pool = NULL; + goto err_out_free_q_info; + } + } + new->q.type = type; new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems; @@ -586,12 +616,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, desc_size, sg_desc_size, pid); if (err) { netdev_err(lif->netdev, "Cannot initialize queue\n"); - goto err_out_free_q_info; + goto err_out_free_page_pool; } err = ionic_alloc_qcq_interrupt(lif, new); if (err) - goto err_out_free_q_info; + goto err_out_free_page_pool; err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size); if (err) { @@ -712,6 +742,8 @@ err_out_free_irq: devm_free_irq(dev, new->intr.vector, &new->napi); ionic_intr_free(lif->ionic, new->intr.index); } +err_out_free_page_pool: + page_pool_destroy(new->q.page_pool); err_out_free_q_info: vfree(new->q.info); err_out_free_qcq: @@ -734,7 +766,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif) sizeof(struct ionic_admin_comp), 0, sizeof(struct ionic_admin_desc_info), - lif->kern_pid, &lif->adminqcq); + lif->kern_pid, NULL, &lif->adminqcq); if (err) return err; ionic_debugfs_add_qcq(lif, lif->adminqcq); @@ -747,7 +779,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif) sizeof(union ionic_notifyq_comp), 0, sizeof(struct ionic_admin_desc_info), - lif->kern_pid, &lif->notifyqcq); + lif->kern_pid, NULL, &lif->notifyqcq); if (err) goto err_out; ionic_debugfs_add_qcq(lif, lif->notifyqcq); @@ -925,6 +957,11 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi); else netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi); + err = ionic_register_rxq_info(q, qcq->napi.napi_id); + if (err) { + netif_napi_del(&qcq->napi); + return err; + } qcq->flags |= IONIC_QCQ_F_INITED; @@ -960,7 +997,7 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_tx_desc_info), - lif->kern_pid, &txq); + lif->kern_pid, NULL, &txq); if (err) goto err_qcq_alloc; @@ -1020,7 +1057,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_rx_desc_info), - lif->kern_pid, &rxq); + lif->kern_pid, NULL, &rxq); if (err) goto err_qcq_alloc; @@ -1037,7 +1074,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) goto err_qcq_init; if (test_bit(IONIC_LIF_F_UP, lif->state)) { - ionic_rx_fill(&rxq->q); + ionic_rx_fill(&rxq->q, NULL); err = ionic_qcq_enable(rxq); if (err) goto err_qcq_enable; @@ -2046,7 +2083,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 
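/*
 * Aside: note the dma_dir flip above when an XDP program is attached:
 * XDP_TX transmits straight out of receive pages, so those pages must
 * be mapped for both directions. A sketch of the choice; rx_pool_dma_dir
 * is an illustrative name:
 */
#include <linux/dma-direction.h>

static enum dma_data_direction rx_pool_dma_dir(bool xdp_attached)
{
	/* Plain Rx never writes device-visible data back into the
	 * buffer; XDP_TX does, so it needs DMA_BIDIRECTIONAL.
	 */
	return xdp_attached ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
}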
num_desc, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_tx_desc_info), - lif->kern_pid, &lif->txqcqs[i]); + lif->kern_pid, NULL, &lif->txqcqs[i]); if (err) goto err_out; @@ -2078,7 +2115,8 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_rx_desc_info), - lif->kern_pid, &lif->rxqcqs[i]); + lif->kern_pid, lif->xdp_prog, + &lif->rxqcqs[i]); if (err) goto err_out; @@ -2143,9 +2181,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif) int derr = 0; int i, err; - err = ionic_xdp_queues_config(lif); - if (err) - return err; + ionic_xdp_rxqs_prog_update(lif); for (i = 0; i < lif->nxqs; i++) { if (!(lif->rxqcqs[i] && lif->txqcqs[i])) { @@ -2154,7 +2190,8 @@ static int ionic_txrx_enable(struct ionic_lif *lif) goto err_out; } - ionic_rx_fill(&lif->rxqcqs[i]->q); + ionic_rx_fill(&lif->rxqcqs[i]->q, + READ_ONCE(lif->rxqcqs[i]->q.xdp_prog)); err = ionic_qcq_enable(lif->rxqcqs[i]); if (err) goto err_out; @@ -2167,7 +2204,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif) } if (lif->hwstamp_rxq) { - ionic_rx_fill(&lif->hwstamp_rxq->q); + ionic_rx_fill(&lif->hwstamp_rxq->q, NULL); err = ionic_qcq_enable(lif->hwstamp_rxq); if (err) goto err_out_hwstamp_rx; @@ -2192,7 +2229,7 @@ err_out: derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr); } - ionic_xdp_queues_config(lif); + ionic_xdp_rxqs_prog_update(lif); return err; } @@ -2651,7 +2688,7 @@ static void ionic_vf_attr_replay(struct ionic_lif *lif) ionic_vf_start(ionic); } -static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q) +static void ionic_unregister_rxq_info(struct ionic_queue *q) { struct xdp_rxq_info *xi; @@ -2665,7 +2702,7 @@ static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q) kfree(xi); } -static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id) +static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id) { struct xdp_rxq_info *rxq_info; int err; @@ -2676,15 +2713,15 @@ static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_ err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id); if (err) { - dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n", - q->index, err); + netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg failed, err %d\n", + q->index, err); goto err_out; } - err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL); + err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_POOL, q->page_pool); if (err) { - dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n", - q->index, err); + netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg_mem_model failed, err %d\n", + q->index, err); xdp_rxq_info_unreg(rxq_info); goto err_out; } @@ -2698,44 +2735,20 @@ err_out: return err; } -static int ionic_xdp_queues_config(struct ionic_lif *lif) +static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif) { + struct bpf_prog *xdp_prog; unsigned int i; - int err; if (!lif->rxqcqs) - return 0; - - /* There's no need to rework memory if not going to/from NULL program. - * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info - * This way we don't need to keep an *xdp_prog in every queue struct. 
- */ - if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info) - return 0; + return; + xdp_prog = READ_ONCE(lif->xdp_prog); for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { struct ionic_queue *q = &lif->rxqcqs[i]->q; - if (q->xdp_rxq_info) { - ionic_xdp_unregister_rxq_info(q); - continue; - } - - err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id); - if (err) { - dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n", - i, err); - goto err_out; - } + WRITE_ONCE(q->xdp_prog, xdp_prog); } - - return 0; - -err_out: - for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) - ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q); - - return err; } static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf) @@ -2765,11 +2778,17 @@ static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf) if (!netif_running(netdev)) { old_prog = xchg(&lif->xdp_prog, bpf->prog); + } else if (lif->xdp_prog && bpf->prog) { + old_prog = xchg(&lif->xdp_prog, bpf->prog); + ionic_xdp_rxqs_prog_update(lif); } else { + struct ionic_queue_params qparams; + + ionic_init_queue_params(lif, &qparams); + qparams.xdp_prog = bpf->prog; mutex_lock(&lif->queue_lock); - ionic_stop_queues_reconfig(lif); + ionic_reconfigure_queues(lif, &qparams); old_prog = xchg(&lif->xdp_prog, bpf->prog); - ionic_start_queues_reconfig(lif); mutex_unlock(&lif->queue_lock); } @@ -2871,13 +2890,23 @@ err_out: static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) { - /* only swapping the queues, not the napi, flags, or other stuff */ + /* only swapping the queues and napi, not flags or other stuff */ + swap(a->napi, b->napi); + + if (a->q.type == IONIC_QTYPE_RXQ) { + swap(a->q.page_pool, b->q.page_pool); + a->q.page_pool->p.napi = &a->napi; + if (b->q.page_pool) /* is NULL when increasing queue count */ + b->q.page_pool->p.napi = &b->napi; + } + swap(a->q.features, b->q.features); swap(a->q.num_descs, b->q.num_descs); swap(a->q.desc_size, b->q.desc_size); swap(a->q.base, b->q.base); swap(a->q.base_pa, b->q.base_pa); swap(a->q.info, b->q.info); + swap(a->q.xdp_prog, b->q.xdp_prog); swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info); swap(a->q.partner, b->q.partner); swap(a->q_base, b->q_base); @@ -2928,7 +2957,8 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, } if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs || - qparam->rxq_features != lif->rxq_features) { + qparam->rxq_features != lif->rxq_features || + qparam->xdp_prog != lif->xdp_prog) { rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, sizeof(struct ionic_qcq *), GFP_KERNEL); if (!rx_qcqs) { @@ -2959,7 +2989,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 4, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_tx_desc_info), - lif->kern_pid, &lif->txqcqs[i]); + lif->kern_pid, NULL, &lif->txqcqs[i]); if (err) goto err_out; } @@ -2968,7 +2998,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_tx_desc_info), - lif->kern_pid, &tx_qcqs[i]); + lif->kern_pid, NULL, &tx_qcqs[i]); if (err) goto err_out; } @@ -2990,7 +3020,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 4, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_rx_desc_info), - lif->kern_pid, &lif->rxqcqs[i]); + lif->kern_pid, NULL, 
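/*
 * Aside: ionic_xdp_rxqs_prog_update() above publishes the program with
 * WRITE_ONCE() and the Rx path samples it once per NAPI poll with
 * READ_ONCE(), so each poll iteration sees one consistent program
 * without taking a lock. A minimal mirror of the pattern, with
 * hypothetical names:
 */
#include <linux/compiler.h>

struct bpf_prog;

static void publish_prog(struct bpf_prog **slot, struct bpf_prog *prog)
{
	WRITE_ONCE(*slot, prog);	/* paired with READ_ONCE() below */
}

static struct bpf_prog *sample_prog(struct bpf_prog **slot)
{
	/* Sample once, then reuse the local copy for the whole poll. */
	return READ_ONCE(*slot);
}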
&lif->rxqcqs[i]); if (err) goto err_out; } @@ -2999,11 +3029,12 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_rx_desc_info), - lif->kern_pid, &rx_qcqs[i]); + lif->kern_pid, qparam->xdp_prog, &rx_qcqs[i]); if (err) goto err_out; rx_qcqs[i]->q.features = qparam->rxq_features; + rx_qcqs[i]->q.xdp_prog = qparam->xdp_prog; } } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 3e1005293c4a..e01756fb7fdd 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -268,6 +268,7 @@ struct ionic_queue_params { unsigned int ntxq_descs; unsigned int nrxq_descs; u64 rxq_features; + struct bpf_prog *xdp_prog; bool intr_split; bool cmb_tx; bool cmb_rx; @@ -280,6 +281,7 @@ static inline void ionic_init_queue_params(struct ionic_lif *lif, qparam->ntxq_descs = lif->ntxq_descs; qparam->nrxq_descs = lif->nrxq_descs; qparam->rxq_features = lif->rxq_features; + qparam->xdp_prog = lif->xdp_prog; qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); qparam->cmb_tx = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state); qparam->cmb_rx = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index fc79baad4561..0eeda7e502db 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -6,6 +6,7 @@ #include <linux/if_vlan.h> #include <net/ip6_checksum.h> #include <net/netdev_queues.h> +#include <net/page_pool/helpers.h> #include "ionic.h" #include "ionic_lif.h" @@ -118,108 +119,57 @@ static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info) static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info) { - return buf_info->dma_addr + buf_info->page_offset; + return page_pool_get_dma_addr(buf_info->page) + buf_info->page_offset; } -static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info) +static void __ionic_rx_put_buf(struct ionic_queue *q, + struct ionic_buf_info *buf_info, + bool recycle_direct) { - return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset); -} - -static int ionic_rx_page_alloc(struct ionic_queue *q, - struct ionic_buf_info *buf_info) -{ - struct device *dev = q->dev; - dma_addr_t dma_addr; - struct page *page; - - page = alloc_pages(IONIC_PAGE_GFP_MASK, 0); - if (unlikely(!page)) { - net_err_ratelimited("%s: %s page alloc failed\n", - dev_name(dev), q->name); - q_to_rx_stats(q)->alloc_err++; - return -ENOMEM; - } - - dma_addr = dma_map_page(dev, page, 0, - IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev, dma_addr))) { - __free_pages(page, 0); - net_err_ratelimited("%s: %s dma map failed\n", - dev_name(dev), q->name); - q_to_rx_stats(q)->dma_map_err++; - return -EIO; - } - - buf_info->dma_addr = dma_addr; - buf_info->page = page; - buf_info->page_offset = 0; - - return 0; -} - -static void ionic_rx_page_free(struct ionic_queue *q, - struct ionic_buf_info *buf_info) -{ - struct device *dev = q->dev; - - if (unlikely(!buf_info)) { - net_err_ratelimited("%s: %s invalid buf_info in free\n", - dev_name(dev), q->name); - return; - } - if (!buf_info->page) return; - dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - __free_pages(buf_info->page, 0); + page_pool_put_full_page(q->page_pool, 
buf_info->page, recycle_direct); buf_info->page = NULL; + buf_info->len = 0; + buf_info->page_offset = 0; } -static bool ionic_rx_buf_recycle(struct ionic_queue *q, - struct ionic_buf_info *buf_info, u32 len) -{ - u32 size; - - /* don't re-use pages allocated in low-mem condition */ - if (page_is_pfmemalloc(buf_info->page)) - return false; - - /* don't re-use buffers from non-local numa nodes */ - if (page_to_nid(buf_info->page) != numa_mem_id()) - return false; - - size = ALIGN(len, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ); - buf_info->page_offset += size; - if (buf_info->page_offset >= IONIC_PAGE_SIZE) - return false; - get_page(buf_info->page); +static void ionic_rx_put_buf(struct ionic_queue *q, + struct ionic_buf_info *buf_info) +{ + __ionic_rx_put_buf(q, buf_info, false); +} - return true; +static void ionic_rx_put_buf_direct(struct ionic_queue *q, + struct ionic_buf_info *buf_info) +{ + __ionic_rx_put_buf(q, buf_info, true); } static void ionic_rx_add_skb_frag(struct ionic_queue *q, struct sk_buff *skb, struct ionic_buf_info *buf_info, - u32 off, u32 len, + u32 headroom, u32 len, bool synced) { if (!synced) - dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info), - off, len, DMA_FROM_DEVICE); + page_pool_dma_sync_for_cpu(q->page_pool, + buf_info->page, + buf_info->page_offset + headroom, + len); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - buf_info->page, buf_info->page_offset + off, - len, - IONIC_PAGE_SIZE); + buf_info->page, buf_info->page_offset + headroom, + len, buf_info->len); - if (!ionic_rx_buf_recycle(q, buf_info, len)) { - dma_unmap_page(q->dev, buf_info->dma_addr, - IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - buf_info->page = NULL; - } + /* napi_gro_frags() will release/recycle the + * page_pool buffers from the frags list + */ + buf_info->page = NULL; + buf_info->len = 0; + buf_info->page_offset = 0; } static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q, @@ -244,12 +194,13 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q, q_to_rx_stats(q)->alloc_err++; return NULL; } + skb_mark_for_recycle(skb); if (headroom) frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN); else - frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info)); + frag_len = min_t(u16, len, IONIC_PAGE_SIZE); if (unlikely(!buf_info->page)) goto err_bad_buf_page; @@ -260,7 +211,7 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q, for (i = 0; i < num_sg_elems; i++, buf_info++) { if (unlikely(!buf_info->page)) goto err_bad_buf_page; - frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info)); + frag_len = min_t(u16, len, buf_info->len); ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced); len -= frag_len; } @@ -277,11 +228,13 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev, struct ionic_rx_desc_info *desc_info, unsigned int headroom, unsigned int len, + unsigned int num_sg_elems, bool synced) { struct ionic_buf_info *buf_info; struct device *dev = q->dev; struct sk_buff *skb; + int i; buf_info = &desc_info->bufs[0]; @@ -292,54 +245,52 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev, q_to_rx_stats(q)->alloc_err++; return NULL; } - - if (unlikely(!buf_info->page)) { - dev_kfree_skb(skb); - return NULL; - } + skb_mark_for_recycle(skb); if (!synced) - dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info), - headroom, len, DMA_FROM_DEVICE); + page_pool_dma_sync_for_cpu(q->page_pool, + buf_info->page, + buf_info->page_offset + headroom, + len); + 
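/*
 * Aside: the recycle_direct flag threaded through __ionic_rx_put_buf()
 * above maps onto page_pool_put_full_page()'s allow_direct argument:
 * "true" returns the page to the pool's lockless per-CPU cache and is
 * only safe from the pool's own NAPI context. Sketch; put_rx_page is an
 * illustrative wrapper, not driver code:
 */
#include <net/page_pool/helpers.h>

static void put_rx_page(struct page_pool *pool, struct page *page,
			bool in_own_napi)
{
	/* allow_direct == true short-circuits into the fast per-CPU
	 * cache; from any other context it must be false.
	 */
	page_pool_put_full_page(pool, page, in_own_napi);
}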
skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len); - dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info), - headroom, len, DMA_FROM_DEVICE); skb_put(skb, len); skb->protocol = eth_type_trans(skb, netdev); + /* recycle the Rx buffer now that we're done with it */ + ionic_rx_put_buf_direct(q, buf_info); + buf_info++; + for (i = 0; i < num_sg_elems; i++, buf_info++) + ionic_rx_put_buf_direct(q, buf_info); + return skb; } static void ionic_xdp_tx_desc_clean(struct ionic_queue *q, - struct ionic_tx_desc_info *desc_info) + struct ionic_tx_desc_info *desc_info, + bool in_napi) { - unsigned int nbufs = desc_info->nbufs; - struct ionic_buf_info *buf_info; - struct device *dev = q->dev; - int i; + struct xdp_frame_bulk bq; - if (!nbufs) + if (!desc_info->nbufs) return; - buf_info = desc_info->bufs; - dma_unmap_single(dev, buf_info->dma_addr, - buf_info->len, DMA_TO_DEVICE); - if (desc_info->act == XDP_TX) - __free_pages(buf_info->page, 0); - buf_info->page = NULL; + xdp_frame_bulk_init(&bq); + rcu_read_lock(); /* need for xdp_return_frame_bulk */ - buf_info++; - for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) { - dma_unmap_page(dev, buf_info->dma_addr, - buf_info->len, DMA_TO_DEVICE); - if (desc_info->act == XDP_TX) - __free_pages(buf_info->page, 0); - buf_info->page = NULL; + if (desc_info->act == XDP_TX) { + if (likely(in_napi)) + xdp_return_frame_rx_napi(desc_info->xdpf); + else + xdp_return_frame(desc_info->xdpf); + } else if (desc_info->act == XDP_REDIRECT) { + ionic_tx_desc_unmap_bufs(q, desc_info); + xdp_return_frame_bulk(desc_info->xdpf, &bq); } - if (desc_info->act == XDP_REDIRECT) - xdp_return_frame(desc_info->xdpf); + xdp_flush_frame_bulk(&bq); + rcu_read_unlock(); desc_info->nbufs = 0; desc_info->xdpf = NULL; @@ -363,9 +314,17 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, buf_info = desc_info->bufs; stats = q_to_tx_stats(q); - dma_addr = ionic_tx_map_single(q, frame->data, len); - if (!dma_addr) - return -EIO; + if (act == XDP_TX) { + dma_addr = page_pool_get_dma_addr(page) + + off + XDP_PACKET_HEADROOM; + dma_sync_single_for_device(q->dev, dma_addr, + len, DMA_TO_DEVICE); + } else /* XDP_REDIRECT */ { + dma_addr = ionic_tx_map_single(q, frame->data, len); + if (!dma_addr) + return -EIO; + } + buf_info->dma_addr = dma_addr; buf_info->len = len; buf_info->page = page; @@ -387,10 +346,21 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, frag = sinfo->frags; elem = ionic_tx_sg_elems(q); for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) { - dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag)); - if (!dma_addr) { - ionic_tx_desc_unmap_bufs(q, desc_info); - return -EIO; + if (act == XDP_TX) { + struct page *pg = skb_frag_page(frag); + + dma_addr = page_pool_get_dma_addr(pg) + + skb_frag_off(frag); + dma_sync_single_for_device(q->dev, dma_addr, + skb_frag_size(frag), + DMA_TO_DEVICE); + } else { + dma_addr = ionic_tx_map_frag(q, frag, 0, + skb_frag_size(frag)); + if (dma_mapping_error(q->dev, dma_addr)) { + ionic_tx_desc_unmap_bufs(q, desc_info); + return -EIO; + } } bi->dma_addr = dma_addr; bi->len = skb_frag_size(frag); @@ -481,15 +451,13 @@ int ionic_xdp_xmit(struct net_device *netdev, int n, return nxmit; } -static void ionic_xdp_rx_put_bufs(struct ionic_queue *q, - struct ionic_buf_info *buf_info, - int nbufs) +static void ionic_xdp_rx_unlink_bufs(struct ionic_queue *q, + struct ionic_buf_info *buf_info, + int nbufs) { int i; for (i = 0; i < nbufs; i++) { - 
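/*
 * Aside: the reworked XDP_REDIRECT completion path above batches frame
 * returns through the xdp_frame_bulk API, which amortizes the per-frame
 * return cost and must be flushed under RCU (the in-tree comment "need
 * for xdp_return_frame_bulk" refers to this). Minimal shape of the
 * pattern; frames/n_frames are illustrative:
 */
#include <linux/rcupdate.h>
#include <net/xdp.h>

static void return_redirect_frames(struct xdp_frame **frames, int n_frames)
{
	struct xdp_frame_bulk bq;
	int i;

	xdp_frame_bulk_init(&bq);
	rcu_read_lock();	/* required for xdp_return_frame_bulk() */

	for (i = 0; i < n_frames; i++)
		xdp_return_frame_bulk(frames[i], &bq);

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();
}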
dma_unmap_page(q->dev, buf_info->dma_addr, - IONIC_PAGE_SIZE, DMA_FROM_DEVICE); buf_info->page = NULL; buf_info++; } @@ -516,11 +484,9 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN); xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info), XDP_PACKET_HEADROOM, frag_len, false); - - dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info), - XDP_PACKET_HEADROOM, frag_len, - DMA_FROM_DEVICE); - + page_pool_dma_sync_for_cpu(rxq->page_pool, buf_info->page, + buf_info->page_offset + XDP_PACKET_HEADROOM, + frag_len); prefetchw(&xdp_buf.data_hard_start); /* We limit MTU size to one buffer if !xdp_has_frags, so @@ -542,15 +508,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, do { if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) { err = -ENOSPC; - goto out_xdp_abort; + break; } frag = &sinfo->frags[sinfo->nr_frags]; sinfo->nr_frags++; bi++; - frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi)); - dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi), - 0, frag_len, DMA_FROM_DEVICE); + frag_len = min_t(u16, remain_len, bi->len); + page_pool_dma_sync_for_cpu(rxq->page_pool, bi->page, + buf_info->page_offset, + frag_len); skb_frag_fill_page_desc(frag, bi->page, 0, frag_len); sinfo->xdp_frags_size += frag_len; remain_len -= frag_len; @@ -569,14 +536,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, return false; /* false = we didn't consume the packet */ case XDP_DROP: - ionic_rx_page_free(rxq, buf_info); + ionic_rx_put_buf_direct(rxq, buf_info); stats->xdp_drop++; break; case XDP_TX: xdpf = xdp_convert_buff_to_frame(&xdp_buf); - if (!xdpf) - goto out_xdp_abort; + if (!xdpf) { + err = -ENOSPC; + break; + } txq = rxq->partner; nq = netdev_get_tx_queue(netdev, txq->index); @@ -588,7 +557,8 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, ionic_q_space_avail(txq), 1, 1)) { __netif_tx_unlock(nq); - goto out_xdp_abort; + err = -EIO; + break; } err = ionic_xdp_post_frame(txq, xdpf, XDP_TX, @@ -598,49 +568,47 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, __netif_tx_unlock(nq); if (unlikely(err)) { netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err); - goto out_xdp_abort; + break; } - ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs); + ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs); stats->xdp_tx++; - - /* the Tx completion will free the buffers */ break; case XDP_REDIRECT: err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog); if (unlikely(err)) { netdev_dbg(netdev, "xdp_do_redirect err %d\n", err); - goto out_xdp_abort; + break; } - ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs); + ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs); rxq->xdp_flush = true; stats->xdp_redirect++; break; case XDP_ABORTED: default: - goto out_xdp_abort; + err = -EIO; + break; } - return true; - -out_xdp_abort: - trace_xdp_exception(netdev, xdp_prog, xdp_action); - ionic_rx_page_free(rxq, buf_info); - stats->xdp_aborted++; + if (err) { + ionic_rx_put_buf_direct(rxq, buf_info); + trace_xdp_exception(netdev, xdp_prog, xdp_action); + stats->xdp_aborted++; + } return true; } static void ionic_rx_clean(struct ionic_queue *q, struct ionic_rx_desc_info *desc_info, - struct ionic_rxq_comp *comp) + struct ionic_rxq_comp *comp, + struct bpf_prog *xdp_prog) { struct net_device *netdev = q->lif->netdev; struct ionic_qcq *qcq = q_to_qcq(q); struct ionic_rx_stats *stats; - struct bpf_prog *xdp_prog; - unsigned int headroom; + unsigned int headroom = 0; struct sk_buff *skb; bool synced = false; bool 
use_copybreak; @@ -648,7 +616,14 @@ static void ionic_rx_clean(struct ionic_queue *q, stats = q_to_rx_stats(q); - if (comp->status) { + if (unlikely(comp->status)) { + /* Most likely status==2 and the pkt received was bigger + * than the buffer available: comp->len will show the + * pkt size received that didn't fit the advertised desc.len + */ + dev_dbg(q->dev, "q%d drop comp->status %d comp->len %d desc->len %d\n", + q->index, comp->status, comp->len, q->rxq[q->head_idx].len); + stats->dropped++; return; } @@ -657,18 +632,18 @@ static void ionic_rx_clean(struct ionic_queue *q, stats->pkts++; stats->bytes += len; - xdp_prog = READ_ONCE(q->lif->xdp_prog); if (xdp_prog) { if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len)) return; synced = true; + headroom = XDP_PACKET_HEADROOM; } - headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0; use_copybreak = len <= q->lif->rx_copybreak; if (use_copybreak) skb = ionic_rx_copybreak(netdev, q, desc_info, - headroom, len, synced); + headroom, len, + comp->num_sg_elems, synced); else skb = ionic_rx_build_skb(q, desc_info, headroom, len, comp->num_sg_elems, synced); @@ -744,7 +719,7 @@ static void ionic_rx_clean(struct ionic_queue *q, napi_gro_frags(&qcq->napi); } -bool ionic_rx_service(struct ionic_cq *cq) +static bool __ionic_rx_service(struct ionic_cq *cq, struct bpf_prog *xdp_prog) { struct ionic_rx_desc_info *desc_info; struct ionic_queue *q = cq->bound_q; @@ -766,11 +741,16 @@ bool ionic_rx_service(struct ionic_cq *cq) q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); /* clean the related q entry, only one per qc completion */ - ionic_rx_clean(q, desc_info, comp); + ionic_rx_clean(q, desc_info, comp, xdp_prog); return true; } +bool ionic_rx_service(struct ionic_cq *cq) +{ + return __ionic_rx_service(cq, NULL); +} + static inline void ionic_write_cmb_desc(struct ionic_queue *q, void *desc) { @@ -781,7 +761,7 @@ static inline void ionic_write_cmb_desc(struct ionic_queue *q, memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0])); } -void ionic_rx_fill(struct ionic_queue *q) +void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog) { struct net_device *netdev = q->lif->netdev; struct ionic_rx_desc_info *desc_info; @@ -789,6 +769,9 @@ void ionic_rx_fill(struct ionic_queue *q) struct ionic_buf_info *buf_info; unsigned int fill_threshold; struct ionic_rxq_desc *desc; + unsigned int first_frag_len; + unsigned int first_buf_len; + unsigned int headroom = 0; unsigned int remain_len; unsigned int frag_len; unsigned int nfrags; @@ -806,35 +789,43 @@ void ionic_rx_fill(struct ionic_queue *q) len = netdev->mtu + VLAN_ETH_HLEN; - for (i = n_fill; i; i--) { - unsigned int headroom; - unsigned int buf_len; + if (xdp_prog) { + /* Always alloc the full size buffer, but only need + * the actual frag_len in the descriptor + * XDP uses space in the first buffer, so account for + * head room, tail room, and ip header in the first frag size. + */ + headroom = XDP_PACKET_HEADROOM; + first_buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN + headroom; + first_frag_len = min_t(u16, len + headroom, first_buf_len); + } else { + /* Use MTU size if smaller than max buffer size */ + first_frag_len = min_t(u16, len, IONIC_PAGE_SIZE); + first_buf_len = first_frag_len; + } + for (i = n_fill; i; i--) { + /* fill main descriptor - buf[0] */ nfrags = 0; remain_len = len; desc = &q->rxq[q->head_idx]; desc_info = &q->rx_info[q->head_idx]; buf_info = &desc_info->bufs[0]; - if (!buf_info->page) { /* alloc a new buffer? 
*/ - if (unlikely(ionic_rx_page_alloc(q, buf_info))) { - desc->addr = 0; - desc->len = 0; - return; - } + buf_info->len = first_buf_len; + frag_len = first_frag_len - headroom; + + /* get a new buffer if we can't reuse one */ + if (!buf_info->page) + buf_info->page = page_pool_alloc(q->page_pool, + &buf_info->page_offset, + &buf_info->len, + GFP_ATOMIC); + if (unlikely(!buf_info->page)) { + buf_info->len = 0; + return; } - /* fill main descriptor - buf[0] - * XDP uses space in the first buffer, so account for - * head room, tail room, and ip header in the first frag size. - */ - headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0; - if (q->xdp_rxq_info) - buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN; - else - buf_len = ionic_rx_buf_size(buf_info); - frag_len = min_t(u16, len, buf_len); - desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom); desc->len = cpu_to_le16(frag_len); remain_len -= frag_len; @@ -844,16 +835,26 @@ void ionic_rx_fill(struct ionic_queue *q) /* fill sg descriptors - buf[1..n] */ sg_elem = q->rxq_sgl[q->head_idx].elems; for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) { - if (!buf_info->page) { /* alloc a new sg buffer? */ - if (unlikely(ionic_rx_page_alloc(q, buf_info))) { - sg_elem->addr = 0; - sg_elem->len = 0; + frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE); + + /* Recycle any leftover buffers that are too small to reuse */ + if (unlikely(buf_info->page && buf_info->len < frag_len)) + ionic_rx_put_buf_direct(q, buf_info); + + /* Get new buffer if needed */ + if (!buf_info->page) { + buf_info->len = frag_len; + buf_info->page = page_pool_alloc(q->page_pool, + &buf_info->page_offset, + &buf_info->len, + GFP_ATOMIC); + if (unlikely(!buf_info->page)) { + buf_info->len = 0; return; } } sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info)); - frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info)); sg_elem->len = cpu_to_le16(frag_len); remain_len -= frag_len; buf_info++; @@ -883,17 +884,12 @@ void ionic_rx_fill(struct ionic_queue *q) void ionic_rx_empty(struct ionic_queue *q) { struct ionic_rx_desc_info *desc_info; - struct ionic_buf_info *buf_info; unsigned int i, j; for (i = 0; i < q->num_descs; i++) { desc_info = &q->rx_info[i]; - for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) { - buf_info = &desc_info->bufs[j]; - if (buf_info->page) - ionic_rx_page_free(q, buf_info); - } - + for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) + ionic_rx_put_buf(q, &desc_info->bufs[j]); desc_info->nbufs = 0; } @@ -974,6 +970,32 @@ static void ionic_xdp_do_flush(struct ionic_cq *cq) } } +static unsigned int ionic_rx_cq_service(struct ionic_cq *cq, + unsigned int work_to_do) +{ + struct ionic_queue *q = cq->bound_q; + unsigned int work_done = 0; + struct bpf_prog *xdp_prog; + + if (work_to_do == 0) + return 0; + + xdp_prog = READ_ONCE(q->xdp_prog); + while (__ionic_rx_service(cq, xdp_prog)) { + if (cq->tail_idx == cq->num_descs - 1) + cq->done_color = !cq->done_color; + + cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); + + if (++work_done >= work_to_do) + break; + } + ionic_rx_fill(q, xdp_prog); + ionic_xdp_do_flush(cq); + + return work_done; +} + int ionic_rx_napi(struct napi_struct *napi, int budget) { struct ionic_qcq *qcq = napi_to_qcq(napi); @@ -984,12 +1006,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) if (unlikely(!budget)) return budget; - work_done = ionic_cq_service(cq, budget, - ionic_rx_service, NULL, NULL); - - ionic_rx_fill(cq->bound_q); + work_done = ionic_rx_cq_service(cq, budget); - 
ionic_xdp_do_flush(cq); if (work_done < budget && napi_complete_done(napi, work_done)) { ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR); flags |= IONIC_INTR_CRED_UNMASK; @@ -1030,12 +1048,8 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) if (unlikely(!budget)) return budget; - rx_work_done = ionic_cq_service(rxcq, budget, - ionic_rx_service, NULL, NULL); - - ionic_rx_fill(rxcq->bound_q); + rx_work_done = ionic_rx_cq_service(rxcq, budget); - ionic_xdp_do_flush(rxcq); if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) { ionic_dim_update(rxqcq, 0); flags |= IONIC_INTR_CRED_UNMASK; @@ -1166,7 +1180,7 @@ static void ionic_tx_clean(struct ionic_queue *q, struct sk_buff *skb; if (desc_info->xdpf) { - ionic_xdp_tx_desc_clean(q->partner, desc_info); + ionic_xdp_tx_desc_clean(q->partner, desc_info, in_napi); stats->clean++; if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index))) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h index 9e73e324e7a1..b2b9a2dc9eb8 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h @@ -4,9 +4,11 @@ #ifndef _IONIC_TXRX_H_ #define _IONIC_TXRX_H_ +struct bpf_prog; + void ionic_tx_flush(struct ionic_cq *cq); -void ionic_rx_fill(struct ionic_queue *q); +void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog); void ionic_rx_empty(struct ionic_queue *q); void ionic_tx_empty(struct ionic_queue *q); int ionic_rx_napi(struct napi_struct *napi, int budget); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 63e3dac4d5f7..9d6399a5c780 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -326,25 +326,18 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *i struct qede_ptp *ptp = edev->ptp; if (!ptp) { - info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; - info->phc_index = -1; + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; return 0; } info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (ptp->clock) info->phc_index = ptp_clock_index(ptp->clock); - else - info->phc_index = -1; info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index bcef8ab715bf..d7cdea8f604d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -2042,12 +2042,14 @@ int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode) int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable) { - int err; - u32 word; struct qlcnic_cmd_args cmd; - const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, - 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, - 0x255b0ec26d5a56daULL }; + static const u64 key[] = { + 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, + 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, + 0x255b0ec26d5a56daULL + }; + u32 word; + int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS); if (err) diff --git a/drivers/net/ethernet/realtek/r8169_main.c 
b/drivers/net/ethernet/realtek/r8169_main.c index 3cb1c4f5c91a..45ac8befba29 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -578,7 +578,7 @@ struct rtl8169_counters { __le64 rx_broadcast; __le32 rx_multicast; __le16 tx_aborted; - __le16 tx_underun; + __le16 tx_underrun; }; struct rtl8169_tc_offsets { @@ -1843,7 +1843,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev, data[9] = le64_to_cpu(counters->rx_broadcast); data[10] = le32_to_cpu(counters->rx_multicast); data[11] = le16_to_cpu(counters->tx_aborted); - data[12] = le16_to_cpu(counters->tx_underun); + data[12] = le16_to_cpu(counters->tx_underrun); } static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c index 7882f2c0e1a4..869183e1565e 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase_main.c +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -98,7 +98,7 @@ struct rtase_counters { __le64 rx_broadcast; __le32 rx_multicast; __le16 tx_aborted; - __le16 tx_underun; + __le16 tx_underrun; } __packed; static void rtase_w8(const struct rtase_private *tp, u16 reg, u8 val8) @@ -1619,8 +1619,8 @@ static void rtase_dump_state(const struct net_device *dev) le32_to_cpu(counters->rx_multicast)); netdev_err(dev, "tx_aborted %d\n", le16_to_cpu(counters->tx_aborted)); - netdev_err(dev, "tx_underun %d\n", - le16_to_cpu(counters->tx_underun)); + netdev_err(dev, "tx_underrun %d\n", + le16_to_cpu(counters->tx_underrun)); } static void rtase_tx_timeout(struct net_device *dev, unsigned int txqueue) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 7d69302ffa0a..de131fc5fa0b 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -4302,3 +4302,130 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .sensor_event = efx_mcdi_sensor_event, .rx_recycle_ring_size = efx_ef10_recycle_ring_size, }; + +const struct efx_nic_type efx_x4_nic_type = { + .is_vf = false, + .mem_bar = efx_ef10_pf_mem_bar, + .mem_map_size = efx_ef10_mem_map_size, + .probe = efx_ef10_probe_pf, + .remove = efx_ef10_remove, + .dimension_resources = efx_ef10_dimension_resources, + .init = efx_ef10_init_nic, + .fini = efx_ef10_fini_nic, + .map_reset_reason = efx_ef10_map_reset_reason, + .map_reset_flags = efx_ef10_map_reset_flags, + .reset = efx_ef10_reset, + .probe_port = efx_mcdi_port_probe, + .remove_port = efx_mcdi_port_remove, + .fini_dmaq = efx_fini_dmaq, + .prepare_flr = efx_ef10_prepare_flr, + .finish_flr = efx_port_dummy_op_void, + .describe_stats = efx_ef10_describe_stats, + .update_stats = efx_ef10_update_stats_pf, + .start_stats = efx_mcdi_mac_start_stats, + .pull_stats = efx_mcdi_mac_pull_stats, + .stop_stats = efx_mcdi_mac_stop_stats, + .push_irq_moderation = efx_ef10_push_irq_moderation, + .reconfigure_mac = efx_ef10_mac_reconfigure, + .check_mac_fault = efx_mcdi_mac_check_fault, + .reconfigure_port = efx_mcdi_port_reconfigure, + .get_wol = efx_ef10_get_wol, + .set_wol = efx_ef10_set_wol, + .resume_wol = efx_port_dummy_op_void, + .get_fec_stats = efx_ef10_get_fec_stats, + .test_chip = efx_ef10_test_chip, + .test_nvram = efx_mcdi_nvram_test_all, + .mcdi_request = efx_ef10_mcdi_request, + .mcdi_poll_response = efx_ef10_mcdi_poll_response, + .mcdi_read_response = efx_ef10_mcdi_read_response, + .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, + .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, + 
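/*
 * Aside: sfc models each controller generation as a const efx_nic_type
 * ops table, which is why the new X4 entry below can reuse the
 * EF10/MCDI callbacks wholesale and change only identity data such as
 * the revision. A toy version of that pattern; all names here are
 * illustrative, not sfc code:
 */
struct nic_type {
	int	revision;
	int	(*probe)(void *nic);
	void	(*remove)(void *nic);
};

static int gen1_probe(void *nic)   { return 0; }
static void gen1_remove(void *nic) { }

/* New generation: same behaviour, new identity. */
static const struct nic_type gen2_nic_type = {
	.revision	= 2,
	.probe		= gen1_probe,	/* shared with the prior generation */
	.remove		= gen1_remove,
};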
.irq_enable_master = efx_port_dummy_op_void, + .irq_test_generate = efx_ef10_irq_test_generate, + .irq_disable_non_ev = efx_port_dummy_op_void, + .irq_handle_msi = efx_ef10_msi_interrupt, + .tx_probe = efx_ef10_tx_probe, + .tx_init = efx_ef10_tx_init, + .tx_write = efx_ef10_tx_write, + .tx_limit_len = efx_ef10_tx_limit_len, + .tx_enqueue = __efx_enqueue_skb, + .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config, + .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, + .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config, + .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config, + .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, + .rx_probe = efx_mcdi_rx_probe, + .rx_init = efx_mcdi_rx_init, + .rx_remove = efx_mcdi_rx_remove, + .rx_write = efx_ef10_rx_write, + .rx_defer_refill = efx_ef10_rx_defer_refill, + .rx_packet = __efx_rx_packet, + .ev_probe = efx_mcdi_ev_probe, + .ev_init = efx_ef10_ev_init, + .ev_fini = efx_mcdi_ev_fini, + .ev_remove = efx_mcdi_ev_remove, + .ev_process = efx_ef10_ev_process, + .ev_read_ack = efx_ef10_ev_read_ack, + .ev_test_generate = efx_ef10_ev_test_generate, + .filter_table_probe = efx_ef10_filter_table_probe, + .filter_table_restore = efx_mcdi_filter_table_restore, + .filter_table_remove = efx_ef10_filter_table_remove, + .filter_insert = efx_mcdi_filter_insert, + .filter_remove_safe = efx_mcdi_filter_remove_safe, + .filter_get_safe = efx_mcdi_filter_get_safe, + .filter_clear_rx = efx_mcdi_filter_clear_rx, + .filter_count_rx_used = efx_mcdi_filter_count_rx_used, + .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, +#endif +#ifdef CONFIG_SFC_MTD + .mtd_probe = efx_ef10_mtd_probe, + .mtd_rename = efx_mcdi_mtd_rename, + .mtd_read = efx_mcdi_mtd_read, + .mtd_erase = efx_mcdi_mtd_erase, + .mtd_write = efx_mcdi_mtd_write, + .mtd_sync = efx_mcdi_mtd_sync, +#endif + .ptp_write_host_time = efx_ef10_ptp_write_host_time, + .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, + .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, + .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, + .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, + .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, + .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, +#ifdef CONFIG_SFC_SRIOV + /* currently set to the VF versions of these functions + * because SRIOV will be reimplemented later. 
+ */ + .vswitching_probe = efx_ef10_vswitching_probe_vf, + .vswitching_restore = efx_ef10_vswitching_restore_vf, + .vswitching_remove = efx_ef10_vswitching_remove_vf, +#endif + .get_mac_address = efx_ef10_get_mac_address_pf, + .set_mac_address = efx_ef10_set_mac_address, + .tso_versions = efx_ef10_tso_versions, + + .get_phys_port_id = efx_ef10_get_phys_port_id, + .revision = EFX_REV_X4, + .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), + .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, + .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, + .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, + .can_rx_scatter = true, + .always_rx_scatter = true, + .option_descriptors = true, + .min_interrupt_mode = EFX_INT_MODE_MSIX, + .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, + .offload_features = EF10_OFFLOAD_FEATURES, + .mcdi_max_ver = 2, + .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, + .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | + 1 << HWTSTAMP_FILTER_ALL, + .check_caps = ef10_check_caps, + .print_additional_fwver = efx_ef10_print_additional_fwver, + .sensor_event = efx_mcdi_sensor_event, + .rx_recycle_ring_size = efx_ef10_recycle_ring_size, +}; + diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 6f1a01ded7d4..36b3b57e2055 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -821,6 +821,10 @@ static const struct pci_device_id efx_pci_table[] = { .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */ .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0c03), /* X4 PF (FF/LL) */ + .driver_data = (unsigned long)&efx_x4_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x2c03), /* X4 PF (FF only) */ + .driver_data = (unsigned long)&efx_x4_nic_type}, {0} /* end of list */ }; diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 848b1923133a..bb1930818beb 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -230,11 +230,6 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev, { struct efx_nic *efx = efx_netdev_priv(net_dev); - /* Software capabilities */ - ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE); - ts_info->phc_index = -1; - efx_ptp_get_ts_info(efx, ts_info); return 0; } diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 1db64fc6e909..9fa5c4c713ab 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -211,4 +211,6 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb, extern const struct efx_nic_type efx_hunt_a0_nic_type; extern const struct efx_nic_type efx_hunt_a0_vf_nic_type; +extern const struct efx_nic_type efx_x4_nic_type; + #endif /* EFX_NIC_H */ diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h index 466df5348b29..7ec4ac7b7ff5 100644 --- a/drivers/net/ethernet/sfc/nic_common.h +++ b/drivers/net/ethernet/sfc/nic_common.h @@ -21,6 +21,7 @@ enum { */ EFX_REV_HUNT_A0 = 4, EFX_REV_EF100 = 5, + EFX_REV_X4 = 6, }; static inline int efx_nic_rev(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c index 88ddc226b012..c5ad84db9613 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool.c +++ b/drivers/net/ethernet/sfc/siena/ethtool.c @@ -230,11 +230,6 @@ static int efx_ethtool_get_ts_info(struct net_device 
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index 88ddc226b012..c5ad84db9613 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -230,11 +230,6 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
-	/* Software capabilities */
-	ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
-				    SOF_TIMESTAMPING_SOFTWARE);
-	ts_info->phc_index = -1;
-
 	efx_siena_ptp_get_ts_info(efx, ts_info);
 	return 0;
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 31c387cc5f26..a1858f083eef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -58,10 +58,6 @@ static void dwmac4_core_init(struct mac_device_info *hw,
 	if (hw->pcs)
 		value |= GMAC_PCS_IRQ_DEFAULT;
 
-	/* Enable FPE interrupt */
-	if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
-		value |= GMAC_INT_FPE_EN;
-
 	writel(value, ioaddr + GMAC_INT_EN);
 
 	if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
@@ -1268,6 +1264,9 @@ const struct stmmac_ops dwmac410_ops = {
 	.fpe_configure = dwmac5_fpe_configure,
 	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
 	.fpe_irq_status = dwmac5_fpe_irq_status,
+	.fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
+	.fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
+	.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
@@ -1320,6 +1319,9 @@ const struct stmmac_ops dwmac510_ops = {
 	.fpe_configure = dwmac5_fpe_configure,
 	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
 	.fpe_irq_status = dwmac5_fpe_irq_status,
+	.fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
+	.fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
+	.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index e02cebc3f1b7..08add508db84 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -575,11 +575,11 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
 
 void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
 			  u32 num_txq, u32 num_rxq,
-			  bool enable)
+			  bool tx_enable, bool pmac_enable)
 {
 	u32 value;
 
-	if (enable) {
+	if (tx_enable) {
 		cfg->fpe_csr = EFPE;
 		value = readl(ioaddr + GMAC_RXQ_CTRL1);
 		value &= ~GMAC_RXQCTRL_FPRQ;
@@ -589,6 +589,21 @@ void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
 		cfg->fpe_csr = 0;
 	}
 	writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
+
+	value = readl(ioaddr + GMAC_INT_EN);
+
+	if (pmac_enable) {
+		if (!(value & GMAC_INT_FPE_EN)) {
+			/* Dummy read to clear any pending masked interrupts */
+			readl(ioaddr + MAC_FPE_CTRL_STS);
+
+			value |= GMAC_INT_FPE_EN;
+		}
+	} else {
+		value &= ~GMAC_INT_FPE_EN;
+	}
+
+	writel(value, ioaddr + GMAC_INT_EN);
 }
 
 int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
@@ -605,22 +620,22 @@
 
 	if (value & TRSP) {
 		status |= FPE_EVENT_TRSP;
-		netdev_info(dev, "FPE: Respond mPacket is transmitted\n");
+		netdev_dbg(dev, "FPE: Respond mPacket is transmitted\n");
 	}
 
 	if (value & TVER) {
 		status |= FPE_EVENT_TVER;
-		netdev_info(dev, "FPE: Verify mPacket is transmitted\n");
+		netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n");
 	}
 
 	if (value & RRSP) {
 		status |= FPE_EVENT_RRSP;
-		netdev_info(dev, "FPE: Respond mPacket is received\n");
+		netdev_dbg(dev, "FPE: Respond mPacket is received\n");
 	}
 
 	if (value & RVER) {
 		status |= FPE_EVENT_RVER;
-		netdev_info(dev, "FPE: Verify mPacket is received\n");
+		netdev_dbg(dev, "FPE: Verify mPacket is received\n");
 	}
 
 	return status;
@@ -638,3 +653,72 @@ void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
 
 	writel(value, ioaddr + MAC_FPE_CTRL_STS);
 }
+
+int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr)
+{
+	return FIELD_GET(DWMAC5_ADD_FRAG_SZ, readl(ioaddr + MTL_FPE_CTRL_STS));
+}
+
+void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size)
+{
+	u32 value;
+
+	value = readl(ioaddr + MTL_FPE_CTRL_STS);
+	writel(u32_replace_bits(value, add_frag_size, DWMAC5_ADD_FRAG_SZ),
+	       ioaddr + MTL_FPE_CTRL_STS);
+}
+
+#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping"
+#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]"
+
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+				    struct netlink_ext_ack *extack, u32 pclass)
+{
+	u32 val, offset, count, queue_weight, preemptible_txqs = 0;
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	u32 num_tc = ndev->num_tc;
+
+	if (!pclass)
+		goto update_mapping;
+
+	/* DWMAC CORE4+ can not program TC:TXQ mapping to hardware.
+	 *
+	 * Synopsys Databook:
+	 * "The number of Tx DMA channels is equal to the number of Tx queues,
+	 * and is direct one-to-one mapping."
+	 */
+	for (u32 tc = 0; tc < num_tc; tc++) {
+		count = ndev->tc_to_txq[tc].count;
+		offset = ndev->tc_to_txq[tc].offset;
+
+		if (pclass & BIT(tc))
+			preemptible_txqs |= GENMASK(offset + count - 1, offset);
+
+		/* This is 1:1 mapping, go to next TC */
+		if (count == 1)
+			continue;
+
+		if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) {
+			NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG);
+			return -EINVAL;
+		}
+
+		queue_weight = priv->plat->tx_queues_cfg[offset].weight;
+
+		for (u32 i = 1; i < count; i++) {
+			if (priv->plat->tx_queues_cfg[offset + i].weight !=
+			    queue_weight) {
+				NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG,
						       queue_weight, tc);
+				return -EINVAL;
+			}
+		}
+	}
+
+update_mapping:
+	val = readl(priv->ioaddr + MTL_FPE_CTRL_STS);
+	writel(u32_replace_bits(val, preemptible_txqs, DWMAC5_PREEMPTION_CLASS),
+	       priv->ioaddr + MTL_FPE_CTRL_STS);
+
+	return 0;
+}
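The two new accessors treat MTL_FPE_CTRL_STS as an ordinary bitfield register via the <linux/bitfield.h> helpers. A worked example of the round trip they implement (register value invented for illustration): with a raw value of 0x302, DWMAC5_ADD_FRAG_SZ (bits 1:0) reads back 2, which ethtool_mm_frag_size_add_to_min() later reports as a 192-byte minimum fragment.

/* Illustration only: compose the two helpers above on a made-up value. */
static u32 example_frag_size_round_trip(u32 reg)
{
	u32 add_frag_size = FIELD_GET(DWMAC5_ADD_FRAG_SZ, reg);	/* 2 */

	(void)add_frag_size;
	/* write back a new encoding of 3 without disturbing other fields */
	return u32_replace_bits(reg, 3, DWMAC5_ADD_FRAG_SZ);	/* 0x303 */
}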
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index bf33a51d229e..6c6eb6790e83 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -39,6 +39,12 @@
 #define MAC_PPSx_INTERVAL(x)	(0x00000b88 + ((x) * 0x10))
 #define MAC_PPSx_WIDTH(x)	(0x00000b8c + ((x) * 0x10))
 
+#define MTL_FPE_CTRL_STS	0x00000c90
+/* Preemption Classification */
+#define DWMAC5_PREEMPTION_CLASS	GENMASK(15, 8)
+/* Additional Fragment Size of preempted frames */
+#define DWMAC5_ADD_FRAG_SZ	GENMASK(1, 0)
+
 #define MTL_RXP_CONTROL_STATUS	0x00000ca0
 #define RXPI	BIT(31)
 #define NPE	GENMASK(23, 16)
@@ -104,10 +110,14 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
 			   u32 sub_second_inc, u32 systime_flags);
 void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
 			  u32 num_txq, u32 num_rxq,
-			  bool enable);
+			  bool tx_enable, bool pmac_enable);
 void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
 			     struct stmmac_fpe_cfg *cfg,
 			     enum stmmac_mpacket_type type);
 int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
+int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr);
+void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size);
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+				    struct netlink_ext_ack *extack, u32 pclass);
 
 #endif /* __DWMAC5_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index cbf2dd976ab1..f519d43738b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -1504,13 +1504,14 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
 	writel(value, ioaddr + XGMAC_RX_CONFIG);
 }
 
-static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
-				   u32 num_txq,
-				   u32 num_rxq, bool enable)
+static void dwxgmac3_fpe_configure(void __iomem *ioaddr,
+				   struct stmmac_fpe_cfg *cfg,
+				   u32 num_txq, u32 num_rxq,
+				   bool tx_enable, bool pmac_enable)
 {
 	u32 value;
 
-	if (!enable) {
+	if (!tx_enable) {
 		value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
 
 		value &= ~XGMAC_EFPE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 29367105df54..88cce28b2f98 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -171,7 +171,7 @@ static const struct stmmac_hwif_entry {
 		.mac = &dwmac4_ops,
 		.hwtimestamp = &stmmac_ptp,
 		.mode = NULL,
-		.tc = &dwmac510_tc_ops,
+		.tc = &dwmac4_tc_ops,
 		.mmc = &dwmac_mmc_ops,
 		.est = &dwmac510_est_ops,
 		.setup = dwmac4_setup,
@@ -252,7 +252,7 @@ static const struct stmmac_hwif_entry {
 		.mac = &dwxgmac210_ops,
 		.hwtimestamp = &stmmac_ptp,
 		.mode = NULL,
-		.tc = &dwmac510_tc_ops,
+		.tc = &dwxgmac_tc_ops,
 		.mmc = &dwxgmac_mmc_ops,
 		.est = &dwmac510_est_ops,
 		.setup = dwxgmac2_setup,
@@ -273,7 +273,7 @@ static const struct stmmac_hwif_entry {
 		.mac = &dwxlgmac2_ops,
 		.hwtimestamp = &stmmac_ptp,
 		.mode = NULL,
-		.tc = &dwmac510_tc_ops,
+		.tc = &dwxgmac_tc_ops,
 		.mmc = &dwxgmac_mmc_ops,
 		.est = &dwmac510_est_ops,
 		.setup = dwxlgmac2_setup,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 7e90f34b8c88..d5a9f01ecac5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -7,6 +7,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/stmmac.h>
+#include <net/pkt_cls.h>
 
 #define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
 ({ \
@@ -28,6 +29,8 @@ struct stmmac_extra_stats;
 struct stmmac_priv;
 struct stmmac_safety_stats;
+struct stmmac_fpe_cfg;
+enum stmmac_mpacket_type;
 struct dma_desc;
 struct dma_extended_desc;
 struct dma_edesc;
@@ -419,11 +422,16 @@ struct stmmac_ops {
 	void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
 	void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
 			      u32 num_txq, u32 num_rxq,
-			      bool enable);
+			      bool tx_enable, bool pmac_enable);
 	void (*fpe_send_mpacket)(void __iomem *ioaddr,
 				 struct stmmac_fpe_cfg *cfg,
 				 enum stmmac_mpacket_type type);
 	int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
+	int (*fpe_get_add_frag_size)(const void __iomem *ioaddr);
+	void (*fpe_set_add_frag_size)(void __iomem *ioaddr, u32 add_frag_size);
+	int (*fpe_map_preemption_class)(struct net_device *ndev,
+					struct netlink_ext_ack *extack,
+					u32 pclass);
 };
 
 #define stmmac_core_init(__priv, __args...) \
@@ -528,6 +536,12 @@ struct stmmac_ops {
 	stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args)
 #define stmmac_fpe_irq_status(__priv, __args...) \
 	stmmac_do_callback(__priv, mac, fpe_irq_status, __args)
+#define stmmac_fpe_get_add_frag_size(__priv, __args...) \
+	stmmac_do_callback(__priv, mac, fpe_get_add_frag_size, __args)
+#define stmmac_fpe_set_add_frag_size(__priv, __args...) \
+	stmmac_do_void_callback(__priv, mac, fpe_set_add_frag_size, __args)
+#define stmmac_fpe_map_preemption_class(__priv, __args...) \
+	stmmac_do_void_callback(__priv, mac, fpe_map_preemption_class, __args)
 
 /* PTP and HW Timer helpers */
 struct stmmac_hwtimestamp {
@@ -615,6 +629,8 @@ struct stmmac_tc_ops {
 			 struct tc_etf_qopt_offload *qopt);
 	int (*query_caps)(struct stmmac_priv *priv,
 			  struct tc_query_caps_base *base);
+	int (*setup_mqprio)(struct stmmac_priv *priv,
+			    struct tc_mqprio_qopt_offload *qopt);
 };
 
 #define stmmac_tc_init(__priv, __args...) \
@@ -631,6 +647,8 @@ struct stmmac_tc_ops {
 	stmmac_do_callback(__priv, tc, setup_etf, __args)
 #define stmmac_tc_query_caps(__priv, __args...) \
 	stmmac_do_callback(__priv, tc, query_caps, __args)
+#define stmmac_tc_setup_mqprio(__priv, __args...) \
+	stmmac_do_callback(__priv, tc, setup_mqprio, __args)
 
 struct stmmac_counters;
@@ -674,7 +692,9 @@ extern const struct stmmac_dma_ops dwmac4_dma_ops;
 extern const struct stmmac_ops dwmac410_ops;
 extern const struct stmmac_dma_ops dwmac410_dma_ops;
 extern const struct stmmac_ops dwmac510_ops;
+extern const struct stmmac_tc_ops dwmac4_tc_ops;
 extern const struct stmmac_tc_ops dwmac510_tc_ops;
+extern const struct stmmac_tc_ops dwxgmac_tc_ops;
 extern const struct stmmac_ops dwxgmac210_ops;
 extern const struct stmmac_ops dwxlgmac2_ops;
 extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
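The new stmmac_fpe_* and stmmac_tc_setup_mqprio wrappers follow the existing hwif dispatch convention: each expands to a guarded indirect call, so a core whose ops table leaves the hook unset gets a clean error instead of a NULL dereference. A rough sketch of the pattern (simplified from the hwif.h macros quoted above, not the verbatim expansion):

/* Simplified shape of the dispatch pattern, for orientation only. */
#define example_do_callback(__priv, __module, __cname, __args...)	\
({									\
	int __result = -EINVAL;						\
	if ((__priv)->hw->__module && (__priv)->hw->__module->__cname)	\
		__result = (__priv)->hw->__module->__cname(__args);	\
	__result;							\
})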
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b23b920eedb1..ea135203ff2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -146,6 +146,32 @@ struct stmmac_channel {
 	u32 index;
 };
 
+/* FPE link-partner hand-shaking mPacket type */
+enum stmmac_mpacket_type {
+	MPACKET_VERIFY = 0,
+	MPACKET_RESPONSE = 1,
+};
+
+#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES	3
+#define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS	128
+
+struct stmmac_fpe_cfg {
+	/* Serialize access to MAC Merge state between ethtool requests
+	 * and link state updates.
+	 */
+	spinlock_t lock;
+
+	u32 fpe_csr;	/* MAC_FPE_CTRL_STS reg cache */
+
+	enum ethtool_mm_verify_status status;
+	struct timer_list verify_timer;
+	bool verify_enabled;
+	int verify_retries;
+	bool pmac_enabled;
+	u32 verify_time;
+	bool tx_enabled;
+};
+
 struct stmmac_tc_entry {
 	bool in_use;
 	bool in_hw;
@@ -339,11 +365,8 @@ struct stmmac_priv {
 	struct workqueue_struct *wq;
 	struct work_struct service_task;
 
-	/* Workqueue for handling FPE hand-shaking */
-	unsigned long fpe_task_state;
-	struct workqueue_struct *fpe_wq;
-	struct work_struct fpe_task;
-	char wq_name[IFNAMSIZ + 4];
+	/* Frame Preemption feature (FPE) */
+	struct stmmac_fpe_cfg fpe_cfg;
 
 	/* TC Handling */
 	unsigned int tc_entries_max;
@@ -397,7 +420,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv);
 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
-void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable);
+void stmmac_fpe_apply(struct stmmac_priv *priv);
 
 static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
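struct stmmac_fpe_cfg replaces the old lo_state/lp_state handshake bookkeeping with the standard ethtool verification states. The "tx_active" answer that stmmac_get_mm() derives from these fields in the ethtool hunk below reduces to one predicate, restated here for clarity (helper name invented; the logic mirrors the patch):

static bool example_mm_tx_active(const struct stmmac_fpe_cfg *cfg)
{
	/* preemption is actually running only if TX is enabled and
	 * verification either succeeded or was deliberately disabled
	 */
	return cfg->tx_enabled &&
	       (cfg->status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
		cfg->status == ETHTOOL_MM_VERIFY_STATUS_DISABLED);
}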
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 220c582904f4..2a37592a6281 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -19,6 +19,7 @@
 #include "stmmac.h"
 #include "dwmac_dma.h"
 #include "dwxgmac2.h"
+#include "dwmac5.h"
 
 #define REG_SPACE_SIZE	0x1060
 #define GMAC4_REG_SPACE_SIZE	0x116C
@@ -1200,13 +1201,13 @@ static int stmmac_get_ts_info(struct net_device *dev,
 
 		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
 					SOF_TIMESTAMPING_TX_HARDWARE |
-					SOF_TIMESTAMPING_RX_SOFTWARE |
 					SOF_TIMESTAMPING_RX_HARDWARE |
-					SOF_TIMESTAMPING_SOFTWARE |
 					SOF_TIMESTAMPING_RAW_HARDWARE;
 
 		if (priv->ptp_clock)
 			info->phc_index = ptp_clock_index(priv->ptp_clock);
+		else
+			info->phc_index = 0;
 
 		info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
 
@@ -1263,6 +1264,98 @@ static int stmmac_set_tunable(struct net_device *dev,
 	return ret;
 }
 
+static int stmmac_get_mm(struct net_device *ndev,
+			 struct ethtool_mm_state *state)
+{
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	unsigned long flags;
+	u32 frag_size;
+
+	if (!priv->dma_cap.fpesel)
+		return -EOPNOTSUPP;
+
+	spin_lock_irqsave(&priv->fpe_cfg.lock, flags);
+
+	state->max_verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
+	state->verify_enabled = priv->fpe_cfg.verify_enabled;
+	state->pmac_enabled = priv->fpe_cfg.pmac_enabled;
+	state->verify_time = priv->fpe_cfg.verify_time;
+	state->tx_enabled = priv->fpe_cfg.tx_enabled;
+	state->verify_status = priv->fpe_cfg.status;
+	state->rx_min_frag_size = ETH_ZLEN;
+
+	/* FPE active if common tx_enabled and
+	 * (verification success or disabled(forced))
+	 */
+	if (state->tx_enabled &&
+	    (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
+	     state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED))
+		state->tx_active = true;
+	else
+		state->tx_active = false;
+
+	frag_size = stmmac_fpe_get_add_frag_size(priv, priv->ioaddr);
+	state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size);
+
+	spin_unlock_irqrestore(&priv->fpe_cfg.lock, flags);
+
+	return 0;
+}
+
+static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+			 struct netlink_ext_ack *extack)
+{
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+	unsigned long flags;
+	u32 frag_size;
+	int err;
+
+	err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+					      &frag_size, extack);
+	if (err)
+		return err;
+
+	/* Wait for the verification that's currently in progress to finish */
+	timer_shutdown_sync(&fpe_cfg->verify_timer);
+
+	spin_lock_irqsave(&fpe_cfg->lock, flags);
+
+	fpe_cfg->verify_enabled = cfg->verify_enabled;
+	fpe_cfg->pmac_enabled = cfg->pmac_enabled;
+	fpe_cfg->verify_time = cfg->verify_time;
+	fpe_cfg->tx_enabled = cfg->tx_enabled;
+
+	if (!cfg->verify_enabled)
+		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+
+	stmmac_fpe_set_add_frag_size(priv, priv->ioaddr, frag_size);
+	stmmac_fpe_apply(priv);
+
+	spin_unlock_irqrestore(&fpe_cfg->lock, flags);
+
+	return 0;
+}
+
+static void stmmac_get_mm_stats(struct net_device *ndev,
+				struct ethtool_mm_stats *s)
+{
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	struct stmmac_counters *mmc = &priv->mmc;
+
+	if (!priv->dma_cap.rmon)
+		return;
+
+	stmmac_mmc_read(priv, priv->mmcaddr, mmc);
+
+	s->MACMergeFrameAssErrorCount = mmc->mmc_rx_packet_assembly_err_cntr;
+	s->MACMergeFrameAssOkCount = mmc->mmc_rx_packet_assembly_ok_cntr;
+	s->MACMergeFrameSmdErrorCount = mmc->mmc_rx_packet_smd_err_cntr;
+	s->MACMergeFragCountRx = mmc->mmc_rx_fpe_fragment_cntr;
+	s->MACMergeFragCountTx = mmc->mmc_tx_fpe_fragment_cntr;
+	s->MACMergeHoldCount = mmc->mmc_tx_hold_req_cntr;
+}
+
 static const struct ethtool_ops stmmac_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
 				     ETHTOOL_COALESCE_MAX_FRAMES,
@@ -1301,6 +1394,9 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
 	.set_tunable = stmmac_set_tunable,
 	.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
 	.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
+	.get_mm = stmmac_get_mm,
+	.set_mm = stmmac_set_mm,
+	.get_mm_stats = stmmac_get_mm_stats,
 };
 
 void stmmac_set_ethtool_ops(struct net_device *netdev)
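The conversion pair used in stmmac_set_mm()/stmmac_get_mm() works in 64-byte steps: additional-fragment encoding n corresponds to a minimum non-final fragment of (n + 1) * 64 bytes. A self-contained sketch of that mapping (the real ethtool-core helpers also attach extack error messages):

static inline u32 example_frag_add_to_min(u32 add_frag_size)
{
	return (add_frag_size + 1) * 64;	/* 0..3 -> 64..256 bytes */
}

static inline int example_frag_min_to_add(u32 min_frag_size, u32 *add_frag)
{
	/* only multiples of 64 in [64, 256] are representable */
	if (min_frag_size < 64 || min_frag_size > 256 || min_frag_size % 64)
		return -EINVAL;

	*add_frag = min_frag_size / 64 - 1;
	return 0;
}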
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d9fca8d1227c..d3895d7eecfc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -968,18 +968,31 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
 
 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
 {
-	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
-	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
-	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
-	bool *hs_enable = &fpe_cfg->hs_enable;
+	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+	unsigned long flags;
 
-	if (is_up && *hs_enable) {
-		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
-					MPACKET_VERIFY);
+	timer_shutdown_sync(&fpe_cfg->verify_timer);
+
+	spin_lock_irqsave(&fpe_cfg->lock, flags);
+
+	if (is_up && fpe_cfg->pmac_enabled) {
+		/* VERIFY process requires pmac enabled when NIC comes up */
+		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+				     priv->plat->tx_queues_to_use,
+				     priv->plat->rx_queues_to_use,
+				     false, true);
+
+		/* New link => maybe new partner => new verification process */
+		stmmac_fpe_apply(priv);
 	} else {
-		*lo_state = FPE_STATE_OFF;
-		*lp_state = FPE_STATE_OFF;
+		/* No link => turn off EFPE */
+		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+				     priv->plat->tx_queues_to_use,
+				     priv->plat->rx_queues_to_use,
+				     false, false);
 	}
+
+	spin_unlock_irqrestore(&fpe_cfg->lock, flags);
 }
 
 static void stmmac_mac_link_down(struct phylink_config *config,
@@ -3358,27 +3371,6 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
 	}
 }
 
-static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
-{
-	char *name;
-
-	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
-	clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
-
-	name = priv->wq_name;
-	sprintf(name, "%s-fpe", priv->dev->name);
-
-	priv->fpe_wq = create_singlethread_workqueue(name);
-	if (!priv->fpe_wq) {
-		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
-
-		return -ENOMEM;
-	}
-	netdev_info(priv->dev, "FPE workqueue start");
-
-	return 0;
-}
-
 /**
 * stmmac_hw_setup - setup mac in a usable state.
 * @dev : pointer to the device structure.
@@ -3533,13 +3525,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
 
 	stmmac_set_hw_vlan_mode(priv, priv->hw);
 
-	if (priv->dma_cap.fpesel) {
-		stmmac_fpe_start_wq(priv);
-
-		if (priv->plat->fpe_cfg->enable)
-			stmmac_fpe_handshake(priv, true);
-	}
-
 	return 0;
 }
@@ -4036,18 +4021,6 @@ static int stmmac_open(struct net_device *dev)
 	return ret;
 }
 
-static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
-{
-	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
-
-	if (priv->fpe_wq) {
-		destroy_workqueue(priv->fpe_wq);
-		priv->fpe_wq = NULL;
-	}
-
-	netdev_info(priv->dev, "FPE workqueue stop");
-}
-
 /**
 * stmmac_release - close entry point of the driver
 * @dev : device pointer.
@@ -4095,10 +4068,10 @@ static int stmmac_release(struct net_device *dev)
 
 	stmmac_release_ptp(priv);
 
-	pm_runtime_put(priv->device);
-
 	if (priv->dma_cap.fpesel)
-		stmmac_fpe_stop_wq(priv);
+		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
+
+	pm_runtime_put(priv->device);
 
 	return 0;
 }
@@ -5982,45 +5955,31 @@ static int stmmac_set_features(struct net_device *netdev,
 
 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
 {
-	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
-	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
-	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
-	bool *hs_enable = &fpe_cfg->hs_enable;
-
-	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
-		return;
+	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
 
-	/* If LP has sent verify mPacket, LP is FPE capable */
-	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
-		if (*lp_state < FPE_STATE_CAPABLE)
-			*lp_state = FPE_STATE_CAPABLE;
+	/* This is interrupt context, just spin_lock() */
+	spin_lock(&fpe_cfg->lock);
 
-		/* If user has requested FPE enable, quickly response */
-		if (*hs_enable)
-			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
-						fpe_cfg,
-						MPACKET_RESPONSE);
-	}
+	if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
+		goto unlock_out;
 
-	/* If Local has sent verify mPacket, Local is FPE capable */
-	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
-		if (*lo_state < FPE_STATE_CAPABLE)
-			*lo_state = FPE_STATE_CAPABLE;
-	}
+	/* LP has sent verify mPacket */
+	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
+		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
+					MPACKET_RESPONSE);
 
-	/* If LP has sent response mPacket, LP is entering FPE ON */
-	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
-		*lp_state = FPE_STATE_ENTERING_ON;
+	/* Local has sent verify mPacket */
+	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
+	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
+		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
 
-	/* If Local has sent response mPacket, Local is entering FPE ON */
-	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
-		*lo_state = FPE_STATE_ENTERING_ON;
+	/* LP has sent response mPacket */
+	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
+	    fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
+		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
 
-	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
-	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
-	    priv->fpe_wq) {
-		queue_work(priv->fpe_wq, &priv->fpe_task);
-	}
+unlock_out:
+	spin_unlock(&fpe_cfg->lock);
 }
 
 static void stmmac_common_interrupt(struct stmmac_priv *priv)
@@ -6257,6 +6216,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
 	switch (type) {
 	case TC_QUERY_CAPS:
 		return stmmac_tc_query_caps(priv, priv, type_data);
+	case TC_SETUP_QDISC_MQPRIO:
+		return stmmac_tc_setup_mqprio(priv, priv, type_data);
 	case TC_SETUP_BLOCK:
 		return flow_block_cb_setup_simple(type_data,
 						  &stmmac_block_cb_list,
@@ -7376,68 +7337,87 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
 	return ret;
 }
 
-#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
-static void stmmac_fpe_lp_task(struct work_struct *work)
+/**
+ * stmmac_fpe_verify_timer - Timer for MAC Merge verification
+ * @t: timer_list struct containing private info
+ *
+ * Verify the MAC Merge capability in the local TX direction, by
+ * transmitting Verify mPackets up to 3 times. Wait until link
+ * partner responds with a Response mPacket, otherwise fail.
+ */
+static void stmmac_fpe_verify_timer(struct timer_list *t)
 {
-	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
-						fpe_task);
-	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
-	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
-	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
-	bool *hs_enable = &fpe_cfg->hs_enable;
-	bool *enable = &fpe_cfg->enable;
-	int retries = 20;
-
-	while (retries-- > 0) {
-		/* Bail out immediately if FPE handshake is OFF */
-		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
-			break;
-
-		if (*lo_state == FPE_STATE_ENTERING_ON &&
-		    *lp_state == FPE_STATE_ENTERING_ON) {
-			stmmac_fpe_configure(priv, priv->ioaddr,
-					     fpe_cfg,
-					     priv->plat->tx_queues_to_use,
-					     priv->plat->rx_queues_to_use,
-					     *enable);
-
-			netdev_info(priv->dev, "configured FPE\n");
+	struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
+	struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
+						fpe_cfg);
+	unsigned long flags;
+	bool rearm = false;
 
-			*lo_state = FPE_STATE_ON;
-			*lp_state = FPE_STATE_ON;
-			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
-			break;
-		}
+	spin_lock_irqsave(&fpe_cfg->lock, flags);
 
-		if ((*lo_state == FPE_STATE_CAPABLE ||
-		     *lo_state == FPE_STATE_ENTERING_ON) &&
-		     *lp_state != FPE_STATE_ON) {
-			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
-				    *lo_state, *lp_state);
+	switch (fpe_cfg->status) {
+	case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
+	case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
+		if (fpe_cfg->verify_retries != 0) {
 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
-						fpe_cfg,
-						MPACKET_VERIFY);
+						fpe_cfg, MPACKET_VERIFY);
+			rearm = true;
+		} else {
+			fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
 		}
-		/* Sleep then retry */
-		msleep(500);
+
+		fpe_cfg->verify_retries--;
+		break;
+
+	case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
+		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+				     priv->plat->tx_queues_to_use,
+				     priv->plat->rx_queues_to_use,
+				     true, true);
+		break;
+
+	default:
+		break;
 	}
 
-	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+	if (rearm) {
+		mod_timer(&fpe_cfg->verify_timer,
+			  jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
+	}
+
+	spin_unlock_irqrestore(&fpe_cfg->lock, flags);
 }
 
-void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
+static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
 {
-	if (priv->plat->fpe_cfg->hs_enable != enable) {
-		if (enable) {
-			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
-						priv->plat->fpe_cfg,
-						MPACKET_VERIFY);
-		} else {
-			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
-			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
-		}
+	if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
+	    fpe_cfg->verify_enabled &&
+	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
+	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
+		timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
+		mod_timer(&fpe_cfg->verify_timer, jiffies);
+	}
+}
+
+void stmmac_fpe_apply(struct stmmac_priv *priv)
+{
+	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+
+	/* If verification is disabled, configure FPE right away.
+	 * Otherwise let the timer code do it.
+	 */
+	if (!fpe_cfg->verify_enabled) {
+		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+				     priv->plat->tx_queues_to_use,
+				     priv->plat->rx_queues_to_use,
+				     fpe_cfg->tx_enabled,
+				     fpe_cfg->pmac_enabled);
+	} else {
+		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
+		fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
 
-		priv->plat->fpe_cfg->hs_enable = enable;
+		if (netif_running(priv->dev))
+			stmmac_fpe_verify_timer_arm(fpe_cfg);
 	}
 }
@@ -7555,9 +7535,6 @@ int stmmac_dvr_probe(struct device *device,
 
 	INIT_WORK(&priv->service_task, stmmac_service_task);
 
-	/* Initialize Link Partner FPE workqueue */
-	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
-
 	/* Override with kernel parameters if supplied XXX CRS XXX
 	 * this needs to have multiple instances
 	 */
@@ -7722,6 +7699,12 @@ int stmmac_dvr_probe(struct device *device,
 
 	mutex_init(&priv->lock);
 
+	priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
+	priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
+	priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+	timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
+	spin_lock_init(&priv->fpe_cfg.lock);
+
 	/* If a specific clk_csr value is passed from the platform
 	 * this means that the CSR Clock Range selection cannot be
 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
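Taken together, the probe, set_mm, link-state and suspend hunks give verify_timer a strict lifecycle: timer_setup() plus state defaults at probe, re-arming through stmmac_fpe_apply() on configuration or link-up, and timer_shutdown_sync() on release and suspend so no callback can re-arm a dying timer. The arming rule is worth restating (hypothetical helper; the same condition appears inline in stmmac_fpe_verify_timer_arm() above):

static bool example_should_arm_verify(const struct stmmac_fpe_cfg *cfg)
{
	/* run the timer only while verification can still make progress */
	return cfg->pmac_enabled && cfg->tx_enabled && cfg->verify_enabled &&
	       cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
	       cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
}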
@@ -7895,16 +7878,8 @@ int stmmac_suspend(struct device *dev)
 	}
 	rtnl_unlock();
 
-	if (priv->dma_cap.fpesel) {
-		/* Disable FPE */
-		stmmac_fpe_configure(priv, priv->ioaddr,
-				     priv->plat->fpe_cfg,
-				     priv->plat->tx_queues_to_use,
-				     priv->plat->rx_queues_to_use, false);
-
-		stmmac_fpe_handshake(priv, false);
-		stmmac_fpe_stop_wq(priv);
-	}
+	if (priv->dma_cap.fpesel)
+		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
 
 	priv->speed = SPEED_UNKNOWN;
 	return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 996f2bcd07a2..832998bc020b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -282,16 +282,6 @@ static int tc_init(struct stmmac_priv *priv)
 	if (ret)
 		return -ENOMEM;
 
-	if (!priv->plat->fpe_cfg) {
-		priv->plat->fpe_cfg = devm_kzalloc(priv->device,
-						   sizeof(*priv->plat->fpe_cfg),
-						   GFP_KERNEL);
-		if (!priv->plat->fpe_cfg)
-			return -ENOMEM;
-	} else {
-		memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
-	}
-
 	/* Fail silently as we can still use remaining features, e.g. CBS */
 	if (!dma_cap->frpsel)
 		return 0;
@@ -941,9 +931,9 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
 			       struct tc_taprio_qopt_offload *qopt)
 {
 	u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
+	struct netlink_ext_ack *extack = qopt->mqprio.extack;
 	struct timespec64 time, current_time, qopt_time;
 	ktime_t current_time_ns;
-	bool fpe = false;
 	int i, ret = 0;
 	u64 ctr;
 
@@ -1028,16 +1018,12 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
 
 		switch (qopt->entries[i].command) {
 		case TC_TAPRIO_CMD_SET_GATES:
-			if (fpe)
-				return -EINVAL;
 			break;
 		case TC_TAPRIO_CMD_SET_AND_HOLD:
 			gates |= BIT(0);
-			fpe = true;
 			break;
 		case TC_TAPRIO_CMD_SET_AND_RELEASE:
 			gates &= ~BIT(0);
-			fpe = true;
 			break;
 		default:
 			return -EOPNOTSUPP;
@@ -1068,16 +1054,6 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
 
 	tc_taprio_map_maxsdu_txq(priv, qopt);
 
-	if (fpe && !priv->dma_cap.fpesel) {
-		mutex_unlock(&priv->est_lock);
-		return -EOPNOTSUPP;
-	}
-
-	/* Actual FPE register configuration will be done after FPE handshake
-	 * is success.
-	 */
-	priv->plat->fpe_cfg->enable = fpe;
-
 	ret = stmmac_est_configure(priv, priv, priv->est,
 				   priv->plat->clk_ptp_rate);
 	mutex_unlock(&priv->est_lock);
@@ -1086,12 +1062,10 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
 		goto disable;
 	}
 
-	netdev_info(priv->dev, "configured EST\n");
-
-	if (fpe) {
-		stmmac_fpe_handshake(priv, true);
-		netdev_info(priv->dev, "start FPE handshake\n");
-	}
+	ret = stmmac_fpe_map_preemption_class(priv, priv->dev, extack,
+					      qopt->mqprio.preemptible_tcs);
+	if (ret)
+		goto disable;
 
 	return 0;
 
@@ -1109,16 +1083,7 @@ disable:
 		mutex_unlock(&priv->est_lock);
 	}
 
-	priv->plat->fpe_cfg->enable = false;
-	stmmac_fpe_configure(priv, priv->ioaddr,
-			     priv->plat->fpe_cfg,
-			     priv->plat->tx_queues_to_use,
-			     priv->plat->rx_queues_to_use,
-			     false);
-	netdev_info(priv->dev, "disabled FPE\n");
-
-	stmmac_fpe_handshake(priv, false);
-	netdev_info(priv->dev, "stop FPE handshake\n");
+	stmmac_fpe_map_preemption_class(priv, priv->dev, extack, 0);
 
 	return ret;
 }
@@ -1174,6 +1139,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
 	return err;
 }
 
+static int tc_setup_taprio_without_fpe(struct stmmac_priv *priv,
+				       struct tc_taprio_qopt_offload *qopt)
+{
+	if (!qopt->mqprio.preemptible_tcs)
+		return tc_setup_taprio(priv, qopt);
+
+	NL_SET_ERR_MSG_MOD(qopt->mqprio.extack,
+			   "taprio with FPE is not implemented for this MAC");
+
+	return -EOPNOTSUPP;
+}
+
 static int tc_setup_etf(struct stmmac_priv *priv,
 			struct tc_etf_qopt_offload *qopt)
 {
@@ -1198,6 +1175,13 @@ static int tc_query_caps(struct stmmac_priv *priv,
 			 struct tc_query_caps_base *base)
 {
 	switch (base->type) {
+	case TC_SETUP_QDISC_MQPRIO: {
+		struct tc_mqprio_caps *caps = base->caps;
+
+		caps->validate_queue_counts = true;
+
+		return 0;
+	}
 	case TC_SETUP_QDISC_TAPRIO: {
 		struct tc_taprio_caps *caps = base->caps;
 
@@ -1214,6 +1198,81 @@ static int tc_query_caps(struct stmmac_priv *priv,
 	}
 }
 
+static void stmmac_reset_tc_mqprio(struct net_device *ndev,
+				   struct netlink_ext_ack *extack)
+{
+	struct stmmac_priv *priv = netdev_priv(ndev);
+
+	netdev_reset_tc(ndev);
+	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
+	stmmac_fpe_map_preemption_class(priv, ndev, extack, 0);
+}
+
+static int tc_setup_dwmac510_mqprio(struct stmmac_priv *priv,
+				    struct tc_mqprio_qopt_offload *mqprio)
+{
+	struct netlink_ext_ack *extack = mqprio->extack;
+	struct tc_mqprio_qopt *qopt = &mqprio->qopt;
+	u32 offset, count, num_stack_tx_queues = 0;
+	struct net_device *ndev = priv->dev;
+	u32 num_tc = qopt->num_tc;
+	int err;
+
+	if (!num_tc) {
+		stmmac_reset_tc_mqprio(ndev, extack);
+		return 0;
+	}
+
+	err = netdev_set_num_tc(ndev, num_tc);
+	if (err)
+		return err;
+
+	for (u32 tc = 0; tc < num_tc; tc++) {
+		offset = qopt->offset[tc];
+		count = qopt->count[tc];
+		num_stack_tx_queues += count;
+
+		err = netdev_set_tc_queue(ndev, tc, count, offset);
+		if (err)
+			goto err_reset_tc;
+	}
+
+	err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+	if (err)
+		goto err_reset_tc;
+
+	err = stmmac_fpe_map_preemption_class(priv, ndev, extack,
+					      mqprio->preemptible_tcs);
+	if (err)
+		goto err_reset_tc;
+
+	return 0;
+
+err_reset_tc:
+	stmmac_reset_tc_mqprio(ndev, extack);
+
+	return err;
+}
+
+static int tc_setup_mqprio_unimplemented(struct stmmac_priv *priv,
+					 struct tc_mqprio_qopt_offload *mqprio)
+{
+	NL_SET_ERR_MSG_MOD(mqprio->extack,
+			   "mqprio HW offload is not implemented for this MAC");
+	return -EOPNOTSUPP;
+}
+
+const struct stmmac_tc_ops dwmac4_tc_ops = {
+	.init = tc_init,
+	.setup_cls_u32 = tc_setup_cls_u32,
+	.setup_cbs = tc_setup_cbs,
+	.setup_cls = tc_setup_cls,
+	.setup_taprio = tc_setup_taprio_without_fpe,
+	.setup_etf = tc_setup_etf,
+	.query_caps = tc_query_caps,
+	.setup_mqprio = tc_setup_mqprio_unimplemented,
+};
+
 const struct stmmac_tc_ops dwmac510_tc_ops = {
 	.init = tc_init,
 	.setup_cls_u32 = tc_setup_cls_u32,
@@ -1222,4 +1281,16 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
 	.setup_taprio = tc_setup_taprio,
 	.setup_etf = tc_setup_etf,
 	.query_caps = tc_query_caps,
+	.setup_mqprio = tc_setup_dwmac510_mqprio,
+};
+
+const struct stmmac_tc_ops dwxgmac_tc_ops = {
+	.init = tc_init,
+	.setup_cls_u32 = tc_setup_cls_u32,
+	.setup_cbs = tc_setup_cbs,
+	.setup_cls = tc_setup_cls,
+	.setup_taprio = tc_setup_taprio_without_fpe,
+	.setup_etf = tc_setup_etf,
+	.query_caps = tc_query_caps,
+	.setup_mqprio = tc_setup_mqprio_unimplemented,
 };
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index 73b6cef10401..b715af21d23a 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -68,9 +68,13 @@ static int emac_nway_reset(struct net_device *ndev)
 
 static int emac_get_sset_count(struct net_device *ndev, int stringset)
 {
+	struct prueth_emac *emac = netdev_priv(ndev);
 	switch (stringset) {
 	case ETH_SS_STATS:
-		return ICSSG_NUM_ETHTOOL_STATS;
+		if (emac->prueth->pa_stats)
+			return ICSSG_NUM_ETHTOOL_STATS;
+		else
+			return ICSSG_NUM_ETHTOOL_STATS - ICSSG_NUM_PA_STATS;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -78,6 +82,7 @@ static int emac_get_sset_count(struct net_device *ndev, int stringset)
 
 static void emac_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 {
+	struct prueth_emac *emac = netdev_priv(ndev);
 	u8 *p = data;
 	int i;
 
@@ -86,8 +91,9 @@ static void emac_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 		for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++)
 			if (!icssg_all_miig_stats[i].standard_stats)
 				ethtool_puts(&p, icssg_all_miig_stats[i].name);
-		for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
-			ethtool_puts(&p, icssg_all_pa_stats[i].name);
+		if (emac->prueth->pa_stats)
+			for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
+				ethtool_puts(&p, icssg_all_pa_stats[i].name);
 		break;
 	default:
 		break;
@@ -106,8 +112,9 @@ static void emac_get_ethtool_stats(struct net_device *ndev,
 		if (!icssg_all_miig_stats[i].standard_stats)
 			*(data++) = emac->stats[i];
 
-	for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
-		*(data++) = emac->pa_stats[i];
+	if (emac->prueth->pa_stats)
+		for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
+			*(data++) = emac->pa_stats[i];
 }
 
 static int emac_get_ts_info(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index becdda143c19..6644203d6bb7 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1185,7 +1185,7 @@ static int prueth_probe(struct platform_device *pdev)
 	prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
 	if (IS_ERR(prueth->pa_stats)) {
 		dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
-		return -ENODEV;
+		prueth->pa_stats = NULL;
 	}
 
 	if (eth0_node) {
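The prueth change turns a missing "ti,pa-stats" syscon from a fatal probe error into an optional feature: the pointer is simply NULLed, and every consumer (sset count, strings, stats readout below) is gated on it. The pattern, condensed into a hypothetical helper:

static struct regmap *example_optional_pa_stats(struct device_node *np)
{
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
	if (IS_ERR(regmap))
		return NULL;	/* feature absent: PA stats are skipped */

	return regmap;
}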
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 06a15c0b2acc..8800bd3a8d07 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -42,11 +42,14 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
 			emac->stats[i] -= tx_pkt_cnt * 8;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
-		reg = ICSSG_FW_STATS_BASE + icssg_all_pa_stats[i].offset *
-		      PRUETH_NUM_MACS + slice * sizeof(u32);
-		regmap_read(prueth->pa_stats, reg, &val);
-		emac->pa_stats[i] += val;
+	if (prueth->pa_stats) {
+		for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
+			reg = ICSSG_FW_STATS_BASE +
+			      icssg_all_pa_stats[i].offset *
+			      PRUETH_NUM_MACS + slice * sizeof(u32);
+			regmap_read(prueth->pa_stats, reg, &val);
+			emac->pa_stats[i] += val;
+		}
 	}
 }
 
@@ -70,9 +73,11 @@ int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name)
 			return emac->stats[icssg_all_miig_stats[i].offset / sizeof(u32)];
 	}
 
-	for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
-		if (!strcmp(icssg_all_pa_stats[i].name, stat_name))
-			return emac->pa_stats[icssg_all_pa_stats[i].offset / sizeof(u32)];
+	if (emac->prueth->pa_stats) {
+		for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
+			if (!strcmp(icssg_all_pa_stats[i].name, stat_name))
+				return emac->pa_stats[icssg_all_pa_stats[i].offset / sizeof(u32)];
+		}
 	}
 
 	netdev_err(emac->ndev, "Invalid stats %s\n", stat_name);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 43d81b40f761..d64b8abcf018 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -529,8 +529,6 @@ struct skbuf_dma_descriptor {
 *	  supported, the maximum frame size would be 9k. Else it is
 *	  1522 bytes (assuming support for basic VLAN)
 * @rxmem:	Stores rx memory size for jumbo frame handling.
- * @csum_offload_on_tx_path:	Stores the checksum selection on TX side.
- * @csum_offload_on_rx_path:	Stores the checksum selection on RX side.
 * @coalesce_count_rx:	Store the irq coalesce on RX side.
 * @coalesce_usec_rx:	IRQ coalesce delay for RX
 * @coalesce_count_tx:	Store the irq coalesce on TX side.
@@ -609,9 +607,6 @@ struct axienet_local {
 	u32 max_frm_size;
 	u32 rxmem;
 
-	int csum_offload_on_tx_path;
-	int csum_offload_on_rx_path;
-
 	u32 coalesce_count_rx;
 	u32 coalesce_usec_rx;
 	u32 coalesce_count_tx;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 374dff70ef0d..ea7d7c03f48e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1188,9 +1188,7 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
 		    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		}
-	} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
-		   skb->protocol == htons(ETH_P_IP) &&
-		   skb->len > 64) {
+	} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
 		skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
 		skb->ip_summed = CHECKSUM_COMPLETE;
 	}
@@ -2639,38 +2637,28 @@ static int axienet_probe(struct platform_device *pdev)
 	if (!ret) {
 		switch (value) {
 		case 1:
-			lp->csum_offload_on_tx_path =
-				XAE_FEATURE_PARTIAL_TX_CSUM;
 			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
-			/* Can checksum TCP/UDP over IPv4. */
-			ndev->features |= NETIF_F_IP_CSUM;
+			/* Can checksum any contiguous range */
+			ndev->features |= NETIF_F_HW_CSUM;
 			break;
 		case 2:
-			lp->csum_offload_on_tx_path =
-				XAE_FEATURE_FULL_TX_CSUM;
 			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
 			/* Can checksum TCP/UDP over IPv4. */
 			ndev->features |= NETIF_F_IP_CSUM;
 			break;
-		default:
-			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
 		}
 	}
 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
 	if (!ret) {
 		switch (value) {
 		case 1:
-			lp->csum_offload_on_rx_path =
-				XAE_FEATURE_PARTIAL_RX_CSUM;
 			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
+			ndev->features |= NETIF_F_RXCSUM;
 			break;
 		case 2:
-			lp->csum_offload_on_rx_path =
-				XAE_FEATURE_FULL_RX_CSUM;
 			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
+			ndev->features |= NETIF_F_RXCSUM;
 			break;
-		default:
-			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
 		}
 	}
 	/* For supporting jumbo frames, the Axi Ethernet hardware must have
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 56df37f8d50a..aef316278eb4 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1026,9 +1026,7 @@ static int ixp4xx_get_ts_info(struct net_device *dev,
 
 	if (info->phc_index < 0) {
 		info->so_timestamping =
-			SOF_TIMESTAMPING_TX_SOFTWARE |
-			SOF_TIMESTAMPING_RX_SOFTWARE |
-			SOF_TIMESTAMPING_SOFTWARE;
+			SOF_TIMESTAMPING_TX_SOFTWARE;
 		return 0;
 	}
 	info->so_timestamping =