Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/agere/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 391
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h | 23
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 71
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h | 23
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 403
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 12
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 8
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_hw_defs.h | 18
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 13
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 180
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 6
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_cq.c | 8
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 8
-rw-r--r--  drivers/net/ethernet/faraday/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 10
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 21
-rw-r--r--  drivers/net/ethernet/freescale/fman/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h | 39
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c | 89
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h | 15
-rw-r--r--  drivers/net/ethernet/google/gve/gve_desc.h | 19
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 11
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c | 364
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c | 197
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 42
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 123
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 86
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 50
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 126
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 579
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 41
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 98
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 28
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 8
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 27
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 27
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 109
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 42
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 24
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 53
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 61
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 21
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 31
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 37
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 24
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 11
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_path.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 567
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 529
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h | 63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 215
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 387
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 403
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 84
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 146
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c | 67
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 32
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_common.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 9
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vsc7514.c | 1
-rw-r--r--  drivers/net/ethernet/netronome/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/fw.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/tls.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 2
-rw-r--r--  drivers/net/ethernet/nxp/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 7
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 15
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 73
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h | 3
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 21
-rw-r--r--  drivers/net/ethernet/rocker/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 51
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 10
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.c | 3
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 6
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 9
158 files changed, 5115 insertions, 2157 deletions
diff --git a/drivers/net/ethernet/agere/Kconfig b/drivers/net/ethernet/agere/Kconfig
index d92516ae59cc..9cd750184947 100644
--- a/drivers/net/ethernet/agere/Kconfig
+++ b/drivers/net/ethernet/agere/Kconfig
@@ -21,6 +21,7 @@ config ET131X
tristate "Agere ET-1310 Gigabit Ethernet support"
depends on PCI
select PHYLIB
+ select CRC32
help
This driver supports Agere ET-1310 ethernet adapters.
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 5f8769aa469d..02087d443e73 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -71,7 +71,8 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
dma_addr_t addr)
{
if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
- pr_err("DMA address has more bits that the device supports\n");
+ netdev_err(ena_dev->net_device,
+ "DMA address has more bits that the device supports\n");
return -EINVAL;
}
@@ -83,6 +84,7 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
+ struct ena_com_dev *ena_dev = admin_queue->ena_dev;
struct ena_com_admin_sq *sq = &admin_queue->sq;
u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
@@ -90,7 +92,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
&sq->dma_addr, GFP_KERNEL);
if (!sq->entries) {
- pr_err("Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
@@ -105,6 +107,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
+ struct ena_com_dev *ena_dev = admin_queue->ena_dev;
struct ena_com_admin_cq *cq = &admin_queue->cq;
u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
@@ -112,7 +115,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
&cq->dma_addr, GFP_KERNEL);
if (!cq->entries) {
- pr_err("Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
@@ -135,7 +138,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
&aenq->dma_addr, GFP_KERNEL);
if (!aenq->entries) {
- pr_err("Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
@@ -156,7 +159,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
if (unlikely(!aenq_handlers)) {
- pr_err("AENQ handlers pointer is NULL\n");
+ netdev_err(ena_dev->net_device,
+ "AENQ handlers pointer is NULL\n");
return -EINVAL;
}
@@ -176,18 +180,21 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu
u16 command_id, bool capture)
{
if (unlikely(command_id >= admin_queue->q_depth)) {
- pr_err("Command id is larger than the queue size. cmd_id: %u queue size %d\n",
- command_id, admin_queue->q_depth);
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, admin_queue->q_depth);
return NULL;
}
if (unlikely(!admin_queue->comp_ctx)) {
- pr_err("Completion context is NULL\n");
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Completion context is NULL\n");
return NULL;
}
if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
- pr_err("Completion context is occupied\n");
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Completion context is occupied\n");
return NULL;
}
@@ -217,7 +224,8 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
/* In case of queue FULL */
cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- pr_debug("Admin queue is full.\n");
+ netdev_dbg(admin_queue->ena_dev->net_device,
+ "Admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(-ENOSPC);
}
@@ -259,6 +267,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
+ struct ena_com_dev *ena_dev = admin_queue->ena_dev;
size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
struct ena_comp_ctx *comp_ctx;
u16 i;
@@ -266,7 +275,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
admin_queue->comp_ctx =
devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
if (unlikely(!admin_queue->comp_ctx)) {
- pr_err("Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
@@ -337,7 +346,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
}
if (!io_sq->desc_addr.virt_addr) {
- pr_err("Memory allocation failed\n");
+ netdev_err(ena_dev->net_device,
+ "Memory allocation failed\n");
return -ENOMEM;
}
}
@@ -363,7 +373,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
- pr_err("Bounce buffer memory allocation failed\n");
+ netdev_err(ena_dev->net_device,
+ "Bounce buffer memory allocation failed\n");
return -ENOMEM;
}
@@ -423,7 +434,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
}
if (!io_cq->cdesc_addr.virt_addr) {
- pr_err("Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
@@ -444,7 +455,8 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a
comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
if (unlikely(!comp_ctx)) {
- pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
+ netdev_err(admin_queue->ena_dev->net_device,
+ "comp_ctx is NULL. Changing the admin queue running state\n");
admin_queue->running_state = false;
return;
}
@@ -496,10 +508,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
admin_queue->stats.completed_cmd += comp_num;
}
-static int ena_com_comp_status_to_errno(u8 comp_status)
+static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
+ u8 comp_status)
{
if (unlikely(comp_status != 0))
- pr_err("Admin command failed[%u]\n", comp_status);
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Admin command failed[%u]\n", comp_status);
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
@@ -546,7 +560,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
break;
if (time_is_before_jiffies(timeout)) {
- pr_err("Wait for completion (polling) timeout\n");
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Wait for completion (polling) timeout\n");
/* ENA didn't have any completion */
spin_lock_irqsave(&admin_queue->q_lock, flags);
admin_queue->stats.no_completion++;
@@ -562,7 +577,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
- pr_err("Command was aborted\n");
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Command was aborted\n");
spin_lock_irqsave(&admin_queue->q_lock, flags);
admin_queue->stats.aborted_cmd++;
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -573,7 +589,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
comp_ctx->status);
- ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+ ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
comp_ctxt_release(admin_queue, comp_ctx);
return ret;
@@ -615,7 +631,8 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- pr_err("Failed to set LLQ configurations: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to set LLQ configurations: %d\n", ret);
return ret;
}
@@ -637,8 +654,9 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_info->header_location_ctrl =
llq_default_cfg->llq_header_location;
} else {
- pr_err("Invalid header location control, supported: 0x%x\n",
- supported_feat);
+ netdev_err(ena_dev->net_device,
+ "Invalid header location control, supported: 0x%x\n",
+ supported_feat);
return -EINVAL;
}
@@ -652,14 +670,16 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
} else {
- pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
- supported_feat);
+ netdev_err(ena_dev->net_device,
+ "Invalid desc_stride_ctrl, supported: 0x%x\n",
+ supported_feat);
return -EINVAL;
}
- pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_stride_ctrl, supported_feat,
- llq_info->desc_stride_ctrl);
+ netdev_err(ena_dev->net_device,
+ "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_stride_ctrl,
+ supported_feat, llq_info->desc_stride_ctrl);
}
} else {
llq_info->desc_stride_ctrl = 0;
@@ -680,20 +700,23 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
llq_info->desc_list_entry_size = 256;
} else {
- pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
- supported_feat);
+ netdev_err(ena_dev->net_device,
+ "Invalid entry_size_ctrl, supported: 0x%x\n",
+ supported_feat);
return -EINVAL;
}
- pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_ring_entry_size, supported_feat,
- llq_info->desc_list_entry_size);
+ netdev_err(ena_dev->net_device,
+ "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_ring_entry_size, supported_feat,
+ llq_info->desc_list_entry_size);
}
if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
/* The desc list entry size should be whole multiply of 8
* This requirement comes from __iowrite64_copy()
*/
- pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size);
+ netdev_err(ena_dev->net_device, "Illegal entry size %d\n",
+ llq_info->desc_list_entry_size);
return -EINVAL;
}
@@ -716,14 +739,16 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
} else {
- pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
- supported_feat);
+ netdev_err(ena_dev->net_device,
+ "Invalid descs_num_before_header, supported: 0x%x\n",
+ supported_feat);
return -EINVAL;
}
- pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_num_decs_before_header,
- supported_feat, llq_info->descs_num_before_header);
+ netdev_err(ena_dev->net_device,
+ "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_num_decs_before_header,
+ supported_feat, llq_info->descs_num_before_header);
}
/* Check for accelerated queue supported */
llq_accel_mode_get = llq_features->accel_mode.u.get;
@@ -739,7 +764,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
rc = ena_com_set_llq(ena_dev);
if (rc)
- pr_err("Cannot set LLQ configuration: %d\n", rc);
+ netdev_err(ena_dev->net_device,
+ "Cannot set LLQ configuration: %d\n", rc);
return rc;
}
@@ -766,15 +792,17 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
if (comp_ctx->status == ENA_CMD_COMPLETED) {
- pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
- comp_ctx->cmd_opcode,
- admin_queue->auto_polling ? "ON" : "OFF");
+ netdev_err(admin_queue->ena_dev->net_device,
+ "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+ comp_ctx->cmd_opcode,
+ admin_queue->auto_polling ? "ON" : "OFF");
/* Check if fallback to polling is enabled */
if (admin_queue->auto_polling)
admin_queue->polling = true;
} else {
- pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
- comp_ctx->cmd_opcode, comp_ctx->status);
+ netdev_err(admin_queue->ena_dev->net_device,
+ "The ena device didn't send a completion for the admin cmd %d status %d\n",
+ comp_ctx->cmd_opcode, comp_ctx->status);
}
/* Check if shifted to polling mode.
* This will happen if there is a completion without an interrupt
@@ -787,7 +815,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
}
}
- ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+ ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
comp_ctxt_release(admin_queue, comp_ctx);
return ret;
@@ -834,15 +862,17 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (unlikely(i == timeout)) {
- pr_err("Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
- mmio_read->seq_num, offset, read_resp->req_id,
- read_resp->reg_off);
+ netdev_err(ena_dev->net_device,
+ "Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+ mmio_read->seq_num, offset, read_resp->req_id,
+ read_resp->reg_off);
ret = ENA_MMIO_READ_TIMEOUT;
goto err;
}
if (read_resp->reg_off != offset) {
- pr_err("Read failure: wrong offset provided\n");
+ netdev_err(ena_dev->net_device,
+ "Read failure: wrong offset provided\n");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
@@ -901,7 +931,8 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- pr_err("Failed to destroy io sq error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to destroy io sq error: %d\n", ret);
return ret;
}
@@ -951,7 +982,8 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
- pr_err("Reg read timeout occurred\n");
+ netdev_err(ena_dev->net_device,
+ "Reg read timeout occurred\n");
return -ETIME;
}
@@ -991,7 +1023,8 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
- pr_debug("Feature %d isn't supported\n", feature_id);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ feature_id);
return -EOPNOTSUPP;
}
@@ -1010,7 +1043,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
&get_cmd.control_buffer.address,
control_buf_dma_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
@@ -1027,8 +1060,9 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
sizeof(*get_resp));
if (unlikely(ret))
- pr_err("Failed to submit get_feature command %d error: %d\n",
- feature_id, ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to submit get_feature command %d error: %d\n",
+ feature_id, ret);
return ret;
}
@@ -1130,9 +1164,10 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
if ((get_resp.u.ind_table.min_size > log_size) ||
(get_resp.u.ind_table.max_size < log_size)) {
- pr_err("Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
- 1 << log_size, 1 << get_resp.u.ind_table.min_size,
- 1 << get_resp.u.ind_table.max_size);
+ netdev_err(ena_dev->net_device,
+ "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
+ 1 << log_size, 1 << get_resp.u.ind_table.min_size,
+ 1 << get_resp.u.ind_table.max_size);
return -EINVAL;
}
@@ -1223,7 +1258,8 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
&create_cmd.sq_ba,
io_sq->desc_addr.phys_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device,
+ "Memory address set failed\n");
return ret;
}
}
@@ -1234,7 +1270,8 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- pr_err("Failed to create IO SQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to create IO SQ. error: %d\n", ret);
return ret;
}
@@ -1252,7 +1289,8 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
cmd_completion.llq_descriptors_offset);
}
- pr_debug("Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
+ io_sq->idx, io_sq->q_depth);
return ret;
}
@@ -1286,7 +1324,8 @@ static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
if (unlikely(!intr_delay_resolution)) {
- pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
+ netdev_err(ena_dev->net_device,
+ "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
}
@@ -1321,22 +1360,25 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
comp, comp_size);
if (IS_ERR(comp_ctx)) {
- if (comp_ctx == ERR_PTR(-ENODEV))
- pr_debug("Failed to submit command [%ld]\n",
- PTR_ERR(comp_ctx));
+ ret = PTR_ERR(comp_ctx);
+ if (ret == -ENODEV)
+ netdev_dbg(admin_queue->ena_dev->net_device,
+ "Failed to submit command [%d]\n", ret);
else
- pr_err("Failed to submit command [%ld]\n",
- PTR_ERR(comp_ctx));
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Failed to submit command [%d]\n", ret);
- return PTR_ERR(comp_ctx);
+ return ret;
}
ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
if (unlikely(ret)) {
if (admin_queue->running_state)
- pr_err("Failed to process command. ret = %d\n", ret);
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Failed to process command. ret = %d\n", ret);
else
- pr_debug("Failed to process command. ret = %d\n", ret);
+ netdev_dbg(admin_queue->ena_dev->net_device,
+ "Failed to process command. ret = %d\n", ret);
}
return ret;
}
@@ -1365,7 +1407,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
&create_cmd.cq_ba,
io_cq->cdesc_addr.phys_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
@@ -1375,7 +1417,8 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- pr_err("Failed to create IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to create IO CQ. error: %d\n", ret);
return ret;
}
@@ -1394,7 +1437,8 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.numa_node_register_offset);
- pr_debug("Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
+ io_cq->idx, io_cq->q_depth);
return ret;
}
@@ -1404,8 +1448,9 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
struct ena_com_io_cq **io_cq)
{
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- pr_err("Invalid queue number %d but the max is %d\n", qid,
- ENA_TOTAL_NUM_QUEUES);
+ netdev_err(ena_dev->net_device,
+ "Invalid queue number %d but the max is %d\n", qid,
+ ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1471,7 +1516,8 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- pr_err("Failed to destroy IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to destroy IO CQ. error: %d\n", ret);
return ret;
}
@@ -1513,13 +1559,14 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
if (ret) {
- pr_info("Can't get aenq configuration\n");
+ dev_info(ena_dev->dmadev, "Can't get aenq configuration\n");
return ret;
}
if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
- pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
- get_resp.u.aenq.supported_groups, groups_flag);
+ netdev_warn(ena_dev->net_device,
+ "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
+ get_resp.u.aenq.supported_groups, groups_flag);
return -EOPNOTSUPP;
}
@@ -1538,7 +1585,8 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
sizeof(resp));
if (unlikely(ret))
- pr_err("Failed to config AENQ ret: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to config AENQ ret: %d\n", ret);
return ret;
}
@@ -1546,20 +1594,21 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
- int width;
+ u32 width;
if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
- pr_err("Reg read timeout occurred\n");
+ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
- pr_debug("ENA dma width: %d\n", width);
+ netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
- pr_err("DMA width illegal value: %d\n", width);
+ netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
+ width);
return -EINVAL;
}
@@ -1583,23 +1632,24 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
(ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
- pr_err("Reg read timeout occurred\n");
+ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
- pr_info("ENA device version: %d.%d\n",
- (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
- ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
+ dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
+ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- pr_info("ENA controller version: %d.%d.%d implementation version %d\n",
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
- ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
- ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
- ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
+ dev_info(ena_dev->dmadev,
+ "ENA controller version: %d.%d.%d implementation version %d\n",
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
ctrl_ver_masked =
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
@@ -1608,7 +1658,8 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
/* Validate the ctrl version without the implementation ID */
if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
- pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
+ netdev_err(ena_dev->net_device,
+ "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
return -1;
}
@@ -1741,12 +1792,13 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
- pr_err("Reg read timeout occurred\n");
+ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
- pr_err("Device isn't ready, abort com init\n");
+ netdev_err(ena_dev->net_device,
+ "Device isn't ready, abort com init\n");
return -ENODEV;
}
@@ -1823,8 +1875,9 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
int ret;
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
- pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
- ctx->qid, ENA_TOTAL_NUM_QUEUES);
+ netdev_err(ena_dev->net_device,
+ "Qid (%d) is bigger than max num of queues (%d)\n",
+ ctx->qid, ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1882,8 +1935,9 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
struct ena_com_io_cq *io_cq;
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
- ENA_TOTAL_NUM_QUEUES);
+ netdev_err(ena_dev->net_device,
+ "Qid (%d) is bigger than max num of queues (%d)\n",
+ qid, ENA_TOTAL_NUM_QUEUES);
return;
}
@@ -2035,8 +2089,9 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
timestamp = (u64)aenq_common->timestamp_low |
((u64)aenq_common->timestamp_high << 32);
- pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
- aenq_common->group, aenq_common->syndrome, timestamp);
+ netdev_dbg(ena_dev->net_device,
+ "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ aenq_common->group, aenq_common->syndrome, timestamp);
/* Handle specific event*/
handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
@@ -2079,19 +2134,20 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
(cap == ENA_MMIO_READ_TIMEOUT))) {
- pr_err("Reg read32 timeout occurred\n");
+ netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
return -ETIME;
}
if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
- pr_err("Device isn't ready, can't reset device\n");
+ netdev_err(ena_dev->net_device,
+ "Device isn't ready, can't reset device\n");
return -EINVAL;
}
timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
if (timeout == 0) {
- pr_err("Invalid timeout value\n");
+ netdev_err(ena_dev->net_device, "Invalid timeout value\n");
return -EINVAL;
}
@@ -2107,7 +2163,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
rc = wait_for_reset_state(ena_dev, timeout,
ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
if (rc != 0) {
- pr_err("Reset indication didn't turn on\n");
+ netdev_err(ena_dev->net_device,
+ "Reset indication didn't turn on\n");
return rc;
}
@@ -2115,7 +2172,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
if (rc != 0) {
- pr_err("Reset indication didn't turn off\n");
+ netdev_err(ena_dev->net_device,
+ "Reset indication didn't turn off\n");
return rc;
}
@@ -2152,7 +2210,8 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
sizeof(*get_resp));
if (unlikely(ret))
- pr_err("Failed to get stats. error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to get stats. error: %d\n", ret);
return ret;
}
@@ -2187,7 +2246,7 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
return ret;
}
-int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
{
struct ena_com_admin_queue *admin_queue;
struct ena_admin_set_feat_cmd cmd;
@@ -2195,7 +2254,8 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
- pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_MTU);
return -EOPNOTSUPP;
}
@@ -2214,7 +2274,8 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
sizeof(resp));
if (unlikely(ret))
- pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to set mtu %d. error: %d\n", mtu, ret);
return ret;
}
@@ -2228,7 +2289,8 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
ret = ena_com_get_feature(ena_dev, &resp,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
if (unlikely(ret)) {
- pr_err("Failed to get offload capabilities %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to get offload capabilities %d\n", ret);
return ret;
}
@@ -2248,8 +2310,8 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_FUNCTION)) {
- pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_HASH_FUNCTION);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_FUNCTION);
return -EOPNOTSUPP;
}
@@ -2260,8 +2322,9 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
return ret;
if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
- pr_err("Func hash %d isn't supported by device, abort\n",
- rss->hash_func);
+ netdev_err(ena_dev->net_device,
+ "Func hash %d isn't supported by device, abort\n",
+ rss->hash_func);
return -EOPNOTSUPP;
}
@@ -2278,7 +2341,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_key_dma_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
@@ -2290,8 +2353,9 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret)) {
- pr_err("Failed to set hash function %d. error: %d\n",
- rss->hash_func, ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to set hash function %d. error: %d\n",
+ rss->hash_func, ret);
return -EINVAL;
}
@@ -2322,7 +2386,8 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return rc;
if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
- pr_err("Flow hash function %d isn't supported\n", func);
+ netdev_err(ena_dev->net_device,
+ "Flow hash function %d isn't supported\n", func);
return -EOPNOTSUPP;
}
@@ -2330,8 +2395,9 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
case ENA_ADMIN_TOEPLITZ:
if (key) {
if (key_len != sizeof(hash_key->key)) {
- pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
- key_len, sizeof(hash_key->key));
+ netdev_err(ena_dev->net_device,
+ "key len (%hu) doesn't equal the supported size (%zu)\n",
+ key_len, sizeof(hash_key->key));
return -EINVAL;
}
memcpy(hash_key->key, key, key_len);
@@ -2343,7 +2409,8 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
rss->hash_init_val = init_val;
break;
default:
- pr_err("Invalid hash function (%d)\n", func);
+ netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n",
+ func);
return -EINVAL;
}
@@ -2429,8 +2496,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_INPUT)) {
- pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_HASH_INPUT);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_INPUT);
return -EOPNOTSUPP;
}
@@ -2448,7 +2515,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_ctrl_dma_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
cmd.control_buffer.length = sizeof(*hash_ctrl);
@@ -2459,7 +2526,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret))
- pr_err("Failed to set hash input. error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to set hash input. error: %d\n", ret);
return ret;
}
@@ -2509,9 +2577,10 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
available_fields = hash_ctrl->selected_fields[i].fields &
hash_ctrl->supported_fields[i].fields;
if (available_fields != hash_ctrl->selected_fields[i].fields) {
- pr_err("Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
- i, hash_ctrl->supported_fields[i].fields,
- hash_ctrl->selected_fields[i].fields);
+ netdev_err(ena_dev->net_device,
+ "Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
+ i, hash_ctrl->supported_fields[i].fields,
+ hash_ctrl->selected_fields[i].fields);
return -EOPNOTSUPP;
}
}
@@ -2535,7 +2604,8 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
int rc;
if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
- pr_err("Invalid proto num (%u)\n", proto);
+ netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
+ proto);
return -EINVAL;
}
@@ -2547,8 +2617,9 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
/* Make sure all the fields are supported */
supported_fields = hash_ctrl->supported_fields[proto].fields;
if ((hash_fields & supported_fields) != hash_fields) {
- pr_err("Proto %d doesn't support the required fields %x. supports only: %x\n",
- proto, hash_fields, supported_fields);
+ netdev_err(ena_dev->net_device,
+ "Proto %d doesn't support the required fields %x. supports only: %x\n",
+ proto, hash_fields, supported_fields);
}
hash_ctrl->selected_fields[proto].fields = hash_fields;
@@ -2588,14 +2659,15 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(
ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
- pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
return -EOPNOTSUPP;
}
ret = ena_com_ind_tbl_convert_to_device(ena_dev);
if (ret) {
- pr_err("Failed to convert host indirection table to device table\n");
+ netdev_err(ena_dev->net_device,
+ "Failed to convert host indirection table to device table\n");
return ret;
}
@@ -2612,7 +2684,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->rss_ind_tbl_dma_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
@@ -2626,7 +2698,8 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- pr_err("Failed to set indirect table. error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to set indirect table. error: %d\n", ret);
return ret;
}
@@ -2782,7 +2855,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.debug_ba,
host_attr->debug_area_dma_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
@@ -2790,7 +2863,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.os_info_ba,
host_attr->host_info_dma_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
@@ -2803,7 +2876,8 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- pr_err("Failed to set host attributes: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to set host attributes: %d\n", ret);
return ret;
}
@@ -2815,12 +2889,14 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
ENA_ADMIN_INTERRUPT_MODERATION);
}
-static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
+static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
+ u32 coalesce_usecs,
u32 intr_delay_resolution,
u32 *intr_moder_interval)
{
if (!intr_delay_resolution) {
- pr_err("Illegal interrupt delay granularity value\n");
+ netdev_err(ena_dev->net_device,
+ "Illegal interrupt delay granularity value\n");
return -EFAULT;
}
@@ -2832,7 +2908,8 @@ static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
u32 tx_coalesce_usecs)
{
- return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
+ return ena_com_update_nonadaptive_moderation_interval(ena_dev,
+ tx_coalesce_usecs,
ena_dev->intr_delay_resolution,
&ena_dev->intr_moder_tx_interval);
}
@@ -2840,7 +2917,8 @@ int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_de
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
u32 rx_coalesce_usecs)
{
- return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
+ return ena_com_update_nonadaptive_moderation_interval(ena_dev,
+ rx_coalesce_usecs,
ena_dev->intr_delay_resolution,
&ena_dev->intr_moder_rx_interval);
}
@@ -2856,12 +2934,14 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
if (rc) {
if (rc == -EOPNOTSUPP) {
- pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_INTERRUPT_MODERATION);
+ netdev_dbg(ena_dev->net_device,
+ "Feature %d isn't supported\n",
+ ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
} else {
- pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
- rc);
+ netdev_err(ena_dev->net_device,
+ "Failed to get interrupt moderation admin cmd. rc: %d\n",
+ rc);
}
/* no moderation supported, disable adaptive support */
@@ -2909,7 +2989,8 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
if (unlikely(ena_dev->tx_max_header_size == 0)) {
- pr_err("The size of the LLQ entry is smaller than needed\n");
+ netdev_err(ena_dev->net_device,
+ "The size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 55097750d062..343caf41e709 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -303,6 +303,7 @@ struct ena_com_dev {
u8 __iomem *reg_bar;
void __iomem *mem_bar;
void *dmadev;
+ struct net_device *net_device;
enum ena_admin_placement_policy_type tx_mem_queue_type;
u32 tx_max_header_size;
@@ -604,7 +605,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
*
* @return: 0 on Success and negative value otherwise.
*/
-int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);
/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
* @ena_dev: ENA communication layer struct
@@ -931,6 +932,26 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
struct ena_admin_feature_llq_desc *llq_features,
struct ena_llq_configurations *llq_default_config);
+/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq.
+ * @io_sq: IO submit queue struct
+ *
+ * @return - ena_com_dev struct extracted from io_sq
+ */
+static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq)
+{
+ return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]);
+}
+
+/* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq.
+ * @io_sq: IO submit queue struct
+ *
+ * @return - ena_com_dev struct extracted from io_sq
+ */
+static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq)
+{
+ return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]);
+}
+
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
return ena_dev->adaptive_coalescing;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 032ab9f20438..c3be751e7379 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -58,13 +58,15 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
if (is_llq_max_tx_burst_exists(io_sq)) {
if (unlikely(!io_sq->entries_in_tx_burst_left)) {
- pr_err("Error: trying to send more packets than tx burst allows\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Error: trying to send more packets than tx burst allows\n");
return -ENOSPC;
}
io_sq->entries_in_tx_burst_left--;
- pr_debug("Decreasing entries_in_tx_burst_left of queue %d to %d\n",
- io_sq->qid, io_sq->entries_in_tx_burst_left);
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
+ io_sq->qid, io_sq->entries_in_tx_burst_left);
}
/* Make sure everything was written into the bounce buffer before
@@ -102,12 +104,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
if (unlikely((header_offset + header_len) >
llq_info->desc_list_entry_size)) {
- pr_err("Trying to write header larger than llq entry can accommodate\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Trying to write header larger than llq entry can accommodate\n");
return -EFAULT;
}
if (unlikely(!bounce_buffer)) {
- pr_err("Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Bounce buffer is NULL\n");
return -EFAULT;
}
@@ -125,7 +129,8 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
bounce_buffer = pkt_ctrl->curr_bounce_buf;
if (unlikely(!bounce_buffer)) {
- pr_err("Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Bounce buffer is NULL\n");
return NULL;
}
@@ -250,8 +255,9 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
io_cq->cur_rx_pkt_cdesc_count = 0;
io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
- pr_debug("ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
- io_cq->qid, *first_cdesc_idx, count);
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
+ io_cq->qid, *first_cdesc_idx, count);
} else {
io_cq->cur_rx_pkt_cdesc_count += count;
count = 0;
@@ -335,7 +341,8 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
return 0;
}
-static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
+ struct ena_com_rx_ctx *ena_rx_ctx,
struct ena_eth_io_rx_cdesc_base *cdesc)
{
ena_rx_ctx->l3_proto = cdesc->status &
@@ -357,10 +364,11 @@ static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
- pr_debug("l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
- ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
- ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
- ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
+ ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
+ ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
+ ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}
/*****************************************************************************/
@@ -385,13 +393,15 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
/* num_bufs +1 for potential meta desc */
if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
- pr_debug("Not enough space in the tx queue\n");
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Not enough space in the tx queue\n");
return -ENOMEM;
}
if (unlikely(header_len > io_sq->tx_max_header_size)) {
- pr_err("Header size is too large %d max header: %d\n",
- header_len, io_sq->tx_max_header_size);
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Header size is too large %d max header: %d\n",
+ header_len, io_sq->tx_max_header_size);
return -EINVAL;
}
@@ -405,7 +415,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
if (unlikely(rc)) {
- pr_err("Failed to create and store tx meta desc\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Failed to create and store tx meta desc\n");
return rc;
}
@@ -529,12 +540,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
return 0;
}
- pr_debug("Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
- nb_hw_desc);
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+ nb_hw_desc);
if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
- pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
- ena_rx_ctx->max_bufs);
+ netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
+ ena_rx_ctx->max_bufs);
return -ENOSPC;
}
@@ -557,13 +570,15 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
/* Update SQ head ptr */
io_sq->next_to_comp += nb_hw_desc;
- pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
- io_sq->next_to_comp);
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
+ io_sq->qid, io_sq->next_to_comp);
/* Get rx flags from the last pkt */
- ena_com_rx_set_flags(ena_rx_ctx, cdesc);
+ ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
ena_rx_ctx->descs = nb_hw_desc;
+
return 0;
}
@@ -588,11 +603,15 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
ENA_ETH_IO_RX_DESC_LAST_MASK |
- (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
- ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+ ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
+ (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);
desc->req_id = req_id;
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
+ __func__, io_sq->qid, req_id);
+
desc->buff_addr_lo = (u32)ena_buf->paddr;
desc->buff_addr_hi =
((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 2c16c218818a..689313ee25a8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -140,8 +140,9 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
llq_info->descs_per_entry);
}
- pr_debug("Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
- num_descs, num_entries_needed);
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Queue: %d num_descs: %d num_entries_needed: %d\n",
+ io_sq->qid, num_descs, num_entries_needed);
return num_entries_needed > io_sq->entries_in_tx_burst_left;
}
@@ -151,14 +152,16 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
u16 tail = io_sq->tail;
- pr_debug("Write submission queue doorbell for queue: %d tail: %d\n",
- io_sq->qid, tail);
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Write submission queue doorbell for queue: %d tail: %d\n",
+ io_sq->qid, tail);
writel(tail, io_sq->db_addr);
if (is_llq_max_tx_burst_exists(io_sq)) {
- pr_debug("Reset available entries in tx burst for queue %d to %d\n",
- io_sq->qid, max_entries_in_tx_burst);
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Reset available entries in tx burst for queue %d to %d\n",
+ io_sq->qid, max_entries_in_tx_burst);
io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
}
@@ -176,8 +179,9 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
if (unlikely(need_update)) {
- pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
- io_cq->qid, head);
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "Write completion queue doorbell for queue %d: head: %d\n",
+ io_cq->qid, head);
writel(head, io_cq->cq_head_db_reg);
io_cq->last_head_update = head;
}
@@ -240,7 +244,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
*req_id = READ_ONCE(cdesc->req_id);
if (unlikely(*req_id >= io_cq->q_depth)) {
- pr_err("Invalid req id %d\n", cdesc->req_id);
+ netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "Invalid req id %d\n", cdesc->req_id);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 6cdd9efe8df3..d6cc7aa612b7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -95,6 +95,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(xdp_pass),
ENA_STAT_RX_ENTRY(xdp_tx),
ENA_STAT_RX_ENTRY(xdp_invalid),
+ ENA_STAT_RX_ENTRY(xdp_redirect),
};
static const struct ena_stats ena_stats_ena_com_strings[] = {
@@ -839,7 +840,7 @@ static int ena_set_channels(struct net_device *netdev,
/* The check for max value is already done in ethtool */
if (count < ENA_MIN_NUM_IO_QUEUES ||
(ena_xdp_present(adapter) &&
- !ena_xdp_legal_queue_count(adapter, channels->combined_count)))
+ !ena_xdp_legal_queue_count(adapter, count)))
return -EINVAL;
return ena_update_queue_count(adapter, count);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 0e98f45c2b22..06596fa1f9fe 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -29,6 +29,8 @@ MODULE_LICENSE("GPL");
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5 * HZ)
+#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
+
#define ENA_NAPI_BUDGET 64
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
@@ -78,6 +80,15 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
int first_index, int count);
+/* Increase a stat by cnt while holding syncp seqlock on 32-bit machines */
+static void ena_increase_stat(u64 *statp, u64 cnt,
+ struct u64_stats_sync *syncp)
+{
+ u64_stats_update_begin(syncp);
+ (*statp) += cnt;
+ u64_stats_update_end(syncp);
+}
+
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ena_adapter *adapter = netdev_priv(dev);
@@ -90,9 +101,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
return;
adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.tx_timeout++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
netif_err(adapter, tx_err, dev, "Transmit time out\n");
}
@@ -152,9 +161,8 @@ static int ena_xmit_common(struct net_device *dev,
if (unlikely(rc)) {
netif_err(adapter, tx_queued, dev,
"Failed to prepare tx bufs\n");
- u64_stats_update_begin(&ring->syncp);
- ring->tx_stats.prepare_ctx_err++;
- u64_stats_update_end(&ring->syncp);
+ ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
+ &ring->syncp);
if (rc != -ENOMEM) {
adapter->reset_reason =
ENA_REGS_RESET_DRIVER_INVALID_STATE;
@@ -225,18 +233,18 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
return ret;
}
-static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
- struct ena_tx_buffer *tx_info,
- struct xdp_buff *xdp,
- void **push_hdr,
- u32 *push_len)
+static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
+ struct ena_tx_buffer *tx_info,
+ struct xdp_frame *xdpf,
+ void **push_hdr,
+ u32 *push_len)
{
struct ena_adapter *adapter = xdp_ring->adapter;
struct ena_com_buf *ena_buf;
dma_addr_t dma = 0;
u32 size;
- tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
+ tx_info->xdpf = xdpf;
size = tx_info->xdpf->len;
ena_buf = tx_info->bufs;
@@ -262,9 +270,8 @@ static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
return 0;
error_report_dma_error:
- u64_stats_update_begin(&xdp_ring->syncp);
- xdp_ring->tx_stats.dma_mapping_err++;
- u64_stats_update_end(&xdp_ring->syncp);
+ ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
+ &xdp_ring->syncp);
netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
xdp_return_frame_rx_napi(tx_info->xdpf);
@@ -274,29 +281,24 @@ error_report_dma_error:
return -EINVAL;
}
-static int ena_xdp_xmit_buff(struct net_device *dev,
- struct xdp_buff *xdp,
- int qid,
- struct ena_rx_buffer *rx_info)
+static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
+ struct net_device *dev,
+ struct xdp_frame *xdpf,
+ int flags)
{
- struct ena_adapter *adapter = netdev_priv(dev);
struct ena_com_tx_ctx ena_tx_ctx = {};
struct ena_tx_buffer *tx_info;
- struct ena_ring *xdp_ring;
u16 next_to_use, req_id;
- int rc;
void *push_hdr;
u32 push_len;
+ int rc;
- xdp_ring = &adapter->tx_ring[qid];
next_to_use = xdp_ring->next_to_use;
req_id = xdp_ring->free_ids[next_to_use];
tx_info = &xdp_ring->tx_buffer_info[req_id];
tx_info->num_of_bufs = 0;
- page_ref_inc(rx_info->page);
- tx_info->xdp_rx_page = rx_info->page;
- rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
+ rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
if (unlikely(rc))
goto error_drop_packet;
@@ -311,34 +313,82 @@ static int ena_xdp_xmit_buff(struct net_device *dev,
tx_info,
&ena_tx_ctx,
next_to_use,
- xdp->data_end - xdp->data);
+ xdpf->len);
if (rc)
goto error_unmap_dma;
/* trigger the dma engine. ena_com_write_sq_doorbell()
* has a mb
*/
- ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
- u64_stats_update_begin(&xdp_ring->syncp);
- xdp_ring->tx_stats.doorbells++;
- u64_stats_update_end(&xdp_ring->syncp);
+ if (flags & XDP_XMIT_FLUSH) {
+ ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
+ ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
+ &xdp_ring->syncp);
+ }
- return NETDEV_TX_OK;
+ return rc;
error_unmap_dma:
ena_unmap_tx_buff(xdp_ring, tx_info);
tx_info->xdpf = NULL;
error_drop_packet:
- __free_page(tx_info->xdp_rx_page);
- return NETDEV_TX_OK;
+ xdp_return_frame(xdpf);
+ return rc;
}
-static int ena_xdp_execute(struct ena_ring *rx_ring,
- struct xdp_buff *xdp,
- struct ena_rx_buffer *rx_info)
+static int ena_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ int qid, i, err, drops = 0;
+ struct ena_ring *xdp_ring;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ return -ENETDOWN;
+
+ /* We assume that all rings have the same XDP program */
+ if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
+ return -ENXIO;
+
+ qid = smp_processor_id() % adapter->xdp_num_queues;
+ qid += adapter->xdp_first_ring;
+ xdp_ring = &adapter->tx_ring[qid];
+
+ /* Other CPU ids might try to send through this queue */
+ spin_lock(&xdp_ring->xdp_tx_lock);
+
+ for (i = 0; i < n; i++) {
+ err = ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0);
+ /* The descriptor is freed by ena_xdp_xmit_frame in case
+ * of an error.
+ */
+ if (err)
+ drops++;
+ }
+
+ /* Ring the doorbell to make the device aware of the packets */
+ if (flags & XDP_XMIT_FLUSH) {
+ ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
+ ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
+ &xdp_ring->syncp);
+ }
+
+ spin_unlock(&xdp_ring->xdp_tx_lock);
+
+ /* Return number of packets sent */
+ return n - drops;
+}
+
+static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
struct bpf_prog *xdp_prog;
+ struct ena_ring *xdp_ring;
u32 verdict = XDP_PASS;
+ struct xdp_frame *xdpf;
u64 *xdp_stat;
+ int qid;
rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
@@ -348,28 +398,49 @@ static int ena_xdp_execute(struct ena_ring *rx_ring,
verdict = bpf_prog_run_xdp(xdp_prog, xdp);
- if (verdict == XDP_TX) {
- ena_xdp_xmit_buff(rx_ring->netdev,
- xdp,
- rx_ring->qid + rx_ring->adapter->num_io_queues,
- rx_info);
+ switch (verdict) {
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ break;
+ }
+
+ /* Find xmit queue */
+ qid = rx_ring->qid + rx_ring->adapter->num_io_queues;
+ xdp_ring = &rx_ring->adapter->tx_ring[qid];
+
+ /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
+ spin_lock(&xdp_ring->xdp_tx_lock);
+
+ ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, XDP_XMIT_FLUSH);
+ spin_unlock(&xdp_ring->xdp_tx_lock);
xdp_stat = &rx_ring->rx_stats.xdp_tx;
- } else if (unlikely(verdict == XDP_ABORTED)) {
+ break;
+ case XDP_REDIRECT:
+ if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
+ xdp_stat = &rx_ring->rx_stats.xdp_redirect;
+ break;
+ }
+ fallthrough;
+ case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- } else if (unlikely(verdict == XDP_DROP)) {
+ break;
+ case XDP_DROP:
xdp_stat = &rx_ring->rx_stats.xdp_drop;
- } else if (unlikely(verdict == XDP_PASS)) {
+ break;
+ case XDP_PASS:
xdp_stat = &rx_ring->rx_stats.xdp_pass;
- } else {
+ break;
+ default:
bpf_warn_invalid_xdp_action(verdict);
xdp_stat = &rx_ring->rx_stats.xdp_invalid;
}
- u64_stats_update_begin(&rx_ring->syncp);
- (*xdp_stat)++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
out:
rcu_read_unlock();
@@ -638,6 +709,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter,
txr->smoothed_interval =
ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
txr->disable_meta_caching = adapter->disable_meta_caching;
+ spin_lock_init(&txr->xdp_tx_lock);
/* Don't init RX queues for xdp queues */
if (!ENA_IS_XDP_INDEX(adapter, i)) {
@@ -922,9 +994,8 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
page = alloc_page(gfp);
if (unlikely(!page)) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.page_alloc_fail++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
+ &rx_ring->syncp);
return -ENOMEM;
}
@@ -934,9 +1005,8 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.dma_mapping_err++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
+ &rx_ring->syncp);
__free_page(page);
return -EIO;
@@ -952,11 +1022,20 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
return 0;
}
+static void ena_unmap_rx_buff(struct ena_ring *rx_ring,
+ struct ena_rx_buffer *rx_info)
+{
+ struct ena_com_buf *ena_buf = &rx_info->ena_buf;
+
+ dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
+ ENA_PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+}
+
static void ena_free_rx_page(struct ena_ring *rx_ring,
struct ena_rx_buffer *rx_info)
{
struct page *page = rx_info->page;
- struct ena_com_buf *ena_buf = &rx_info->ena_buf;
if (unlikely(!page)) {
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
@@ -964,9 +1043,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
return;
}
- dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
- ENA_PAGE_SIZE,
- DMA_BIDIRECTIONAL);
+ ena_unmap_rx_buff(rx_ring, rx_info);
__free_page(page);
rx_info->page = NULL;
@@ -1009,9 +1086,8 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
}
if (unlikely(i < num)) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.refil_partial++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
+ &rx_ring->syncp);
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
"Refilled rx qid %d with only %d buffers (from %d)\n",
rx_ring->qid, i, num);
@@ -1187,9 +1263,7 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
"Invalid req_id: %hu\n",
req_id);
- u64_stats_update_begin(&ring->syncp);
- ring->tx_stats.bad_req_id++;
- u64_stats_update_end(&ring->syncp);
+ ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
/* Trigger device reset */
ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
@@ -1300,9 +1374,8 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
if (netif_tx_queue_stopped(txq) && above_thresh &&
test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
netif_tx_wake_queue(txq);
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.queue_wakeup++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
+ &tx_ring->syncp);
}
__netif_tx_unlock(txq);
}
@@ -1321,9 +1394,8 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
rx_ring->rx_copybreak);
if (unlikely(!skb)) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.skb_alloc_fail++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
+ &rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"Failed to allocate skb. frags: %d\n", frags);
return NULL;
@@ -1395,9 +1467,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
return NULL;
do {
- dma_unmap_page(rx_ring->dev,
- dma_unmap_addr(&rx_info->ena_buf, paddr),
- ENA_PAGE_SIZE, DMA_BIDIRECTIONAL);
+ ena_unmap_rx_buff(rx_ring, rx_info);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
rx_info->page_offset, len, ENA_PAGE_SIZE);
@@ -1451,9 +1521,8 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l3_csum_err))) {
/* ipv4 checksum error */
skb->ip_summed = CHECKSUM_NONE;
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.bad_csum++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+ &rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX IPv4 header checksum error\n");
return;
@@ -1464,9 +1533,8 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
if (unlikely(ena_rx_ctx->l4_csum_err)) {
/* TCP/UDP checksum error */
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.bad_csum++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+ &rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX L4 checksum error\n");
skb->ip_summed = CHECKSUM_NONE;
@@ -1475,13 +1543,11 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
if (likely(ena_rx_ctx->l4_csum_checked)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.csum_good++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.csum_good, 1,
+ &rx_ring->syncp);
} else {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.csum_unchecked++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1,
+ &rx_ring->syncp);
skb->ip_summed = CHECKSUM_NONE;
}
} else {
@@ -1529,7 +1595,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
return XDP_DROP;
- ret = ena_xdp_execute(rx_ring, xdp, rx_info);
+ ret = ena_xdp_execute(rx_ring, xdp);
/* The xdp program might expand the headers */
if (ret == XDP_PASS) {
@@ -1559,6 +1625,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
struct sk_buff *skb;
int refill_required;
struct xdp_buff xdp;
+ int xdp_flags = 0;
int total_len = 0;
int xdp_verdict;
int rc = 0;
@@ -1606,22 +1673,25 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
&next_to_clean);
if (unlikely(!skb)) {
- /* The page might not actually be freed here since the
- * page reference count is incremented in
- * ena_xdp_xmit_buff(), and it will be decreased only
- * when send completion was received from the device
- */
- if (xdp_verdict == XDP_TX)
- ena_free_rx_page(rx_ring,
- &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
for (i = 0; i < ena_rx_ctx.descs; i++) {
- rx_ring->free_ids[next_to_clean] =
- rx_ring->ena_bufs[i].req_id;
+ int req_id = rx_ring->ena_bufs[i].req_id;
+
+ rx_ring->free_ids[next_to_clean] = req_id;
next_to_clean =
ENA_RX_RING_IDX_NEXT(next_to_clean,
rx_ring->ring_size);
+
+ /* Packet was passed for transmission, unmap it
+ * from the RX side.
+ */
+ if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) {
+ ena_unmap_rx_buff(rx_ring,
+ &rx_ring->rx_buffer_info[req_id]);
+ rx_ring->rx_buffer_info[req_id].page = NULL;
+ }
}
if (xdp_verdict != XDP_PASS) {
+ xdp_flags |= xdp_verdict;
res_budget--;
continue;
}
@@ -1667,20 +1737,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ena_refill_rx_bufs(rx_ring, refill_required);
}
+ if (xdp_flags & XDP_REDIRECT)
+ xdp_do_flush_map();
+
return work_done;
error:
adapter = netdev_priv(rx_ring->netdev);
if (rc == -ENOSPC) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.bad_desc_num++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
+ &rx_ring->syncp);
adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
} else {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.bad_req_id++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
+ &rx_ring->syncp);
adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
}
@@ -1741,9 +1812,8 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
tx_ring->smoothed_interval,
true);
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.unmask_interrupt++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1,
+ &tx_ring->syncp);
/* It is a shared MSI-X.
* Tx and Rx CQ have pointer to it.
@@ -1823,7 +1893,7 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
tx_pkts++;
total_done += tx_info->tx_descs;
- __free_page(tx_info->xdp_rx_page);
+ xdp_return_frame(xdpf);
xdp_ring->free_ids[next_to_clean] = req_id;
next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
xdp_ring->ring_size);
@@ -2550,9 +2620,8 @@ static int ena_up(struct ena_adapter *adapter)
if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
netif_carrier_on(adapter->netdev);
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.interface_up++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.interface_up, 1,
+ &adapter->syncp);
set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
@@ -2590,9 +2659,8 @@ static void ena_down(struct ena_adapter *adapter)
clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.interface_down++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.interface_down, 1,
+ &adapter->syncp);
netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev);
@@ -2820,15 +2888,12 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
(header_len < tx_ring->tx_max_header_size))
return 0;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.linearize++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp);
rc = skb_linearize(skb);
if (unlikely(rc)) {
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.linearize_failed++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1,
+ &tx_ring->syncp);
}
return rc;
@@ -2868,9 +2933,8 @@ static int ena_tx_map_skb(struct ena_ring *tx_ring,
tx_ring->push_buf_intermediate_buf);
*header_len = push_len;
if (unlikely(skb->data != *push_hdr)) {
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.llq_buffer_copy++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1,
+ &tx_ring->syncp);
delta = push_len - skb_head_len;
}
@@ -2927,9 +2991,8 @@ static int ena_tx_map_skb(struct ena_ring *tx_ring,
return 0;
error_report_dma_error:
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.dma_mapping_err++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
+ &tx_ring->syncp);
netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");
tx_info->skb = NULL;
@@ -3006,9 +3069,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
__func__, qid);
netif_tx_stop_queue(txq);
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.queue_stop++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1,
+ &tx_ring->syncp);
/* There is a rare condition where this function decide to
* stop the queue but meanwhile clean_tx_irq updates
@@ -3023,9 +3085,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
ENA_TX_WAKEUP_THRESH)) {
netif_tx_wake_queue(txq);
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.queue_wakeup++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
+ &tx_ring->syncp);
}
}
@@ -3034,9 +3095,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
* has a mb
*/
ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.doorbells++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.doorbells, 1,
+ &tx_ring->syncp);
}
return NETDEV_TX_OK;
@@ -3242,6 +3302,7 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
.ndo_bpf = ena_xdp,
+ .ndo_xdp_xmit = ena_xdp_xmit,
};
static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -3671,9 +3732,8 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
rc = -EIO;
}
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.missed_tx += missed_tx;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx,
+ &tx_ring->syncp);
return rc;
}
@@ -3756,9 +3816,8 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
rx_ring->empty_rx_queue++;
if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.empty_rx_ring++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1,
+ &rx_ring->syncp);
netif_err(adapter, drv, adapter->netdev,
"Trigger refill for ring %d\n", i);
@@ -3788,9 +3847,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
netif_err(adapter, drv, adapter->netdev,
"Keep alive watchdog timeout.\n");
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.wd_expired++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
+ &adapter->syncp);
adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
@@ -3801,9 +3859,8 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
netif_err(adapter, drv, adapter->netdev,
"ENA admin queue is not in running state!\n");
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.admin_q_pause++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
+ &adapter->syncp);
adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
@@ -4176,18 +4233,36 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_dev->dmadev = &pdev->dev;
+ netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS);
+ if (!netdev) {
+ dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
+ rc = -ENOMEM;
+ goto err_free_region;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ adapter = netdev_priv(netdev);
+ adapter->ena_dev = ena_dev;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+ ena_dev->net_device = netdev;
+
+ pci_set_drvdata(pdev, adapter);
+
rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
if (rc) {
dev_err(&pdev->dev, "ENA device init failed\n");
if (rc == -ETIME)
rc = -EPROBE_DEFER;
- goto err_free_region;
+ goto err_netdev_destroy;
}
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
if (rc) {
dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
- goto err_free_ena_dev;
+ goto err_device_destroy;
}
calc_queue_ctx.ena_dev = ena_dev;
@@ -4207,26 +4282,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_device_destroy;
}
- /* dev zeroed in init_etherdev */
- netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues);
- if (!netdev) {
- dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
- rc = -ENOMEM;
- goto err_device_destroy;
- }
-
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- adapter = netdev_priv(netdev);
- pci_set_drvdata(pdev, adapter);
-
- adapter->ena_dev = ena_dev;
- adapter->netdev = netdev;
- adapter->pdev = pdev;
-
ena_set_conf_feat_params(adapter, &get_feat_ctx);
- adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
@@ -4257,7 +4314,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc) {
dev_err(&pdev->dev,
"Failed to query interrupt moderation feature\n");
- goto err_netdev_destroy;
+ goto err_device_destroy;
}
ena_init_io_rings(adapter,
0,
@@ -4335,11 +4392,11 @@ err_free_msix:
ena_disable_msix(adapter);
err_worker_destroy:
del_timer(&adapter->timer_service);
-err_netdev_destroy:
- free_netdev(netdev);
err_device_destroy:
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
+err_netdev_destroy:
+ free_netdev(netdev);
err_free_region:
ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
@@ -4439,9 +4496,7 @@ static int __maybe_unused ena_suspend(struct device *dev_d)
struct pci_dev *pdev = to_pci_dev(dev_d);
struct ena_adapter *adapter = pci_get_drvdata(pdev);
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.suspend++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp);
rtnl_lock();
if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
@@ -4462,9 +4517,7 @@ static int __maybe_unused ena_resume(struct device *dev_d)
struct ena_adapter *adapter = dev_get_drvdata(dev_d);
int rc;
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.resume++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp);
rtnl_lock();
rc = ena_restore_device(adapter);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 30eb686749dc..74af15d62ee1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -170,12 +170,6 @@ struct ena_tx_buffer {
* the xdp queues
*/
struct xdp_frame *xdpf;
- /* The rx page for the rx buffer that was received in rx and
- * re transmitted on xdp tx queues as a result of XDP_TX action.
- * We need to free the page once we finished cleaning the buffer in
- * clean_xdp_irq()
- */
- struct page *xdp_rx_page;
/* Indicate if bufs[0] map the linear data of the skb. */
u8 map_linear_data;
@@ -239,6 +233,7 @@ struct ena_stats_rx {
u64 xdp_pass;
u64 xdp_tx;
u64 xdp_invalid;
+ u64 xdp_redirect;
};
struct ena_ring {
@@ -263,6 +258,7 @@ struct ena_ring {
struct ena_com_io_sq *ena_com_io_sq;
struct bpf_prog *xdp_bpf_prog;
struct xdp_rxq_info xdp_rxq;
+ spinlock_t xdp_tx_lock; /* synchronize XDP TX/Redirect traffic */
u16 next_to_use;
u16 next_to_clean;
@@ -433,8 +429,8 @@ static inline bool ena_xdp_present_ring(struct ena_ring *ring)
return !!ring->xdp_bpf_prog;
}
-static inline int ena_xdp_legal_queue_count(struct ena_adapter *adapter,
- u32 queues)
+static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
+ u32 queues)
{
return 2 * queues <= adapter->max_num_io_queues;
}
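[Editor's note — illustrative sketch, not part of this commit] The ena_increase_stat() helper added in ena_netdev.c covers only the writer side of the u64_stats_sync protocol. A reader (for example an ethtool stats dump) pairs it with a fetch/retry loop along the lines sketched below; the function name, the use of the non-_irq accessors, and the ring argument are assumptions made purely for illustration.

	/* Reader side of the seqcount that ena_increase_stat() writes under. */
	static u64 example_read_doorbells(struct ena_ring *ring)
	{
		unsigned int start;
		u64 val;

		do {
			start = u64_stats_fetch_begin(&ring->syncp);	/* begin a consistent snapshot */
			val = ring->tx_stats.doorbells;			/* 64-bit counter; may tear on 32-bit without this */
		} while (u64_stats_fetch_retry(&ring->syncp, start));	/* retry if a writer raced with us */

		return val;
	}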
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 7b80d924632a..f016f2e12ee7 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -2549,7 +2549,6 @@ static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data)
*/
static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw)
{
- s32 ret_val;
s16 mii_autoneg_adv_reg;
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
@@ -2605,12 +2604,7 @@ static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw)
hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
- ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
-
- if (ret_val)
- return ret_val;
-
- return 0;
+ return atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
}
/*
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index f335b7115c1b..dc34e38f97c7 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -218,17 +218,17 @@ do { \
/* Set the coalescing timer for the given ib */
#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \
- ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0));
+ ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0))
/* Acks 'events' # of events for a given ib while disabling interrupts */
#define bna_ib_ack_disable_irq(_i_dbell, _events) \
- (writel(BNA_DOORBELL_IB_INT_ACK(0, (_events)), \
- (_i_dbell)->doorbell_addr));
+ (writel(BNA_DOORBELL_IB_INT_ACK(0, (_events)), \
+ (_i_dbell)->doorbell_addr))
/* Acks 'events' # of events for a given ib */
#define bna_ib_ack(_i_dbell, _events) \
- (writel(((_i_dbell)->doorbell_ack | (_events)), \
- (_i_dbell)->doorbell_addr));
+ (writel(((_i_dbell)->doorbell_ack | (_events)), \
+ (_i_dbell)->doorbell_addr))
#define bna_ib_start(_bna, _ib, _is_regular) \
{ \
@@ -259,12 +259,12 @@ do { \
}
#define bna_txq_prod_indx_doorbell(_tcb) \
- (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \
- (_tcb)->q_dbell));
+ (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \
+ (_tcb)->q_dbell))
#define bna_rxq_prod_indx_doorbell(_rcb) \
- (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
- (_rcb)->q_dbell));
+ (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
+ (_rcb)->q_dbell))
/* TxQ, RxQ, CQ related bits, offsets, macros */
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 85858163bac5..e432a68ac520 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -23,6 +23,7 @@ config MACB
tristate "Cadence MACB/GEM support"
depends on HAS_DMA && COMMON_CLK
select PHYLINK
+ select CRC32
help
The Cadence MACB ethernet interface is found on many Atmel AT32 and
AT91 parts. This driver also supports the Cadence GEM (Gigabit
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 1f5da4e4f4b2..d8c68906525a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -699,6 +699,7 @@
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
+#define MACB_CAPS_CLK_HW_CHG 0x04000000
#define MACB_CAPS_MACB_IS_EMAC 0x08000000
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
@@ -1147,6 +1148,14 @@ struct macb_pm_data {
u32 usrio;
};
+struct macb_usrio_config {
+ u32 mii;
+ u32 rmii;
+ u32 rgmii;
+ u32 refclk;
+ u32 hdfctlen;
+};
+
struct macb_config {
u32 caps;
unsigned int dma_burst_length;
@@ -1155,6 +1164,7 @@ struct macb_config {
struct clk **rx_clk, struct clk **tsu_clk);
int (*init)(struct platform_device *pdev);
int jumbo_max_len;
+ const struct macb_usrio_config *usrio;
};
struct tsu_incr {
@@ -1253,8 +1263,6 @@ struct macb {
/* AT91RM9200 transmit queue (1 on wire + 1 queued) */
struct macb_tx_skb rm9200_txq[2];
- unsigned int rm9200_tx_tail;
- unsigned int rm9200_tx_len;
unsigned int max_tx_length;
u64 ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES];
@@ -1288,6 +1296,7 @@ struct macb {
u32 rx_intr_mask;
struct macb_pm_data pm_data;
+ const struct macb_usrio_config *usrio;
};
#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 7b1d195787dc..d5d910916c2e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -460,15 +460,14 @@ static void macb_init_buffers(struct macb *bp)
/**
* macb_set_tx_clk() - Set a clock to a new frequency
- * @clk: Pointer to the clock to change
+ * @bp: Pointer to struct macb
* @speed: New frequency in Hz
- * @dev: Pointer to the struct net_device
*/
-static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
+static void macb_set_tx_clk(struct macb *bp, int speed)
{
long ferr, rate, rate_rounded;
- if (!clk)
+ if (!bp->tx_clk || !(bp->caps & MACB_CAPS_CLK_HW_CHG))
return;
switch (speed) {
@@ -485,7 +484,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
return;
}
- rate_rounded = clk_round_rate(clk, rate);
+ rate_rounded = clk_round_rate(bp->tx_clk, rate);
if (rate_rounded < 0)
return;
@@ -495,11 +494,12 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
ferr = abs(rate_rounded - rate);
ferr = DIV_ROUND_UP(ferr, rate / 100000);
if (ferr > 5)
- netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
+ netdev_warn(bp->dev,
+ "unable to generate target frequency: %ld Hz\n",
rate);
- if (clk_set_rate(clk, rate_rounded))
- netdev_err(dev, "adjusting tx_clk failed.\n");
+ if (clk_set_rate(bp->tx_clk, rate_rounded))
+ netdev_err(bp->dev, "adjusting tx_clk failed.\n");
}
static void macb_validate(struct phylink_config *config,
@@ -751,7 +751,7 @@ static void macb_mac_link_up(struct phylink_config *config,
if (rx_pause)
ctrl |= MACB_BIT(PAE);
- macb_set_tx_clk(bp->tx_clk, speed, ndev);
+ macb_set_tx_clk(bp, speed);
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
@@ -3694,6 +3694,20 @@ static void macb_probe_queues(void __iomem *mem,
*num_queues = hweight32(*queue_mask);
}
+static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk,
+ struct clk *rx_clk, struct clk *tsu_clk)
+{
+ struct clk_bulk_data clks[] = {
+ { .clk = tsu_clk, },
+ { .clk = rx_clk, },
+ { .clk = pclk, },
+ { .clk = hclk, },
+ { .clk = tx_clk },
+ };
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);
+}
+
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
struct clk **hclk, struct clk **tx_clk,
struct clk **rx_clk, struct clk **tsu_clk)
@@ -3913,15 +3927,15 @@ static int macb_init(struct platform_device *pdev)
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
val = 0;
if (phy_interface_mode_is_rgmii(bp->phy_interface))
- val = GEM_BIT(RGMII);
+ val = bp->usrio->rgmii;
else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
- val = MACB_BIT(RMII);
+ val = bp->usrio->rmii;
else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
- val = MACB_BIT(MII);
+ val = bp->usrio->mii;
if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
- val |= MACB_BIT(CLKEN);
+ val |= bp->usrio->refclk;
macb_or_gem_writel(bp, USRIO, val);
}
@@ -4032,7 +4046,6 @@ static int at91ether_start(struct macb *lp)
MACB_BIT(ISR_TUND) |
MACB_BIT(ISR_RLE) |
MACB_BIT(TCOMP) |
- MACB_BIT(RM9200_TBRE) |
MACB_BIT(ISR_ROVR) |
MACB_BIT(HRESP));
@@ -4049,7 +4062,6 @@ static void at91ether_stop(struct macb *lp)
MACB_BIT(ISR_TUND) |
MACB_BIT(ISR_RLE) |
MACB_BIT(TCOMP) |
- MACB_BIT(RM9200_TBRE) |
MACB_BIT(ISR_ROVR) |
MACB_BIT(HRESP));
@@ -4119,10 +4131,11 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
- unsigned long flags;
- if (lp->rm9200_tx_len < 2) {
- int desc = lp->rm9200_tx_tail;
+ if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
+ int desc = 0;
+
+ netif_stop_queue(dev);
/* Store packet information (to free when Tx completed) */
lp->rm9200_txq[desc].skb = skb;
@@ -4136,15 +4149,6 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- spin_lock_irqsave(&lp->lock, flags);
-
- lp->rm9200_tx_tail = (desc + 1) & 1;
- lp->rm9200_tx_len++;
- if (lp->rm9200_tx_len > 1)
- netif_stop_queue(dev);
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
/* Set address of the data in the Transmit Address register */
macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping);
/* Set length of the packet in the Transmit Control register */
@@ -4210,8 +4214,6 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
struct macb *lp = netdev_priv(dev);
u32 intstatus, ctl;
unsigned int desc;
- unsigned int qlen;
- u32 tsr;
/* MAC Interrupt Status register indicates what interrupts are pending.
* It is automatically cleared once read.
@@ -4223,39 +4225,21 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
at91ether_rx(dev);
/* Transmit complete */
- if (intstatus & (MACB_BIT(TCOMP) | MACB_BIT(RM9200_TBRE))) {
+ if (intstatus & MACB_BIT(TCOMP)) {
/* The TCOM bit is set even if the transmission failed */
if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
dev->stats.tx_errors++;
- spin_lock(&lp->lock);
-
- tsr = macb_readl(lp, TSR);
-
- /* we have three possibilities here:
- * - all pending packets transmitted (TGO, implies BNQ)
- * - only first packet transmitted (!TGO && BNQ)
- * - two frames pending (!TGO && !BNQ)
- * Note that TGO ("transmit go") is called "IDLE" on RM9200.
- */
- qlen = (tsr & MACB_BIT(TGO)) ? 0 :
- (tsr & MACB_BIT(RM9200_BNQ)) ? 1 : 2;
-
- while (lp->rm9200_tx_len > qlen) {
- desc = (lp->rm9200_tx_tail - lp->rm9200_tx_len) & 1;
+ desc = 0;
+ if (lp->rm9200_txq[desc].skb) {
dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
lp->rm9200_txq[desc].skb = NULL;
dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
lp->rm9200_txq[desc].size, DMA_TO_DEVICE);
dev->stats.tx_packets++;
dev->stats.tx_bytes += lp->rm9200_txq[desc].size;
- lp->rm9200_tx_len--;
}
-
- if (lp->rm9200_tx_len < 2 && netif_queue_stopped(dev))
- netif_wake_queue(dev);
-
- spin_unlock(&lp->lock);
+ netif_wake_queue(dev);
}
/* Work-around for EMAC Errata section 41.3.1 */
@@ -4406,8 +4390,10 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
return err;
mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
- if (!mgmt)
- return -ENOMEM;
+ if (!mgmt) {
+ err = -ENOMEM;
+ goto err_disable_clks;
+ }
init.name = "sifive-gemgxl-mgmt";
init.ops = &fu540_c000_ops;
@@ -4418,16 +4404,26 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
mgmt->hw.init = &init;
*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
- if (IS_ERR(*tx_clk))
- return PTR_ERR(*tx_clk);
+ if (IS_ERR(*tx_clk)) {
+ err = PTR_ERR(*tx_clk);
+ goto err_disable_clks;
+ }
err = clk_prepare_enable(*tx_clk);
- if (err)
+ if (err) {
dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
- else
+ *tx_clk = NULL;
+ goto err_disable_clks;
+ } else {
dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
+ }
return 0;
+
+err_disable_clks:
+ macb_clks_disable(*pclk, *hclk, *tx_clk, *rx_clk, *tsu_clk);
+
+ return err;
}
static int fu540_c000_init(struct platform_device *pdev)
@@ -4439,6 +4435,21 @@ static int fu540_c000_init(struct platform_device *pdev)
return macb_init(pdev);
}
+static const struct macb_usrio_config macb_default_usrio = {
+ .mii = MACB_BIT(MII),
+ .rmii = MACB_BIT(RMII),
+ .rgmii = GEM_BIT(RGMII),
+ .refclk = MACB_BIT(CLKEN),
+};
+
+static const struct macb_usrio_config sama7g5_usrio = {
+ .mii = 0,
+ .rmii = 1,
+ .rgmii = 2,
+ .refclk = BIT(2),
+ .hdfctlen = BIT(6),
+};
+
static const struct macb_config fu540_c000_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
MACB_CAPS_GEM_HAS_PTP,
@@ -4446,12 +4457,14 @@ static const struct macb_config fu540_c000_config = {
.clk_init = fu540_c000_clk_init,
.init = fu540_c000_init,
.jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config at91sam9260_config = {
.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config sama5d3macb_config = {
@@ -4459,6 +4472,7 @@ static const struct macb_config sama5d3macb_config = {
| MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config pc302gem_config = {
@@ -4466,6 +4480,7 @@ static const struct macb_config pc302gem_config = {
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config sama5d2_config = {
@@ -4473,6 +4488,7 @@ static const struct macb_config sama5d2_config = {
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config sama5d3_config = {
@@ -4482,6 +4498,7 @@ static const struct macb_config sama5d3_config = {
.clk_init = macb_clk_init,
.init = macb_init,
.jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config sama5d4_config = {
@@ -4489,18 +4506,21 @@ static const struct macb_config sama5d4_config = {
.dma_burst_length = 4,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config emac_config = {
.caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
.clk_init = at91ether_clk_init,
.init = at91ether_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config np4_config = {
.caps = MACB_CAPS_USRIO_DISABLED,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config zynqmp_config = {
@@ -4511,6 +4531,7 @@ static const struct macb_config zynqmp_config = {
.clk_init = macb_clk_init,
.init = macb_init,
.jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config zynq_config = {
@@ -4519,6 +4540,23 @@ static const struct macb_config zynq_config = {
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
+};
+
+static const struct macb_config sama7g5_gem_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .usrio = &sama7g5_usrio,
+};
+
+static const struct macb_config sama7g5_emac_config = {
+ .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_USRIO_HAS_CLKEN,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .usrio = &sama7g5_usrio,
};
static const struct of_device_id macb_dt_ids[] = {
@@ -4538,6 +4576,8 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
+ { .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
+ { .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
@@ -4640,6 +4680,8 @@ static int macb_probe(struct platform_device *pdev)
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+ bp->usrio = macb_config->usrio;
+
spin_lock_init(&bp->lock);
/* setup capabilities */
@@ -4735,11 +4777,7 @@ err_out_free_netdev:
free_netdev(dev);
err_disable_clocks:
- clk_disable_unprepare(tx_clk);
- clk_disable_unprepare(hclk);
- clk_disable_unprepare(pclk);
- clk_disable_unprepare(rx_clk);
- clk_disable_unprepare(tsu_clk);
+ macb_clks_disable(pclk, hclk, tx_clk, rx_clk, tsu_clk);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
@@ -4764,11 +4802,8 @@ static int macb_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (!pm_runtime_suspended(&pdev->dev)) {
- clk_disable_unprepare(bp->tx_clk);
- clk_disable_unprepare(bp->hclk);
- clk_disable_unprepare(bp->pclk);
- clk_disable_unprepare(bp->rx_clk);
- clk_disable_unprepare(bp->tsu_clk);
+ macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
+ bp->rx_clk, bp->tsu_clk);
pm_runtime_set_suspended(&pdev->dev);
}
phylink_destroy(bp->phylink);
@@ -4947,13 +4982,10 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
- if (!(device_may_wakeup(dev))) {
- clk_disable_unprepare(bp->tx_clk);
- clk_disable_unprepare(bp->hclk);
- clk_disable_unprepare(bp->pclk);
- clk_disable_unprepare(bp->rx_clk);
- }
- clk_disable_unprepare(bp->tsu_clk);
+ if (!(device_may_wakeup(dev)))
+ macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
+ else
+ macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 7f90b828d159..1b7e8c91b541 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -987,9 +987,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
u32 ctrl, iplen, maclen;
-#if IS_ENABLED(CONFIG_IPV6)
struct ipv6hdr *ip6;
-#endif
unsigned int ndesc;
struct tcphdr *tcp;
int len16, pktlen;
@@ -1043,17 +1041,15 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
cpl->len = htons(pktlen);
memcpy(buf, skb->data, pktlen);
- if (tx_info->ip_family == AF_INET) {
+ if (!IS_ENABLED(CONFIG_IPV6) || tx_info->ip_family == AF_INET) {
/* we need to correct ip header len */
ip = (struct iphdr *)(buf + maclen);
ip->tot_len = htons(pktlen - maclen);
cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
-#if IS_ENABLED(CONFIG_IPV6)
} else {
ip6 = (struct ipv6hdr *)(buf + maclen);
ip6->payload_len = htons(pktlen - maclen - iplen);
cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
-#endif
}
cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
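[Editor's note — illustrative sketch, not part of this commit] The chcr_ktls hunk above replaces the #if IS_ENABLED(CONFIG_IPV6)/#endif guards with a plain C test. The idiom relies on IS_ENABLED() expanding to a compile-time 0 or 1, so both branches are still type-checked while the dead one is discarded by the optimizer. handle_v4()/handle_v6() below are hypothetical helpers used only to keep the sketch short.

	if (!IS_ENABLED(CONFIG_IPV6) || tx_info->ip_family == AF_INET)
		handle_v4(buf, maclen, pktlen);		/* IPv4 length/checksum fixup (hypothetical) */
	else
		handle_v6(buf, maclen, iplen, pktlen);	/* eliminated as dead code when CONFIG_IPV6=n (hypothetical) */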
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c
index 9c682aff3834..519323460f26 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c
@@ -36,8 +36,6 @@ void vnic_cq_free(struct vnic_cq *cq)
int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
- int err;
-
cq->index = index;
cq->vdev = vdev;
@@ -47,11 +45,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
return -EINVAL;
}
- err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
- if (err)
- return err;
-
- return 0;
+ return vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
}
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 676e437d78f6..d402d83d9edd 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4677,7 +4677,6 @@ static int be_if_create(struct be_adapter *adapter)
{
u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
u32 cap_flags = be_if_cap_flags(adapter);
- int status;
/* alloc required memory for other filtering fields */
adapter->pmac_id = kcalloc(be_max_uc(adapter),
@@ -4700,13 +4699,8 @@ static int be_if_create(struct be_adapter *adapter)
en_flags &= cap_flags;
/* will enable all the needed filter flags in be_open() */
- status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+ return be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
&adapter->if_handle, 0);
-
- if (status)
- return status;
-
- return 0;
}
int be_update_queues(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index c2677ec0564d..3d1e9a302148 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -33,6 +33,7 @@ config FTGMAC100
depends on !64BIT || BROKEN
select PHYLIB
select MDIO_ASPEED if MACH_ASPEED_G6
+ select CRC32
help
This driver supports the FTGMAC100 Gigabit Ethernet controller
from Faraday. It is used on Faraday A369, Andes AG102 and some
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index a1d53ddf1593..3f9175bdce77 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -25,6 +25,7 @@ config FEC
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
+ select CRC32
select PHYLIB
imply PTP_1588_CLOCK
help
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index e28510c282e5..4360ce4d3fb6 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1625,17 +1625,13 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
{
struct dpaa_bp *dpaa_bp;
int *countptr;
- int res;
dpaa_bp = priv->dpaa_bp;
if (!dpaa_bp)
return -EINVAL;
countptr = this_cpu_ptr(dpaa_bp->percpu_count);
- res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
- if (res)
- return res;
- return 0;
+ return dpaa_eth_refill_bpool(dpaa_bp, countptr);
}
/* Cleanup function for outgoing frame descriptors that were built on Tx path,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 90cd243070d7..828c177df03d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -269,6 +269,7 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac,
if (!of_device_is_available(node)) {
netdev_err(mac->net_dev, "pcs-handle node not available\n");
+ of_node_put(node);
return -ENODEV;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 8ed1ebd5a183..89e558135432 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -143,8 +143,8 @@ static const struct {
{ ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
{ ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
{ ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
- { ENETC_PM0_R1518, "MAC rx 1024-1518 byte packets" },
- { ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" },
+ { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" },
+ { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
{ ENETC_PM0_ROVR, "MAC rx oversized packets" },
{ ENETC_PM0_RJBR, "MAC rx jabber packets" },
{ ENETC_PM0_RFRG, "MAC rx fragment packets" },
@@ -163,9 +163,13 @@ static const struct {
{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },
{ ENETC_PM0_TPKT, "MAC tx packets" },
{ ENETC_PM0_TUND, "MAC tx undersized packets" },
+ { ENETC_PM0_T64, "MAC tx 64 byte packets" },
{ ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
+ { ENETC_PM0_T255, "MAC tx 128-255 byte packets" },
+ { ENETC_PM0_T511, "MAC tx 256-511 byte packets" },
{ ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
- { ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" },
+ { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" },
+ { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
{ ENETC_PM0_TCNP, "MAC tx control packets" },
{ ENETC_PM0_TDFR, "MAC tx deferred packets" },
{ ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index d18f439f2b81..e1e950d48c92 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -267,8 +267,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_R255 0x8180
#define ENETC_PM0_R511 0x8188
#define ENETC_PM0_R1023 0x8190
-#define ENETC_PM0_R1518 0x8198
-#define ENETC_PM0_R1519X 0x81A0
+#define ENETC_PM0_R1522 0x8198
+#define ENETC_PM0_R1523X 0x81A0
#define ENETC_PM0_ROVR 0x81A8
#define ENETC_PM0_RJBR 0x81B0
#define ENETC_PM0_RFRG 0x81B8
@@ -287,9 +287,13 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_TBCA 0x8250
#define ENETC_PM0_TPKT 0x8260
#define ENETC_PM0_TUND 0x8268
+#define ENETC_PM0_T64 0x8270
#define ENETC_PM0_T127 0x8278
+#define ENETC_PM0_T255 0x8280
+#define ENETC_PM0_T511 0x8288
#define ENETC_PM0_T1023 0x8290
-#define ENETC_PM0_T1518 0x8298
+#define ENETC_PM0_T1522 0x8298
+#define ENETC_PM0_T1523X 0x82A0
#define ENETC_PM0_TCNP 0x82C0
#define ENETC_PM0_TDFR 0x82D0
#define ENETC_PM0_TMCOL 0x82D8
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index ecdc2af8c292..ed8fcb8b486e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -851,13 +851,12 @@ static bool enetc_port_has_pcs(struct enetc_pf *pf)
pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
}
-static int enetc_mdiobus_create(struct enetc_pf *pf)
+static int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node)
{
- struct device *dev = &pf->si->pdev->dev;
struct device_node *mdio_np;
int err;
- mdio_np = of_get_child_by_name(dev->of_node, "mdio");
+ mdio_np = of_get_child_by_name(node, "mdio");
if (mdio_np) {
err = enetc_mdio_probe(pf, mdio_np);
@@ -969,18 +968,17 @@ static const struct phylink_mac_ops enetc_mac_phylink_ops = {
.mac_link_down = enetc_pl_mac_link_down,
};
-static int enetc_phylink_create(struct enetc_ndev_priv *priv)
+static int enetc_phylink_create(struct enetc_ndev_priv *priv,
+ struct device_node *node)
{
struct enetc_pf *pf = enetc_si_priv(priv->si);
- struct device *dev = &pf->si->pdev->dev;
struct phylink *phylink;
int err;
pf->phylink_config.dev = &priv->ndev->dev;
pf->phylink_config.type = PHYLINK_NETDEV;
- phylink = phylink_create(&pf->phylink_config,
- of_fwnode_handle(dev->of_node),
+ phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
pf->if_mode, &enetc_mac_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
@@ -1001,13 +999,14 @@ static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
static int enetc_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ struct device_node *node = pdev->dev.of_node;
struct enetc_ndev_priv *priv;
struct net_device *ndev;
struct enetc_si *si;
struct enetc_pf *pf;
int err;
- if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
+ if (node && !of_device_is_available(node)) {
dev_info(&pdev->dev, "device is disabled, skipping\n");
return -ENODEV;
}
@@ -1058,12 +1057,12 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_msix;
}
- if (!of_get_phy_mode(pdev->dev.of_node, &pf->if_mode)) {
- err = enetc_mdiobus_create(pf);
+ if (!of_get_phy_mode(node, &pf->if_mode)) {
+ err = enetc_mdiobus_create(pf, node);
if (err)
goto err_mdiobus_create;
- err = enetc_phylink_create(priv);
+ err = enetc_phylink_create(priv, node);
if (err)
goto err_phylink_create;
}
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index 34150182cc35..48bf8088795d 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -4,6 +4,7 @@ config FSL_FMAN
depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
select GENERIC_ALLOCATOR
select PHYLIB
+ select CRC32
default n
help
Freescale Data-Path Acceleration Architecture Frame Manager
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index c6481bd61239..9d58d8334467 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -430,7 +430,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
priv = new_bus->priv;
- new_bus->name = "Freescale PowerQUICC MII Bus",
+ new_bus->name = "Freescale PowerQUICC MII Bus";
new_bus->read = &fsl_pq_mdio_read;
new_bus->write = &fsl_pq_mdio_write;
new_bus->reset = &fsl_pq_mdio_reset;
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index f5c80229ea96..daf07c0f790b 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -38,6 +38,8 @@
#define NIC_TX_STATS_REPORT_NUM 0
#define NIC_RX_STATS_REPORT_NUM 4
+#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -49,7 +51,8 @@ struct gve_rx_desc_queue {
struct gve_rx_slot_page_info {
struct page *page;
void *page_address;
- u32 page_offset; /* offset to write to in page */
+ u8 page_offset; /* flipped to second half? */
+ u8 can_flip;
};
/* A list of pages registered with the device during setup and used by a queue
@@ -64,10 +67,11 @@ struct gve_queue_page_list {
/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
- struct gve_rx_data_slot *data_ring; /* read by NIC */
+ union gve_rx_data_slot *data_ring; /* read by NIC */
dma_addr_t data_bus; /* dma mapping of the slots */
struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
+ u8 raw_addressing; /* use raw_addressing? */
};
struct gve_priv;
@@ -82,6 +86,7 @@ struct gve_rx_ring {
u32 cnt; /* free-running total number of completed packets */
u32 fill_cnt; /* free-running total number of descs and buffs posted */
u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
+ u32 db_threshold; /* threshold for posting new buffs and descs */
u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
u64 rx_copied_pkt; /* free-running total number of copied packets */
u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
@@ -107,12 +112,20 @@ struct gve_tx_iovec {
u32 iov_padding; /* padding associated with this segment */
};
+struct gve_tx_dma_buf {
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
* ring entry but only used for a pkt_desc not a seg_desc
*/
struct gve_tx_buffer_state {
struct sk_buff *skb; /* skb for this pkt */
- struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
+ union {
+ struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
+ struct gve_tx_dma_buf buf;
+ };
};
/* A TX buffer - each queue has one */
@@ -135,13 +148,17 @@ struct gve_tx_ring {
__be32 last_nic_done ____cacheline_aligned; /* NIC tail pointer */
u64 pkt_done; /* free-running - total packets completed */
u64 bytes_done; /* free-running - total bytes completed */
+ u64 dropped_pkt; /* free-running - total packets dropped */
+ u64 dma_mapping_error; /* count of dma mapping errors */
/* Cacheline 2 -- Read-mostly fields */
union gve_tx_desc *desc ____cacheline_aligned;
struct gve_tx_buffer_state *info; /* Maps 1:1 to a desc */
struct netdev_queue *netdev_txq;
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
+ struct device *dev;
u32 mask; /* masks req and done down to queue size */
+ u8 raw_addressing; /* use raw_addressing? */
/* Slow-path fields */
u32 q_num ____cacheline_aligned; /* queue idx */
@@ -194,11 +211,12 @@ struct gve_priv {
u16 tx_desc_cnt; /* num desc per ring */
u16 rx_desc_cnt; /* num desc per ring */
u16 tx_pages_per_qpl; /* tx buffer length */
- u16 rx_pages_per_qpl; /* rx buffer length */
+ u16 rx_data_slot_cnt; /* rx buffer length */
u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */
u32 rx_copybreak; /* copy packets smaller than this */
u16 default_num_queues; /* default num queues to set up */
+ u8 raw_addressing; /* 1 if this dev supports raw addressing, 0 otherwise */
struct gve_queue_config tx_cfg;
struct gve_queue_config rx_cfg;
@@ -436,14 +454,14 @@ static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
*/
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
- return priv->tx_cfg.num_queues;
+ return priv->raw_addressing ? 0 : priv->tx_cfg.num_queues;
}
/* Returns the number of rx queue page lists
*/
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
- return priv->rx_cfg.num_queues;
+ return priv->raw_addressing ? 0 : priv->rx_cfg.num_queues;
}
/* Returns a pointer to the next available tx qpl in the list of qpls
@@ -497,15 +515,6 @@ static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
return DMA_FROM_DEVICE;
}
-/* Returns true if the max mtu allows page recycling */
-static inline bool gve_can_recycle_pages(struct net_device *dev)
-{
- /* We can't recycle the pages if we can't fit a packet into half a
- * page.
- */
- return dev->max_mtu <= PAGE_SIZE / 2;
-}
-
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
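As a side note on the raw-addressing slot layout introduced above: the device is handed a 64-bit buffer address whose upper bits are the DMA page base (recoverable with GVE_DATA_SLOT_ADDR_PAGE_MASK) and whose low bits carry the half-page offset tracked by the new u8 page_offset flip flag. A minimal standalone sketch, assuming 4 KiB pages; the ex_* names are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE      4096ULL                   /* assumed page size */
#define EX_ADDR_PAGE_MASK (~(EX_PAGE_SIZE - 1))     /* mirrors GVE_DATA_SLOT_ADDR_PAGE_MASK */

/* Hypothetical helper: compose a slot address from a page DMA base and a flip flag. */
static uint64_t ex_slot_addr(uint64_t page_dma, int second_half)
{
	return page_dma | (second_half ? EX_PAGE_SIZE / 2 : 0);
}

int main(void)
{
	uint64_t addr = ex_slot_addr(0xaabb0000ULL, 1);

	printf("page base %#llx, offset in page %llu\n",
	       (unsigned long long)(addr & EX_ADDR_PAGE_MASK),
	       (unsigned long long)(addr & (EX_PAGE_SIZE - 1)));
	return 0;
}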
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 24ae6a28a806..53864f200599 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -14,6 +14,57 @@
#define GVE_ADMINQ_SLEEP_LEN 20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100
+#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
+"Expected: length=%d, feature_mask=%x.\n" \
+"Actual: length=%d, feature_mask=%x.\n"
+
+static
+struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
+ struct gve_device_option *option)
+{
+ void *option_end, *descriptor_end;
+
+ option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
+ descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);
+
+ return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
+}
+
+static
+void gve_parse_device_option(struct gve_priv *priv,
+ struct gve_device_descriptor *device_descriptor,
+ struct gve_device_option *option)
+{
+ u16 option_length = be16_to_cpu(option->option_length);
+ u16 option_id = be16_to_cpu(option->option_id);
+
+ switch (option_id) {
+ case GVE_DEV_OPT_ID_RAW_ADDRESSING:
+ /* If the length or feature mask doesn't match,
+ * continue without enabling the feature.
+ */
+ if (option_length != GVE_DEV_OPT_LEN_RAW_ADDRESSING ||
+ option->feat_mask != cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING)) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, "Raw Addressing",
+ GVE_DEV_OPT_LEN_RAW_ADDRESSING,
+ cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING),
+ option_length, option->feat_mask);
+ priv->raw_addressing = 0;
+ } else {
+ dev_info(&priv->pdev->dev,
+ "Raw addressing device option enabled.\n");
+ priv->raw_addressing = 1;
+ }
+ break;
+ default:
+ /* If we don't recognize the option just continue
+ * without doing anything.
+ */
+ dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
+ option_id);
+ }
+}
+
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
@@ -318,8 +369,10 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
struct gve_tx_ring *tx = &priv->tx[queue_index];
union gve_adminq_command cmd;
+ u32 qpl_id;
int err;
+ qpl_id = priv->raw_addressing ? GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
@@ -328,7 +381,7 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
.queue_resources_addr =
cpu_to_be64(tx->q_resources_bus),
.tx_ring_addr = cpu_to_be64(tx->bus),
- .queue_page_list_id = cpu_to_be32(tx->tx_fifo.qpl->id),
+ .queue_page_list_id = cpu_to_be32(qpl_id),
.ntfy_id = cpu_to_be32(tx->ntfy_id),
};
@@ -357,8 +410,10 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
struct gve_rx_ring *rx = &priv->rx[queue_index];
union gve_adminq_command cmd;
+ u32 qpl_id;
int err;
+ qpl_id = priv->raw_addressing ? GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
@@ -369,7 +424,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
.rx_desc_ring_addr = cpu_to_be64(rx->desc.bus),
.rx_data_ring_addr = cpu_to_be64(rx->data.data_bus),
- .queue_page_list_id = cpu_to_be32(rx->data.qpl->id),
+ .queue_page_list_id = cpu_to_be32(qpl_id),
};
err = gve_adminq_issue_cmd(priv, &cmd);
@@ -460,11 +515,14 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
int gve_adminq_describe_device(struct gve_priv *priv)
{
struct gve_device_descriptor *descriptor;
+ struct gve_device_option *dev_opt;
union gve_adminq_command cmd;
dma_addr_t descriptor_bus;
+ u16 num_options;
int err = 0;
u8 *mac;
u16 mtu;
+ int i;
memset(&cmd, 0, sizeof(cmd));
descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
@@ -511,13 +569,30 @@ int gve_adminq_describe_device(struct gve_priv *priv)
mac = descriptor->mac;
dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
- priv->rx_pages_per_qpl = be16_to_cpu(descriptor->rx_pages_per_qpl);
- if (priv->rx_pages_per_qpl < priv->rx_desc_cnt) {
- dev_err(&priv->pdev->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
- priv->rx_pages_per_qpl);
- priv->rx_desc_cnt = priv->rx_pages_per_qpl;
+ priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
+ if (priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
+ dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
+ priv->rx_data_slot_cnt);
+ priv->rx_desc_cnt = priv->rx_data_slot_cnt;
}
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
+ dev_opt = (void *)(descriptor + 1);
+
+ num_options = be16_to_cpu(descriptor->num_device_options);
+ for (i = 0; i < num_options; i++) {
+ struct gve_device_option *next_opt;
+
+ next_opt = gve_get_next_option(descriptor, dev_opt);
+ if (!next_opt) {
+ dev_err(&priv->dev->dev,
+ "options exceed device_descriptor's total length.\n");
+ err = -EINVAL;
+ goto free_device_descriptor;
+ }
+
+ gve_parse_device_option(priv, descriptor, dev_opt);
+ dev_opt = next_opt;
+ }
free_device_descriptor:
dma_free_coherent(&priv->pdev->dev, sizeof(*descriptor), descriptor,
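The device-option walk added above treats the descriptor as a fixed header followed by variable-length option records, and stops if an option's payload would run past descriptor->total_length. A simplified, byte-order-free sketch of the same bounds check; the struct layouts here are illustrative, not the wire format:

#include <stdint.h>
#include <stdio.h>

struct ex_descriptor { uint16_t total_length; uint16_t num_options; };
struct ex_option     { uint16_t id; uint16_t length; uint32_t feat_mask; /* value bytes follow */ };

/* Return the next option, or NULL when the current option's value would
 * spill past the end of the descriptor block (total_length bytes).
 */
static struct ex_option *ex_next_option(struct ex_descriptor *desc, struct ex_option *opt)
{
	char *opt_end  = (char *)(opt + 1) + opt->length;
	char *desc_end = (char *)desc + desc->total_length;

	return opt_end > desc_end ? NULL : (struct ex_option *)opt_end;
}

int main(void)
{
	union { unsigned char bytes[64]; uint32_t align; } buf = { { 0 } };
	struct ex_descriptor *desc = (struct ex_descriptor *)buf.bytes;
	struct ex_option *opt = (struct ex_option *)(desc + 1);

	desc->total_length = sizeof(*desc) + sizeof(*opt);  /* exactly one zero-length option */
	opt->length = 0;
	printf("walk stays in bounds: %s\n", ex_next_option(desc, opt) ? "yes" : "no");
	return 0;
}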
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 015796a20118..d320c2ffd87c 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -79,12 +79,17 @@ struct gve_device_descriptor {
static_assert(sizeof(struct gve_device_descriptor) == 40);
-struct device_option {
- __be32 option_id;
- __be32 option_length;
+struct gve_device_option {
+ __be16 option_id;
+ __be16 option_length;
+ __be32 feat_mask;
};
-static_assert(sizeof(struct device_option) == 8);
+static_assert(sizeof(struct gve_device_option) == 8);
+
+#define GVE_DEV_OPT_ID_RAW_ADDRESSING 0x1
+#define GVE_DEV_OPT_LEN_RAW_ADDRESSING 0x0
+#define GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING 0x0
struct gve_adminq_configure_device_resources {
__be64 counter_array;
@@ -111,6 +116,8 @@ struct gve_adminq_unregister_page_list {
static_assert(sizeof(struct gve_adminq_unregister_page_list) == 4);
+#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF
+
struct gve_adminq_create_tx_queue {
__be32 queue_id;
__be32 reserved;
diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h
index 54779871d52e..05ae6300e984 100644
--- a/drivers/net/ethernet/google/gve/gve_desc.h
+++ b/drivers/net/ethernet/google/gve/gve_desc.h
@@ -16,9 +16,11 @@
* Base addresses encoded in seg_addr are not assumed to be physical
* addresses. The ring format assumes these come from some linear address
* space. This could be physical memory, kernel virtual memory, user virtual
- * memory. gVNIC uses lists of registered pages. Each queue is assumed
- * to be associated with a single such linear address space to ensure a
- * consistent meaning for seg_addrs posted to its rings.
+ * memory.
+ * If raw dma addressing is not supported then gVNIC uses lists of registered
+ * pages. Each queue is assumed to be associated with a single such linear
+ * address space to ensure a consistent meaning for seg_addrs posted to its
+ * rings.
*/
struct gve_tx_pkt_desc {
@@ -72,12 +74,15 @@ struct gve_rx_desc {
} __packed;
static_assert(sizeof(struct gve_rx_desc) == 64);
-/* As with the Tx ring format, the qpl_offset entries below are offsets into an
- * ordered list of registered pages.
+/* If the device supports raw dma addressing then the addr in data slot is
+ * the dma address of the buffer.
+ * If the device only supports registered segments then the addr is a byte
+ * offset into the registered segment (an ordered list of pages) where the
+ * buffer is.
*/
-struct gve_rx_data_slot {
- /* byte offset into the rx registered segment of this slot */
+union gve_rx_data_slot {
__be64 qpl_offset;
+ __be64 addr;
};
/* GVE Receive Packet Descriptor Seq No */

diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 2fb197fd3daf..0901fa6853ca 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -51,6 +51,7 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]",
"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
+ "tx_dma_mapping_error[%u]",
};
static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
@@ -323,6 +324,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = tx->stop_queue;
data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
tx));
+ data[i++] = tx->dma_mapping_error;
/* stats from NIC */
if (skip_nic_stats) {
/* skip NIC tx stats */
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 02e7d74779f4..7302498c6df3 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -677,6 +677,10 @@ static int gve_alloc_qpls(struct gve_priv *priv)
int i, j;
int err;
+ /* Raw addressing means no QPLs */
+ if (priv->raw_addressing)
+ return 0;
+
priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
if (!priv->qpls)
return -ENOMEM;
@@ -689,7 +693,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
}
for (; i < num_qpls; i++) {
err = gve_alloc_queue_page_list(priv, i,
- priv->rx_pages_per_qpl);
+ priv->rx_data_slot_cnt);
if (err)
goto free_qpls;
}
@@ -717,6 +721,10 @@ static void gve_free_qpls(struct gve_priv *priv)
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
int i;
+ /* Raw addressing means no QPLs */
+ if (priv->raw_addressing)
+ return;
+
kvfree(priv->qpl_cfg.qpl_id_map);
for (i = 0; i < num_qpls; i++)
@@ -1077,6 +1085,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
if (skip_describe_device)
goto setup_device;
+ priv->raw_addressing = false;
/* Get the initial information we need from the device */
err = gve_adminq_describe_device(priv);
if (err) {
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 008fa897a3e6..bf123fe524c4 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -16,12 +16,39 @@ static void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
block->rx = NULL;
}
+static void gve_rx_free_buffer(struct device *dev,
+ struct gve_rx_slot_page_info *page_info,
+ union gve_rx_data_slot *data_slot)
+{
+ dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
+ GVE_DATA_SLOT_ADDR_PAGE_MASK);
+
+ gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
+}
+
+static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
+{
+ if (rx->data.raw_addressing) {
+ u32 slots = rx->mask + 1;
+ int i;
+
+ for (i = 0; i < slots; i++)
+ gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
+ &rx->data.data_ring[i]);
+ } else {
+ gve_unassign_qpl(priv, rx->data.qpl->id);
+ rx->data.qpl = NULL;
+ }
+ kvfree(rx->data.page_info);
+ rx->data.page_info = NULL;
+}
+
static void gve_rx_free_ring(struct gve_priv *priv, int idx)
{
struct gve_rx_ring *rx = &priv->rx[idx];
struct device *dev = &priv->pdev->dev;
+ u32 slots = rx->mask + 1;
size_t bytes;
- u32 slots;
gve_rx_remove_from_block(priv, idx);
@@ -33,11 +60,8 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx)
rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL;
- gve_unassign_qpl(priv, rx->data.qpl->id);
- rx->data.qpl = NULL;
- kvfree(rx->data.page_info);
+ gve_rx_unfill_pages(priv, rx);
- slots = rx->mask + 1;
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(dev, bytes, rx->data.data_ring,
rx->data.data_bus);
@@ -46,19 +70,35 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx)
}
static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
- struct gve_rx_data_slot *slot,
- dma_addr_t addr, struct page *page)
+ dma_addr_t addr, struct page *page, __be64 *slot_addr)
{
page_info->page = page;
page_info->page_offset = 0;
page_info->page_address = page_address(page);
- slot->qpl_offset = cpu_to_be64(addr);
+ *slot_addr = cpu_to_be64(addr);
+}
+
+static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
+ struct gve_rx_slot_page_info *page_info,
+ union gve_rx_data_slot *data_slot)
+{
+ struct page *page;
+ dma_addr_t dma;
+ int err;
+
+ err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+ gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
+ return 0;
}
static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
{
struct gve_priv *priv = rx->gve;
u32 slots;
+ int err;
int i;
/* Allocate one page per Rx queue slot. Each page is split into two
@@ -71,17 +111,30 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
if (!rx->data.page_info)
return -ENOMEM;
- rx->data.qpl = gve_assign_rx_qpl(priv);
-
+ if (!rx->data.raw_addressing)
+ rx->data.qpl = gve_assign_rx_qpl(priv);
for (i = 0; i < slots; i++) {
- struct page *page = rx->data.qpl->pages[i];
- dma_addr_t addr = i * PAGE_SIZE;
+ if (!rx->data.raw_addressing) {
+ struct page *page = rx->data.qpl->pages[i];
+ dma_addr_t addr = i * PAGE_SIZE;
- gve_setup_rx_buffer(&rx->data.page_info[i],
- &rx->data.data_ring[i], addr, page);
+ gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
+ &rx->data.data_ring[i].qpl_offset);
+ continue;
+ }
+ err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
+ &rx->data.data_ring[i]);
+ if (err)
+ goto alloc_err;
}
return slots;
+alloc_err:
+ while (i--)
+ gve_rx_free_buffer(&priv->pdev->dev,
+ &rx->data.page_info[i],
+ &rx->data.data_ring[i]);
+ return err;
}
static void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
@@ -110,8 +163,9 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
rx->gve = priv;
rx->q_num = idx;
- slots = priv->rx_pages_per_qpl;
+ slots = priv->rx_data_slot_cnt;
rx->mask = slots - 1;
+ rx->data.raw_addressing = priv->raw_addressing;
/* alloc rx data ring */
bytes = sizeof(*rx->data.data_ring) * slots;
@@ -156,8 +210,8 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
err = -ENOMEM;
goto abort_with_q_resources;
}
- rx->mask = slots - 1;
rx->cnt = 0;
+ rx->db_threshold = priv->rx_desc_cnt / 2;
rx->desc.seqno = 1;
gve_rx_add_to_block(priv, idx);
@@ -168,7 +222,7 @@ abort_with_q_resources:
rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL;
abort_filled:
- kvfree(rx->data.page_info);
+ gve_rx_unfill_pages(priv, rx);
abort_with_slots:
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
@@ -225,15 +279,14 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
return PKT_HASH_TYPE_L2;
}
-static struct sk_buff *gve_rx_copy(struct gve_rx_ring *rx,
- struct net_device *dev,
+static struct sk_buff *gve_rx_copy(struct net_device *dev,
struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
u16 len)
{
struct sk_buff *skb = napi_alloc_skb(napi, len);
void *va = page_info->page_address + GVE_RX_PAD +
- page_info->page_offset;
+ (page_info->page_offset ? PAGE_SIZE / 2 : 0);
if (unlikely(!skb))
return NULL;
@@ -244,15 +297,10 @@ static struct sk_buff *gve_rx_copy(struct gve_rx_ring *rx,
skb->protocol = eth_type_trans(skb, dev);
- u64_stats_update_begin(&rx->statss);
- rx->rx_copied_pkt++;
- u64_stats_update_end(&rx->statss);
-
return skb;
}
-static struct sk_buff *gve_rx_add_frags(struct net_device *dev,
- struct napi_struct *napi,
+static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
u16 len)
{
@@ -262,20 +310,92 @@ static struct sk_buff *gve_rx_add_frags(struct net_device *dev,
return NULL;
skb_add_rx_frag(skb, 0, page_info->page,
- page_info->page_offset +
+ (page_info->page_offset ? PAGE_SIZE / 2 : 0) +
GVE_RX_PAD, len, PAGE_SIZE / 2);
return skb;
}
-static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info,
- struct gve_rx_data_slot *data_ring)
+static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
+{
+ const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);
+
+ /* "flip" to other packet buffer on this page */
+ page_info->page_offset ^= 0x1;
+ *(slot_addr) ^= offset;
+}
+
+static bool gve_rx_can_flip_buffers(struct net_device *netdev)
+{
+ return PAGE_SIZE == 4096
+ ? netdev->mtu + GVE_RX_PAD + ETH_HLEN <= PAGE_SIZE / 2 : false;
+}
+
+static int gve_rx_can_recycle_buffer(struct page *page)
+{
+ int pagecount = page_count(page);
+
+ /* This page is not being used by any SKBs - reuse */
+ if (pagecount == 1)
+ return 1;
+ /* This page is still being used by an SKB - we can't reuse */
+ else if (pagecount >= 2)
+ return 0;
+ WARN(pagecount < 1, "Pagecount should never be < 1");
+ return -1;
+}
+
+static struct sk_buff *
+gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
+ struct gve_rx_slot_page_info *page_info, u16 len,
+ struct napi_struct *napi,
+ union gve_rx_data_slot *data_slot)
{
- u64 addr = be64_to_cpu(data_ring->qpl_offset);
+ struct sk_buff *skb;
+
+ skb = gve_rx_add_frags(napi, page_info, len);
+ if (!skb)
+ return NULL;
- page_info->page_offset ^= PAGE_SIZE / 2;
- addr ^= PAGE_SIZE / 2;
- data_ring->qpl_offset = cpu_to_be64(addr);
+ /* Optimistically stop the kernel from freeing the page by increasing
+ * the page bias. We will check the refcount in refill to determine if
+ * we need to alloc a new page.
+ */
+ get_page(page_info->page);
+
+ return skb;
+}
+
+static struct sk_buff *
+gve_rx_qpl(struct device *dev, struct net_device *netdev,
+ struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info,
+ u16 len, struct napi_struct *napi,
+ union gve_rx_data_slot *data_slot)
+{
+ struct sk_buff *skb;
+
+ /* if raw_addressing mode is not enabled gvnic can only receive into
+ * registered segments. If the buffer can't be recycled, our only
+ * choice is to copy the data out of it so that we can return it to the
+ * device.
+ */
+ if (page_info->can_flip) {
+ skb = gve_rx_add_frags(napi, page_info, len);
+ /* No point in recycling if we didn't get the skb */
+ if (skb) {
+ /* Make sure that the page isn't freed. */
+ get_page(page_info->page);
+ gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
+ }
+ } else {
+ skb = gve_rx_copy(netdev, napi, page_info, len);
+ if (skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copied_pkt++;
+ u64_stats_update_end(&rx->statss);
+ }
+ }
+ return skb;
}
static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
@@ -285,8 +405,9 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
struct gve_priv *priv = rx->gve;
struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
struct net_device *dev = priv->dev;
- struct sk_buff *skb;
- int pagecount;
+ union gve_rx_data_slot *data_slot;
+ struct sk_buff *skb = NULL;
+ dma_addr_t page_bus;
u16 len;
/* drop this packet */
@@ -294,71 +415,55 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
u64_stats_update_begin(&rx->statss);
rx->rx_desc_err_dropped_pkt++;
u64_stats_update_end(&rx->statss);
- return true;
+ return false;
}
len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
page_info = &rx->data.page_info[idx];
- dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],
- PAGE_SIZE, DMA_FROM_DEVICE);
- /* gvnic can only receive into registered segments. If the buffer
- * can't be recycled, our only choice is to copy the data out of
- * it so that we can return it to the device.
- */
+ data_slot = &rx->data.data_ring[idx];
+ page_bus = (rx->data.raw_addressing) ?
+ be64_to_cpu(data_slot->addr) & GVE_DATA_SLOT_ADDR_PAGE_MASK :
+ rx->data.qpl->page_buses[idx];
+ dma_sync_single_for_cpu(&priv->pdev->dev, page_bus,
+ PAGE_SIZE, DMA_FROM_DEVICE);
- if (PAGE_SIZE == 4096) {
- if (len <= priv->rx_copybreak) {
- /* Just copy small packets */
- skb = gve_rx_copy(rx, dev, napi, page_info, len);
- u64_stats_update_begin(&rx->statss);
- rx->rx_copybreak_pkt++;
- u64_stats_update_end(&rx->statss);
- goto have_skb;
- }
- if (unlikely(!gve_can_recycle_pages(dev))) {
- skb = gve_rx_copy(rx, dev, napi, page_info, len);
- goto have_skb;
- }
- pagecount = page_count(page_info->page);
- if (pagecount == 1) {
- /* No part of this page is used by any SKBs; we attach
- * the page fragment to a new SKB and pass it up the
- * stack.
- */
- skb = gve_rx_add_frags(dev, napi, page_info, len);
- if (!skb) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_skb_alloc_fail++;
- u64_stats_update_end(&rx->statss);
- return true;
+ if (len <= priv->rx_copybreak) {
+ /* Just copy small packets */
+ skb = gve_rx_copy(dev, napi, page_info, len);
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copied_pkt++;
+ rx->rx_copybreak_pkt++;
+ u64_stats_update_end(&rx->statss);
+ } else {
+ u8 can_flip = gve_rx_can_flip_buffers(dev);
+ int recycle = 0;
+
+ if (can_flip) {
+ recycle = gve_rx_can_recycle_buffer(page_info->page);
+ if (recycle < 0) {
+ if (!rx->data.raw_addressing)
+ gve_schedule_reset(priv);
+ return false;
}
- /* Make sure the kernel stack can't release the page */
- get_page(page_info->page);
- /* "flip" to other packet buffer on this page */
- gve_rx_flip_buff(page_info, &rx->data.data_ring[idx]);
- } else if (pagecount >= 2) {
- /* We have previously passed the other half of this
- * page up the stack, but it has not yet been freed.
- */
- skb = gve_rx_copy(rx, dev, napi, page_info, len);
+ }
+
+ page_info->can_flip = can_flip && recycle;
+ if (rx->data.raw_addressing) {
+ skb = gve_rx_raw_addressing(&priv->pdev->dev, dev,
+ page_info, len, napi,
+ data_slot);
} else {
- WARN(pagecount < 1, "Pagecount should never be < 1");
- return false;
+ skb = gve_rx_qpl(&priv->pdev->dev, dev, rx,
+ page_info, len, napi, data_slot);
}
- } else {
- skb = gve_rx_copy(rx, dev, napi, page_info, len);
}
-have_skb:
- /* We didn't manage to allocate an skb but we haven't had any
- * reset worthy failures.
- */
if (!skb) {
u64_stats_update_begin(&rx->statss);
rx->rx_skb_alloc_fail++;
u64_stats_update_end(&rx->statss);
- return true;
+ return false;
}
if (likely(feat & NETIF_F_RXCSUM)) {
@@ -399,19 +504,73 @@ static bool gve_rx_work_pending(struct gve_rx_ring *rx)
return (GVE_SEQNO(flags_seq) == rx->desc.seqno);
}
+static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
+{
+ int refill_target = rx->mask + 1;
+ u32 fill_cnt = rx->fill_cnt;
+
+ while (fill_cnt - rx->cnt < refill_target) {
+ struct gve_rx_slot_page_info *page_info;
+ u32 idx = fill_cnt & rx->mask;
+
+ page_info = &rx->data.page_info[idx];
+ if (page_info->can_flip) {
+ /* The other half of the page is free because it was
+ * free when we processed the descriptor. Flip to it.
+ */
+ union gve_rx_data_slot *data_slot =
+ &rx->data.data_ring[idx];
+
+ gve_rx_flip_buff(page_info, &data_slot->addr);
+ page_info->can_flip = 0;
+ } else {
+ /* It is possible that the networking stack has already
+ * finished processing all outstanding packets in the buffer
+ * and it can be reused.
+ * Flipping is unnecessary here - if the networking stack still
+ * owns half the page it is impossible to tell which half. Either
+ * the whole page is free or it needs to be replaced.
+ */
+ int recycle = gve_rx_can_recycle_buffer(page_info->page);
+
+ if (recycle < 0) {
+ if (!rx->data.raw_addressing)
+ gve_schedule_reset(priv);
+ return false;
+ }
+ if (!recycle) {
+ /* We can't reuse the buffer - alloc a new one */
+ union gve_rx_data_slot *data_slot =
+ &rx->data.data_ring[idx];
+ struct device *dev = &priv->pdev->dev;
+
+ gve_rx_free_buffer(dev, page_info, data_slot);
+ page_info->page = NULL;
+ if (gve_rx_alloc_buffer(priv, dev, page_info, data_slot))
+ break;
+ }
+ }
+ fill_cnt++;
+ }
+ rx->fill_cnt = fill_cnt;
+ return true;
+}
+
bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
netdev_features_t feat)
{
struct gve_priv *priv = rx->gve;
+ u32 work_done = 0, packets = 0;
struct gve_rx_desc *desc;
u32 cnt = rx->cnt;
u32 idx = cnt & rx->mask;
- u32 work_done = 0;
u64 bytes = 0;
desc = rx->desc.desc_ring + idx;
while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
work_done < budget) {
+ bool dropped;
+
netif_info(priv, rx_status, priv->dev,
"[%d] idx=%d desc=%p desc->flags_seq=0x%x\n",
rx->q_num, idx, desc, desc->flags_seq);
@@ -419,9 +578,11 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
"[%d] seqno=%d rx->desc.seqno=%d\n",
rx->q_num, GVE_SEQNO(desc->flags_seq),
rx->desc.seqno);
- bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
- if (!gve_rx(rx, desc, feat, idx))
- gve_schedule_reset(priv);
+ dropped = !gve_rx(rx, desc, feat, idx);
+ if (!dropped) {
+ bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
+ packets++;
+ }
cnt++;
idx = cnt & rx->mask;
desc = rx->desc.desc_ring + idx;
@@ -429,15 +590,34 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
work_done++;
}
- if (!work_done)
+ if (!work_done && rx->fill_cnt - cnt > rx->db_threshold)
return false;
u64_stats_update_begin(&rx->statss);
- rx->rpackets += work_done;
+ rx->rpackets += packets;
rx->rbytes += bytes;
u64_stats_update_end(&rx->statss);
rx->cnt = cnt;
- rx->fill_cnt += work_done;
+
+ /* restock ring slots */
+ if (!rx->data.raw_addressing) {
+ /* In QPL mode buffs are refilled as the desc are processed */
+ rx->fill_cnt += work_done;
+ } else if (rx->fill_cnt - cnt <= rx->db_threshold) {
+ /* In raw addressing mode buffs are only refilled if the avail
+ * falls below a threshold.
+ */
+ if (!gve_rx_refill_buffers(priv, rx))
+ return false;
+
+ /* If we were not able to completely refill buffers, we'll want
+ * to schedule this queue for work again to refill buffers.
+ */
+ if (rx->fill_cnt - cnt <= rx->db_threshold) {
+ gve_rx_write_doorbell(priv, rx);
+ return true;
+ }
+ }
gve_rx_write_doorbell(priv, rx);
return gve_rx_work_pending(rx);
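To illustrate the new raw-addressing refill policy in gve_clean_rx_done(): rx->fill_cnt and rx->cnt are free-running counters, so their difference is the number of posted-but-unconsumed buffers, and a refill (plus doorbell) is triggered only once that difference drops to rx->db_threshold (half the descriptor count). A tiny sketch with assumed numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_RING_SIZE    1024u                /* assumed descriptor count */
#define EX_DB_THRESHOLD (EX_RING_SIZE / 2)   /* plays the role of rx->db_threshold */

/* fill_cnt and cnt are free-running, so their difference is the buffers still posted. */
static bool ex_needs_refill(uint32_t fill_cnt, uint32_t cnt)
{
	return fill_cnt - cnt <= EX_DB_THRESHOLD;
}

int main(void)
{
	printf("436 buffers left -> refill? %d\n", ex_needs_refill(1536, 1100));
	printf("636 buffers left -> refill? %d\n", ex_needs_refill(1536, 900));
	return 0;
}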
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index d0244feb0301..6938f3a939d6 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -158,9 +158,11 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
tx->q_resources, tx->q_resources_bus);
tx->q_resources = NULL;
- gve_tx_fifo_release(priv, &tx->tx_fifo);
- gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
- tx->tx_fifo.qpl = NULL;
+ if (!tx->raw_addressing) {
+ gve_tx_fifo_release(priv, &tx->tx_fifo);
+ gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
+ tx->tx_fifo.qpl = NULL;
+ }
bytes = sizeof(*tx->desc) * slots;
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
@@ -206,11 +208,15 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
if (!tx->desc)
goto abort_with_info;
- tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
+ tx->raw_addressing = priv->raw_addressing;
+ tx->dev = &priv->pdev->dev;
+ if (!tx->raw_addressing) {
+ tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
- /* map Tx FIFO */
- if (gve_tx_fifo_init(priv, &tx->tx_fifo))
- goto abort_with_desc;
+ /* map Tx FIFO */
+ if (gve_tx_fifo_init(priv, &tx->tx_fifo))
+ goto abort_with_desc;
+ }
tx->q_resources =
dma_alloc_coherent(hdev,
@@ -228,7 +234,8 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
return 0;
abort_with_fifo:
- gve_tx_fifo_release(priv, &tx->tx_fifo);
+ if (!tx->raw_addressing)
+ gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -301,27 +308,47 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
return bytes;
}
-/* The most descriptors we could need are 3 - 1 for the headers, 1 for
- * the beginning of the payload at the end of the FIFO, and 1 if the
- * payload wraps to the beginning of the FIFO.
+/* The most descriptors we could need is MAX_SKB_FRAGS + 3 : 1 for each skb frag,
+ * +1 for the skb linear portion, +1 for when tcp hdr needs to be in separate descriptor,
+ * and +1 if the payload wraps to the beginning of the FIFO.
*/
-#define MAX_TX_DESC_NEEDED 3
+#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 3)
+static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
+{
+ if (info->skb) {
+ dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma),
+ dma_unmap_len(&info->buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(&info->buf, len, 0);
+ } else {
+ dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma),
+ dma_unmap_len(&info->buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(&info->buf, len, 0);
+ }
+}
/* Check if sufficient resources (descriptor ring space, FIFO space) are
* available to transmit the given number of bytes.
*/
static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
{
- return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED &&
- gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required));
+ bool can_alloc = true;
+
+ if (!tx->raw_addressing)
+ can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required);
+
+ return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
}
/* Stops the queue if the skb cannot be transmitted. */
static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
{
- int bytes_required;
+ int bytes_required = 0;
+
+ if (!tx->raw_addressing)
+ bytes_required = gve_skb_fifo_bytes_required(tx, skb);
- bytes_required = gve_skb_fifo_bytes_required(tx, skb);
if (likely(gve_can_tx(tx, bytes_required)))
return 0;
@@ -395,17 +422,13 @@ static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
{
u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
u64 first_page = iov_offset / PAGE_SIZE;
- dma_addr_t dma;
u64 page;
- for (page = first_page; page <= last_page; page++) {
- dma = page_buses[page];
- dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
- }
+ for (page = first_page; page <= last_page; page++)
+ dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE);
}
-static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
- struct device *dev)
+static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb)
{
int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
union gve_tx_desc *pkt_desc, *seg_desc;
@@ -447,7 +470,7 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
skb_copy_bits(skb, 0,
tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
hlen);
- gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+ gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
info->iov[hdr_nfrags - 1].iov_offset,
info->iov[hdr_nfrags - 1].iov_len);
copy_offset = hlen;
@@ -463,7 +486,7 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
skb_copy_bits(skb, copy_offset,
tx->tx_fifo.base + info->iov[i].iov_offset,
info->iov[i].iov_len);
- gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+ gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
info->iov[i].iov_offset,
info->iov[i].iov_len);
copy_offset += info->iov[i].iov_len;
@@ -472,6 +495,94 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
return 1 + payload_nfrags;
}
+static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct sk_buff *skb)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int hlen, payload_nfrags, l4_hdr_offset;
+ union gve_tx_desc *pkt_desc, *seg_desc;
+ struct gve_tx_buffer_state *info;
+ bool is_gso = skb_is_gso(skb);
+ u32 idx = tx->req & tx->mask;
+ struct gve_tx_dma_buf *buf;
+ u64 addr;
+ u32 len;
+ int i;
+
+ info = &tx->info[idx];
+ pkt_desc = &tx->desc[idx];
+
+ l4_hdr_offset = skb_checksum_start_offset(skb);
+ /* If the skb is gso, then we want only up to the tcp header in the first segment
+ * to efficiently replicate on each segment; otherwise we want the linear portion
+ * of the skb (which will contain the checksum because skb->csum_start and
+ * skb->csum_offset are given relative to skb->head) in the first segment.
+ */
+ hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb);
+ len = skb_headlen(skb);
+
+ info->skb = skb;
+
+ addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx->dev, addr))) {
+ tx->dma_mapping_error++;
+ goto drop;
+ }
+ buf = &info->buf;
+ dma_unmap_len_set(buf, len, len);
+ dma_unmap_addr_set(buf, dma, addr);
+
+ payload_nfrags = shinfo->nr_frags;
+ if (hlen < len) {
+ /* For gso the rest of the linear portion of the skb needs to
+ * be in its own descriptor.
+ */
+ payload_nfrags++;
+ gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
+ 1 + payload_nfrags, hlen, addr);
+
+ len -= hlen;
+ addr += hlen;
+ idx = (tx->req + 1) & tx->mask;
+ seg_desc = &tx->desc[idx];
+ gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
+ } else {
+ gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
+ 1 + payload_nfrags, hlen, addr);
+ }
+
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ const skb_frag_t *frag = &shinfo->frags[i];
+
+ idx = (idx + 1) & tx->mask;
+ seg_desc = &tx->desc[idx];
+ len = skb_frag_size(frag);
+ addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx->dev, addr))) {
+ tx->dma_mapping_error++;
+ goto unmap_drop;
+ }
+ buf = &tx->info[idx].buf;
+ tx->info[idx].skb = NULL;
+ dma_unmap_len_set(buf, len, len);
+ dma_unmap_addr_set(buf, dma, addr);
+
+ gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
+ }
+
+ return 1 + payload_nfrags;
+
+unmap_drop:
+ i += (payload_nfrags == shinfo->nr_frags ? 1 : 2);
+ while (i--) {
+ idx--;
+ gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
+ }
+drop:
+ tx->dropped_pkt++;
+ return 0;
+}
+
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
{
struct gve_priv *priv = netdev_priv(dev);
@@ -490,17 +601,26 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
return NETDEV_TX_BUSY;
}
- nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev);
-
- netdev_tx_sent_queue(tx->netdev_txq, skb->len);
- skb_tx_timestamp(skb);
-
- /* give packets to NIC */
- tx->req += nsegs;
+ if (tx->raw_addressing)
+ nsegs = gve_tx_add_skb_no_copy(priv, tx, skb);
+ else
+ nsegs = gve_tx_add_skb_copy(priv, tx, skb);
+
+ /* If the packet is getting sent, we need to update the skb */
+ if (nsegs) {
+ netdev_tx_sent_queue(tx->netdev_txq, skb->len);
+ skb_tx_timestamp(skb);
+ tx->req += nsegs;
+ } else {
+ dev_kfree_skb_any(skb);
+ }
if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
return NETDEV_TX_OK;
+ /* Give packets to NIC. Even if this packet failed to send the doorbell
+ * might need to be rung because of xmit_more.
+ */
gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
return NETDEV_TX_OK;
}
@@ -525,24 +645,29 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
info = &tx->info[idx];
skb = info->skb;
+ /* Unmap the buffer */
+ if (tx->raw_addressing)
+ gve_tx_unmap_buf(tx->dev, info);
+ tx->done++;
/* Mark as free */
if (skb) {
info->skb = NULL;
bytes += skb->len;
pkts++;
dev_consume_skb_any(skb);
+ if (tx->raw_addressing)
+ continue;
/* FIFO free */
for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
- space_freed += info->iov[i].iov_len +
- info->iov[i].iov_padding;
+ space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
info->iov[i].iov_len = 0;
info->iov[i].iov_padding = 0;
}
}
- tx->done++;
}
- gve_tx_free_fifo(&tx->tx_fifo, space_freed);
+ if (!tx->raw_addressing)
+ gve_tx_free_fifo(&tx->tx_fifo, space_freed);
u64_stats_update_begin(&tx->statss);
tx->bytes_done += bytes;
tx->pkt_done += pkts;
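One way to read the MAX_TX_DESC_NEEDED change above: in the no-copy path a packet consumes one descriptor for the linear region, one more when the GSO header must sit alone in the first descriptor, and one per page fragment; the copy path adds one extra descriptor for a FIFO wrap. A small sketch of that arithmetic, where EX_MAX_SKB_FRAGS is an assumed stand-in for the kernel's MAX_SKB_FRAGS:

#include <stdio.h>

#define EX_MAX_SKB_FRAGS 17  /* assumed stand-in for MAX_SKB_FRAGS */

/* Descriptors for one packet in the no-copy path: one for the linear region,
 * one more if the GSO header needs its own descriptor, one per fragment.
 */
static int ex_descs_needed(int nr_frags, int gso_header_split)
{
	return 1 + (gso_header_split ? 1 : 0) + nr_frags;
}

int main(void)
{
	printf("typical GSO skb with 4 frags: %d descs\n", ex_descs_needed(4, 1));
	printf("worst-case bound (MAX_TX_DESC_NEEDED): %d\n", EX_MAX_SKB_FRAGS + 3);
	return 0;
}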
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index a9aca8c24e90..173d6966c1a3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -546,9 +546,9 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
obj_args.integer.type = ACPI_TYPE_INTEGER;
obj_args.integer.value = mac_cb->mac_id;
- argv4.type = ACPI_TYPE_PACKAGE,
- argv4.package.count = 1,
- argv4.package.elements = &obj_args,
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &obj_args;
obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
&hns_dsaf_acpi_dsm_guid, 0,
@@ -593,9 +593,9 @@ static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
obj_args.integer.type = ACPI_TYPE_INTEGER;
obj_args.integer.value = mac_cb->mac_id;
- argv4.type = ACPI_TYPE_PACKAGE,
- argv4.package.count = 1,
- argv4.package.elements = &obj_args,
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &obj_args;
obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
&hns_dsaf_acpi_dsm_guid, 0,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 1ffe8fac702d..fb5e8842983c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -110,6 +110,7 @@ struct hclge_vf_to_pf_msg {
u8 en_bc;
u8 en_uc;
u8 en_mc;
+ u8 en_limit_promisc;
};
struct {
u8 vector_id;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 78b48861ff8b..a7daf6d4511e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -29,7 +29,9 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <linux/pkt_sched.h>
#include <linux/types.h>
+#include <net/pkt_cls.h>
#define HNAE3_MOD_VERSION "1.0"
@@ -457,6 +459,12 @@ struct hnae3_ae_dev {
* Configure the default MAC for specified VF
* get_module_eeprom
* Get the optical module eeprom info.
+ * add_cls_flower
+ * Add clsflower rule
+ * del_cls_flower
+ * Delete clsflower rule
+ * cls_flower_active
+ * Check if any cls flower rule exist
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -634,6 +642,11 @@ struct hnae3_ae_ops {
int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset,
u32 len, u8 *data);
bool (*get_cmdq_stat)(struct hnae3_handle *handle);
+ int (*add_cls_flower)(struct hnae3_handle *handle,
+ struct flow_cls_offload *cls_flower, int tc);
+ int (*del_cls_flower)(struct hnae3_handle *handle,
+ struct flow_cls_offload *cls_flower);
+ bool (*cls_flower_active)(struct hnae3_handle *handle);
};
struct hnae3_dcb_ops {
@@ -647,7 +660,8 @@ struct hnae3_dcb_ops {
u8 (*getdcbx)(struct hnae3_handle *);
u8 (*setdcbx)(struct hnae3_handle *, u8);
- int (*setup_tc)(struct hnae3_handle *, u8, u8 *);
+ int (*setup_tc)(struct hnae3_handle *handle,
+ struct tc_mqprio_qopt_offload *mqprio_qopt);
};
struct hnae3_ae_algo {
@@ -659,15 +673,17 @@ struct hnae3_ae_algo {
#define HNAE3_INT_NAME_LEN 32
#define HNAE3_ITR_COUNTDOWN_START 100
+#define HNAE3_MAX_TC 8
+#define HNAE3_MAX_USER_PRIO 8
struct hnae3_tc_info {
- u16 tqp_offset; /* TQP offset from base TQP */
- u16 tqp_count; /* Total TQPs */
- u8 tc; /* TC index */
- bool enable; /* If this TC is enable or not */
+ u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
+ u16 tqp_count[HNAE3_MAX_TC];
+ u16 tqp_offset[HNAE3_MAX_TC];
+ unsigned long tc_en; /* bitmap of TC enabled */
+ u8 num_tc; /* Total number of enabled TCs */
+ bool mqprio_active;
};
-#define HNAE3_MAX_TC 8
-#define HNAE3_MAX_USER_PRIO 8
struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */
@@ -676,9 +692,7 @@ struct hnae3_knic_private_info {
u16 num_tx_desc;
u16 num_rx_desc;
- u8 num_tc; /* Total number of enabled TCs */
- u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
- struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */
+ struct hnae3_tc_info tc_info;
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
@@ -719,6 +733,11 @@ struct hnae3_roce_private_info {
#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE)
#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE)
+enum hnae3_pflag {
+ HNAE3_PFLAG_LIMIT_PROMISC,
+ HNAE3_PFLAG_MAX
+};
+
struct hnae3_handle {
struct hnae3_client *client;
struct pci_dev *pdev;
@@ -741,6 +760,9 @@ struct hnae3_handle {
/* Network interface message level enabled bits */
u32 msg_enable;
+
+ unsigned long supported_pflags;
+ unsigned long priv_flags;
};
#define hnae3_set_field(origin, mask, shift, val) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index cb26742e2ed8..9d4e9c053a8f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -385,7 +385,8 @@ static void hns3_dbg_dev_specs(struct hnae3_handle *h)
dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
- dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
+ dev_info(priv->dev, "Total number of enabled TCs: %u\n",
+ kinfo->tc_info.num_tc);
dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 1798c0a04b0e..405e49033417 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -323,13 +323,14 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
- unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info;
+ unsigned int queue_size = kinfo->num_tqps;
int i, ret;
- if (kinfo->num_tc <= 1) {
+ if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) {
netdev_reset_tc(netdev);
} else {
- ret = netdev_set_num_tc(netdev, kinfo->num_tc);
+ ret = netdev_set_num_tc(netdev, tc_info->num_tc);
if (ret) {
netdev_err(netdev,
"netdev_set_num_tc fail, ret=%d!\n", ret);
@@ -337,13 +338,11 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
}
for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (!kinfo->tc_info[i].enable)
+ if (!test_bit(i, &tc_info->tc_en))
continue;
- netdev_set_tc_queue(netdev,
- kinfo->tc_info[i].tc,
- kinfo->tc_info[i].tqp_count,
- kinfo->tc_info[i].tqp_offset);
+ netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
+ tc_info->tqp_offset[i]);
}
}
@@ -369,7 +368,7 @@ static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
u16 alloc_tqps, max_rss_size, rss_size;
h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
- rss_size = alloc_tqps / h->kinfo.num_tc;
+ rss_size = alloc_tqps / h->kinfo.tc_info.num_tc;
return min_t(u16, rss_size, max_rss_size);
}
@@ -508,7 +507,7 @@ static int hns3_nic_net_open(struct net_device *netdev)
kinfo = &h->kinfo;
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
- netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
+ netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]);
if (h->ae_algo->ops->set_timer_task)
h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
@@ -1006,6 +1005,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
struct sk_buff *skb)
{
struct hnae3_handle *handle = tx_ring->tqp->handle;
+ struct hnae3_ae_dev *ae_dev;
struct vlan_ethhdr *vhdr;
int rc;
@@ -1013,10 +1013,13 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
skb_vlan_tag_present(skb)))
return 0;
- /* Since HW limitation, if port based insert VLAN enabled, only one VLAN
- * header is allowed in skb, otherwise it will cause RAS error.
+ /* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert
+ * VLAN enabled, only one VLAN header is allowed in skb, otherwise it
+ * will cause RAS error.
*/
+ ae_dev = pci_get_drvdata(handle->pdev);
if (unlikely(skb_vlan_tagged_multi(skb) &&
+ ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
handle->port_base_vlan_state ==
HNAE3_PORT_BASE_VLAN_ENABLE))
return -EINVAL;
@@ -1665,6 +1668,13 @@ static int hns3_nic_set_features(struct net_device *netdev,
h->ae_algo->ops->enable_fd(h, enable);
}
+ if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
+ h->ae_algo->ops->cls_flower_active(h)) {
+ netdev_err(netdev,
+ "there are offloaded TC filters active, cannot disable HW TC offload");
+ return -EINVAL;
+ }
+
netdev->features = features;
return 0;
}
@@ -1790,7 +1800,6 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
- u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
struct hnae3_knic_private_info *kinfo;
u8 tc = mqprio_qopt->qopt.num_tc;
u16 mode = mqprio_qopt->mode;
@@ -1813,16 +1822,70 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
- kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP;
+ kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP;
+}
+
+static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv,
+ struct flow_cls_offload *flow)
+{
+ int tc = tc_classid_to_hwtc(priv->netdev, flow->classid);
+ struct hnae3_handle *h = hns3_get_handle(priv->netdev);
+
+ switch (flow->command) {
+ case FLOW_CLS_REPLACE:
+ if (h->ae_algo->ops->add_cls_flower)
+ return h->ae_algo->ops->add_cls_flower(h, flow, tc);
+ break;
+ case FLOW_CLS_DESTROY:
+ if (h->ae_algo->ops->del_cls_flower)
+ return h->ae_algo->ops->del_cls_flower(h, flow);
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct hns3_nic_priv *priv = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return hns3_setup_tc_cls_flower(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
+static LIST_HEAD(hns3_block_cb_list);
+
static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- if (type != TC_SETUP_QDISC_MQPRIO)
+ struct hns3_nic_priv *priv = netdev_priv(dev);
+ int ret;
+
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ ret = hns3_setup_tc(dev, type_data);
+ break;
+ case TC_SETUP_BLOCK:
+ ret = flow_block_cb_setup_simple(type_data,
+ &hns3_block_cb_list,
+ hns3_setup_tc_block_cb,
+ priv, priv, true);
+ break;
+ default:
return -EOPNOTSUPP;
+ }
- return hns3_setup_tc(dev, type_data);
+ return ret;
}
static int hns3_vlan_rx_add_vid(struct net_device *netdev,
@@ -2419,6 +2482,11 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
+
+ if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
+ netdev->hw_features |= NETIF_F_HW_TC;
+ netdev->features |= NETIF_F_HW_TC;
+ }
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -3976,21 +4044,20 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
{
struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info;
int i;
for (i = 0; i < HNAE3_MAX_TC; i++) {
- struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
int j;
- if (!tc_info->enable)
+ if (!test_bit(i, &tc_info->tc_en))
continue;
- for (j = 0; j < tc_info->tqp_count; j++) {
+ for (j = 0; j < tc_info->tqp_count[i]; j++) {
struct hnae3_queue *q;
- q = priv->ring[tc_info->tqp_offset + j].tqp;
- hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
- tc_info->tc);
+ q = priv->ring[tc_info->tqp_offset[i] + j].tqp;
+ hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i);
}
}
}
@@ -4117,7 +4184,8 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
- dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
+ dev_info(priv->dev, "Total number of enabled TCs: %u\n",
+ kinfo->tc_info.num_tc);
dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
}
@@ -4226,6 +4294,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
+
if (netif_msg_drv(handle))
hns3_info_show(priv);
@@ -4685,6 +4756,12 @@ int hns3_set_channels(struct net_device *netdev,
if (ch->rx_count || ch->tx_count)
return -EINVAL;
+ if (kinfo->tc_info.mqprio_active) {
+ dev_err(&netdev->dev,
+ "it's not allowed to set channels via ethtool when MQPRIO mode is on\n");
+ return -EINVAL;
+ }
+
if (new_tqp_num > hns3_get_max_available_channels(h) ||
new_tqp_num < 1) {
dev_err(&netdev->dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 3cca3c125c03..e2fc443fe92c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -18,6 +18,11 @@ struct hns3_sfp_type {
u8 ext_type;
};
+struct hns3_pflag_desc {
+ char name[ETH_GSTRING_LEN];
+ void (*handler)(struct net_device *netdev, bool enable);
+};
+
/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) { \
.stats_string = _string, \
@@ -60,6 +65,8 @@ static const struct hns3_stats hns3_rxq_stats[] = {
HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
};
+#define HNS3_PRIV_FLAGS_LEN ARRAY_SIZE(hns3_priv_flags)
+
#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
@@ -395,6 +402,23 @@ static void hns3_self_test(struct net_device *ndev,
netif_dbg(h, drv, ndev, "self test end\n");
}
+static void hns3_update_limit_promisc_mode(struct net_device *netdev,
+ bool enable)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (enable)
+ set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
+ else
+ clear_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
+
+ hns3_request_update_promisc_mode(handle);
+}
+
+static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = {
+ { "limit_promisc", hns3_update_limit_promisc_mode }
+};
+
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -411,6 +435,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
case ETH_SS_TEST:
return ops->get_sset_count(h, stringset);
+ case ETH_SS_PRIV_FLAGS:
+ return HNAE3_PFLAG_MAX;
+
default:
return -EOPNOTSUPP;
}
@@ -464,6 +491,7 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops = h->ae_algo->ops;
char *buff = (char *)data;
+ int i;
if (!ops->get_strings)
return;
@@ -476,6 +504,13 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
case ETH_SS_TEST:
ops->get_strings(h, stringset, data);
break;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < HNS3_PRIV_FLAGS_LEN; i++) {
+ snprintf(buff, ETH_GSTRING_LEN, "%s",
+ hns3_priv_flags[i].name);
+ buff += ETH_GSTRING_LEN;
+ }
+ break;
default:
break;
}
@@ -1517,6 +1552,53 @@ static int hns3_get_module_eeprom(struct net_device *netdev,
return ops->get_module_eeprom(handle, ee->offset, ee->len, data);
}
+static u32 hns3_get_priv_flags(struct net_device *netdev)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ return handle->priv_flags;
+}
+
+static int hns3_check_priv_flags(struct hnae3_handle *h, u32 changed)
+{
+ u32 i;
+
+ for (i = 0; i < HNAE3_PFLAG_MAX; i++)
+ if ((changed & BIT(i)) && !test_bit(i, &h->supported_pflags)) {
+ netdev_err(h->netdev, "%s is unsupported\n",
+ hns3_priv_flags[i].name);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int hns3_set_priv_flags(struct net_device *netdev, u32 pflags)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ u32 changed = pflags ^ handle->priv_flags;
+ int ret;
+ u32 i;
+
+ ret = hns3_check_priv_flags(handle, changed);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < HNAE3_PFLAG_MAX; i++) {
+ if (changed & BIT(i)) {
+ bool enable = !(handle->priv_flags & BIT(i));
+
+ if (enable)
+ handle->priv_flags |= BIT(i);
+ else
+ handle->priv_flags &= ~BIT(i);
+ hns3_priv_flags[i].handler(netdev, enable);
+ }
+ }
+
+ return 0;
+}
+
#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
ETHTOOL_COALESCE_USE_ADAPTIVE | \
ETHTOOL_COALESCE_RX_USECS_HIGH | \
@@ -1547,6 +1629,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_link = hns3_get_link,
.get_msglevel = hns3_get_msglevel,
.set_msglevel = hns3_set_msglevel,
+ .get_priv_flags = hns3_get_priv_flags,
+ .set_priv_flags = hns3_set_priv_flags,
};
static const struct ethtool_ops hns3_ethtool_ops = {
@@ -1583,6 +1667,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.set_fecparam = hns3_set_fecparam,
.get_module_info = hns3_get_module_info,
.get_module_eeprom = hns3_get_module_eeprom,
+ .get_priv_flags = hns3_get_priv_flags,
+ .set_priv_flags = hns3_set_priv_flags,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 85986c7d71fa..b728be4737f8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -359,6 +359,8 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B))
set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B))
+ set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps);
}
static enum hclge_cmd_status
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 49cbd954f76b..edfadb5cb1c3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -518,6 +518,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10)
#define HCLGE_CFG_UMV_TBL_SPACE_S 16
#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
+#define HCLGE_CFG_PF_RSS_SIZE_S 0
+#define HCLGE_CFG_PF_RSS_SIZE_M GENMASK(3, 0)
#define HCLGE_CFG_CMD_CNT 4
@@ -558,18 +560,23 @@ struct hclge_rss_input_tuple_cmd {
};
#define HCLGE_RSS_CFG_TBL_SIZE 16
+#define HCLGE_RSS_CFG_TBL_SIZE_H 4
+#define HCLGE_RSS_CFG_TBL_BW_H 2U
+#define HCLGE_RSS_CFG_TBL_BW_L 8U
struct hclge_rss_indirection_table_cmd {
__le16 start_table_index;
__le16 rss_set_bitmap;
- u8 rsv[4];
- u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE];
+ u8 rss_qid_h[HCLGE_RSS_CFG_TBL_SIZE_H];
+ u8 rss_qid_l[HCLGE_RSS_CFG_TBL_SIZE];
};
#define HCLGE_RSS_TC_OFFSET_S 0
-#define HCLGE_RSS_TC_OFFSET_M GENMASK(9, 0)
+#define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0)
+#define HCLGE_RSS_TC_SIZE_MSB_B 11
#define HCLGE_RSS_TC_SIZE_S 12
#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12)
+#define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3
#define HCLGE_RSS_TC_VALID_B 15
struct hclge_rss_tc_mode_cmd {
__le16 rss_tc_mode[HCLGE_MAX_TC_NUM];
@@ -583,23 +590,26 @@ struct hclge_link_status_cmd {
u8 rsv[23];
};
-struct hclge_promisc_param {
- u8 vf_id;
- u8 enable;
-};
+/* for DEVICE_VERSION_V1/2, reference to promisc cmd byte8 */
+#define HCLGE_PROMISC_EN_UC 1
+#define HCLGE_PROMISC_EN_MC 2
+#define HCLGE_PROMISC_EN_BC 3
+#define HCLGE_PROMISC_TX_EN 4
+#define HCLGE_PROMISC_RX_EN 5
+
+/* for DEVICE_VERSION_V3, reference to promisc cmd byte10 */
+#define HCLGE_PROMISC_UC_RX_EN 2
+#define HCLGE_PROMISC_MC_RX_EN 3
+#define HCLGE_PROMISC_BC_RX_EN 4
+#define HCLGE_PROMISC_UC_TX_EN 5
+#define HCLGE_PROMISC_MC_TX_EN 6
+#define HCLGE_PROMISC_BC_TX_EN 7
-#define HCLGE_PROMISC_TX_EN_B BIT(4)
-#define HCLGE_PROMISC_RX_EN_B BIT(5)
-#define HCLGE_PROMISC_EN_B 1
-#define HCLGE_PROMISC_EN_ALL 0x7
-#define HCLGE_PROMISC_EN_UC 0x1
-#define HCLGE_PROMISC_EN_MC 0x2
-#define HCLGE_PROMISC_EN_BC 0x4
struct hclge_promisc_cfg_cmd {
- u8 flag;
+ u8 promisc;
u8 vf_id;
- __le16 rsv0;
- u8 rsv1[20];
+ u8 extend_promisc;
+ u8 rsv0[21];
};
enum hclge_promisc_type {
@@ -822,6 +832,7 @@ enum hclge_mac_vlan_cfg_sel {
#define HCLGE_CFG_NIC_ROCE_SEL_B 4
#define HCLGE_ACCEPT_TAG2_B 5
#define HCLGE_ACCEPT_UNTAG2_B 6
+#define HCLGE_TAG_SHIFT_MODE_EN_B 7
#define HCLGE_VF_NUM_PER_BYTE 8
struct hclge_vport_vtag_tx_cfg_cmd {
@@ -838,6 +849,8 @@ struct hclge_vport_vtag_tx_cfg_cmd {
#define HCLGE_REM_TAG2_EN_B 1
#define HCLGE_SHOW_TAG1_EN_B 2
#define HCLGE_SHOW_TAG2_EN_B 3
+#define HCLGE_DISCARD_TAG1_EN_B 5
+#define HCLGE_DISCARD_TAG2_EN_B 6
struct hclge_vport_vtag_rx_cfg_cmd {
u8 vport_vlan_cfg;
u8 vf_offset;
@@ -1045,6 +1058,9 @@ struct hclge_fd_tcam_config_3_cmd {
#define HCLGE_FD_AD_WR_RULE_ID_B 0
#define HCLGE_FD_AD_RULE_ID_S 1
#define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1)
+#define HCLGE_FD_AD_TC_OVRD_B 16
+#define HCLGE_FD_AD_TC_SIZE_S 17
+#define HCLGE_FD_AD_TC_SIZE_M GENMASK(20, 17)
struct hclge_fd_ad_config_cmd {
u8 stage;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index f990f6915226..e08d11b8ecf1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -397,32 +397,130 @@ static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
return 0;
}
+static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ u16 queue_sum = 0;
+ int ret;
+ int i;
+
+ if (!mqprio_qopt->qopt.num_tc) {
+ mqprio_qopt->qopt.num_tc = 1;
+ return 0;
+ }
+
+ ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
+ mqprio_qopt->qopt.prio_tc_map);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
+ if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
+ dev_err(&hdev->pdev->dev,
+ "qopt queue count must be power of 2\n");
+ return -EINVAL;
+ }
+
+ if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
+ dev_err(&hdev->pdev->dev,
+ "qopt queue count should be no more than %u\n",
+ hdev->pf_rss_size_max);
+ return -EINVAL;
+ }
+
+ if (mqprio_qopt->qopt.offset[i] != queue_sum) {
+ dev_err(&hdev->pdev->dev,
+ "qopt queue offset must start from 0, and being continuous\n");
+ return -EINVAL;
+ }
+
+ if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
+ dev_err(&hdev->pdev->dev,
+ "qopt tx_rate is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ queue_sum = mqprio_qopt->qopt.offset[i];
+ queue_sum += mqprio_qopt->qopt.count[i];
+ }
+ if (hdev->vport[0].alloc_tqps < queue_sum) {
+ dev_err(&hdev->pdev->dev,
+ "qopt queue count sum should be less than %u\n",
+ hdev->vport[0].alloc_tqps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ int i;
+
+ memset(tc_info, 0, sizeof(*tc_info));
+ tc_info->num_tc = mqprio_qopt->qopt.num_tc;
+ memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
+ sizeof_field(struct hnae3_tc_info, prio_tc));
+ memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
+ sizeof_field(struct hnae3_tc_info, tqp_count));
+ memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
+ sizeof_field(struct hnae3_tc_info, tqp_offset));
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ set_bit(tc_info->prio_tc[i], &tc_info->tc_en);
+}
+
+static int hclge_config_tc(struct hclge_dev *hdev,
+ struct hnae3_tc_info *tc_info)
+{
+ int i;
+
+ hclge_tm_schd_info_update(hdev, tc_info->num_tc);
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];
+
+ return hclge_map_update(hdev);
+}
+
/* Set up TC for hardware offloaded mqprio in channel mode */
-static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
+static int hclge_setup_tc(struct hnae3_handle *h,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct hnae3_knic_private_info *kinfo;
struct hclge_dev *hdev = vport->back;
+ struct hnae3_tc_info old_tc_info;
+ u8 tc = mqprio_qopt->qopt.num_tc;
int ret;
+ /* if the client is unregistered, it's not allowed to change the
+ * mqprio configuration, since that may cause the ring uninit to
+ * fail.
+ */
+ if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
+ return -EBUSY;
+
if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
return -EINVAL;
- ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
- if (ret)
- return -EINVAL;
+ ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to check mqprio qopt params, ret = %d\n", ret);
+ return ret;
+ }
ret = hclge_notify_down_uinit(hdev);
if (ret)
return ret;
- hclge_tm_schd_info_update(hdev, tc);
- hclge_tm_prio_tc_info_update(hdev, prio_tc);
-
- ret = hclge_tm_init_hw(hdev, false);
- if (ret)
- goto err_out;
+ kinfo = &vport->nic.kinfo;
+ memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
+ hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
+ kinfo->tc_info.mqprio_active = tc > 0;
- ret = hclge_client_setup_tc(hdev);
+ ret = hclge_config_tc(hdev, &kinfo->tc_info);
if (ret)
goto err_out;
@@ -436,6 +534,12 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
return hclge_notify_init_up(hdev);
err_out:
+ /* roll-back */
+ memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
+ if (hclge_config_tc(hdev, &kinfo->tc_info))
+ dev_err(&hdev->pdev->dev,
+ "failed to roll back tc configuration\n");
+
hclge_notify_init_up(hdev);
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index bedbc118c4a3..8f6dea5198cf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -1454,7 +1454,7 @@ static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);
- for (i = 0; i < kinfo->num_tc; i++) {
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
u16 qsid = vport->qs_offset + i;
hclge_dbg_dump_qs_shaper_single(hdev, qsid);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index a9066e6ff697..ca2ab6cf84d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -35,8 +35,6 @@
#define HCLGE_DBG_DFX_SSU_2_OFFSET 12
-#pragma pack(1)
-
struct hclge_qos_pri_map_cmd {
u8 pri0_tc : 4,
pri1_tc : 4;
@@ -85,8 +83,6 @@ struct hclge_dbg_reg_type_info {
struct hclge_dbg_reg_common_msg reg_msg;
};
-#pragma pack()
-
static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ca668a47121e..7a164115c845 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1285,9 +1285,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
HCLGE_CFG_DEFAULT_SPEED_M,
HCLGE_CFG_DEFAULT_SPEED_S);
- cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_RSS_SIZE_M,
- HCLGE_CFG_RSS_SIZE_S);
+ cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_RSS_SIZE_M,
+ HCLGE_CFG_RSS_SIZE_S);
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1308,6 +1308,21 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
HCLGE_CFG_UMV_TBL_SPACE_S);
if (!cfg->umv_space)
cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+
+ cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
+ HCLGE_CFG_PF_RSS_SIZE_M,
+ HCLGE_CFG_PF_RSS_SIZE_S);
+
+ /* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power-of-2
+ * exponent instead of the value itself, which is more flexible for
+ * future changes and expansions.
+ * When the PF field is 0, PF and VF share the same max rss size
+ * field: HCLGE_CFG_RSS_SIZE_S.
+ */
+ cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
+ 1U << cfg->pf_rss_size_max :
+ cfg->vf_rss_size_max;
}
/* hclge_get_cfg: query the static parameter from flash
@@ -1469,7 +1484,8 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->num_vmdq_vport = cfg.vmdq_vport_num;
hdev->base_tqp_pid = 0;
- hdev->rss_size_max = cfg.rss_size_max;
+ hdev->vf_rss_size_max = cfg.vf_rss_size_max;
+ hdev->pf_rss_size_max = cfg.pf_rss_size_max;
hdev->rx_buf_len = cfg.rx_buf_len;
ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
hdev->hw.mac.media_type = cfg.media_type;
@@ -1652,7 +1668,7 @@ static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
}
}
vport->alloc_tqps = alloced;
- kinfo->rss_size = min_t(u16, hdev->rss_size_max,
+ kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
vport->alloc_tqps / hdev->tm_info.num_tc);
/* ensure one to one mapping between irq and queue at default */
@@ -4262,12 +4278,16 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
return 0;
}
-static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
+static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
{
struct hclge_rss_indirection_table_cmd *req;
struct hclge_desc desc;
- int i, j;
+ u8 rss_msb_oft;
+ u8 rss_msb_val;
int ret;
+ u16 qid;
+ int i;
+ u32 j;
req = (struct hclge_rss_indirection_table_cmd *)desc.data;
@@ -4278,11 +4298,15 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
req->start_table_index =
cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
-
- for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
- req->rss_result[j] =
- indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
-
+ for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
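+ /* each queue id is split: the low 8 bits are stored in
+ * rss_qid_l[], and bit 8 is packed into rss_qid_h[] with
+ * 2 bits of space reserved per entry.
+ */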
+ qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
+ req->rss_qid_l[j] = qid & 0xff;
+ rss_msb_oft =
+ j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
+ rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
+ (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
+ req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
+ }
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -4311,6 +4335,8 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
HCLGE_RSS_TC_SIZE_S, tc_size[i]);
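+ /* tc_size is 4 bits wide: bits 14:12 hold the low 3 bits and
+ * bit 11 (TC_SIZE_MSB) carries the most significant bit.
+ */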
+ hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
+ tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
@@ -4601,21 +4627,58 @@ static int hclge_get_tc_size(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return hdev->rss_size_max;
+ return hdev->pf_rss_size_max;
}
-int hclge_rss_init_hw(struct hclge_dev *hdev)
+static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
struct hclge_vport *vport = hdev->vport;
- u8 *rss_indir = vport[0].rss_indirection_tbl;
- u16 rss_size = vport[0].alloc_rss_size;
u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+ u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
+ struct hnae3_tc_info *tc_info;
+ u16 roundup_size;
+ u16 rss_size;
+ int i;
+
+ tc_info = &vport->nic.kinfo.tc_info;
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ rss_size = tc_info->tqp_count[i];
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ /* the tc_size written to hardware is the log2 of rss_size rounded
+ * up to a power of two; the actual queue size is limited by the
+ * indirection table.
+ */
+ if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
+ rss_size == 0) {
+ dev_err(&hdev->pdev->dev,
+ "Configure rss tc size failed, invalid TC_SIZE = %u\n",
+ rss_size);
+ return -EINVAL;
+ }
+
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = tc_info->tqp_offset[i];
+ }
+
+ return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+}
+
+int hclge_rss_init_hw(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u16 *rss_indir = vport[0].rss_indirection_tbl;
u8 *key = vport[0].rss_hash_key;
u8 hfunc = vport[0].rss_algo;
- u16 tc_valid[HCLGE_MAX_TC_NUM];
- u16 roundup_size;
- unsigned int i;
int ret;
ret = hclge_set_rss_indir_table(hdev, rss_indir);
@@ -4630,32 +4693,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
if (ret)
return ret;
- /* Each TC have the same queue size, and tc_size set to hardware is
- * the log2 of roundup power of two of rss_size, the acutal queue
- * size is limited by indirection table.
- */
- if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
- dev_err(&hdev->pdev->dev,
- "Configure rss tc size failed, invalid TC_SIZE = %u\n",
- rss_size);
- return -EINVAL;
- }
-
- roundup_size = roundup_pow_of_two(rss_size);
- roundup_size = ilog2(roundup_size);
-
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- tc_valid[i] = 0;
-
- if (!(hdev->hw_tc_map & BIT(i)))
- continue;
-
- tc_valid[i] = 1;
- tc_size[i] = roundup_size;
- tc_offset[i] = rss_size * i;
- }
-
- return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ return hclge_init_rss_tc_mode(hdev);
}
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
@@ -4826,61 +4864,56 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
return ret;
}
-static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
- struct hclge_promisc_param *param)
+static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
+ bool en_uc, bool en_mc, bool en_bc)
{
+ struct hclge_vport *vport = &hdev->vport[vf_id];
+ struct hnae3_handle *handle = &vport->nic;
struct hclge_promisc_cfg_cmd *req;
struct hclge_desc desc;
+ bool uc_tx_en = en_uc;
+ u8 promisc_cfg = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
req = (struct hclge_promisc_cfg_cmd *)desc.data;
- req->vf_id = param->vf_id;
+ req->vf_id = vf_id;
- /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
- * pdev revision(0x20), new revision support them. The
- * value of this two fields will not return error when driver
- * send command to fireware in revision(0x20).
- */
- req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
- HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
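+ /* limit_promisc restricts unicast promiscuous mode to the RX
+ * direction only, so unicast TX promisc is disabled here.
+ */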
+ if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
+ uc_tx_en = false;
+
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
+ req->extend_promisc = promisc_cfg;
+
+ /* to be compatible with DEVICE_VERSION_V1/2 */
+ promisc_cfg = 0;
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
+ req->promisc = promisc_cfg;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
- "failed to set vport %d promisc mode, ret = %d.\n",
- param->vf_id, ret);
+ "failed to set vport %u promisc mode, ret = %d.\n",
+ vf_id, ret);
return ret;
}
-static void hclge_promisc_param_init(struct hclge_promisc_param *param,
- bool en_uc, bool en_mc, bool en_bc,
- int vport_id)
-{
- if (!param)
- return;
-
- memset(param, 0, sizeof(struct hclge_promisc_param));
- if (en_uc)
- param->enable = HCLGE_PROMISC_EN_UC;
- if (en_mc)
- param->enable |= HCLGE_PROMISC_EN_MC;
- if (en_bc)
- param->enable |= HCLGE_PROMISC_EN_BC;
- param->vf_id = vport_id;
-}
-
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
bool en_mc_pmc, bool en_bc_pmc)
{
- struct hclge_dev *hdev = vport->back;
- struct hclge_promisc_param param;
-
- hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
- vport->vport_id);
- return hclge_cmd_set_promisc_mode(hdev, &param);
+ return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
+ en_uc_pmc, en_mc_pmc, en_bc_pmc);
}
static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
@@ -5015,7 +5048,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
}
key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
- key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
+ key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
key_cfg->outer_sipv6_word_en = 0;
@@ -5092,6 +5125,7 @@ static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
struct hclge_fd_ad_data *action)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_fd_ad_config_cmd *req;
struct hclge_desc desc;
u64 ad_data = 0;
@@ -5107,6 +5141,12 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
action->write_rule_id_to_bd);
hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
action->rule_id);
+ if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
+ action->override_tc);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
+ HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
+ }
ad_data <<= 32;
hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
@@ -5350,16 +5390,22 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
struct hclge_fd_rule *rule)
{
+ struct hclge_vport *vport = hdev->vport;
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_fd_ad_data ad_data;
+ memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
ad_data.ad_id = rule->location;
if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
ad_data.drop_packet = true;
- ad_data.forward_to_direct_queue = false;
- ad_data.queue_id = 0;
+ } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
+ ad_data.override_tc = true;
+ ad_data.queue_id =
+ kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
+ ad_data.tc_size =
+ ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
} else {
- ad_data.drop_packet = false;
ad_data.forward_to_direct_queue = true;
ad_data.queue_id = rule->queue_id;
}
@@ -5876,6 +5922,14 @@ clear_rule:
return ret;
}
+static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
+}
+
static int hclge_add_fd_entry(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd)
{
@@ -5900,6 +5954,12 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
return -EOPNOTSUPP;
}
+ if (hclge_is_cls_flower_active(handle)) {
+ dev_err(&hdev->pdev->dev,
+ "please delete all exist cls flower rules first\n");
+ return -EINVAL;
+ }
+
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
ret = hclge_fd_check_spec(hdev, fs, &unused);
@@ -5930,7 +5990,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
return -EINVAL;
}
- action = HCLGE_FD_ACTION_ACCEPT_PACKET;
+ action = HCLGE_FD_ACTION_SELECT_QUEUE;
q_index = ring;
}
@@ -5981,7 +6041,8 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
return -EINVAL;
- if (!hclge_fd_rule_exist(hdev, fs->location)) {
+ if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
+ !hclge_fd_rule_exist(hdev, fs->location)) {
dev_err(&hdev->pdev->dev,
"Delete fail, rule %u is inexistent\n", fs->location);
return -ENOENT;
@@ -6081,7 +6142,7 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
return -EOPNOTSUPP;
cmd->rule_cnt = hdev->hclge_fd_rule_num;
@@ -6424,7 +6485,8 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
* arfs should not work
*/
spin_lock_bh(&hdev->fd_rule_lock);
- if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
+ if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE ||
+ hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
spin_unlock_bh(&hdev->fd_rule_lock);
return -EOPNOTSUPP;
}
@@ -6452,7 +6514,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
set_bit(bit_id, hdev->fd_bmap);
rule->location = bit_id;
- rule->flow_id = flow_id;
+ rule->arfs.flow_id = flow_id;
rule->queue_id = queue_id;
hclge_fd_build_arfs_rule(&new_tuples, rule);
ret = hclge_fd_config_rule(hdev, rule);
@@ -6496,7 +6558,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
}
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
if (rps_may_expire_flow(handle->netdev, rule->queue_id,
- rule->flow_id, rule->location)) {
+ rule->arfs.flow_id, rule->location)) {
hlist_del_init(&rule->rule_node);
hlist_add_head(&rule->rule_node, &del_list);
hdev->hclge_fd_rule_num--;
@@ -6525,6 +6587,286 @@ static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
#endif
}
+static void hclge_get_cls_key_basic(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+ u16 ethtype_key, ethtype_mask;
+
+ flow_rule_match_basic(flow, &match);
+ ethtype_key = ntohs(match.key->n_proto);
+ ethtype_mask = ntohs(match.mask->n_proto);
+
+ if (ethtype_key == ETH_P_ALL) {
+ ethtype_key = 0;
+ ethtype_mask = 0;
+ }
+ rule->tuples.ether_proto = ethtype_key;
+ rule->tuples_mask.ether_proto = ethtype_mask;
+ rule->tuples.ip_proto = match.key->ip_proto;
+ rule->tuples_mask.ip_proto = match.mask->ip_proto;
+ } else {
+ rule->unused_tuple |= BIT(INNER_IP_PROTO);
+ rule->unused_tuple |= BIT(INNER_ETH_TYPE);
+ }
+}
+
+static void hclge_get_cls_key_mac(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(flow, &match);
+ ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
+ ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
+ ether_addr_copy(rule->tuples.src_mac, match.key->src);
+ ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
+ } else {
+ rule->unused_tuple |= BIT(INNER_DST_MAC);
+ rule->unused_tuple |= BIT(INNER_SRC_MAC);
+ }
+}
+
+static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(flow, &match);
+ rule->tuples.vlan_tag1 = match.key->vlan_id |
+ (match.key->vlan_priority << VLAN_PRIO_SHIFT);
+ rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
+ (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
+ } else {
+ rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
+ }
+}
+
+static void hclge_get_cls_key_ip(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ u16 addr_type = 0;
+
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(flow, &match);
+ addr_type = match.key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(flow, &match);
+ rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
+ be32_to_cpu(match.mask->src);
+ rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(match.mask->dst);
+ } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(flow, &match);
+ be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.src_ip,
+ match.mask->src.s6_addr32, IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip,
+ match.mask->dst.s6_addr32, IPV6_SIZE);
+ } else {
+ rule->unused_tuple |= BIT(INNER_SRC_IP);
+ rule->unused_tuple |= BIT(INNER_DST_IP);
+ }
+}
+
+static void hclge_get_cls_key_port(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(flow, &match);
+
+ rule->tuples.src_port = be16_to_cpu(match.key->src);
+ rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
+ rule->tuples.dst_port = be16_to_cpu(match.key->dst);
+ rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
+ } else {
+ rule->unused_tuple |= BIT(INNER_SRC_PORT);
+ rule->unused_tuple |= BIT(INNER_DST_PORT);
+ }
+}
+
+static int hclge_parse_cls_flower(struct hclge_dev *hdev,
+ struct flow_cls_offload *cls_flower,
+ struct hclge_fd_rule *rule)
+{
+ struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
+ struct flow_dissector *dissector = flow->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
+ dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ hclge_get_cls_key_basic(flow, rule);
+ hclge_get_cls_key_mac(flow, rule);
+ hclge_get_cls_key_vlan(flow, rule);
+ hclge_get_cls_key_ip(flow, rule);
+ hclge_get_cls_key_port(flow, rule);
+
+ return 0;
+}
+
+static int hclge_check_cls_flower(struct hclge_dev *hdev,
+ struct flow_cls_offload *cls_flower, int tc)
+{
+ u32 prio = cls_flower->common.prio;
+
+ if (tc < 0 || tc > hdev->tc_max) {
+ dev_err(&hdev->pdev->dev, "invalid traffic class\n");
+ return -EINVAL;
+ }
+
+ if (prio == 0 ||
+ prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ dev_err(&hdev->pdev->dev,
+ "prio %u should be in range[1, %u]\n",
+ prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
+ return -EINVAL;
+ }
+
+ if (test_bit(prio - 1, hdev->fd_bmap)) {
+ dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int hclge_add_cls_flower(struct hnae3_handle *handle,
+ struct flow_cls_offload *cls_flower,
+ int tc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ int ret;
+
+ if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
+ dev_err(&hdev->pdev->dev,
+ "please remove all exist fd rules via ethtool first\n");
+ return -EINVAL;
+ }
+
+ ret = hclge_check_cls_flower(hdev, cls_flower, tc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to check cls flower params, ret = %d\n", ret);
+ return ret;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
+ if (ret)
+ goto err;
+
+ rule->action = HCLGE_FD_ACTION_SELECT_TC;
+ rule->cls_flower.tc = tc;
+ rule->location = cls_flower->common.prio - 1;
+ rule->vf_id = 0;
+ rule->cls_flower.cookie = cls_flower->cookie;
+ rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hclge_clear_arfs_rules(handle);
+
+ ret = hclge_fd_config_rule(hdev, rule);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to add cls flower rule, ret = %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ kfree(rule);
+ return ret;
+}
+
+static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
+ unsigned long cookie)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (rule->cls_flower.cookie == cookie)
+ return rule;
+ }
+
+ return NULL;
+}
+
+static int hclge_del_cls_flower(struct hnae3_handle *handle,
+ struct flow_cls_offload *cls_flower)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ int ret;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
+ if (!rule) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -EINVAL;
+ }
+
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
+ NULL, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to delete cls flower rule %u, ret = %d\n",
+ rule->location, ret);
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return ret;
+ }
+
+ ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to delete cls flower rule %u in list, ret = %d\n",
+ rule->location, ret);
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return ret;
+ }
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return 0;
+}
+
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -8622,6 +8964,8 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
vcfg->insert_tag1_en ? 1 : 0);
hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
vcfg->insert_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
+ vcfg->tag_shift_mode_en ? 1 : 0);
hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
@@ -8659,6 +9003,10 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
vcfg->vlan1_vlan_prionly ? 1 : 0);
hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
vcfg->vlan2_vlan_prionly ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
+ vcfg->strip_tag1_discard_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
+ vcfg->strip_tag2_discard_en ? 1 : 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
@@ -8686,7 +9034,10 @@ static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
vport->txvlan_cfg.insert_tag1_en = false;
vport->txvlan_cfg.default_tag1 = 0;
} else {
- vport->txvlan_cfg.accept_tag1 = false;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
+
+ vport->txvlan_cfg.accept_tag1 =
+ ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
vport->txvlan_cfg.insert_tag1_en = true;
vport->txvlan_cfg.default_tag1 = vlan_tag;
}
@@ -8701,16 +9052,21 @@ static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
vport->txvlan_cfg.accept_untag2 = true;
vport->txvlan_cfg.insert_tag2_en = false;
vport->txvlan_cfg.default_tag2 = 0;
+ vport->txvlan_cfg.tag_shift_mode_en = true;
if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
vport->rxvlan_cfg.strip_tag1_en = false;
vport->rxvlan_cfg.strip_tag2_en =
vport->rxvlan_cfg.rx_vlan_offload_en;
+ vport->rxvlan_cfg.strip_tag2_discard_en = false;
} else {
vport->rxvlan_cfg.strip_tag1_en =
vport->rxvlan_cfg.rx_vlan_offload_en;
vport->rxvlan_cfg.strip_tag2_en = true;
+ vport->rxvlan_cfg.strip_tag2_discard_en = true;
}
+
+ vport->rxvlan_cfg.strip_tag1_discard_en = false;
vport->rxvlan_cfg.vlan1_vlan_prionly = false;
vport->rxvlan_cfg.vlan2_vlan_prionly = false;
@@ -9005,10 +9361,14 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
vport->rxvlan_cfg.strip_tag1_en = false;
vport->rxvlan_cfg.strip_tag2_en = enable;
+ vport->rxvlan_cfg.strip_tag2_discard_en = false;
} else {
vport->rxvlan_cfg.strip_tag1_en = enable;
vport->rxvlan_cfg.strip_tag2_en = true;
+ vport->rxvlan_cfg.strip_tag2_discard_en = true;
}
+
+ vport->rxvlan_cfg.strip_tag1_discard_en = false;
vport->rxvlan_cfg.vlan1_vlan_prionly = false;
vport->rxvlan_cfg.vlan2_vlan_prionly = false;
vport->rxvlan_cfg.rx_vlan_offload_en = enable;
@@ -9120,6 +9480,7 @@ static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
u16 vlan, u8 qos, __be16 proto)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hclge_vlan_info vlan_info;
@@ -9149,16 +9510,25 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
vlan_info.qos = qos;
vlan_info.vlan_proto = ntohs(proto);
- if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
- return hclge_update_port_base_vlan_cfg(vport, state,
- &vlan_info);
- } else {
- ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
- vport->vport_id, state,
- vlan, qos,
- ntohs(proto));
+ ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to update port base vlan for vf %d, ret = %d\n",
+ vfid, ret);
return ret;
}
+
+ /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
+ * VLAN state.
+ */
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
+ test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+ vport->vport_id, state,
+ vlan, qos,
+ ntohs(proto));
+
+ return 0;
}
static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
@@ -10671,12 +11041,10 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return min_t(u32, hdev->rss_size_max,
- vport->alloc_tqps / kinfo->num_tc);
+ return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
}
static void hclge_get_channels(struct hnae3_handle *handle,
@@ -10695,7 +11063,7 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
*alloc_tqps = vport->alloc_tqps;
- *max_rss_size = hdev->rss_size_max;
+ *max_rss_size = hdev->pf_rss_size_max;
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
@@ -10763,7 +11131,7 @@ out:
dev_info(&hdev->pdev->dev,
"Channels changed, rss_size from %u to %u, tqps from %u to %u",
cur_rss_size, kinfo->rss_size,
- cur_tqps, kinfo->rss_size * kinfo->num_tc);
+ cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
return ret;
}
@@ -11496,6 +11864,9 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_vf_mac = hclge_set_vf_mac,
.get_module_eeprom = hclge_get_module_eeprom,
.get_cmdq_stat = hclge_get_cmdq_stat,
+ .add_cls_flower = hclge_add_cls_flower,
+ .del_cls_flower = hclge_del_cls_flower,
+ .cls_flower_active = hclge_is_cls_flower_active,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index bd17685e4065..50a294dfaff5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -348,7 +348,8 @@ struct hclge_cfg {
u8 tc_num;
u16 tqp_desc_num;
u16 rx_buf_len;
- u16 rss_size_max;
+ u16 vf_rss_size_max;
+ u16 pf_rss_size_max;
u8 phy_addr;
u8 media_type;
u8 mac_addr[ETH_ALEN];
@@ -564,6 +565,7 @@ enum HCLGE_FD_ACTIVE_RULE_TYPE {
HCLGE_FD_RULE_NONE,
HCLGE_FD_ARFS_ACTIVE,
HCLGE_FD_EP_ACTIVE,
+ HCLGE_FD_TC_FLOWER_ACTIVE,
};
enum HCLGE_FD_PACKET_TYPE {
@@ -572,8 +574,9 @@ enum HCLGE_FD_PACKET_TYPE {
};
enum HCLGE_FD_ACTION {
- HCLGE_FD_ACTION_ACCEPT_PACKET,
+ HCLGE_FD_ACTION_SELECT_QUEUE,
HCLGE_FD_ACTION_DROP_PACKET,
+ HCLGE_FD_ACTION_SELECT_TC,
};
struct hclge_fd_key_cfg {
@@ -618,12 +621,20 @@ struct hclge_fd_rule {
struct hclge_fd_rule_tuples tuples_mask;
u32 unused_tuple;
u32 flow_type;
- u8 action;
- u16 vf_id;
+ union {
+ struct {
+ unsigned long cookie;
+ u8 tc;
+ } cls_flower;
+ struct {
+ u16 flow_id; /* only used for arfs */
+ } arfs;
+ };
u16 queue_id;
+ u16 vf_id;
u16 location;
- u16 flow_id; /* only used for arfs */
enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
+ u8 action;
};
struct hclge_fd_ad_data {
@@ -637,6 +648,8 @@ struct hclge_fd_ad_data {
u8 write_rule_id_to_bd;
u8 next_input_key;
u16 rule_id;
+ u16 tc_size;
+ u8 override_tc;
};
enum HCLGE_MAC_NODE_STATE {
@@ -745,7 +758,8 @@ struct hclge_dev {
u16 base_tqp_pid; /* Base task tqp physical id of this PF */
u16 alloc_rss_size; /* Allocated RSS task queue */
- u16 rss_size_max; /* HW defined max RSS task queue */
+ u16 vf_rss_size_max; /* HW defined VF max RSS task queue */
+ u16 pf_rss_size_max; /* HW defined PF max RSS task queue */
u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
u16 num_alloc_vport; /* Num vports this driver supports */
@@ -850,15 +864,18 @@ struct hclge_tx_vtag_cfg {
bool insert_tag2_en; /* Whether insert outer vlan tag */
u16 default_tag1; /* The default inner vlan tag to insert */
u16 default_tag2; /* The default outer vlan tag to insert */
+ bool tag_shift_mode_en;
};
/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
- u8 rx_vlan_offload_en; /* Whether enable rx vlan offload */
- u8 strip_tag1_en; /* Whether strip inner vlan tag */
- u8 strip_tag2_en; /* Whether strip outer vlan tag */
- u8 vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */
- u8 vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */
+ bool rx_vlan_offload_en; /* Whether enable rx vlan offload */
+ bool strip_tag1_en; /* Whether strip inner vlan tag */
+ bool strip_tag2_en; /* Whether strip outer vlan tag */
+ bool vlan1_vlan_prionly; /* Inner vlan tag up to descriptor enable */
+ bool vlan2_vlan_prionly; /* Outer vlan tag up to descriptor enable */
+ bool strip_tag1_discard_en; /* Inner vlan tag discard for BD enable */
+ bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */
};
struct hclge_rss_tuple_cfg {
@@ -903,7 +920,7 @@ struct hclge_vport {
u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
/* User configured lookup table entries */
- u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
+ u16 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
int rss_algo; /* User configured hash algorithm */
/* User configured rss tuple sets */
struct hclge_rss_tuple_cfg rss_tuple_sets;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 3ab6db2588d3..754c09ada901 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -227,6 +227,7 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
bool en_bc = req->msg.en_bc ? true : false;
bool en_uc = req->msg.en_uc ? true : false;
bool en_mc = req->msg.en_mc ? true : false;
+ struct hnae3_handle *handle = &vport->nic;
int ret;
if (!vport->vf_info.trusted) {
@@ -234,6 +235,12 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
en_mc = false;
}
+ if (req->msg.en_limit_promisc)
+ set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
+ else
+ clear_bit(HNAE3_PFLAG_LIMIT_PROMISC,
+ &handle->priv_flags);
+
ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);
vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;
@@ -371,7 +378,16 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
status = hclge_update_port_base_vlan_cfg(vport, *state,
vlan_info);
} else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
- resp_msg->data[0] = vport->port_base_vlan_cfg.state;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
+ /* on device HNAE3_DEVICE_VERSION_V3, the vf does not need to know
+ * about the port based VLAN state, so always return disable if the
+ * vf queries it.
+ */
+ resp_msg->data[0] =
+ ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
+ HNAE3_PORT_BASE_VLAN_DISABLE :
+ vport->port_base_vlan_cfg.state;
resp_msg->len = sizeof(u8);
}
@@ -398,7 +414,7 @@ static void hclge_get_vf_tcinfo(struct hclge_vport *vport,
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
unsigned int i;
- for (i = 0; i < kinfo->num_tc; i++)
+ for (i = 0; i < kinfo->tc_info.num_tc; i++)
resp_msg->data[0] |= BIT(i);
resp_msg->len = sizeof(u8);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index b1026cd1ba0a..82742a64f3b7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -565,7 +565,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
- for (i = 0; i < kinfo->num_tc; i++) {
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
false);
@@ -589,23 +589,66 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
return 0;
}
+static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info;
+ struct hclge_dev *hdev = vport->back;
+ u16 max_rss_size = 0;
+ int i;
+
+ if (!tc_info->mqprio_active)
+ return vport->alloc_tqps / tc_info->num_tc;
+
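+ /* with mqprio, TCs may have different queue counts, so the max
+ * rss size is the largest per-TC queue count.
+ */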
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
+ continue;
+ if (max_rss_size < tc_info->tqp_count[i])
+ max_rss_size = tc_info->tqp_count[i];
+ }
+
+ return max_rss_size;
+}
+
+static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info;
+ struct hclge_dev *hdev = vport->back;
+ int sum = 0;
+ int i;
+
+ if (!tc_info->mqprio_active)
+ return kinfo->rss_size * tc_info->num_tc;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
+ sum += tc_info->tqp_count[i];
+ }
+
+ return sum;
+}
+
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
+ u16 vport_max_rss_size;
u16 max_rss_size;
u8 i;
/* TC configuration is shared by PF/VF in one port, only allow
* one tc for VF for simplicity. VF's vport_id is non zero.
*/
- kinfo->num_tc = vport->vport_id ? 1 :
+ kinfo->tc_info.num_tc = vport->vport_id ? 1 :
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
(vport->vport_id ? (vport->vport_id - 1) : 0);
- max_rss_size = min_t(u16, hdev->rss_size_max,
- vport->alloc_tqps / kinfo->num_tc);
+ vport_max_rss_size = vport->vport_id ? hdev->vf_rss_size_max :
+ hdev->pf_rss_size_max;
+ max_rss_size = min_t(u16, vport_max_rss_size,
+ hclge_vport_get_max_rss_size(vport));
/* Set to user value, no larger than max_rss_size. */
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
@@ -622,34 +665,36 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
if (!kinfo->req_rss_size)
max_rss_size = min_t(u16, max_rss_size,
(hdev->num_nic_msi - 1) /
- kinfo->num_tc);
+ kinfo->tc_info.num_tc);
/* Set to the maximum specification value (max_rss_size). */
kinfo->rss_size = max_rss_size;
}
- kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
+ kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
vport->dwrr = 100; /* 100 percent as init */
vport->alloc_rss_size = kinfo->rss_size;
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
+ /* when mqprio is enabled, tc_info has already been updated */
+ if (kinfo->tc_info.mqprio_active)
+ return;
+
for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
- kinfo->tc_info[i].enable = true;
- kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
- kinfo->tc_info[i].tqp_count = kinfo->rss_size;
- kinfo->tc_info[i].tc = i;
+ if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
+ set_bit(i, &kinfo->tc_info.tc_en);
+ kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
+ kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
} else {
/* Set to default queue if TC is disabled */
- kinfo->tc_info[i].enable = false;
- kinfo->tc_info[i].tqp_offset = 0;
- kinfo->tc_info[i].tqp_count = 1;
- kinfo->tc_info[i].tc = 0;
+ clear_bit(i, &kinfo->tc_info.tc_en);
+ kinfo->tc_info.tqp_offset[i] = 0;
+ kinfo->tc_info.tqp_count[i] = 1;
}
}
- memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
- sizeof_field(struct hnae3_knic_private_info, prio_tc));
+ memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
+ sizeof_field(struct hnae3_tc_info, prio_tc));
}
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
@@ -854,15 +899,14 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info;
struct hnae3_queue **tqp = kinfo->tqp;
- struct hnae3_tc_info *v_tc_info;
u32 i, j;
int ret;
- for (i = 0; i < kinfo->num_tc; i++) {
- v_tc_info = &kinfo->tc_info[i];
- for (j = 0; j < v_tc_info->tqp_count; j++) {
- struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];
+ for (i = 0; i < tc_info->num_tc; i++) {
+ for (j = 0; j < tc_info->tqp_count[i]; j++) {
+ struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];
ret = hclge_tm_q_to_qs_map_cfg(hdev,
hclge_get_queue_id(q),
@@ -887,7 +931,7 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
struct hnae3_knic_private_info *kinfo =
&vport[k].nic.kinfo;
- for (i = 0; i < kinfo->num_tc; i++) {
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
ret = hclge_tm_qs_to_pri_map_cfg(
hdev, vport[k].qs_offset + i, i);
if (ret)
@@ -1001,7 +1045,7 @@ static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
u32 i;
int ret;
- for (i = 0; i < kinfo->num_tc; i++) {
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
HCLGE_SHAPER_LVL_QSET,
&ir_para, max_tm_rate);
@@ -1123,7 +1167,7 @@ static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
return ret;
/* Qset dwrr */
- for (i = 0; i < kinfo->num_tc; i++) {
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
ret = hclge_tm_qs_weight_cfg(
hdev, vport->qs_offset + i,
hdev->tm_info.pg_info[0].tc_dwrr[i]);
@@ -1254,7 +1298,7 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
if (ret)
return ret;
- for (i = 0; i < kinfo->num_tc; i++) {
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
@@ -1484,7 +1528,7 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
for (k = 0; k < hdev->num_alloc_vport; k++) {
kinfo = &vport[k].nic.kinfo;
- kinfo->prio_tc[i] = prio_tc[i];
+ kinfo->tc_info.prio_tc[i] = prio_tc[i];
}
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 5b2f9a56f1d8..145757cb70f9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -14,6 +14,9 @@
#define HCLGEVF_RESET_MAX_FAIL_CNT 5
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
+ unsigned long delay);
+
static struct hnae3_ae_algo ae_algovf;
static struct workqueue_struct *hclgevf_wq;
@@ -430,19 +433,20 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
struct hnae3_knic_private_info *kinfo;
u16 new_tqps = hdev->num_tqps;
unsigned int i;
+ u8 num_tc = 0;
kinfo = &nic->kinfo;
- kinfo->num_tc = 0;
kinfo->num_tx_desc = hdev->num_tx_desc;
kinfo->num_rx_desc = hdev->num_rx_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
if (hdev->hw_tc_map & BIT(i))
- kinfo->num_tc++;
+ num_tc++;
- kinfo->rss_size
- = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
- new_tqps = kinfo->rss_size * kinfo->num_tc;
+ num_tc = num_tc ? num_tc : 1;
+ kinfo->tc_info.num_tc = num_tc;
+ kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
+ new_tqps = kinfo->rss_size * num_tc;
kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
@@ -460,7 +464,7 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
* and rss size with the actual vector numbers
*/
kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
- kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
+ kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
kinfo->rss_size);
return 0;
@@ -1146,6 +1150,7 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
bool en_uc_pmc, bool en_mc_pmc,
bool en_bc_pmc)
{
+ struct hnae3_handle *handle = &hdev->nic;
struct hclge_vf_to_pf_msg send_msg;
int ret;
@@ -1154,6 +1159,8 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
send_msg.en_bc = en_bc_pmc ? 1 : 0;
send_msg.en_uc = en_uc_pmc ? 1 : 0;
send_msg.en_mc = en_mc_pmc ? 1 : 0;
+ send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
+ &handle->priv_flags) ? 1 : 0;
ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (ret)
@@ -1180,6 +1187,7 @@ static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+ hclgevf_task_schedule(hdev, 0);
}
static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
@@ -3353,7 +3361,7 @@ static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
struct hnae3_knic_private_info *kinfo = &nic->kinfo;
return min_t(u32, hdev->rss_size_max,
- hdev->num_tqps / kinfo->num_tc);
+ hdev->num_tqps / kinfo->tc_info.num_tc);
}
/**
@@ -3396,7 +3404,7 @@ static void hclgevf_update_rss_size(struct hnae3_handle *handle,
kinfo->req_rss_size = new_tqps_num;
max_rss_size = min_t(u16, hdev->rss_size_max,
- hdev->num_tqps / kinfo->num_tc);
+ hdev->num_tqps / kinfo->tc_info.num_tc);
/* Use the user's configuration when it is not larger than
* max_rss_size, otherwise, use the maximum specification value.
@@ -3408,7 +3416,7 @@ static void hclgevf_update_rss_size(struct hnae3_handle *handle,
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
kinfo->rss_size = max_rss_size;
- kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
+ kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
}
static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
@@ -3454,7 +3462,7 @@ out:
dev_info(&hdev->pdev->dev,
"Channels changed, rss_size from %u to %u, tqps from %u to %u",
cur_rss_size, kinfo->rss_size,
- cur_tqps, kinfo->rss_size * kinfo->num_tc);
+ cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
return ret;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 350225bbe0be..9a9b09401d01 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -313,13 +313,7 @@ static void free_rxqs(struct hinic_dev *nic_dev)
static int hinic_configure_max_qnum(struct hinic_dev *nic_dev)
{
- int err;
-
- err = hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
- if (err)
- return err;
-
- return 0;
+ return hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
}
static int hinic_rss_init(struct hinic_dev *nic_dev)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index cf55c5ea07cb..a2191392ca4f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -398,6 +398,8 @@ failure:
dev_kfree_skb_any(pool->rx_buff[index].skb);
pool->rx_buff[index].skb = NULL;
}
+ adapter->replenish_add_buff_failure += ind_bufp->index;
+ atomic_add(buffers_added, &pool->available);
ind_bufp->index = 0;
if (lpar_rc == H_CLOSED || adapter->failover_pending) {
/* Disable buffer pool replenishment and report carrier off if
@@ -419,6 +421,8 @@ static void replenish_pools(struct ibmvnic_adapter *adapter)
if (adapter->rx_pool[i].active)
replenish_rx_pool(adapter, &adapter->rx_pool[i]);
}
+
+ netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}
static void release_stats_buffers(struct ibmvnic_adapter *adapter)
@@ -927,6 +931,7 @@ static int ibmvnic_login(struct net_device *netdev)
__ibmvnic_set_mac(netdev, adapter->mac_addr);
+ netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state);
return 0;
}
@@ -1358,6 +1363,10 @@ static int ibmvnic_close(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int rc;
+ netdev_dbg(netdev, "[S:%d FOP:%d FRR:%d] Closing\n",
+ adapter->state, adapter->failover_pending,
+ adapter->force_reset_recovery);
+
/* If device failover is pending, just set device state and return.
* Device operation will be handled by reset routine.
*/
@@ -2013,8 +2022,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
int i, rc;
- netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
- rwi->reset_reason);
+ netdev_dbg(adapter->netdev,
+ "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
+ adapter->state, adapter->failover_pending,
+ rwi->reset_reason, reset_state);
rtnl_lock();
/*
@@ -2173,6 +2184,8 @@ out:
adapter->state = reset_state;
rtnl_unlock();
+ netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n",
+ adapter->state, adapter->failover_pending, rc);
return rc;
}
@@ -2242,6 +2255,8 @@ out:
/* restore adapter state if reset failed */
if (rc)
adapter->state = reset_state;
+ netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n",
+ adapter->state, adapter->failover_pending, rc);
return rc;
}
@@ -2352,6 +2367,11 @@ static void __ibmvnic_reset(struct work_struct *work)
}
clear_bit_unlock(0, &adapter->resetting);
+
+ netdev_dbg(adapter->netdev,
+ "[S:%d FRR:%d WFR:%d] Done processing resets\n",
+ adapter->state, adapter->force_reset_recovery,
+ adapter->wait_for_reset);
}
static void __ibmvnic_delayed_reset(struct work_struct *work)
@@ -2397,7 +2417,8 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
list_for_each(entry, &adapter->rwi_list) {
tmp = list_entry(entry, struct ibmvnic_rwi, list);
if (tmp->reset_reason == reason) {
- netdev_dbg(netdev, "Skipping matching reset\n");
+ netdev_dbg(netdev, "Skipping matching reset, reason=%d\n",
+ reason);
spin_unlock_irqrestore(&adapter->rwi_lock, flags);
ret = EBUSY;
goto err;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index b30f00891c03..128ab6898070 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6475,13 +6475,13 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
/* Ungate PGCB clock */
mac_data = er32(FEXTNVM9);
- mac_data |= BIT(28);
+ mac_data &= ~BIT(28);
ew32(FEXTNVM9, mac_data);
/* Enable K1 off to enable mPHY Power Gating */
mac_data = er32(FEXTNVM6);
mac_data |= BIT(31);
- ew32(FEXTNVM12, mac_data);
+ ew32(FEXTNVM6, mac_data);
/* Enable mPHY power gating for any link and speed */
mac_data = er32(FEXTNVM8);
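
The S0ix entry flow above and the matching exit flow just below fix two read-modify-write slips: the K1-off value read from FEXTNVM6 was being written back to FEXTNVM12, and the PGCB clock-ungate bit (FEXTNVM9 bit 28) was set where it should be cleared, and vice versa on exit. A generic sketch of the intended pattern, with a plain pointer standing in for the driver's er32()/ew32() MMIO accessors:

#include <stdint.h>

/* Read a 32-bit register, modify one bit, write the same register back. */
static inline void demo_reg_set_bit(volatile uint32_t *reg, unsigned int bit)
{
    *reg |= (uint32_t)1 << bit;     /* e.g. enable K1-off: FEXTNVM6 bit 31 */
}

static inline void demo_reg_clear_bit(volatile uint32_t *reg, unsigned int bit)
{
    *reg &= ~((uint32_t)1 << bit);  /* e.g. ungate PGCB clock: FEXTNVM9 bit 28 */
}
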
@@ -6525,11 +6525,11 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
/* Disable K1 off */
mac_data = er32(FEXTNVM6);
mac_data &= ~BIT(31);
- ew32(FEXTNVM12, mac_data);
+ ew32(FEXTNVM6, mac_data);
/* Disable Ungate PGCB clock */
mac_data = er32(FEXTNVM9);
- mac_data &= ~BIT(28);
+ mac_data |= BIT(28);
ew32(FEXTNVM9, mac_data);
/* Cancel not waking from dynamic
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9f73cd7aee09..4aca637d4a23 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1861,6 +1861,7 @@ static inline bool i40e_page_is_reusable(struct page *page)
* the adapter for another receive
*
* @rx_buffer: buffer containing the page
+ * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
*
* If page is reusable, rx_buffer->page_offset is adjusted to point to
* an unused region in the page.
@@ -1883,7 +1884,8 @@ static inline bool i40e_page_is_reusable(struct page *page)
*
* In either case, if the page is reusable its refcount is increased.
**/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -1894,7 +1896,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define I40E_LAST_OFFSET \
@@ -1953,16 +1955,24 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
* i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* @rx_ring: rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
+ * @rx_buffer_pgcnt: buffer page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
- const unsigned int size)
+ const unsigned int size,
+ int *rx_buffer_pgcnt)
{
struct i40e_rx_buffer *rx_buffer;
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
prefetch_page_address(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
@@ -2113,14 +2123,16 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* i40e_put_rx_buffer - Clean up used buffer and either recycle or free
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
+ * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
*
* This function will clean up the contents of the rx_buffer. It will
* either recycle the buffer or unmap it and free the associated resources.
*/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer)
+ struct i40e_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
- if (i40e_can_reuse_rx_page(rx_buffer)) {
+ if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -2347,6 +2359,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
+ int rx_buffer_pgcnt;
unsigned int size;
u64 qword;
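
The i40e hunks here, like the ice and ixgbe ones later in this section, sample page_count() once in the *_get_rx_buffer() path, before the XDP program gets a chance to call xdp_do_redirect(), and feed that snapshot into the reuse test instead of re-reading the refcount afterwards; a reference still held by an in-flight redirect could otherwise make the "are we the sole owner" check flip underneath the driver. A condensed sketch of the order-0 (PAGE_SIZE < 8192) reuse test, with illustrative types:

#include <stdbool.h>

struct demo_rx_buf {
    unsigned int pagecnt_bias;  /* references the driver still owns */
    bool reusable_page;         /* local, non-pfmemalloc page */
};

/* pgcnt_at_fetch is page_count() sampled before XDP may redirect the frame */
static bool demo_can_reuse_rx_page(const struct demo_rx_buf *buf,
                                   int pgcnt_at_fetch)
{
    if (!buf->reusable_page)
        return false;

    /* reuse only if nobody outside the driver holds a reference */
    return (pgcnt_at_fetch - (int)buf->pagecnt_bias) <= 1;
}
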
@@ -2389,7 +2402,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
break;
i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
- rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+ rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
@@ -2432,7 +2445,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
break;
}
- i40e_put_rx_buffer(rx_ring, rx_buffer);
+ i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index a0723831c4e4..56725356a17b 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -304,7 +304,6 @@ struct ice_vsi {
u8 irqs_ready:1;
u8 current_isup:1; /* Sync 'link up' logging */
u8 stat_offsets_loaded:1;
- u8 vlan_ena:1;
u16 num_vlan;
/* queue information */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 7db5fd977367..6d7e7dd0ebe2 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -904,8 +904,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
/* Query the allocated resources for Tx scheduler */
status = ice_sched_query_res_alloc(hw);
if (status) {
- ice_debug(hw, ICE_DBG_SCHED,
- "Failed to get scheduler allocated resources\n");
+ ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
goto err_unroll_alloc;
}
@@ -925,7 +924,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
devm_kfree(ice_hw_to_dev(hw), pcaps);
if (status)
- goto err_unroll_sched;
+ dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
+ status);
/* Initialize port_info struct with link information */
status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
@@ -1044,8 +1044,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
}
if (cnt == grst_timeout) {
- ice_debug(hw, ICE_DBG_INIT,
- "Global reset polling failed to complete.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
return ICE_ERR_RESET_FAILED;
}
@@ -1063,16 +1062,14 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
reg = rd32(hw, GLNVM_ULD) & uld_mask;
if (reg == uld_mask) {
- ice_debug(hw, ICE_DBG_INIT,
- "Global reset processes done. %d\n", cnt);
+ ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
break;
}
mdelay(10);
}
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
- ice_debug(hw, ICE_DBG_INIT,
- "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
+ ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
reg);
return ICE_ERR_RESET_FAILED;
}
@@ -1124,8 +1121,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
}
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
- ice_debug(hw, ICE_DBG_INIT,
- "PF reset polling failed to complete.\n");
+ ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
return ICE_ERR_RESET_FAILED;
}
@@ -1578,8 +1574,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
goto ice_acquire_res_exit;
if (status)
- ice_debug(hw, ICE_DBG_RES,
- "resource %d acquire type %d failed.\n", res, access);
+ ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
/* If necessary, poll until the current lock owner timeouts */
timeout = time_left;
@@ -1602,11 +1597,9 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
ice_acquire_res_exit:
if (status == ICE_ERR_AQ_NO_WORK) {
if (access == ICE_RES_WRITE)
- ice_debug(hw, ICE_DBG_RES,
- "resource indicates no work to do.\n");
+ ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
else
- ice_debug(hw, ICE_DBG_RES,
- "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
+ ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
}
return status;
}
@@ -1792,66 +1785,53 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
switch (cap) {
case ICE_AQC_CAPS_VALID_FUNCTIONS:
caps->valid_functions = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: valid_functions (bitmap) = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
caps->valid_functions);
break;
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: sr_iov_1_1 = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
caps->sr_iov_1_1);
break;
case ICE_AQC_CAPS_DCB:
caps->dcb = (number == 1);
caps->active_tc_bitmap = logical_id;
caps->maxtc = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: dcb = %d\n", prefix, caps->dcb);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: active_tc_bitmap = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
+ ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
caps->active_tc_bitmap);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: maxtc = %d\n", prefix, caps->maxtc);
+ ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
break;
case ICE_AQC_CAPS_RSS:
caps->rss_table_size = number;
caps->rss_table_entry_width = logical_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: rss_table_size = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
caps->rss_table_size);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: rss_table_entry_width = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
caps->rss_table_entry_width);
break;
case ICE_AQC_CAPS_RXQS:
caps->num_rxq = number;
caps->rxq_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_rxq = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
caps->num_rxq);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: rxq_first_id = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
caps->rxq_first_id);
break;
case ICE_AQC_CAPS_TXQS:
caps->num_txq = number;
caps->txq_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_txq = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
caps->num_txq);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: txq_first_id = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
caps->txq_first_id);
break;
case ICE_AQC_CAPS_MSIX:
caps->num_msix_vectors = number;
caps->msix_vector_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_msix_vectors = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
caps->num_msix_vectors);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: msix_vector_first_id = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
caps->msix_vector_first_id);
break;
case ICE_AQC_CAPS_PENDING_NVM_VER:
@@ -1904,8 +1884,7 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
if (hw->dev_caps.num_funcs > 4) {
/* Max 4 TCs per port */
caps->maxtc = 4;
- ice_debug(hw, ICE_DBG_INIT,
- "reducing maxtc to %d (based on #ports)\n",
+ ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
caps->maxtc);
}
}
@@ -1973,11 +1952,9 @@ ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
GLQF_FD_SIZE_FD_BSIZE_S;
func_p->fd_fltr_best_effort = val;
- ice_debug(hw, ICE_DBG_INIT,
- "func caps: fd_fltr_guar = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
func_p->fd_fltr_guar);
- ice_debug(hw, ICE_DBG_INIT,
- "func caps: fd_fltr_best_effort = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
func_p->fd_fltr_best_effort);
}
@@ -2026,8 +2003,7 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
default:
/* Don't list common capabilities as unknown */
if (!found)
- ice_debug(hw, ICE_DBG_INIT,
- "func caps: unknown capability[%d]: 0x%x\n",
+ ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
i, cap);
break;
}
@@ -2160,8 +2136,7 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
default:
/* Don't list common capabilities as unknown */
if (!found)
- ice_debug(hw, ICE_DBG_INIT,
- "dev caps: unknown capability[%d]: 0x%x\n",
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
i, cap);
break;
}
@@ -2618,8 +2593,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
/* Ensure that only valid bits of cfg->caps can be turned on. */
if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
- ice_debug(hw, ICE_DBG_PHY,
- "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
+ ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
cfg->caps);
cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
@@ -3067,8 +3041,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
status = ice_update_link_info(pi);
if (status)
- ice_debug(pi->hw, ICE_DBG_LINK,
- "get link status error, status = %d\n",
+ ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
status);
}
@@ -3793,8 +3766,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
* of the endianness of the machine.
*/
if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
- ice_debug(hw, ICE_DBG_QCTX,
- "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
+ ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
f, ce_info[f].width, ce_info[f].size_of);
continue;
}
@@ -4261,10 +4233,6 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
*/
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
- /* Currently, only supported for E810 devices */
- if (hw->mac_type != ICE_MAC_E810)
- return false;
-
if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
return true;
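
With the E810-only early return dropped, ice_fw_supports_link_override() keys purely off the reported firmware API version. A simplified major/minor gate in the same spirit; the driver's full check is finer-grained than this, and the thresholds below are placeholders rather than the ICE_FW_API_LINK_OVERRIDE_* values:

#include <stdbool.h>
#include <stdint.h>

static bool demo_fw_api_at_least(uint8_t maj, uint8_t min,
                                 uint8_t need_maj, uint8_t need_min)
{
    if (maj != need_maj)
        return maj > need_maj;
    return min >= need_min;
}
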
@@ -4296,8 +4264,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read link override TLV.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
return status;
}
@@ -4308,8 +4275,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
/* link options first */
status = ice_read_sr_word(hw, tlv_start, &buf);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read override link options.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
return status;
}
ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
@@ -4320,8 +4286,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
status = ice_read_sr_word(hw, offset, &buf);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read override phy config.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
return status;
}
ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
@@ -4331,8 +4296,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
status = ice_read_sr_word(hw, (offset + i), &buf);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read override link options.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
return status;
}
/* shift 16 bits at a time to fill 64 bits */
@@ -4345,8 +4309,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
status = ice_read_sr_word(hw, (offset + i), &buf);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read override link options.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
return status;
}
/* shift 16 bits at a time to fill 64 bits */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 1f46a7828be8..4db12d1f5808 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -717,8 +717,7 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
if (status != ICE_ERR_AQ_FW_CRITICAL)
break;
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Retry Admin Queue init due to FW critical error\n");
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
@@ -813,8 +812,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
details = ICE_CTL_Q_DETAILS(*sq, ntc);
while (rd32(hw, cq->sq.head) != ntc) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
+ ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
memset(desc, 0, sizeof(*desc));
memset(details, 0, sizeof(*details));
ntc++;
@@ -852,8 +850,7 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
len = le16_to_cpu(cq_desc->datalen);
- ice_debug(hw, ICE_DBG_AQ_DESC,
- "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
le16_to_cpu(cq_desc->opcode),
le16_to_cpu(cq_desc->flags),
le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
@@ -925,8 +922,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
cq->sq_last_status = ICE_AQ_RC_OK;
if (!cq->sq.count) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Send queue not initialized.\n");
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
status = ICE_ERR_AQ_EMPTY;
goto sq_send_command_error;
}
@@ -938,8 +934,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (buf) {
if (buf_size > cq->sq_buf_size) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Invalid buffer size for Control Send queue: %d.\n",
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
buf_size);
status = ICE_ERR_INVAL_SIZE;
goto sq_send_command_error;
@@ -952,8 +947,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
val = rd32(hw, cq->sq.head);
if (val >= cq->num_sq_entries) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "head overrun at %d in the Control Send Queue ring\n",
+ ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
val);
status = ICE_ERR_AQ_EMPTY;
goto sq_send_command_error;
@@ -971,8 +965,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
* called in a separate thread in case of asynchronous completions.
*/
if (ice_clean_sq(hw, cq) == 0) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Error: Control Send Queue is full.\n");
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
status = ICE_ERR_AQ_FULL;
goto sq_send_command_error;
}
@@ -1000,8 +993,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
/* Debug desc and buffer */
- ice_debug(hw, ICE_DBG_AQ_DESC,
- "ATQ: Control Send queue desc and buffer:\n");
+ ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
@@ -1026,8 +1018,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
u16 copy_size = le16_to_cpu(desc->datalen);
if (copy_size > buf_size) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Return len %d > than buf len %d\n",
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
copy_size, buf_size);
status = ICE_ERR_AQ_ERROR;
} else {
@@ -1036,8 +1027,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
retval = le16_to_cpu(desc->retval);
if (retval) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Send Queue command 0x%04X completed with error 0x%X\n",
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
le16_to_cpu(desc->opcode),
retval);
@@ -1050,8 +1040,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
cq->sq_last_status = (enum ice_aq_err)retval;
}
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "ATQ: desc and buffer writeback:\n");
+ ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
ice_debug_cq(hw, (void *)desc, buf, buf_size);
@@ -1067,8 +1056,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
status = ICE_ERR_AQ_FW_CRITICAL;
} else {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Send Queue Writeback timeout.\n");
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
status = ICE_ERR_AQ_TIMEOUT;
}
}
@@ -1124,8 +1112,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
mutex_lock(&cq->rq_lock);
if (!cq->rq.count) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Receive queue not initialized.\n");
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
ret_code = ICE_ERR_AQ_EMPTY;
goto clean_rq_elem_err;
}
@@ -1147,8 +1134,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
flags = le16_to_cpu(desc->flags);
if (flags & ICE_AQ_FLAG_ERR) {
ret_code = ICE_ERR_AQ_ERROR;
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Receive Queue Event 0x%04X received with error 0x%X\n",
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
le16_to_cpu(desc->opcode),
cq->rq_last_status);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 9095b4d274ad..f5e81b555353 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -709,8 +709,7 @@ ice_acquire_global_cfg_lock(struct ice_hw *hw,
if (!status)
mutex_lock(&ice_global_cfg_lock_sw);
else if (status == ICE_ERR_AQ_NO_WORK)
- ice_debug(hw, ICE_DBG_PKG,
- "Global config lock: No work to do\n");
+ ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
return status;
}
@@ -909,8 +908,7 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
last, &offset, &info, NULL);
if (status) {
- ice_debug(hw, ICE_DBG_PKG,
- "Update pkg failed: err %d off %d inf %d\n",
+ ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
status, offset, info);
break;
}
@@ -988,8 +986,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
/* Save AQ status from download package */
hw->pkg_dwnld_status = hw->adminq.sq_last_status;
if (status) {
- ice_debug(hw, ICE_DBG_PKG,
- "Pkg download failed: err %d off %d inf %d\n",
+ ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
status, offset, info);
break;
@@ -1083,8 +1080,7 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
meta_seg->pkg_name);
} else {
- ice_debug(hw, ICE_DBG_INIT,
- "Did not find metadata segment in driver package\n");
+ ice_debug(hw, ICE_DBG_INIT, "Did not find metadata segment in driver package\n");
return ICE_ERR_CFG;
}
@@ -1101,8 +1097,7 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
seg_hdr->seg_format_ver.draft,
seg_hdr->seg_id);
} else {
- ice_debug(hw, ICE_DBG_INIT,
- "Did not find ice segment in driver package\n");
+ ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
return ICE_ERR_CFG;
}
@@ -1318,8 +1313,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
(*seg)->hdr.seg_format_ver.minor >
pkg->pkg_info[i].ver.minor) {
status = ICE_ERR_FW_DDP_MISMATCH;
- ice_debug(hw, ICE_DBG_INIT,
- "OS package is not compatible with NVM.\n");
+ ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
}
/* done processing NVM package so break */
break;
@@ -1387,8 +1381,7 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
ice_init_pkg_hints(hw, seg);
status = ice_download_pkg(hw, seg);
if (status == ICE_ERR_AQ_NO_WORK) {
- ice_debug(hw, ICE_DBG_INIT,
- "package previously loaded - no work.\n");
+ ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
status = 0;
}
@@ -3261,8 +3254,7 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
if (ent->profile_cookie == hdl)
return true;
- ice_debug(hw, ICE_DBG_INIT,
- "Characteristic list for VSI group %d not found.\n",
+ ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
vsig);
return false;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index eadc85aee389..89a0cef20506 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -708,57 +708,64 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
{
- struct ice_flow_prof_params params;
+ struct ice_flow_prof_params *params;
enum ice_status status;
u8 i;
if (!prof)
return ICE_ERR_BAD_PTR;
- memset(&params, 0, sizeof(params));
- params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof),
- GFP_KERNEL);
- if (!params.prof)
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params)
return ICE_ERR_NO_MEMORY;
+ params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
+ GFP_KERNEL);
+ if (!params->prof) {
+ status = ICE_ERR_NO_MEMORY;
+ goto free_params;
+ }
+
/* initialize extraction sequence to all invalid (0xff) */
for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
- params.es[i].prot_id = ICE_PROT_INVALID;
- params.es[i].off = ICE_FV_OFFSET_INVAL;
+ params->es[i].prot_id = ICE_PROT_INVALID;
+ params->es[i].off = ICE_FV_OFFSET_INVAL;
}
- params.blk = blk;
- params.prof->id = prof_id;
- params.prof->dir = dir;
- params.prof->segs_cnt = segs_cnt;
+ params->blk = blk;
+ params->prof->id = prof_id;
+ params->prof->dir = dir;
+ params->prof->segs_cnt = segs_cnt;
/* Make a copy of the segments that need to be persistent in the flow
* profile instance
*/
for (i = 0; i < segs_cnt; i++)
- memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs));
+ memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs));
- status = ice_flow_proc_segs(hw, &params);
+ status = ice_flow_proc_segs(hw, params);
if (status) {
- ice_debug(hw, ICE_DBG_FLOW,
- "Error processing a flow's packet segments\n");
+ ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
goto out;
}
/* Add a HW profile for this flow profile */
- status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
+ status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
+ params->es);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
}
- INIT_LIST_HEAD(&params.prof->entries);
- mutex_init(&params.prof->entries_lock);
- *prof = params.prof;
+ INIT_LIST_HEAD(&params->prof->entries);
+ mutex_init(&params->prof->entries_lock);
+ *prof = params->prof;
out:
if (status)
- devm_kfree(ice_hw_to_dev(hw), params.prof);
+ devm_kfree(ice_hw_to_dev(hw), params->prof);
+free_params:
+ kfree(params);
return status;
}
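
ice_flow_add_prof_sync() now allocates its ice_flow_prof_params scratch area with kzalloc() instead of keeping it on the stack, since the structure's extraction-sequence and ptype arrays are large enough to bloat the frame, and the added free_params label releases the heap copy on every exit path. A minimal user-space analogue of the pattern; sizes and error codes are illustrative:

#include <stdlib.h>

struct demo_big_params {
    unsigned char es[48 * 4];     /* stand-in for the large member arrays */
    unsigned long ptypes[16];
};

static int demo_build_profile(void)
{
    struct demo_big_params *params;
    int err = 0;

    params = calloc(1, sizeof(*params));   /* ~ kzalloc(..., GFP_KERNEL) */
    if (!params)
        return -1;                         /* ~ ICE_ERR_NO_MEMORY */

    /* ... fill params and program the hardware profile ... */

    free(params);                          /* single unwind point */
    return err;
}
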
@@ -827,8 +834,7 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
if (!status)
set_bit(vsi_handle, prof->vsis);
else
- ice_debug(hw, ICE_DBG_FLOW,
- "HW profile add failed, %d\n",
+ ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
status);
}
@@ -859,8 +865,7 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
if (!status)
clear_bit(vsi_handle, prof->vsis);
else
- ice_debug(hw, ICE_DBG_FLOW,
- "HW profile remove failed, %d\n",
+ ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
status);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 2dea4d0e9415..c52b9bb0e3ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -224,7 +224,7 @@ static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
if (vsi->type != ICE_VSI_PF)
return 0;
- if (vsi->vlan_ena) {
+ if (vsi->num_vlan > 1) {
status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
set_promisc);
} else {
@@ -326,7 +326,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
if (vsi->current_netdev_flags & IFF_ALLMULTI) {
- if (vsi->vlan_ena)
+ if (vsi->num_vlan > 1)
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_MCAST_PROMISC_BITS;
@@ -340,7 +340,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
} else {
/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
- if (vsi->vlan_ena)
+ if (vsi->num_vlan > 1)
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_MCAST_PROMISC_BITS;
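
The dropped vlan_ena flag is replaced by checking vsi->num_vlan directly: more than one entry, on the assumption that one slot is always taken by the default VLAN, means real VLAN filters exist, so the VLAN-aware promiscuous bits are programmed and tagged traffic is not pruned. The selection reduces to the following; the constants are placeholders for the ICE_MCAST_*PROMISC_BITS masks:

#define DEMO_MCAST_PROMISC_BITS       0x1  /* placeholder mask */
#define DEMO_MCAST_VLAN_PROMISC_BITS  0x2  /* placeholder mask */

static unsigned int demo_pick_promisc_mask(unsigned int num_vlan)
{
    return num_vlan > 1 ? DEMO_MCAST_VLAN_PROMISC_BITS
                        : DEMO_MCAST_PROMISC_BITS;
}
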
@@ -667,7 +667,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
speed = "100 M";
break;
default:
- speed = "Unknown";
+ speed = "Unknown ";
break;
}
@@ -3116,10 +3116,8 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
* packets aren't pruned by the device's internal switch on Rx
*/
ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
- if (!ret) {
- vsi->vlan_ena = true;
+ if (!ret)
set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
- }
return ret;
}
@@ -3158,7 +3156,6 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
ret = ice_cfg_vlan_pruning(vsi, false, false);
- vsi->vlan_ena = false;
set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 5903a36763de..f729cd0c6224 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -55,7 +55,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
*
* Reads a portion of the NVM, as a flat memory space. This function correctly
* breaks read requests across Shadow RAM sectors and ensures that no single
- * read request exceeds the maximum 4Kb read for a single AdminQ command.
+ * read request exceeds the maximum 4KB read for a single AdminQ command.
*
* Returns a status code on failure. Note that the data pointer may be
* partially updated if some reads succeed before a failure.
@@ -73,18 +73,17 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
/* Verify the length of the read if this is for the Shadow RAM */
if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
- ice_debug(hw, ICE_DBG_NVM,
- "NVM error: requested offset is beyond Shadow RAM limit\n");
+ ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n");
return ICE_ERR_PARAM;
}
do {
u32 read_size, sector_offset;
- /* ice_aq_read_nvm cannot read more than 4Kb at a time.
+ /* ice_aq_read_nvm cannot read more than 4KB at a time.
* Additionally, a read from the Shadow RAM may not cross over
* a sector boundary. Conveniently, the sector size is also
- * 4Kb.
+ * 4KB.
*/
sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
@@ -196,7 +195,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
* Shadow RAM sector restrictions necessary when reading from the NVM.
*/
status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
- (u8 *)&data_local, true);
+ (__force u8 *)&data_local, true);
if (status)
return status;
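
ice_read_flat_nvm() keeps each AdminQ read inside a single 4KB window: the per-iteration chunk is the distance from the current offset to the next sector boundary, clamped to the bytes remaining, which honours both the 4KB command limit and the Shadow RAM rule that a read may not cross a sector. A standalone sketch of that loop; demo_aq_read() is a stub standing in for the firmware read command:

#include <stdint.h>
#include <string.h>

#define DEMO_SECTOR_SIZE 4096u   /* AdminQ max buffer == Shadow RAM sector */

static int demo_aq_read(uint32_t offset, uint32_t len, uint8_t *dst)
{
    (void)offset;
    memset(dst, 0, len);         /* pretend the firmware filled the buffer */
    return 0;
}

static int demo_read_flat(uint32_t offset, uint32_t length, uint8_t *data)
{
    uint32_t done = 0;

    while (done < length) {
        uint32_t sector_off = offset % DEMO_SECTOR_SIZE;
        uint32_t chunk = DEMO_SECTOR_SIZE - sector_off;

        if (chunk > length - done)
            chunk = length - done;

        if (demo_aq_read(offset, chunk, data + done))
            return -1;

        offset += chunk;
        done += chunk;
    }
    return 0;
}
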
@@ -397,8 +396,7 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
ICE_SR_BOOT_CFG_PTR);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read Boot Configuration Block TLV.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read Boot Configuration Block TLV.\n");
return status;
}
@@ -406,8 +404,7 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
* (Combo Image Version High and Combo Image Version Low)
*/
if (boot_cfg_tlv_len < 2) {
- ice_debug(hw, ICE_DBG_INIT,
- "Invalid Boot Configuration Block TLV size.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Invalid Boot Configuration Block TLV size.\n");
return ICE_ERR_INVAL_SIZE;
}
@@ -542,14 +539,12 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
status = ice_read_flat_nvm(hw, offset, &len, &data, false);
if (status == ICE_ERR_AQ_ERROR &&
hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
- ice_debug(hw, ICE_DBG_NVM,
- "%s: New upper bound of %u bytes\n",
+ ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
status = 0;
max_size = offset;
} else if (!status) {
- ice_debug(hw, ICE_DBG_NVM,
- "%s: New lower bound of %u bytes\n",
+ ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n",
__func__, offset);
min_size = offset;
} else {
@@ -558,8 +553,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
}
}
- ice_debug(hw, ICE_DBG_NVM,
- "Predicted flash size is %u bytes\n", max_size);
+ ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size);
hw->nvm.flash_size = max_size;
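
ice_discover_flash_size() bisects the address space with 1-byte reads: a read the firmware rejects with EINVAL becomes the new upper bound, a read that succeeds becomes the new lower bound, and the loop converges on the first unreadable offset, which is taken as the flash size. The same search in miniature; demo_probe() fakes an 8 MB part:

#include <stdbool.h>
#include <stdint.h>

static bool demo_probe(uint32_t offset)
{
    return offset < 8u * 1024 * 1024;   /* pretend reads succeed below 8 MB */
}

static uint32_t demo_discover_flash_size(uint32_t max_size)
{
    uint32_t min_size = 0;

    while (max_size - min_size > 1) {
        uint32_t offset = (max_size + min_size) / 2;

        if (demo_probe(offset))
            min_size = offset;   /* readable: flash extends past here */
        else
            max_size = offset;   /* rejected: flash ends at or before here */
    }
    return max_size;             /* predicted size */
}
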
@@ -600,15 +594,13 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
} else {
/* Blank programming mode */
nvm->blank_nvm_mode = true;
- ice_debug(hw, ICE_DBG_NVM,
- "NVM init error: unsupported blank mode.\n");
+ ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
return ICE_ERR_NVM_BLANK_MODE;
}
status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read DEV starter version.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read DEV starter version.\n");
return status;
}
nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
@@ -629,37 +621,10 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
status = ice_discover_flash_size(hw);
if (status) {
- ice_debug(hw, ICE_DBG_NVM,
- "NVM init error: failed to discover flash size.\n");
+ ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n");
return status;
}
- switch (hw->device_id) {
- /* the following devices do not have boot_cfg_tlv yet */
- case ICE_DEV_ID_E823C_BACKPLANE:
- case ICE_DEV_ID_E823C_QSFP:
- case ICE_DEV_ID_E823C_SFP:
- case ICE_DEV_ID_E823C_10G_BASE_T:
- case ICE_DEV_ID_E823C_SGMII:
- case ICE_DEV_ID_E822C_BACKPLANE:
- case ICE_DEV_ID_E822C_QSFP:
- case ICE_DEV_ID_E822C_10G_BASE_T:
- case ICE_DEV_ID_E822C_SGMII:
- case ICE_DEV_ID_E822C_SFP:
- case ICE_DEV_ID_E822L_BACKPLANE:
- case ICE_DEV_ID_E822L_SFP:
- case ICE_DEV_ID_E822L_10G_BASE_T:
- case ICE_DEV_ID_E822L_SGMII:
- case ICE_DEV_ID_E823L_BACKPLANE:
- case ICE_DEV_ID_E823L_SFP:
- case ICE_DEV_ID_E823L_10G_BASE_T:
- case ICE_DEV_ID_E823L_1GBE:
- case ICE_DEV_ID_E823L_QSFP:
- return status;
- default:
- break;
- }
-
status = ice_get_orom_ver_info(hw);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 44a228530253..f0912e44d4ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -164,8 +164,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
parent = ice_sched_find_node_by_teid(pi->root,
le32_to_cpu(info->parent_teid));
if (!parent) {
- ice_debug(hw, ICE_DBG_SCHED,
- "Parent Node not found for parent_teid=0x%x\n",
+ ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
le32_to_cpu(info->parent_teid));
return ICE_ERR_PARAM;
}
@@ -704,8 +703,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
rl_prof_elem->prof_id_ref = 0;
status = ice_sched_del_rl_profile(hw, rl_prof_elem);
if (status) {
- ice_debug(hw, ICE_DBG_SCHED,
- "Remove rl profile failed\n");
+ ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
/* On error, free mem required */
list_del(&rl_prof_elem->list_entry);
devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
@@ -863,8 +861,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
for (i = 0; i < num_nodes; i++) {
status = ice_sched_add_node(pi, layer, &buf->generic[i]);
if (status) {
- ice_debug(hw, ICE_DBG_SCHED,
- "add nodes in SW DB failed status =%d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
status);
break;
}
@@ -872,8 +869,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
teid = le32_to_cpu(buf->generic[i].node_teid);
new_node = ice_sched_find_node_by_teid(parent, teid);
if (!new_node) {
- ice_debug(hw, ICE_DBG_SCHED,
- "Node is missing for teid =%d\n", teid);
+ ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
break;
}
@@ -1830,8 +1826,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
continue;
if (ice_sched_is_leaf_node_present(vsi_node)) {
- ice_debug(pi->hw, ICE_DBG_SCHED,
- "VSI has leaf nodes in TC %d\n", i);
+ ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
status = ICE_ERR_IN_USE;
goto exit_sched_rm_vsi_cfg;
}
@@ -1896,8 +1891,7 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
&pi->rl_prof_list[ln], list_entry) {
if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
- ice_debug(pi->hw, ICE_DBG_SCHED,
- "Removed rl profile\n");
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n");
}
}
}
@@ -2441,8 +2435,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
/* Remove old profile ID from database */
status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
if (status && status != ICE_ERR_IN_USE)
- ice_debug(pi->hw, ICE_DBG_SCHED,
- "Remove rl profile failed\n");
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
break;
}
if (status == ICE_ERR_IN_USE)
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index c3a6c41385ee..c33612132ddf 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -537,8 +537,7 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
break;
default:
- ice_debug(pi->hw, ICE_DBG_SW,
- "incorrect VSI/port type received\n");
+ ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
break;
}
}
@@ -1476,8 +1475,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
tmp_fltr_info.vsi_handle = rem_vsi_handle;
status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
if (status) {
- ice_debug(hw, ICE_DBG_SW,
- "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+ ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
tmp_fltr_info.fwd_id.hw_vsi_id, status);
return status;
}
@@ -1493,8 +1491,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
/* Remove the VSI list since it is no longer used */
status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
if (status) {
- ice_debug(hw, ICE_DBG_SW,
- "Failed to remove VSI list %d, error %d\n",
+ ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
vsi_list_id, status);
return status;
}
@@ -1853,8 +1850,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
*/
if (v_list_itr->vsi_count > 1 &&
v_list_itr->vsi_list_info->ref_cnt > 1) {
- ice_debug(hw, ICE_DBG_SW,
- "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
+ ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
status = ICE_ERR_CFG;
goto exit;
}
@@ -2740,8 +2736,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
ice_aqc_opc_free_res, NULL);
if (status)
- ice_debug(hw, ICE_DBG_SW,
- "counter resource could not be freed\n");
+ ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
kfree(buf);
return status;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 77d5eae6b4c2..a2d0aad8cfdd 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -762,13 +762,15 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
/**
* ice_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buf: buffer containing the page
+ * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling ice_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
* pointing to; otherwise, the DMA mapping needs to be destroyed and
* page freed
*/
-static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+static bool
+ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
@@ -779,7 +781,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define ICE_LAST_OFFSET \
@@ -864,17 +866,24 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
* @rx_ring: Rx descriptor ring to transact packets on
* @skb: skb to be used
* @size: size of buffer to add to skb
+ * @rx_buf_pgcnt: rx_buf page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
- const unsigned int size)
+ const unsigned int size, int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+ *rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buf->page);
+#else
+ 0;
+#endif
prefetchw(rx_buf->page);
*skb = rx_buf->skb;
@@ -1006,12 +1015,15 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* ice_put_rx_buf - Clean up used buffer and either recycle or free
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
+ * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
*
* This function will update next_to_clean and then clean up the contents
* of the rx_buf. It will either recycle the buffer or unmap it and free
* the associated resources.
*/
-static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+static void
+ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ int rx_buf_pgcnt)
{
u16 ntc = rx_ring->next_to_clean + 1;
@@ -1022,7 +1034,7 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (!rx_buf)
return;
- if (ice_can_reuse_rx_page(rx_buf)) {
+ if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
} else {
@@ -1097,6 +1109,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
+ int rx_buf_pgcnt;
u16 vlan_tag = 0;
u8 rx_ptype;
@@ -1119,7 +1132,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
dma_rmb();
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
- ice_put_rx_buf(rx_ring, NULL);
+ ice_put_rx_buf(rx_ring, NULL, 0);
cleaned_count++;
continue;
}
@@ -1128,7 +1141,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
+ rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);
if (!size) {
xdp.data = NULL;
@@ -1168,7 +1181,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
total_rx_pkts++;
cleaned_count++;
- ice_put_rx_buf(rx_ring, rx_buf);
+ ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
continue;
construct_skb:
if (skb) {
@@ -1187,7 +1200,7 @@ construct_skb:
break;
}
- ice_put_rx_buf(rx_ring, rx_buf);
+ ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
cleaned_count++;
/* skip if it is NOP desc */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 0286d2fceee4..aaa954aae574 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -138,6 +138,8 @@ struct vf_mac_filter {
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_1536 1536
@@ -247,6 +249,9 @@ enum igb_tx_flags {
#define IGB_SFF_ADDRESSING_MODE 0x4
#define IGB_SFF_8472_UNSUP 0x00
+/* TX resources are shared between XDP and netstack
+ * and we need to tag the buffer type to distinguish them
+ */
enum igb_tx_buf_type {
IGB_TYPE_SKB = 0,
IGB_TYPE_XDP,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 6a4ef4934fcf..03f78fdb0dcd 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2824,20 +2824,25 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
{
- int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
struct igb_adapter *adapter = netdev_priv(dev);
+ struct bpf_prog *prog = bpf->prog, *old_prog;
bool running = netif_running(dev);
- struct bpf_prog *old_prog;
bool need_reset;
/* verify igb ring attributes are sufficient for XDP */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
- if (frame_size > igb_rx_bufsz(ring))
+ if (frame_size > igb_rx_bufsz(ring)) {
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "The RX buffer size is too small for the frame size");
+ netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
+ igb_rx_bufsz(ring), frame_size);
return -EINVAL;
+ }
}
old_prog = xchg(&adapter->xdp_prog, prog);
@@ -2869,7 +2874,7 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
- return igb_xdp_setup(dev, xdp->prog);
+ return igb_xdp_setup(dev, xdp);
default:
return -EINVAL;
}
@@ -2910,10 +2915,12 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
*/
tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
- return -ENXIO;
+ return IGB_XDP_CONSUMED;
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ nq->trans_start = jiffies;
ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
__netif_tx_unlock(nq);
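
This hunk and the igb_xdp_xmit() one just below take the same per-queue lock the stack uses and refresh the queue's trans_start while holding it; without that, a queue carrying only XDP traffic looks idle to the TX watchdog and can trigger a spurious timeout. A toy model of the sequence, where every name is a stand-in for the corresponding netdev primitive:

struct demo_txq {
    int locked;                 /* models __netif_tx_lock()/__netif_tx_unlock() */
    unsigned long trans_start;  /* models nq->trans_start */
};

static unsigned long demo_jiffies;    /* models the kernel jiffies counter */

static int demo_xdp_xmit(struct demo_txq *nq)
{
    nq->locked = 1;                   /* serialize with the stack's xmit path */
    nq->trans_start = demo_jiffies;   /* keep the TX watchdog from firing */
    /* ... place the xdp_frame on the shared descriptor ring here ... */
    nq->locked = 0;
    return 0;
}
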
@@ -2946,6 +2953,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ nq->trans_start = jiffies;
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
@@ -3950,8 +3960,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
/* set default work limits */
adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
+ adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
spin_lock_init(&adapter->nfc_lock);
@@ -6491,7 +6500,7 @@ static void igb_get_stats64(struct net_device *netdev,
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
if (adapter->xdp_prog) {
int i;
@@ -6500,7 +6509,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_ring *ring = adapter->rx_ring[i];
if (max_frame > igb_rx_bufsz(ring)) {
- netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n");
+ netdev_warn(adapter->netdev,
+ "Requested MTU size is not supported with XDP. Max frame size is %d\n",
+ max_frame);
return -EINVAL;
}
}
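
IGB_ETH_PKT_HDR_PAD, defined in igb.h above, replaces the open-coded sums in igb_xdp_setup(), igb_sw_init() and igb_change_mtu(), and budgets for two VLAN tags where the old expression counted one: 14 bytes of Ethernet header, 4 of FCS and 2 * 4 bytes of VLAN tag, i.e. 26 bytes on top of the MTU. The arithmetic and the resulting fit check, in miniature:

#include <stdbool.h>

/* 14 (Ethernet header) + 4 (FCS) + 2 * 4 (two VLAN tags) = 26 bytes */
#define DEMO_ETH_PKT_HDR_PAD (14 + 4 + (4 * 2))

/* the frame fits only if MTU plus overhead does not exceed the Rx buffer */
static bool demo_mtu_fits_rx_buffer(int mtu, int rx_bufsz)
{
    return mtu + DEMO_ETH_PKT_HDR_PAD <= rx_bufsz;
}
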
@@ -8351,6 +8362,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
SKB_DATA_ALIGN(xdp->data_end -
xdp->data_hard_start);
#endif
+ unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
/* prefetch first cache line of first page */
@@ -8365,6 +8377,9 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
/* pull timestamp out of packet data */
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
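
igb_build_skb() now propagates XDP metadata: the gap between data_meta and data is measured and handed to skb_metadata_set() after the payload has been reserved and put. A sketch of the xdp_buff pointer arithmetic reduced to its three lengths; the struct fields mirror xdp_buff, everything else is illustrative:

#include <stddef.h>
#include <stdint.h>

struct demo_xdp_buff {
    uint8_t *data_hard_start;
    uint8_t *data_meta;
    uint8_t *data;
    uint8_t *data_end;
};

struct demo_skb_layout {
    size_t headroom;   /* skb_reserve() amount */
    size_t datalen;    /* __skb_put() amount */
    size_t metasize;   /* skb_metadata_set() amount, 0 when no metadata */
};

static struct demo_skb_layout
demo_layout_from_xdp(const struct demo_xdp_buff *x)
{
    struct demo_skb_layout l;

    l.headroom = (size_t)(x->data - x->data_hard_start);
    l.datalen  = (size_t)(x->data_end - x->data);
    l.metasize = (size_t)(x->data - x->data_meta);
    return l;
}
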
@@ -8771,7 +8786,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rx_ring->skb = skb;
if (xdp_xmit & IGB_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & IGB_XDP_TX) {
struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index fd37d2c203af..d0700d48ecf9 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -213,6 +213,7 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
case IGC_DEV_ID_I220_V:
case IGC_DEV_ID_I225_K:
case IGC_DEV_ID_I225_K2:
+ case IGC_DEV_ID_I226_K:
case IGC_DEV_ID_I225_LMVP:
case IGC_DEV_ID_I225_IT:
case IGC_DEV_ID_I226_LM:
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 55dae7c4703f..9da5f83ce456 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -23,6 +23,7 @@
#define IGC_DEV_ID_I225_K 0x3100
#define IGC_DEV_ID_I225_K2 0x3101
#define IGC_DEV_ID_I225_LMVP 0x5502
+#define IGC_DEV_ID_I226_K 0x5504
#define IGC_DEV_ID_I225_IT 0x0D9F
#define IGC_DEV_ID_I226_LM 0x125B
#define IGC_DEV_ID_I226_V 0x125C
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index b673ac1199bb..afd6a62da29d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -45,6 +45,7 @@ static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 50e6b8b6ba7b..393d1c2cd853 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1945,7 +1945,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -1956,7 +1957,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
/* The last offset is a bit aggressive in that we assume the
@@ -2021,11 +2022,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff **skb,
- const unsigned int size)
+ const unsigned int size,
+ int *rx_buffer_pgcnt)
{
struct ixgbe_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
prefetchw(rx_buffer->page);
*skb = rx_buffer->skb;
@@ -2055,9 +2063,10 @@ skip_sync:
static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ int rx_buffer_pgcnt)
{
- if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+ if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -2303,6 +2312,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
+ int rx_buffer_pgcnt;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
@@ -2322,7 +2332,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
dma_rmb();
- rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+ rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
@@ -2367,7 +2377,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
break;
}
- ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+ ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
cleaned_count++;
/* place incomplete frames back on ring for completion */
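The ixgbe hunks above sample page_count() once, before the buffer is processed, and pass the cached value into the reuse test. A condensed sketch of the resulting logic, with hypothetical type and function names; the point is that the refcount snapshot and the comparison against pagecnt_bias come from the same moment, so a concurrent put_page() from the stack cannot skew the decision:

#include <linux/types.h>
#include <linux/mm.h>

struct rx_buffer {
	struct page *page;
	unsigned int pagecnt_bias;
};

/* Sample the page refcount up front (only meaningful for the < 8K layout)... */
static int rx_buffer_sample_pgcnt(const struct rx_buffer *rx_buffer)
{
#if (PAGE_SIZE < 8192)
	return page_count(rx_buffer->page);
#else
	return 0;
#endif
}

/* ...and use that cached snapshot when deciding whether to recycle. */
static bool rx_page_can_be_reused(const struct rx_buffer *rx_buffer, int rx_buffer_pgcnt)
{
#if (PAGE_SIZE < 8192)
	/* reuse only if the driver holds the sole reference to the page */
	if (unlikely((rx_buffer_pgcnt - rx_buffer->pagecnt_bias) > 1))
		return false;
#endif
	return true;
}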
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 67471cb2b129..24c2bfdfec4e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -497,18 +497,14 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
int rvu_npa_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
- int blkaddr, err;
+ int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
if (blkaddr < 0)
return 0;
/* Initialize admin queue */
- err = npa_aq_init(rvu, &hw->block[blkaddr]);
- if (err)
- return err;
-
- return 0;
+ return npa_aq_init(rvu, &hw->block[blkaddr]);
}
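The rvu_npa_init() change above (and the otx2, prestera, mtk and mlx4 hunks that follow) all apply the same cleanup: when a temporary error variable is only checked and returned unchanged, return the callee's result directly. Illustrative before/after, assuming a hypothetical setup() callee:

int setup(void);	/* hypothetical callee */

int init_block_before(void)
{
	int err;

	err = setup();
	if (err)
		return err;

	return 0;
}

int init_block_after(void)
{
	return setup();
}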
void rvu_npa_freemem(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 7bcf5246350f..56390a664517 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -12,7 +12,6 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct ptp_req *req;
- int err;
if (!ptp->nic)
return -ENODEV;
@@ -24,11 +23,7 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
req->op = PTP_OP_ADJFINE;
req->scaled_ppm = scaled_ppm;
- err = otx2_sync_mbox_msg(&ptp->nic->mbox);
- if (err)
- return err;
-
- return 0;
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
static u64 ptp_cc_read(const struct cyclecounter *cc)
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 0f20e0788cce..25dd903a3e92 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -93,15 +93,10 @@ static int prestera_port_open(struct net_device *dev)
static int prestera_port_close(struct net_device *dev)
{
struct prestera_port *port = netdev_priv(dev);
- int err;
netif_stop_queue(dev);
- err = prestera_hw_port_state_set(port, false);
- if (err)
- return err;
-
- return 0;
+ return prestera_hw_port_state_set(port, false);
}
static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
@@ -318,8 +313,10 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_port_init;
}
- if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX)
+ if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) {
+ err = -EINVAL;
goto err_port_init;
+ }
/* firmware requires that port's MAC address consist of the first
* 5 bytes of the base MAC address
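The prestera hunk above also fixes a common error-path bug: jumping to the unwind label without assigning err first makes the function return whatever err last held (typically 0, i.e. success). A generic sketch of the corrected shape, with hypothetical names:

#include <linux/errno.h>

#define MAC_ADDR_NUM_MAX	255	/* hypothetical limit */

struct port { unsigned int fp_id; };

int port_init(struct port *port);	/* hypothetical */
void port_fini(struct port *port);	/* hypothetical */

static int port_create(struct port *port)
{
	int err;

	err = port_init(port);
	if (err)
		return err;

	if (port->fp_id >= MAC_ADDR_NUM_MAX) {
		err = -EINVAL;		/* must be set before taking the goto */
		goto err_port_init;
	}

	return 0;

err_port_init:
	port_fini(port);
	return err;
}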
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 0fe97155dd8f..6bc9f2487384 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -241,17 +241,13 @@ out:
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
- int err, path;
+ int path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
MTK_ETH_PATH_GMAC2_SGMII;
/* Setup proper MUXes along the path */
- err = mtk_eth_mux_setup(eth, path);
- if (err)
- return err;
-
- return 0;
+ return mtk_eth_mux_setup(eth, path);
}
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 3b8576b9c2f9..f7053a74e6a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -462,19 +462,14 @@ EXPORT_SYMBOL_GPL(mlx4_cq_free);
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
- int err;
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
if (mlx4_is_slave(dev))
return 0;
- err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
- dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
- if (err)
- return err;
-
- return 0;
+ return mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
+ dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
}
void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 157f7eef92f1..32aad4d32b88 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1378,8 +1378,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
tx_ring->cons, tx_ring->prod);
priv->port_stats.tx_timeout++;
- en_dbg(DRV, priv, "Scheduling watchdog\n");
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
+ en_dbg(DRV, priv, "Scheduling port restart\n");
+ queue_work(mdev->workqueue, &priv->restart_task);
+ }
}
@@ -1733,6 +1735,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
+ clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
if (t != TX_XDP) {
tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
tx_ring->recycle_ring = NULL;
@@ -1829,6 +1832,7 @@ int mlx4_en_start_port(struct net_device *dev)
local_bh_enable();
}
+ clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -1999,7 +2003,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
static void mlx4_en_restart(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
- watchdog_task);
+ restart_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
@@ -2376,7 +2380,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (netif_running(dev)) {
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
- /* NIC is probably restarting - let watchdog task reset
+ /* NIC is probably restarting - let restart task reset
* the port */
en_dbg(DRV, priv, "Change MTU called with card down!?\n");
} else {
@@ -2385,7 +2389,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (err) {
en_err(priv, "Failed restarting port:%d\n",
priv->port);
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
+ &priv->state))
+ queue_work(mdev->workqueue, &priv->restart_task);
}
}
mutex_unlock(&mdev->state_lock);
@@ -2791,7 +2797,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
if (err) {
en_err(priv, "Failed starting port %d for XDP change\n",
priv->port);
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+ queue_work(mdev->workqueue, &priv->restart_task);
}
}
@@ -3164,7 +3171,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
- INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+ INIT_WORK(&priv->restart_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index b15ec32758a3..31b74bddb7cd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -392,6 +392,35 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
+static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
+ u16 cqe_index, struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_info *tx_info;
+ struct mlx4_en_tx_desc *tx_desc;
+ u16 wqe_index;
+ int desc_size;
+
+ en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
+ ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
+ false);
+
+ wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
+ tx_info = &ring->tx_info[wqe_index];
+ desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
+ en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
+ wqe_index, desc_size);
+ tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);
+
+ if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+ return;
+
+ en_err(priv, "Scheduling port restart\n");
+ queue_work(mdev->workqueue, &priv->restart_task);
+}
+
int mlx4_en_process_tx_cq(struct net_device *dev,
struct mlx4_en_cq *cq, int napi_budget)
{
@@ -438,13 +467,10 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
dma_rmb();
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
- MLX4_CQE_OPCODE_ERROR)) {
- struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
-
- en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
- cqe_err->vendor_err_syndrome,
- cqe_err->syndrome);
- }
+ MLX4_CQE_OPCODE_ERROR))
+ if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
+ mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
+ ring);
/* Skip over last polled CQE */
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
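The error-CQE path above leans on a "schedule recovery at most once" idiom: MLX4_EN_STATE_FLAG_RESTARTING is set with test_and_set_bit() before the restart work is queued, so racing error CQEs, TX timeouts, or failed MTU/XDP reconfigurations cannot queue the same work twice, and the bit is cleared again once the port is back up. A minimal sketch of the idiom with hypothetical names:

#include <linux/bitops.h>
#include <linux/workqueue.h>

enum { MY_STATE_FLAG_RESTARTING };

struct my_priv {
	unsigned long state;
	struct work_struct restart_task;
	struct workqueue_struct *wq;
};

static void my_schedule_restart(struct my_priv *priv)
{
	/* test_and_set_bit() is atomic: only the caller that observes the
	 * bit clear queues the work; every later caller bails out early. */
	if (test_and_set_bit(MY_STATE_FLAG_RESTARTING, &priv->state))
		return;

	queue_work(priv->wq, &priv->restart_task);
}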
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 1c50d0f22199..17f2b1919378 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -251,6 +251,10 @@ struct mlx4_en_page_cache {
} buf[MLX4_EN_CACHE_SIZE];
};
+enum {
+ MLX4_EN_TX_RING_STATE_RECOVERING,
+};
+
struct mlx4_en_priv;
struct mlx4_en_tx_ring {
@@ -297,6 +301,7 @@ struct mlx4_en_tx_ring {
* Only queue_stopped might be used if BQL is not properly working.
*/
unsigned long queue_stopped;
+ unsigned long state;
struct mlx4_hwq_resources sp_wqres;
struct mlx4_qp sp_qp;
struct mlx4_qp_context sp_context;
@@ -510,6 +515,10 @@ struct mlx4_en_stats_bitmap {
struct mutex mutex; /* for mutual access to stats bitmap */
};
+enum {
+ MLX4_EN_STATE_FLAG_RESTARTING,
+};
+
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -575,7 +584,7 @@ struct mlx4_en_priv {
struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct rx_mode_task;
- struct work_struct watchdog_task;
+ struct work_struct restart_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct delayed_work service_task;
@@ -620,6 +629,7 @@ struct mlx4_en_priv {
u32 pflags;
u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
u8 rss_hash_fn;
+ unsigned long state;
};
enum mlx4_en_wol {
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index cbe4d9746ddf..dd890f5d7b72 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -272,19 +272,14 @@ EXPORT_SYMBOL_GPL(mlx4_srq_query);
int mlx4_init_srq_table(struct mlx4_dev *dev)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
- int err;
spin_lock_init(&srq_table->lock);
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
if (mlx4_is_slave(dev))
return 0;
- err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
- dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
- if (err)
- return err;
-
- return 0;
+ return mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
+ dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
}
void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 99f1ec3b2575..6e4d7bb7fea2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -6,6 +6,7 @@
config MLX5_CORE
tristate "Mellanox 5th generation network adapters (ConnectX series) core driver"
depends on PCI
+ select AUXILIARY_BUS
select NET_DEVLINK
depends on VXLAN || !VXLAN
depends on MLXFW || !MLXFW
@@ -198,6 +199,7 @@ config MLX5_EN_TLS
config MLX5_SW_STEERING
bool "Mellanox Technologies software-managed steering"
depends on MLX5_CORE_EN && MLX5_ESWITCH
+ select CRC32
default y
help
Build support for software-managed steering in the NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 83a67ca43a41..77961643d5a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -25,7 +25,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
- en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o
+ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o
#
# Netdev extra
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 1972ddd12704..b051417ede67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -31,313 +31,484 @@
*/
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
+#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include "mlx5_core.h"
-static LIST_HEAD(intf_list);
-static LIST_HEAD(mlx5_dev_list);
/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
+static DEFINE_IDA(mlx5_adev_ida);
-struct mlx5_device_context {
- struct list_head list;
- struct mlx5_interface *intf;
- void *context;
- unsigned long state;
-};
+static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
+{
+ if (!IS_ENABLED(CONFIG_MLX5_ESWITCH))
+ return false;
-enum {
- MLX5_INTERFACE_ADDED,
- MLX5_INTERFACE_ATTACHED,
-};
+ if (!MLX5_ESWITCH_MANAGER(dev))
+ return false;
+ if (!is_mdev_switchdev_mode(dev))
+ return false;
-void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+ return true;
+}
+
+static bool is_eth_supported(struct mlx5_core_dev *dev)
{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+ if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
+ return false;
- if (!mlx5_lag_intf_add(intf, priv))
- return;
+ if (is_eth_rep_supported(dev))
+ return false;
- dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
- if (!dev_ctx)
- return;
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return false;
- dev_ctx->intf = intf;
+ if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {
+ mlx5_core_warn(dev, "Missing eth_net_offloads capability\n");
+ return false;
+ }
- dev_ctx->context = intf->add(dev);
- if (dev_ctx->context) {
- set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
- if (intf->attach)
- set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+ if (!MLX5_CAP_GEN(dev, nic_flow_table)) {
+ mlx5_core_warn(dev, "Missing nic_flow_table capability\n");
+ return false;
+ }
- spin_lock_irq(&priv->ctx_lock);
- list_add_tail(&dev_ctx->list, &priv->ctx_list);
- spin_unlock_irq(&priv->ctx_lock);
+ if (!MLX5_CAP_ETH(dev, csum_cap)) {
+ mlx5_core_warn(dev, "Missing csum_cap capability\n");
+ return false;
}
- if (!dev_ctx->context)
- kfree(dev_ctx);
+ if (!MLX5_CAP_ETH(dev, max_lso_cap)) {
+ mlx5_core_warn(dev, "Missing max_lso_cap capability\n");
+ return false;
+ }
+
+ if (!MLX5_CAP_ETH(dev, vlan_cap)) {
+ mlx5_core_warn(dev, "Missing vlan_cap capability\n");
+ return false;
+ }
+
+ if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) {
+ mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n");
+ return false;
+ }
+
+ if (MLX5_CAP_FLOWTABLE(dev,
+ flow_table_properties_nic_receive.max_ft_level) < 3) {
+ mlx5_core_warn(dev, "max_ft_level < 3\n");
+ return false;
+ }
+
+ if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable))
+ mlx5_core_warn(dev, "Self loop back prevention is not supported\n");
+ if (!MLX5_CAP_GEN(dev, cq_moderation))
+ mlx5_core_warn(dev, "CQ moderation is not supported\n");
+
+ return true;
}
-static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
- struct mlx5_priv *priv)
+static bool is_vnet_supported(struct mlx5_core_dev *dev)
{
- struct mlx5_device_context *dev_ctx;
+ if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
+ return false;
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf == intf)
- return dev_ctx;
- return NULL;
+ if (mlx5_core_is_pf(dev))
+ return false;
+
+ if (!(MLX5_CAP_GEN_64(dev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
+ return false;
+
+ if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) &
+ MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
+ return false;
+
+ if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type))
+ return false;
+
+ return true;
}
-void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+ if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
+ return false;
- dev_ctx = mlx5_get_device(intf, priv);
- if (!dev_ctx)
- return;
+ if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
+ return false;
- spin_lock_irq(&priv->ctx_lock);
- list_del(&dev_ctx->list);
- spin_unlock_irq(&priv->ctx_lock);
+ if (!is_eth_rep_supported(dev))
+ return false;
- if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
- intf->remove(dev, dev_ctx->context);
+ if (!MLX5_ESWITCH_MANAGER(dev))
+ return false;
- kfree(dev_ctx);
+ if (!is_mdev_switchdev_mode(dev))
+ return false;
+
+ if (mlx5_core_mp_enabled(dev))
+ return false;
+
+ return true;
}
-static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
+static bool is_mp_supported(struct mlx5_core_dev *dev)
{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- dev_ctx = mlx5_get_device(intf, priv);
- if (!dev_ctx)
- return;
-
- if (intf->attach) {
- if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
- return;
- if (intf->attach(dev, dev_ctx->context))
- return;
- set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
- } else {
- if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
- return;
- dev_ctx->context = intf->add(dev);
- if (!dev_ctx->context)
- return;
- set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
- }
+ if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
+ return false;
+
+ if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
+ return false;
+
+ if (is_ib_rep_supported(dev))
+ return false;
+
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return false;
+
+ if (!mlx5_core_is_mp_slave(dev))
+ return false;
+
+ return true;
}
-void mlx5_attach_device(struct mlx5_core_dev *dev)
+static bool is_ib_supported(struct mlx5_core_dev *dev)
{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
+ if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
+ return false;
- mutex_lock(&mlx5_intf_mutex);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_attach_interface(intf, priv);
- mutex_unlock(&mlx5_intf_mutex);
+ if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
+ return false;
+
+ if (is_ib_rep_supported(dev))
+ return false;
+
+ if (is_mp_supported(dev))
+ return false;
+
+ return true;
+}
+
+enum {
+ MLX5_INTERFACE_PROTOCOL_ETH_REP,
+ MLX5_INTERFACE_PROTOCOL_ETH,
+
+ MLX5_INTERFACE_PROTOCOL_IB_REP,
+ MLX5_INTERFACE_PROTOCOL_MPIB,
+ MLX5_INTERFACE_PROTOCOL_IB,
+
+ MLX5_INTERFACE_PROTOCOL_VNET,
+};
+
+static const struct mlx5_adev_device {
+ const char *suffix;
+ bool (*is_supported)(struct mlx5_core_dev *dev);
+} mlx5_adev_devices[] = {
+ [MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet",
+ .is_supported = &is_vnet_supported },
+ [MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma",
+ .is_supported = &is_ib_supported },
+ [MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
+ .is_supported = &is_eth_supported },
+ [MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
+ .is_supported = &is_eth_rep_supported },
+ [MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
+ .is_supported = &is_ib_rep_supported },
+ [MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
+ .is_supported = &is_mp_supported },
+};
+
+int mlx5_adev_idx_alloc(void)
+{
+ return ida_alloc(&mlx5_adev_ida, GFP_KERNEL);
}
-static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
+void mlx5_adev_idx_free(int idx)
{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- dev_ctx = mlx5_get_device(intf, priv);
- if (!dev_ctx)
- return;
-
- if (intf->detach) {
- if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
- return;
- intf->detach(dev, dev_ctx->context);
- clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
- } else {
- if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
- return;
- intf->remove(dev, dev_ctx->context);
- clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
- }
+ ida_free(&mlx5_adev_ida, idx);
}
-void mlx5_detach_device(struct mlx5_core_dev *dev)
+int mlx5_adev_init(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
- mutex_lock(&mlx5_intf_mutex);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_detach_interface(intf, priv);
- mutex_unlock(&mlx5_intf_mutex);
+ priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices),
+ sizeof(struct mlx5_adev *), GFP_KERNEL);
+ if (!priv->adev)
+ return -ENOMEM;
+
+ return 0;
}
-bool mlx5_device_registered(struct mlx5_core_dev *dev)
+void mlx5_adev_cleanup(struct mlx5_core_dev *dev)
{
- struct mlx5_priv *priv;
- bool found = false;
+ struct mlx5_priv *priv = &dev->priv;
- mutex_lock(&mlx5_intf_mutex);
- list_for_each_entry(priv, &mlx5_dev_list, dev_list)
- if (priv == &dev->priv)
- found = true;
- mutex_unlock(&mlx5_intf_mutex);
+ kfree(priv->adev);
+}
+
+static void adev_release(struct device *dev)
+{
+ struct mlx5_adev *mlx5_adev =
+ container_of(dev, struct mlx5_adev, adev.dev);
+ struct mlx5_priv *priv = &mlx5_adev->mdev->priv;
+ int idx = mlx5_adev->idx;
- return found;
+ kfree(mlx5_adev);
+ priv->adev[idx] = NULL;
}
-void mlx5_register_device(struct mlx5_core_dev *dev)
+static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx)
+{
+ const char *suffix = mlx5_adev_devices[idx].suffix;
+ struct auxiliary_device *adev;
+ struct mlx5_adev *madev;
+ int ret;
+
+ madev = kzalloc(sizeof(*madev), GFP_KERNEL);
+ if (!madev)
+ return ERR_PTR(-ENOMEM);
+
+ adev = &madev->adev;
+ adev->id = dev->priv.adev_idx;
+ adev->name = suffix;
+ adev->dev.parent = dev->device;
+ adev->dev.release = adev_release;
+ madev->mdev = dev;
+ madev->idx = idx;
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ kfree(madev);
+ return ERR_PTR(ret);
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ERR_PTR(ret);
+ }
+ return madev;
+}
+
+static void del_adev(struct auxiliary_device *adev)
+{
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+int mlx5_attach_device(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
+ struct auxiliary_device *adev;
+ struct auxiliary_driver *adrv;
+ int ret = 0, i;
mutex_lock(&mlx5_intf_mutex);
- list_add_tail(&priv->dev_list, &mlx5_dev_list);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_add_device(intf, priv);
+ for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
+ if (!priv->adev[i]) {
+ bool is_supported = false;
+
+ if (mlx5_adev_devices[i].is_supported)
+ is_supported = mlx5_adev_devices[i].is_supported(dev);
+
+ if (!is_supported)
+ continue;
+
+ priv->adev[i] = add_adev(dev, i);
+ if (IS_ERR(priv->adev[i])) {
+ ret = PTR_ERR(priv->adev[i]);
+ priv->adev[i] = NULL;
+ }
+ } else {
+ adev = &priv->adev[i]->adev;
+ adrv = to_auxiliary_drv(adev->dev.driver);
+
+ if (adrv->resume)
+ ret = adrv->resume(adev);
+ }
+ if (ret) {
+ mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
+ i, mlx5_adev_devices[i].suffix);
+
+ break;
+ }
+ }
mutex_unlock(&mlx5_intf_mutex);
+ return ret;
}
-void mlx5_unregister_device(struct mlx5_core_dev *dev)
+void mlx5_detach_device(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
+ struct auxiliary_device *adev;
+ struct auxiliary_driver *adrv;
+ pm_message_t pm = {};
+ int i;
mutex_lock(&mlx5_intf_mutex);
- list_for_each_entry_reverse(intf, &intf_list, list)
- mlx5_remove_device(intf, priv);
- list_del(&priv->dev_list);
+ for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
+ if (!priv->adev[i])
+ continue;
+
+ adev = &priv->adev[i]->adev;
+ adrv = to_auxiliary_drv(adev->dev.driver);
+
+ if (adrv->suspend) {
+ adrv->suspend(adev, pm);
+ continue;
+ }
+
+ del_adev(&priv->adev[i]->adev);
+ priv->adev[i] = NULL;
+ }
mutex_unlock(&mlx5_intf_mutex);
}
-int mlx5_register_interface(struct mlx5_interface *intf)
+int mlx5_register_device(struct mlx5_core_dev *dev)
{
- struct mlx5_priv *priv;
-
- if (!intf->add || !intf->remove)
- return -EINVAL;
+ int ret;
mutex_lock(&mlx5_intf_mutex);
- list_add_tail(&intf->list, &intf_list);
- list_for_each_entry(priv, &mlx5_dev_list, dev_list)
- mlx5_add_device(intf, priv);
+ dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
+ ret = mlx5_rescan_drivers_locked(dev);
mutex_unlock(&mlx5_intf_mutex);
+ if (ret)
+ mlx5_unregister_device(dev);
- return 0;
+ return ret;
}
-EXPORT_SYMBOL(mlx5_register_interface);
-void mlx5_unregister_interface(struct mlx5_interface *intf)
+void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
- struct mlx5_priv *priv;
-
mutex_lock(&mlx5_intf_mutex);
- list_for_each_entry(priv, &mlx5_dev_list, dev_list)
- mlx5_remove_device(intf, priv);
- list_del(&intf->list);
+ dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
+ mlx5_rescan_drivers_locked(dev);
mutex_unlock(&mlx5_intf_mutex);
}
-EXPORT_SYMBOL(mlx5_unregister_interface);
-/* Must be called with intf_mutex held */
-static bool mlx5_has_added_dev_by_protocol(struct mlx5_core_dev *mdev, int protocol)
+static int add_drivers(struct mlx5_core_dev *dev)
{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_interface *intf;
- bool found = false;
-
- list_for_each_entry(intf, &intf_list, list) {
- if (intf->protocol == protocol) {
- dev_ctx = mlx5_get_device(intf, &mdev->priv);
- if (dev_ctx && test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
- found = true;
- break;
+ struct mlx5_priv *priv = &dev->priv;
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
+ bool is_supported = false;
+
+ if (priv->adev[i])
+ continue;
+
+ if (mlx5_adev_devices[i].is_supported)
+ is_supported = mlx5_adev_devices[i].is_supported(dev);
+
+ if (!is_supported)
+ continue;
+
+ priv->adev[i] = add_adev(dev, i);
+ if (IS_ERR(priv->adev[i])) {
+ mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
+ i, mlx5_adev_devices[i].suffix);
+			/* We continue to rescan drivers and leave it to the
+			 * caller to decide whether to release everything or
+			 * continue.
+			 */
+ ret = PTR_ERR(priv->adev[i]);
+ priv->adev[i] = NULL;
}
}
-
- return found;
+ return ret;
}
-void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
+static void delete_drivers(struct mlx5_core_dev *dev)
{
- mutex_lock(&mlx5_intf_mutex);
- if (mlx5_has_added_dev_by_protocol(mdev, protocol)) {
- mlx5_remove_dev_by_protocol(mdev, protocol);
- mlx5_add_dev_by_protocol(mdev, protocol);
+ struct mlx5_priv *priv = &dev->priv;
+ bool delete_all;
+ int i;
+
+ delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
+
+ for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
+ bool is_supported = false;
+
+ if (!priv->adev[i])
+ continue;
+
+ if (mlx5_adev_devices[i].is_supported && !delete_all)
+ is_supported = mlx5_adev_devices[i].is_supported(dev);
+
+ if (is_supported)
+ continue;
+
+ del_adev(&priv->adev[i]->adev);
+ priv->adev[i] = NULL;
}
- mutex_unlock(&mlx5_intf_mutex);
}
-/* Must be called with intf_mutex held */
-void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
+/* This function is used after mlx5_core_dev is reconfigured.
+ */
+int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
- struct mlx5_interface *intf;
+ struct mlx5_priv *priv = &dev->priv;
- list_for_each_entry(intf, &intf_list, list)
- if (intf->protocol == protocol) {
- mlx5_add_device(intf, &dev->priv);
- break;
- }
-}
+ lockdep_assert_held(&mlx5_intf_mutex);
-/* Must be called with intf_mutex held */
-void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
-{
- struct mlx5_interface *intf;
+ delete_drivers(dev);
+ if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
+ return 0;
- list_for_each_entry(intf, &intf_list, list)
- if (intf->protocol == protocol) {
- mlx5_remove_device(intf, &dev->priv);
- break;
- }
+ return add_drivers(dev);
}
-static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
(dev->pdev->bus->number << 8) |
PCI_SLOT(dev->pdev->devfn));
}
-/* Must be called with intf_mutex held */
+static int next_phys_dev(struct device *dev, const void *data)
+{
+ struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
+ struct mlx5_core_dev *mdev = madev->mdev;
+ const struct mlx5_core_dev *curr = data;
+
+ if (!mlx5_core_is_pf(mdev))
+ return 0;
+
+ if (mdev == curr)
+ return 0;
+
+ if (mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
+ return 0;
+
+ return 1;
+}
+
+/* This function is called in two flows:
+ * 1. During initialization of mlx5_core_dev, where no locking is needed.
+ * 2. During the LAG configure stage, where the caller holds &mlx5_intf_mutex.
+ */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *res = NULL;
- struct mlx5_core_dev *tmp_dev;
- struct mlx5_priv *priv;
- u32 pci_id;
+ struct auxiliary_device *adev;
+ struct mlx5_adev *madev;
if (!mlx5_core_is_pf(dev))
return NULL;
- pci_id = mlx5_gen_pci_id(dev);
- list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
- tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
- if (!mlx5_core_is_pf(tmp_dev))
- continue;
-
- if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
- res = tmp_dev;
- break;
- }
- }
+ adev = auxiliary_find_device(NULL, dev, &next_phys_dev);
+ if (!adev)
+ return NULL;
- return res;
+ madev = container_of(adev, struct mlx5_adev, adev);
+ put_device(&adev->dev);
+ return madev->mdev;
}
-
void mlx5_dev_list_lock(void)
{
mutex_lock(&mlx5_intf_mutex);
}
-
void mlx5_dev_list_unlock(void)
{
mutex_unlock(&mlx5_intf_mutex);
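The dev.c rewrite above replaces the private mlx5_interface registration with the kernel's auxiliary bus. For readers unfamiliar with that API, a minimal, hedged sketch of how a single auxiliary device is published and torn down; the error handling mirrors add_adev()/del_adev() above, while the surrounding driver bits are hypothetical:

#include <linux/auxiliary_bus.h>
#include <linux/err.h>
#include <linux/slab.h>

struct my_adev {
	struct auxiliary_device adev;
};

static void my_adev_release(struct device *dev)
{
	struct my_adev *madev = container_of(dev, struct my_adev, adev.dev);

	kfree(madev);
}

static struct my_adev *my_publish_adev(struct device *parent, const char *name, u32 id)
{
	struct my_adev *madev;
	int ret;

	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
	if (!madev)
		return ERR_PTR(-ENOMEM);

	madev->adev.name = name;	/* matched against the auxiliary_driver id_table */
	madev->adev.id = id;
	madev->adev.dev.parent = parent;
	madev->adev.dev.release = my_adev_release;

	ret = auxiliary_device_init(&madev->adev);
	if (ret) {
		kfree(madev);
		return ERR_PTR(ret);
	}

	ret = auxiliary_device_add(&madev->adev);
	if (ret) {
		/* after a successful init, uninit drops the reference and the
		 * release callback frees madev */
		auxiliary_device_uninit(&madev->adev);
		return ERR_PTR(ret);
	}

	return madev;
}

static void my_unpublish_adev(struct my_adev *madev)
{
	auxiliary_device_delete(&madev->adev);
	auxiliary_device_uninit(&madev->adev);
}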
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index e2ed341648e4..3261d0dc1104 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -43,7 +43,7 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
u32 running_fw, stored_fw;
int err;
- err = devlink_info_driver_name_put(req, DRIVER_NAME);
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
if (err)
return err;
@@ -212,7 +212,7 @@ static int mlx5_devlink_fs_mode_validate(struct devlink *devlink, u32 id,
u8 eswitch_mode;
bool smfs_cap;
- eswitch_mode = mlx5_eswitch_mode(dev->priv.eswitch);
+ eswitch_mode = mlx5_eswitch_mode(dev);
smfs_cap = mlx5_fs_dr_is_supported(dev);
if (!smfs_cap) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 2f05b0f9de01..a1a81cfeb607 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -227,6 +227,7 @@ enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
MLX5E_PFLAG_XDP_TX_MPWQE,
MLX5E_PFLAG_SKB_TX_MPWQE,
+ MLX5E_PFLAG_TX_PORT_TS,
MLX5E_NUM_PFLAGS, /* Keep last */
};
@@ -282,10 +283,12 @@ struct mlx5e_cq {
u16 event_ctr;
struct napi_struct *napi;
struct mlx5_core_cq mcq;
- struct mlx5e_channel *channel;
+ struct mlx5e_ch_stats *ch_stats;
/* control */
+ struct net_device *netdev;
struct mlx5_core_dev *mdev;
+ struct mlx5e_priv *priv;
struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
@@ -329,6 +332,15 @@ struct mlx5e_tx_mpwqe {
u8 inline_on;
};
+struct mlx5e_skb_fifo {
+ struct sk_buff **fifo;
+ u16 *pc;
+ u16 *cc;
+ u16 mask;
+};
+
+struct mlx5e_ptpsq;
+
struct mlx5e_txqsq {
/* data path */
@@ -349,11 +361,10 @@ struct mlx5e_txqsq {
/* read only */
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
- u16 skb_fifo_mask;
struct mlx5e_sq_stats *stats;
struct {
struct mlx5e_sq_dma *dma_fifo;
- struct sk_buff **skb_fifo;
+ struct mlx5e_skb_fifo skb_fifo;
struct mlx5e_tx_wqe_info *wqe_info;
} db;
void __iomem *uar_map;
@@ -367,14 +378,17 @@ struct mlx5e_txqsq {
unsigned int hw_mtu;
struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
+ struct net_device *netdev;
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_priv *priv;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
- struct mlx5e_channel *channel;
int ch_ix;
int txq_ix;
u32 rate_limit;
struct work_struct recover_work;
+ struct mlx5e_ptpsq *ptpsq;
} ____cacheline_aligned_in_smp;
struct mlx5e_dma_info {
@@ -593,7 +607,6 @@ struct mlx5e_rq {
u8 map_dir; /* dma map direction */
} buff;
- struct mlx5e_channel *channel;
struct device *pdev;
struct net_device *netdev;
struct mlx5e_rq_stats *stats;
@@ -602,6 +615,8 @@ struct mlx5e_rq {
struct mlx5e_page_cache page_cache;
struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
+ struct mlx5e_icosq *icosq;
+ struct mlx5e_priv *priv;
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_post_rx_wqes post_wqes;
@@ -681,8 +696,11 @@ struct mlx5e_channel {
int cpu;
};
+struct mlx5e_port_ptp;
+
struct mlx5e_channels {
struct mlx5e_channel **c;
+ struct mlx5e_port_ptp *port_ptp;
unsigned int num;
struct mlx5e_params params;
};
@@ -697,6 +715,12 @@ struct mlx5e_channel_stats {
struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;
+struct mlx5e_port_ptp_stats {
+ struct mlx5e_ch_stats ch;
+ struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
+} ____cacheline_aligned_in_smp;
+
enum {
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
@@ -766,8 +790,10 @@ struct mlx5e_scratchpad {
struct mlx5e_priv {
/* priv data path fields - start */
- struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
+ /* +1 for port ptp ts */
+ struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC];
int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+ int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
#endif
@@ -802,12 +828,15 @@ struct mlx5e_priv {
struct net_device *netdev;
struct mlx5e_stats stats;
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_port_ptp_stats port_ptp_stats;
u16 max_nch;
u8 max_opened_tc;
+ bool port_ptp_opened;
struct hwtstamp_config tstamp;
u16 q_counter;
u16 drop_rq_q_counter;
struct notifier_block events_nb;
+ int num_tc_x_num_ch;
struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -923,9 +952,17 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
+struct mlx5e_create_cq_param {
+ struct napi_struct *napi;
+ struct mlx5e_ch_stats *ch_stats;
+ int node;
+ int ix;
+};
+
struct mlx5e_cq_param;
-int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
- struct mlx5e_cq_param *param, struct mlx5e_cq *cq);
+int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
+ struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
+ struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);
int mlx5e_open_locked(struct net_device *netdev);
@@ -974,7 +1011,17 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
+void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
+void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);
+int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa);
+void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq);
+struct mlx5e_create_sq_param;
+int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_create_sq_param *csp,
+ u32 *sqn);
+void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
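The new mlx5e_skb_fifo added to en.h above is a power-of-two ring addressed through external producer/consumer counters. The matching push/pop helpers (defined in en/txrx.h elsewhere in this series) look roughly like the following sketch; the mask keeps the index inside the ring while the counters themselves are free-running:

static inline struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i)
{
	return &fifo->fifo[i & fifo->mask];
}

static inline void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
{
	*mlx5e_skb_fifo_get(fifo, (*fifo->pc)++) = skb;
}

static inline struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
{
	return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
}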
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index dc744702aee4..5749557749b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -287,8 +287,7 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
-bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type);
-bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev);
+u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt);
#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 69a05da0e3e3..718f8c0a4f6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -37,13 +37,12 @@ int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg)
int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = cq->channel->priv;
u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {};
u8 hw_status;
void *cqc;
int err;
- err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out);
+ err = mlx5_core_query_cq(cq->mdev, &cq->mcq, out);
if (err)
return err;
@@ -158,10 +157,8 @@ void mlx5e_health_channels_update(struct mlx5e_priv *priv)
DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
}
-int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn)
+int mlx5e_health_sq_to_ready(struct mlx5_core_dev *mdev, struct net_device *dev, u32 sqn)
{
- struct mlx5_core_dev *mdev = channel->mdev;
- struct net_device *dev = channel->netdev;
struct mlx5e_modify_sq_param msp = {};
int err;
@@ -206,21 +203,22 @@ out:
return err;
}
-int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel)
+int mlx5e_health_channel_eq_recover(struct net_device *dev, struct mlx5_eq_comp *eq,
+ struct mlx5e_ch_stats *stats)
{
u32 eqe_count;
- netdev_err(channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+ netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
eq->core.eqn, eq->core.cons_index, eq->core.irqn);
eqe_count = mlx5_eq_poll_irq_disabled(eq);
if (!eqe_count)
return -EIO;
- netdev_err(channel->netdev, "Recovered %d eqes on EQ 0x%x\n",
+ netdev_err(dev, "Recovered %d eqes on EQ 0x%x\n",
eqe_count, eq->core.eqn);
- channel->stats->eq_rearm++;
+ stats->eq_rearm++;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index b9aadddfd000..018262d0164b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -7,8 +7,6 @@
#include "en.h"
#include "diag/rsc_dump.h"
-#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
-
static inline bool cqe_syndrome_needs_recover(u8 syndrome)
{
return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
@@ -42,8 +40,9 @@ struct mlx5e_err_ctx {
void *ctx;
};
-int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn);
-int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel);
+int mlx5e_health_sq_to_ready(struct mlx5_core_dev *mdev, struct net_device *dev, u32 sqn);
+int mlx5e_health_channel_eq_recover(struct net_device *dev, struct mlx5_eq_comp *eq,
+ struct mlx5e_ch_stats *stats);
int mlx5e_health_recover_channels(struct mlx5e_priv *priv);
int mlx5e_health_report(struct mlx5e_priv *priv,
struct devlink_health_reporter *reporter, char *err_str,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 187007ad3349..807147d97a0f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -41,6 +41,15 @@ struct mlx5e_channel_param {
struct mlx5e_sq_param async_icosq;
};
+struct mlx5e_create_sq_param {
+ struct mlx5_wq_ctrl *wq_ctrl;
+ u32 cqn;
+ u32 ts_cqe_to_dest_cqn;
+ u32 tisn;
+ u8 tis_lst_sz;
+ u8 min_inline_mode;
+};
+
static inline bool mlx5e_qid_get_ch_if_in_group(struct mlx5e_params *params,
u16 qid,
enum mlx5e_rq_group group,
@@ -102,6 +111,7 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
/* Build queue parameters */
+void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c);
void mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
new file mode 100644
index 000000000000..351118985a57
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2020 Mellanox Technologies
+
+#include "en/ptp.h"
+#include "en/txrx.h"
+#include "lib/clock.h"
+
+struct mlx5e_skb_cb_hwtstamp {
+ ktime_t cqe_hwtstamp;
+ ktime_t port_hwtstamp;
+};
+
+void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb)
+{
+ memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
+}
+
+static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));
+ return (struct mlx5e_skb_cb_hwtstamp *)skb->cb;
+}
+
+static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
+ struct mlx5e_ptp_cq_stats *cq_stats)
+{
+ struct skb_shared_hwtstamps hwts = {};
+ ktime_t diff;
+
+ diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp -
+ mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp);
+
+	/* The maximum allowed difference is 1/128 of a second */
+ if (diff > (NSEC_PER_SEC >> 7)) {
+ cq_stats->abort++;
+ cq_stats->abort_abs_diff_ns += diff;
+ return;
+ }
+
+ hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp;
+ skb_tstamp_tx(skb, &hwts);
+}
+
+void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
+ ktime_t hwtstamp,
+ struct mlx5e_ptp_cq_stats *cq_stats)
+{
+ switch (hwtstamp_type) {
+ case (MLX5E_SKB_CB_CQE_HWTSTAMP):
+ mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp;
+ break;
+ case (MLX5E_SKB_CB_PORT_HWTSTAMP):
+ mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp;
+ break;
+ }
+
+	/* Once both CQEs have arrived, check and report the port tstamp, and
+	 * clear the skb cb, as the skb is about to be released.
+	 */
+ if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp ||
+ !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
+ return;
+
+ mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
+ memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
+}
+
+static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
+ struct mlx5_cqe64 *cqe,
+ int budget)
+{
+ struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ ktime_t hwtstamp;
+
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ ptpsq->cq_stats->err_cqe++;
+ goto out;
+ }
+
+ hwtstamp = mlx5_timecounter_cyc2time(ptpsq->txqsq.clock, get_cqe_ts(cqe));
+ mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
+ hwtstamp, ptpsq->cq_stats);
+ ptpsq->cq_stats->cqe++;
+
+out:
+ napi_consume_skb(skb, budget);
+}
+
+static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
+{
+ struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
+ struct mlx5_cqwq *cqwq = &cq->wq;
+ struct mlx5_cqe64 *cqe;
+ int work_done = 0;
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
+ return false;
+
+ cqe = mlx5_cqwq_get_cqe(cqwq);
+ if (!cqe)
+ return false;
+
+ do {
+ mlx5_cqwq_pop(cqwq);
+
+ mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
+ } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
+
+ mlx5_cqwq_update_db_record(cqwq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ return work_done == budget;
+}
+
+static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct mlx5e_port_ptp *c = container_of(napi, struct mlx5e_port_ptp,
+ napi);
+ struct mlx5e_ch_stats *ch_stats = c->stats;
+ bool busy = false;
+ int work_done = 0;
+ int i;
+
+ rcu_read_lock();
+
+ ch_stats->poll++;
+
+ for (i = 0; i < c->num_tc; i++) {
+ busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
+ busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
+ }
+
+ if (busy) {
+ work_done = budget;
+ goto out;
+ }
+
+ if (unlikely(!napi_complete_done(napi, work_done)))
+ goto out;
+
+ ch_stats->arm++;
+
+ for (i = 0; i < c->num_tc; i++) {
+ mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
+ mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
+ }
+
+out:
+ rcu_read_unlock();
+
+ return work_done;
+}
+
+static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_txqsq *sq, int tc,
+ struct mlx5e_ptpsq *ptpsq)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
+ struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ int err;
+ int node;
+
+ sq->pdev = c->pdev;
+ sq->tstamp = c->tstamp;
+ sq->clock = &mdev->clock;
+ sq->mkey_be = c->mkey_be;
+ sq->netdev = c->netdev;
+ sq->priv = c->priv;
+ sq->mdev = mdev;
+ sq->ch_ix = c->ix;
+ sq->txq_ix = txq_ix;
+ sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->min_inline_mode = params->tx_min_inline_mode;
+ sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ sq->stats = &c->priv->port_ptp_stats.sq[tc];
+ sq->ptpsq = ptpsq;
+ INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
+ if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+ set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
+ sq->stop_room = param->stop_room;
+
+ node = dev_to_node(mlx5_core_dma_dev(mdev));
+
+ param->wq.db_numa_node = node;
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ wq->db = &wq->db[MLX5_SND_DBR];
+
+ err = mlx5e_alloc_txqsq_db(sq, node);
+ if (err)
+ goto err_sq_wq_destroy;
+
+ return 0;
+
+err_sq_wq_destroy:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+
+ return err;
+}
+
+static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ mlx5_core_destroy_sq(mdev, sqn);
+}
+
+static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
+{
+ int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
+
+ ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
+ GFP_KERNEL, numa);
+ if (!ptpsq->skb_fifo.fifo)
+ return -ENOMEM;
+
+ ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
+ ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
+ ptpsq->skb_fifo.mask = wq_sz - 1;
+
+ return 0;
+}
+
+static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
+{
+ while (*skb_fifo->pc != *skb_fifo->cc) {
+ struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo);
+
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
+{
+ mlx5e_ptp_drain_skb_fifo(skb_fifo);
+ kvfree(skb_fifo->fifo);
+}
+
+static int mlx5e_ptp_open_txqsq(struct mlx5e_port_ptp *c, u32 tisn,
+ int txq_ix, struct mlx5e_ptp_params *cparams,
+ int tc, struct mlx5e_ptpsq *ptpsq)
+{
+ struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
+ struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
+ struct mlx5e_create_sq_param csp = {};
+ int err;
+
+ err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp,
+ txqsq, tc, ptpsq);
+ if (err)
+ return err;
+
+ csp.tisn = tisn;
+ csp.tis_lst_sz = 1;
+ csp.cqn = txqsq->cq.mcq.cqn;
+ csp.wq_ctrl = &txqsq->wq_ctrl;
+ csp.min_inline_mode = txqsq->min_inline_mode;
+ csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;
+
+ err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, &txqsq->sqn);
+ if (err)
+ goto err_free_txqsq;
+
+ err = mlx5e_ptp_alloc_traffic_db(ptpsq,
+ dev_to_node(mlx5_core_dma_dev(c->mdev)));
+ if (err)
+ goto err_free_txqsq;
+
+ return 0;
+
+err_free_txqsq:
+ mlx5e_free_txqsq(txqsq);
+
+ return err;
+}
+
+static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
+{
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+ struct mlx5_core_dev *mdev = sq->mdev;
+
+ mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
+ cancel_work_sync(&sq->recover_work);
+ mlx5e_ptp_destroy_sq(mdev, sq->sqn);
+ mlx5e_free_txqsq_descs(sq);
+ mlx5e_free_txqsq(sq);
+}
+
+static int mlx5e_ptp_open_txqsqs(struct mlx5e_port_ptp *c,
+ struct mlx5e_ptp_params *cparams)
+{
+ struct mlx5e_params *params = &cparams->params;
+ int ix_base;
+ int err;
+ int tc;
+
+ ix_base = params->num_tc * params->num_channels;
+
+ for (tc = 0; tc < params->num_tc; tc++) {
+ int txq_ix = ix_base + tc;
+
+ err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
+ cparams, tc, &c->ptpsq[tc]);
+ if (err)
+ goto close_txqsq;
+ }
+
+ return 0;
+
+close_txqsq:
+ for (--tc; tc >= 0; tc--)
+ mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
+
+ return err;
+}
+
+static void mlx5e_ptp_close_txqsqs(struct mlx5e_port_ptp *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
+}
+
+static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c,
+ struct mlx5e_ptp_params *cparams)
+{
+ struct mlx5e_params *params = &cparams->params;
+ struct mlx5e_create_cq_param ccp = {};
+ struct dim_cq_moder ptp_moder = {};
+ struct mlx5e_cq_param *cq_param;
+ int err;
+ int tc;
+
+ ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
+ ccp.ch_stats = c->stats;
+ ccp.napi = &c->napi;
+ ccp.ix = c->ix;
+
+ cq_param = &cparams->txq_sq_param.cqp;
+
+ for (tc = 0; tc < params->num_tc; tc++) {
+ struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;
+
+ err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
+ if (err)
+ goto out_err_txqsq_cq;
+ }
+
+ for (tc = 0; tc < params->num_tc; tc++) {
+ struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
+ struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];
+
+ err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
+ if (err)
+ goto out_err_ts_cq;
+
+ ptpsq->cq_stats = &c->priv->port_ptp_stats.cq[tc];
+ }
+
+ return 0;
+
+out_err_ts_cq:
+ for (--tc; tc >= 0; tc--)
+ mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
+ tc = params->num_tc;
+out_err_txqsq_cq:
+ for (--tc; tc >= 0; tc--)
+ mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
+
+ return err;
+}
+
+static void mlx5e_ptp_close_cqs(struct mlx5e_port_ptp *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
+}
+
+static void mlx5e_ptp_build_sq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq;
+
+ mlx5e_build_sq_param_common(priv, param);
+
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
+ param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ mlx5e_build_tx_cq_param(priv, params, &param->cqp);
+}
+
+static void mlx5e_ptp_build_params(struct mlx5e_port_ptp *c,
+ struct mlx5e_ptp_params *cparams,
+ struct mlx5e_params *orig)
+{
+ struct mlx5e_params *params = &cparams->params;
+
+ params->tx_min_inline_mode = orig->tx_min_inline_mode;
+ params->num_channels = orig->num_channels;
+ params->hard_mtu = orig->hard_mtu;
+ params->sw_mtu = orig->sw_mtu;
+ params->num_tc = orig->num_tc;
+
+ /* SQ */
+ params->log_sq_size = orig->log_sq_size;
+
+ mlx5e_ptp_build_sq_param(c->priv, params, &cparams->txq_sq_param);
+}
+
+static int mlx5e_ptp_open_queues(struct mlx5e_port_ptp *c,
+ struct mlx5e_ptp_params *cparams)
+{
+ int err;
+
+ err = mlx5e_ptp_open_cqs(c, cparams);
+ if (err)
+ return err;
+
+ napi_enable(&c->napi);
+
+ err = mlx5e_ptp_open_txqsqs(c, cparams);
+ if (err)
+ goto disable_napi;
+
+ return 0;
+
+disable_napi:
+ napi_disable(&c->napi);
+ mlx5e_ptp_close_cqs(c);
+
+ return err;
+}
+
+static void mlx5e_ptp_close_queues(struct mlx5e_port_ptp *c)
+{
+ mlx5e_ptp_close_txqsqs(c);
+ napi_disable(&c->napi);
+ mlx5e_ptp_close_cqs(c);
+}
+
+int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ u8 lag_port, struct mlx5e_port_ptp **cp)
+{
+ struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_ptp_params *cparams;
+ struct mlx5e_port_ptp *c;
+ unsigned int irq;
+ int err;
+ int eqn;
+
+ err = mlx5_vector2eqn(priv->mdev, 0, &eqn, &irq);
+ if (err)
+ return err;
+
+ c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
+ cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
+ if (!c || !cparams)
+ return -ENOMEM;
+
+ c->priv = priv;
+ c->mdev = priv->mdev;
+ c->tstamp = &priv->tstamp;
+ c->ix = 0;
+ c->pdev = mlx5_core_dma_dev(priv->mdev);
+ c->netdev = priv->netdev;
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+ c->num_tc = params->num_tc;
+ c->stats = &priv->port_ptp_stats.ch;
+ c->irq_desc = irq_to_desc(irq);
+ c->lag_port = lag_port;
+
+ netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);
+
+ mlx5e_ptp_build_params(c, cparams, params);
+
+ err = mlx5e_ptp_open_queues(c, cparams);
+ if (unlikely(err))
+ goto err_napi_del;
+
+ *cp = c;
+
+ kvfree(cparams);
+
+ return 0;
+
+err_napi_del:
+ netif_napi_del(&c->napi);
+
+ kvfree(cparams);
+ kvfree(c);
+ return err;
+}
+
+void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c)
+{
+ mlx5e_ptp_close_queues(c);
+ netif_napi_del(&c->napi);
+
+ kvfree(c);
+}
+
+void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
+}
+
+void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
+}
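For orientation, a hedged sketch of how the transmit side (added to en_tx.c later in this series) pairs with the timestamp CQ handler above: the skb cb is initialized and the skb pushed into the FIFO at send time, the ordinary TX completion reports the CQE timestamp, and the port-timestamp CQE popped above supplies the second value, at which point mlx5e_skb_cb_hwtstamp_handler() delivers the tstamp to the socket. The function names below are illustrative:

static void ptp_tx_prepare(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb)
{
	mlx5e_skb_cb_hwtstamp_init(skb);
	mlx5e_skb_fifo_push(&ptpsq->skb_fifo, skb);
}

static void ptp_tx_complete(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb, ktime_t cqe_ts)
{
	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
				      cqe_ts, ptpsq->cq_stats);
}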
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
new file mode 100644
index 000000000000..28aa5ae118f4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_PTP_H__
+#define __MLX5_EN_PTP_H__
+
+#include "en.h"
+#include "en/params.h"
+#include "en_stats.h"
+
+struct mlx5e_ptpsq {
+ struct mlx5e_txqsq txqsq;
+ struct mlx5e_cq ts_cq;
+ u16 skb_fifo_cc;
+ u16 skb_fifo_pc;
+ struct mlx5e_skb_fifo skb_fifo;
+ struct mlx5e_ptp_cq_stats *cq_stats;
+};
+
+struct mlx5e_port_ptp {
+ /* data path */
+ struct mlx5e_ptpsq ptpsq[MLX5E_MAX_NUM_TC];
+ struct napi_struct napi;
+ struct device *pdev;
+ struct net_device *netdev;
+ __be32 mkey_be;
+ u8 num_tc;
+ u8 lag_port;
+
+ /* data path - accessed per napi poll */
+ struct irq_desc *irq_desc;
+ struct mlx5e_ch_stats *stats;
+
+ /* control */
+ struct mlx5e_priv *priv;
+ struct mlx5_core_dev *mdev;
+ struct hwtstamp_config *tstamp;
+ DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
+ int ix;
+};
+
+struct mlx5e_ptp_params {
+ struct mlx5e_params params;
+ struct mlx5e_sq_param txq_sq_param;
+};
+
+int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ u8 lag_port, struct mlx5e_port_ptp **cp);
+void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c);
+void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c);
+void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c);
+
+enum {
+ MLX5E_SKB_CB_CQE_HWTSTAMP = BIT(0),
+ MLX5E_SKB_CB_PORT_HWTSTAMP = BIT(1),
+};
+
+void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
+ ktime_t hwtstamp,
+ struct mlx5e_ptp_cq_stats *cq_stats);
+
+void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb);
+#endif /* __MLX5_EN_PTP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 9913647a1faf..d80bbd17e5f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -87,7 +87,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
/* At this point, both the rq and the icosq are disabled */
- err = mlx5e_health_sq_to_ready(icosq->channel, icosq->sqn);
+ err = mlx5e_health_sq_to_ready(mdev, dev, icosq->sqn);
if (err)
goto out;
@@ -146,17 +146,16 @@ out:
static int mlx5e_rx_reporter_timeout_recover(void *ctx)
{
- struct mlx5e_icosq *icosq;
struct mlx5_eq_comp *eq;
struct mlx5e_rq *rq;
int err;
rq = ctx;
- icosq = &rq->channel->icosq;
eq = rq->cq.mcq.eq;
- err = mlx5e_health_channel_eq_recover(eq, rq->channel);
- if (err)
- clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
+
+ err = mlx5e_health_channel_eq_recover(rq->netdev, eq, rq->cq.ch_stats);
+ if (err && rq->icosq)
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &rq->icosq->state);
return err;
}
@@ -233,21 +232,13 @@ static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state,
static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = rq->channel->priv;
- struct mlx5e_icosq *icosq;
- u8 icosq_hw_state;
u16 wqe_counter;
int wqes_sz;
u8 hw_state;
u16 wq_head;
int err;
- icosq = &rq->channel->icosq;
- err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state);
- if (err)
- return err;
-
- err = mlx5_core_query_sq_state(priv->mdev, icosq->sqn, &icosq_hw_state);
+ err = mlx5e_query_rq_state(rq->mdev, rq->rqn, &hw_state);
if (err)
return err;
@@ -259,7 +250,7 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
if (err)
return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->channel->ix);
+ err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
if (err)
return err;
@@ -295,9 +286,18 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
if (err)
return err;
- err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg);
- if (err)
- return err;
+ if (rq->icosq) {
+ struct mlx5e_icosq *icosq = rq->icosq;
+ u8 icosq_hw_state;
+
+ err = mlx5_core_query_sq_state(rq->mdev, icosq->sqn, &icosq_hw_state);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg);
+ if (err)
+ return err;
+ }
err = devlink_fmsg_obj_nest_end(fmsg);
if (err)
@@ -557,25 +557,29 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
{
- struct mlx5e_icosq *icosq = &rq->channel->icosq;
- struct mlx5e_priv *priv = rq->channel->priv;
+ char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_icosq *icosq = rq->icosq;
+ struct mlx5e_priv *priv = rq->priv;
struct mlx5e_err_ctx err_ctx = {};
err_ctx.ctx = rq;
err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
err_ctx.dump = mlx5e_rx_reporter_dump_rq;
+
+ if (icosq)
+ snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
snprintf(err_str, sizeof(err_str),
- "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x",
- icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);
+ "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x",
+ rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
}
void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
{
- struct mlx5e_priv *priv = rq->channel->priv;
char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_priv *priv = rq->priv;
struct mlx5e_err_ctx err_ctx = {};
err_ctx.ctx = rq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 8be6eaa3eeb1..d7275c84313e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "health.h"
+#include "en/ptp.h"
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
@@ -15,7 +16,7 @@ static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
msleep(20);
}
- netdev_err(sq->channel->netdev,
+ netdev_err(sq->netdev,
"Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
sq->sqn, sq->cc, sq->pc);
@@ -41,8 +42,8 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
int err;
sq = ctx;
- mdev = sq->channel->mdev;
- dev = sq->channel->netdev;
+ mdev = sq->mdev;
+ dev = sq->netdev;
if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
return 0;
@@ -68,7 +69,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
 * pending WQEs. The SQ can now be safely reset.
*/
- err = mlx5e_health_sq_to_ready(sq->channel, sq->sqn);
+ err = mlx5e_health_sq_to_ready(mdev, dev, sq->sqn);
if (err)
goto out;
@@ -99,8 +100,8 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
to_ctx = ctx;
sq = to_ctx->sq;
eq = sq->cq.mcq.eq;
- priv = sq->channel->priv;
- err = mlx5e_health_channel_eq_recover(eq, sq->channel);
+ priv = sq->priv;
+ err = mlx5e_health_channel_eq_recover(sq->netdev, eq, sq->cq.ch_stats);
if (!err) {
to_ctx->status = 0; /* this sq recovered */
return err;
@@ -141,11 +142,11 @@ static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
}
static int
-mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
- struct mlx5e_txqsq *sq, int tc)
+mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
+ struct mlx5e_txqsq *sq, int tc)
{
- struct mlx5e_priv *priv = sq->channel->priv;
bool stopped = netif_xmit_stopped(sq->txq);
+ struct mlx5e_priv *priv = sq->priv;
u8 state;
int err;
@@ -153,14 +154,6 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
if (err)
return err;
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
- if (err)
- return err;
-
err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
if (err)
return err;
@@ -193,7 +186,24 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
if (err)
return err;
- err = mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
+ return mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
+}
+
+static int
+mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
+ struct mlx5e_txqsq *sq, int tc)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
+ if (err)
+ return err;
+
+ err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc);
if (err)
return err;
@@ -204,49 +214,147 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
return 0;
}
-static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg,
- struct netlink_ext_ack *extack)
+static int
+mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg,
+ struct mlx5e_ptpsq *ptpsq, int tc)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
- u32 sq_stride, sq_sz;
+ int err;
- int i, tc, err = 0;
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
- mutex_lock(&priv->state_lock);
+ err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
+ if (err)
+ return err;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
+ err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc);
+ if (err)
+ return err;
- sq_sz = mlx5_wq_cyc_get_size(&generic_sq->wq);
- sq_stride = MLX5_SEND_WQE_BB;
+ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
+ if (err)
+ return err;
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
+ err = mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg);
if (err)
- goto unlock;
+ return err;
+
+ err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
+ struct mlx5e_txqsq *txqsq)
+{
+ u32 sq_stride, sq_sz;
+ int err;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
if (err)
- goto unlock;
+ return err;
+
+ sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
+ sq_stride = MLX5_SEND_WQE_BB;
err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
if (err)
- goto unlock;
+ return err;
err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
if (err)
- goto unlock;
+ return err;
- err = mlx5e_health_cq_common_diag_fmsg(&generic_sq->cq, fmsg);
+ err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
if (err)
- goto unlock;
+ return err;
+
+ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static int
+mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg,
+ struct mlx5e_ptpsq *ptpsq)
+{
+ int err;
+
+ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
+ if (err)
+ return err;
+
+ err = mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static int
+mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
+ struct mlx5e_ptpsq *generic_ptpsq;
+ int err;
+
+ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
+ if (err)
+ return err;
+
+ err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq);
+ if (err)
+ return err;
+
+ generic_ptpsq = priv->channels.port_ptp ?
+ &priv->channels.port_ptp->ptpsq[0] :
+ NULL;
+ if (!generic_ptpsq)
+ goto out;
+
+ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
+ if (err)
+ return err;
+
+ err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq);
+ if (err)
+ return err;
+
+ err = mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq);
+ if (err)
+ return err;
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err)
+ return err;
+
+out:
+ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp;
+
+ int i, tc, err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ err = mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
if (err)
goto unlock;
@@ -265,6 +373,19 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
goto unlock;
}
}
+
+ if (!ptp_ch)
+ goto close_sqs_nest;
+
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
+ &ptp_ch->ptpsq[tc],
+ tc);
+ if (err)
+ goto unlock;
+ }
+
+close_sqs_nest:
err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err)
goto unlock;
@@ -338,6 +459,7 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
struct devlink_fmsg *fmsg)
{
+ struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp;
struct mlx5_rsc_key key = {};
int i, tc, err;
@@ -373,6 +495,17 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
return err;
}
}
+
+ if (ptp_ch) {
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;
+
+ err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
+ if (err)
+ return err;
+ }
+ }
+
return devlink_fmsg_arr_pair_nest_end(fmsg);
}
@@ -396,8 +529,8 @@ static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter,
void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
{
- struct mlx5e_priv *priv = sq->channel->priv;
char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_priv *priv = sq->priv;
struct mlx5e_err_ctx err_ctx = {};
err_ctx.ctx = sq;
@@ -410,9 +543,9 @@ void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
{
- struct mlx5e_priv *priv = sq->channel->priv;
char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
struct mlx5e_tx_timeout_ctx to_ctx = {};
+ struct mlx5e_priv *priv = sq->priv;
struct mlx5e_err_ctx err_ctx = {};
to_ctx.sq = sq;
@@ -421,7 +554,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
err_ctx.dump = mlx5e_tx_reporter_dump_sq;
snprintf(err_str, sizeof(err_str),
"TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
- sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+ sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
jiffies_to_usecs(jiffies - sq->txq->trans_start));
mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 07ee1d236ab3..7943eb30b837 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -24,6 +24,8 @@
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
+#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
+
enum mlx5e_icosq_wqe_type {
MLX5E_ICOSQ_WQE_NOP,
MLX5E_ICOSQ_WQE_UMR_RX,
@@ -250,21 +252,24 @@ mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
dma->type = map_type;
}
-static inline struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_txqsq *sq, u16 i)
+static inline
+struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i)
{
- return &sq->db.skb_fifo[i & sq->skb_fifo_mask];
+ return &fifo->fifo[i & fifo->mask];
}
-static inline void mlx5e_skb_fifo_push(struct mlx5e_txqsq *sq, struct sk_buff *skb)
+static inline
+void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
{
- struct sk_buff **skb_item = mlx5e_skb_fifo_get(sq, sq->skb_fifo_pc++);
+ struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++);
*skb_item = skb;
}
-static inline struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_txqsq *sq)
+static inline
+struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
{
- return *mlx5e_skb_fifo_get(sq, sq->skb_fifo_cc++);
+ return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
}
static inline void
@@ -308,7 +313,7 @@ static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
- netdev_err(cq->channel->netdev,
+ netdev_err(cq->netdev,
"Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
cq->mcq.cqn, ci, qn,
get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index be3465ba38ca..d87c345878d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -49,8 +49,11 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
struct mlx5e_channel *c)
{
struct mlx5e_channel_param *cparam;
+ struct mlx5e_create_cq_param ccp;
int err;
+ mlx5e_build_create_cq_param(&ccp, c);
+
if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
return -EINVAL;
@@ -60,7 +63,8 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
mlx5e_build_xsk_cparam(priv, params, xsk, cparam);
- err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->xskrq.cq);
+ err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
+ &c->xskrq.cq);
if (unlikely(err))
goto err_free_cparam;
@@ -68,7 +72,8 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
goto err_close_rx_cq;
- err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xsksq.cq);
+ err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
+ &c->xsksq.cq);
if (unlikely(err))
goto err_close_rq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index f51c04284e4d..2b51d3222ca1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -276,7 +276,7 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
goto err_out;
- if (mlx5_accel_is_ktls_tx(sq->channel->mdev))
+ if (mlx5_accel_is_ktls_tx(sq->mdev))
return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
/* FPGA */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 42e61dc28ead..d9076d543104 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -41,9 +41,7 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
{
struct mlx5_core_dev *mdev = priv->mdev;
- strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRIVER_VERSION,
- sizeof(drvinfo->version));
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
@@ -1946,6 +1944,38 @@ static int set_pflag_skb_tx_mpwqe(struct net_device *netdev, bool enable)
return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_SKB_TX_MPWQE, enable);
}
+static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channels new_channels = {};
+ int err;
+
+ if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
+ return -EOPNOTSUPP;
+
+ new_channels.params = priv->channels.params;
+ MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_TX_PORT_TS, enable);
+	/* No need to verify SQ stop room as
+	 * ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both
+	 * have the same log_sq_size.
+	 */
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ err = mlx5e_num_channels_changed(priv);
+ goto out;
+ }
+
+ err = mlx5e_safe_switch_channels(priv, &new_channels,
+ mlx5e_num_channels_changed_ctx, NULL);
+out:
+ if (!err)
+ priv->port_ptp_opened = true;
+
+ return err;
+}
+
static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
{ "rx_cqe_moder", set_pflag_rx_cqe_based_moder },
{ "tx_cqe_moder", set_pflag_tx_cqe_based_moder },
@@ -1954,6 +1984,7 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
{ "rx_no_csum_complete", set_pflag_rx_no_csum_complete },
{ "xdp_tx_mpwqe", set_pflag_xdp_tx_mpwqe },
{ "skb_tx_mpwqe", set_pflag_skb_tx_mpwqe },
+ { "tx_port_ts", set_pflag_tx_port_ts },
};
static int mlx5e_handle_pflag(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1f48f99c0997..fa8149f6eb08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -772,25 +772,31 @@ static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
};
-bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type)
+u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt)
+{
+ return ttc_tunnel_rules[tt].proto;
+}
+
+static bool mlx5e_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev, u8 proto_type)
{
switch (proto_type) {
case IPPROTO_GRE:
return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
case IPPROTO_IPIP:
case IPPROTO_IPV6:
- return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip);
+ return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
default:
return false;
}
}
-bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev)
+static bool mlx5e_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
{
int tt;
for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
- if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto))
+ if (mlx5e_tunnel_proto_supported_rx(mdev, ttc_tunnel_rules[tt].proto))
return true;
}
return false;
@@ -798,7 +804,7 @@ bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev)
bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
- return (mlx5e_any_tunnel_proto_supported(mdev) &&
+ return (mlx5e_tunnel_any_rx_proto_supported(mdev) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}
@@ -899,8 +905,8 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = params->inner_ttc->ft.t;
for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
- if (!mlx5e_tunnel_proto_supported(priv->mdev,
- ttc_tunnel_rules[tt].proto))
+ if (!mlx5e_tunnel_proto_supported_rx(priv->mdev,
+ ttc_tunnel_rules[tt].proto))
continue;
trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_tunnel_rules[tt].etype,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 427fc376fe1a..03831650f655 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -64,6 +64,7 @@
#include "en/hv_vhca_stats.h"
#include "en/devlink.h"
#include "lib/mlx5.h"
+#include "en/ptp.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
@@ -412,9 +413,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->wq_type = params->rq_wq_type;
rq->pdev = c->pdev;
rq->netdev = c->netdev;
+ rq->priv = c->priv;
rq->tstamp = c->tstamp;
rq->clock = &mdev->clock;
- rq->channel = c;
+ rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
@@ -613,14 +615,11 @@ err_rq_xdp_prog:
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
- struct mlx5e_channel *c = rq->channel;
- struct bpf_prog *old_prog = NULL;
+ struct bpf_prog *old_prog;
int i;
- /* drop_rq has neither channel nor xdp_prog. */
- if (c)
- old_prog = rcu_dereference_protected(rq->xdp_prog,
- lockdep_is_held(&c->priv->state_lock));
+ old_prog = rcu_dereference_protected(rq->xdp_prog,
+ lockdep_is_held(&rq->priv->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
@@ -720,9 +719,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
- struct mlx5e_channel *c = rq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_dev *mdev = rq->mdev;
void *in;
void *rqc;
@@ -751,8 +748,7 @@ static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
- struct mlx5e_channel *c = rq->channel;
- struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_core_dev *mdev = rq->mdev;
void *in;
void *rqc;
int inlen;
@@ -786,7 +782,6 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{
unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
- struct mlx5e_channel *c = rq->channel;
u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
@@ -797,8 +792,8 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
msleep(20);
} while (time_before(jiffies, exp_time));
- netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
- c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
+ netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
+ rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
mlx5e_reporter_rx_timeout(rq);
return -ETIMEDOUT;
@@ -913,7 +908,7 @@ err_free_rq:
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- mlx5e_trigger_irq(&rq->channel->icosq);
+ mlx5e_trigger_irq(rq->icosq);
}
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -925,7 +920,7 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
cancel_work_sync(&rq->dim.work);
- cancel_work_sync(&rq->channel->icosq.recover_work);
+ cancel_work_sync(&rq->icosq->recover_work);
cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
@@ -1089,14 +1084,14 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
mlx5_wq_destroy(&sq->wq_ctrl);
}
-static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
+void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
kvfree(sq->db.wqe_info);
- kvfree(sq->db.skb_fifo);
+ kvfree(sq->db.skb_fifo.fifo);
kvfree(sq->db.dma_fifo);
}
-static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
+int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
@@ -1104,24 +1099,26 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
sizeof(*sq->db.dma_fifo)),
GFP_KERNEL, numa);
- sq->db.skb_fifo = kvzalloc_node(array_size(df_sz,
- sizeof(*sq->db.skb_fifo)),
+ sq->db.skb_fifo.fifo = kvzalloc_node(array_size(df_sz,
+ sizeof(*sq->db.skb_fifo.fifo)),
GFP_KERNEL, numa);
sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
sizeof(*sq->db.wqe_info)),
GFP_KERNEL, numa);
- if (!sq->db.dma_fifo || !sq->db.skb_fifo || !sq->db.wqe_info) {
+ if (!sq->db.dma_fifo || !sq->db.skb_fifo.fifo || !sq->db.wqe_info) {
mlx5e_free_txqsq_db(sq);
return -ENOMEM;
}
sq->dma_fifo_mask = df_sz - 1;
- sq->skb_fifo_mask = df_sz - 1;
+
+ sq->db.skb_fifo.pc = &sq->skb_fifo_pc;
+ sq->db.skb_fifo.cc = &sq->skb_fifo_cc;
+ sq->db.skb_fifo.mask = df_sz - 1;
return 0;
}
-static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int txq_ix,
struct mlx5e_params *params,
@@ -1138,7 +1135,9 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->tstamp = c->tstamp;
sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
- sq->channel = c;
+ sq->netdev = c->netdev;
+ sq->mdev = c->mdev;
+ sq->priv = c->priv;
sq->ch_ix = c->ix;
sq->txq_ix = txq_ix;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
@@ -1177,20 +1176,12 @@ err_sq_wq_destroy:
return err;
}
-static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
+void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
mlx5e_free_txqsq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
}
-struct mlx5e_create_sq_param {
- struct mlx5_wq_ctrl *wq_ctrl;
- u32 cqn;
- u32 tisn;
- u8 tis_lst_sz;
- u8 min_inline_mode;
-};
-
static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param,
struct mlx5e_create_sq_param *csp,
@@ -1215,6 +1206,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
MLX5_SET(sqc, sqc, cqn, csp->cqn);
+ MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn);
if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
@@ -1272,10 +1264,10 @@ static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
mlx5_core_destroy_sq(mdev, sqn);
}
-static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
- struct mlx5e_sq_param *param,
- struct mlx5e_create_sq_param *csp,
- u32 *sqn)
+int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_create_sq_param *csp,
+ u32 *sqn)
{
struct mlx5e_modify_sq_param msp = {0};
int err;
@@ -1338,7 +1330,7 @@ err_free_txqsq:
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
- sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
+ sq->txq = netdev_get_tx_queue(sq->netdev, sq->txq_ix);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
@@ -1351,7 +1343,7 @@ void mlx5e_tx_disable_queue(struct netdev_queue *txq)
__netif_tx_unlock_bh(txq);
}
-static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
+void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -1376,8 +1368,7 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
- struct mlx5e_channel *c = sq->channel;
- struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_core_dev *mdev = sq->mdev;
struct mlx5_rate_limit rl = {0};
cancel_work_sync(&sq->dim.work);
@@ -1391,7 +1382,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
mlx5e_free_txqsq(sq);
}
-static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
+void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
{
struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
recover_work);
@@ -1518,10 +1509,11 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
mlx5e_free_xdpsq(sq);
}
-static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
+static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int eqn_not_used;
unsigned int irqn;
@@ -1554,25 +1546,27 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
}
cq->mdev = mdev;
+ cq->netdev = priv->netdev;
+ cq->priv = priv;
return 0;
}
-static int mlx5e_alloc_cq(struct mlx5e_channel *c,
+static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param,
+ struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq)
{
- struct mlx5_core_dev *mdev = c->priv->mdev;
int err;
- param->wq.buf_numa_node = cpu_to_node(c->cpu);
- param->wq.db_numa_node = cpu_to_node(c->cpu);
- param->eq_ix = c->ix;
+ param->wq.buf_numa_node = ccp->node;
+ param->wq.db_numa_node = ccp->node;
+ param->eq_ix = ccp->ix;
- err = mlx5e_alloc_cq_common(mdev, param, cq);
+ err = mlx5e_alloc_cq_common(priv, param, cq);
- cq->napi = &c->napi;
- cq->channel = c;
+ cq->napi = ccp->napi;
+ cq->ch_stats = ccp->ch_stats;
return err;
}
@@ -1636,13 +1630,14 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}
-int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
- struct mlx5e_cq_param *param, struct mlx5e_cq *cq)
+int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
+ struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
+ struct mlx5e_cq *cq)
{
- struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
int err;
- err = mlx5e_alloc_cq(c, param, cq);
+ err = mlx5e_alloc_cq(priv, param, ccp, cq);
if (err)
return err;
@@ -1668,14 +1663,15 @@ void mlx5e_close_cq(struct mlx5e_cq *cq)
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
+ struct mlx5e_create_cq_param *ccp,
struct mlx5e_channel_param *cparam)
{
int err;
int tc;
for (tc = 0; tc < c->num_tc; tc++) {
- err = mlx5e_open_cq(c, params->tx_cq_moderation,
- &cparam->txq_sq.cqp, &c->sq[tc].cq);
+ err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp,
+ ccp, &c->sq[tc].cq);
if (err)
goto err_close_tx_cqs;
}
@@ -1810,35 +1806,52 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
+void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
+{
+ *ccp = (struct mlx5e_create_cq_param) {
+ .napi = &c->napi,
+ .ch_stats = c->stats,
+ .node = cpu_to_node(c->cpu),
+ .ix = c->ix,
+ };
+}
+
static int mlx5e_open_queues(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
{
struct dim_cq_moder icocq_moder = {0, 0};
+ struct mlx5e_create_cq_param ccp;
int err;
- err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq.cqp, &c->async_icosq.cq);
+ mlx5e_build_create_cq_param(&ccp, c);
+
+ err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
+ &c->async_icosq.cq);
if (err)
return err;
- err = mlx5e_open_cq(c, icocq_moder, &cparam->async_icosq.cqp, &c->icosq.cq);
+ err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
+ &c->icosq.cq);
if (err)
goto err_close_async_icosq_cq;
- err = mlx5e_open_tx_cqs(c, params, cparam);
+ err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
if (err)
goto err_close_icosq_cq;
- err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xdpsq.cq);
+ err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
+ &c->xdpsq.cq);
if (err)
goto err_close_tx_cqs;
- err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->rq.cq);
+ err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
+ &c->rq.cq);
if (err)
goto err_close_xdp_tx_cqs;
- err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
- &cparam->xdp_sq.cqp, &c->rq_xdpsq.cq) : 0;
+ err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
+ &ccp, &c->rq_xdpsq.cq) : 0;
if (err)
goto err_close_rx_cq;
@@ -2361,6 +2374,13 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
+ if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS)) {
+ err = mlx5e_port_ptp_open(priv, &chs->params, chs->c[0]->lag_port,
+ &chs->port_ptp);
+ if (err)
+ goto err_close_channels;
+ }
+
mlx5e_health_channels_update(priv);
kvfree(cparam);
return 0;
@@ -2382,6 +2402,9 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs)
for (i = 0; i < chs->num; i++)
mlx5e_activate_channel(chs->c[i]);
+
+ if (chs->port_ptp)
+ mlx5e_ptp_activate_channel(chs->port_ptp);
}
#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
@@ -2408,6 +2431,9 @@ static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
int i;
+ if (chs->port_ptp)
+ mlx5e_ptp_deactivate_channel(chs->port_ptp);
+
for (i = 0; i < chs->num; i++)
mlx5e_deactivate_channel(chs->c[i]);
}
@@ -2416,6 +2442,9 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
{
int i;
+ if (chs->port_ptp)
+ mlx5e_port_ptp_close(chs->port_ptp);
+
for (i = 0; i < chs->num; i++)
mlx5e_close_channel(chs->c[i]);
@@ -2901,6 +2930,8 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
num_txqs = nch * ntc;
+ if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
+ num_txqs += ntc;
num_rxqs = nch * priv->profile->rq_groups;
mlx5e_netdev_set_tcs(netdev, nch, ntc);
@@ -2974,14 +3005,13 @@ MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed);
static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
{
- int i, ch;
+ int i, ch, tc, num_tc;
ch = priv->channels.num;
+ num_tc = priv->channels.params.num_tc;
for (i = 0; i < ch; i++) {
- int tc;
-
- for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_channel *c = priv->channels.c[i];
struct mlx5e_txqsq *sq = &c->sq[tc];
@@ -2989,10 +3019,29 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
priv->channel_tc2realtxq[i][tc] = i + tc * ch;
}
}
+
+ if (!priv->channels.port_ptp)
+ return;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ struct mlx5e_port_ptp *c = priv->channels.port_ptp;
+ struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
+
+ priv->txq2sq[sq->txq_ix] = sq;
+ priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc;
+ }
+}
+
+static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv)
+{
+ /* Sync with mlx5e_select_queue. */
+ WRITE_ONCE(priv->num_tc_x_num_ch,
+ priv->channels.params.num_tc * priv->channels.num);
}
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
+ mlx5e_update_num_tc_x_num_ch(priv);
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
mlx5e_xdp_tx_enable(priv);
@@ -3112,7 +3161,7 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
mlx5_set_port_admin_status(mdev, state);
- if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
+ if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY)
return;
if (state == MLX5_PORT_UP)
@@ -3196,6 +3245,11 @@ int mlx5e_close(struct net_device *netdev)
return err;
}
+static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
+{
+ mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
struct mlx5e_rq *rq,
struct mlx5e_rq_param *param)
@@ -3219,14 +3273,16 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
return 0;
}
-static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
+static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
+
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- return mlx5e_alloc_cq_common(mdev, param, cq);
+ return mlx5e_alloc_cq_common(priv, param, cq);
}
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
@@ -3240,7 +3296,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
mlx5e_build_drop_rq_param(priv, &rq_param);
- err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
+ err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
if (err)
return err;
@@ -3263,7 +3319,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
return 0;
err_free_rq:
- mlx5e_free_rq(drop_rq);
+ mlx5e_free_drop_rq(drop_rq);
err_destroy_cq:
mlx5e_destroy_cq(cq);
@@ -3277,7 +3333,7 @@ err_free_cq:
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
mlx5e_destroy_rq(drop_rq);
- mlx5e_free_rq(drop_rq);
+ mlx5e_free_drop_rq(drop_rq);
mlx5e_destroy_cq(&drop_rq->cq);
mlx5e_free_cq(&drop_rq->cq);
}
@@ -4231,6 +4287,20 @@ int mlx5e_get_vf_stats(struct net_device *dev,
}
#endif
+static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type)
+{
+ switch (proto_type) {
+ case IPPROTO_GRE:
+ return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_tx));
+ default:
+ return false;
+ }
+}
+
static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev,
struct sk_buff *skb)
{
@@ -4273,7 +4343,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
break;
case IPPROTO_IPIP:
case IPPROTO_IPV6:
- if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))
+ if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP))
return features;
break;
case IPPROTO_UDP:
@@ -4322,6 +4392,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
tx_timeout_work);
+ struct net_device *netdev = priv->netdev;
int i;
rtnl_lock();
@@ -4330,9 +4401,9 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
- for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
struct netdev_queue *dev_queue =
- netdev_get_tx_queue(priv->netdev, i);
+ netdev_get_tx_queue(netdev, i);
struct mlx5e_txqsq *sq = priv->txq2sq[i];
if (!netif_xmit_stopped(dev_queue))
@@ -4392,7 +4463,7 @@ static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog
struct bpf_prog *old_prog;
old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
- lockdep_is_held(&rq->channel->priv->state_lock));
+ lockdep_is_held(&rq->priv->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
}
@@ -4577,31 +4648,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_get_devlink_port = mlx5e_get_devlink_port,
};
-static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
-{
- if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return -EOPNOTSUPP;
- if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
- !MLX5_CAP_GEN(mdev, nic_flow_table) ||
- !MLX5_CAP_ETH(mdev, csum_cap) ||
- !MLX5_CAP_ETH(mdev, max_lso_cap) ||
- !MLX5_CAP_ETH(mdev, vlan_cap) ||
- !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
- MLX5_CAP_FLOWTABLE(mdev,
- flow_table_properties_nic_receive.max_ft_level)
- < 3) {
- mlx5_core_warn(mdev,
- "Not creating net device, some required device capabilities are missing\n");
- return -EOPNOTSUPP;
- }
- if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
- mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
- if (!MLX5_CAP_GEN(mdev, cq_moderation))
- mlx5_core_warn(mdev, "CQ moderation is not supported\n");
-
- return 0;
-}
-
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels)
{
@@ -4857,6 +4903,17 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
}
+static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
+{
+ int tt;
+
+ for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
+ if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5e_get_proto_by_tunnel_type(tt)))
+ return true;
+ }
+ return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
+}
+
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4902,8 +4959,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_vxlan_set_netdev_info(priv);
- if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
- mlx5e_any_tunnel_proto_supported(mdev)) {
+ if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
netdev->hw_enc_features |= NETIF_F_TSO6;
@@ -4920,7 +4976,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
- if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
+ if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
netdev->hw_features |= NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_GRE |
@@ -4929,7 +4985,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM;
}
- if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) {
+ if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6;
netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
@@ -5314,10 +5370,14 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
void *ppriv)
{
struct net_device *netdev;
+ unsigned int ptp_txqs = 0;
int err;
+ if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
+ ptp_txqs = profile->max_tc;
+
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
- nch * profile->max_tc,
+ nch * profile->max_tc + ptp_txqs,
nch * profile->rq_groups);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
@@ -5421,13 +5481,12 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
free_netdev(netdev);
}
-/* mlx5e_attach and mlx5e_detach scope should be only creating/destroying
- * hardware contexts and to connect it to the current netdev.
- */
-static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
+static int mlx5e_resume(struct auxiliary_device *adev)
{
- struct mlx5e_priv *priv = vpriv;
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = edev->mdev;
int err;
if (netif_device_present(netdev))
@@ -5446,109 +5505,111 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
return 0;
}
-static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
+static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- struct mlx5e_priv *priv = vpriv;
+ struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
struct net_device *netdev = priv->netdev;
-
-#ifdef CONFIG_MLX5_ESWITCH
- if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev)
- return;
-#endif
+ struct mlx5_core_dev *mdev = priv->mdev;
if (!netif_device_present(netdev))
- return;
+ return -ENODEV;
mlx5e_detach_netdev(priv);
mlx5e_destroy_mdev_resources(mdev);
+ return 0;
}
-static void *mlx5e_add(struct mlx5_core_dev *mdev)
+static int mlx5e_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
struct net_device *netdev;
+ pm_message_t state = {};
void *priv;
int err;
int nch;
- err = mlx5e_check_required_hca_cap(mdev);
- if (err)
- return NULL;
-
-#ifdef CONFIG_MLX5_ESWITCH
- if (MLX5_ESWITCH_MANAGER(mdev) &&
- mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
- mlx5e_rep_register_vport_reps(mdev);
- return mdev;
- }
-#endif
-
nch = mlx5e_get_max_num_channels(mdev);
netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
- return NULL;
+ return -ENOMEM;
}
dev_net_set(netdev, mlx5_core_net(mdev));
priv = netdev_priv(netdev);
+ dev_set_drvdata(&adev->dev, priv);
- err = mlx5e_attach(mdev, priv);
+ err = mlx5e_resume(adev);
if (err) {
- mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
+ mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err);
goto err_destroy_netdev;
}
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
- goto err_detach;
+ goto err_resume;
}
mlx5e_devlink_port_type_eth_set(priv);
mlx5e_dcbnl_init_app(priv);
- return priv;
+ return 0;
-err_detach:
- mlx5e_detach(mdev, priv);
+err_resume:
+ mlx5e_suspend(adev, state);
err_destroy_netdev:
mlx5e_destroy_netdev(priv);
- return NULL;
+ return err;
}
-static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
+static void mlx5e_remove(struct auxiliary_device *adev)
{
- struct mlx5e_priv *priv;
+ struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+ pm_message_t state = {};
-#ifdef CONFIG_MLX5_ESWITCH
- if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) {
- mlx5e_rep_unregister_vport_reps(mdev);
- return;
- }
-#endif
- priv = vpriv;
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
- mlx5e_detach(mdev, vpriv);
+ mlx5e_suspend(adev, state);
mlx5e_destroy_netdev(priv);
}
-static struct mlx5_interface mlx5e_interface = {
- .add = mlx5e_add,
- .remove = mlx5e_remove,
- .attach = mlx5e_attach,
- .detach = mlx5e_detach,
- .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
+static const struct auxiliary_device_id mlx5e_id_table[] = {
+ { .name = MLX5_ADEV_NAME ".eth", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table);
+
+static struct auxiliary_driver mlx5e_driver = {
+ .name = "eth",
+ .probe = mlx5e_probe,
+ .remove = mlx5e_remove,
+ .suspend = mlx5e_suspend,
+ .resume = mlx5e_resume,
+ .id_table = mlx5e_id_table,
};
-void mlx5e_init(void)
+int mlx5e_init(void)
{
+ int ret;
+
mlx5e_ipsec_build_inverse_table();
mlx5e_build_ptys2ethtool_map();
- mlx5_register_interface(&mlx5e_interface);
+ ret = mlx5e_rep_init();
+ if (ret)
+ return ret;
+
+ ret = auxiliary_driver_register(&mlx5e_driver);
+ if (ret)
+ mlx5e_rep_cleanup();
+ return ret;
}
void mlx5e_cleanup(void)
{
- mlx5_unregister_interface(&mlx5e_interface);
+ auxiliary_driver_unregister(&mlx5e_driver);
+ mlx5e_rep_cleanup();
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 67247c33b9fd..989c70c1eda3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -64,7 +64,6 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
fw_rev_maj(mdev), fw_rev_min(mdev),
@@ -1316,16 +1315,48 @@ static const struct mlx5_eswitch_rep_ops rep_ops = {
.get_proto_dev = mlx5e_vport_rep_get_proto_dev
};
-void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
+static int mlx5e_rep_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
{
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct mlx5_eswitch *esw;
+ esw = mdev->priv.eswitch;
mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
+ return 0;
}
-void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
+static void mlx5e_rep_remove(struct auxiliary_device *adev)
{
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = vdev->mdev;
+ struct mlx5_eswitch *esw;
+ esw = mdev->priv.eswitch;
mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}
+
+static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
+ { .name = MLX5_ADEV_NAME ".eth-rep", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);
+
+static struct auxiliary_driver mlx5e_rep_driver = {
+ .name = "eth-rep",
+ .probe = mlx5e_rep_probe,
+ .remove = mlx5e_rep_remove,
+ .id_table = mlx5e_rep_id_table,
+};
+
+int mlx5e_rep_init(void)
+{
+ return auxiliary_driver_register(&mlx5e_rep_driver);
+}
+
+void mlx5e_rep_cleanup(void)
+{
+ auxiliary_driver_unregister(&mlx5e_rep_driver);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 8932c387d46a..988195ab1c54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -203,8 +203,8 @@ struct mlx5e_rep_sq {
struct list_head list;
};
-void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev);
-void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev);
+int mlx5e_rep_init(void);
+void mlx5e_rep_cleanup(void);
int mlx5e_rep_bond_init(struct mlx5e_rep_priv *rpriv);
void mlx5e_rep_bond_cleanup(struct mlx5e_rep_priv *rpriv);
int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev,
@@ -232,6 +232,8 @@ static inline bool mlx5e_eswitch_rep(struct net_device *netdev)
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {}
+static inline int mlx5e_rep_init(void) { return 0; }
+static inline void mlx5e_rep_cleanup(void) {}
#endif
static inline bool mlx5e_is_vport_rep(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 6628a0197b4e..7f5851c61218 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -52,7 +52,6 @@
#include "en/xsk/rx.h"
#include "en/health.h"
#include "en/params.h"
-#include "en/txrx.h"
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
@@ -503,7 +502,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
- struct mlx5e_icosq *sq = &rq->channel->icosq;
+ struct mlx5e_icosq *sq = rq->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
@@ -670,13 +669,13 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
sqcc += wi->num_wqebbs;
if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
- netdev_WARN_ONCE(cq->channel->netdev,
+ netdev_WARN_ONCE(cq->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe));
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
- queue_work(cq->channel->priv->wq, &sq->recover_work);
+ queue_work(cq->priv->wq, &sq->recover_work);
break;
}
@@ -697,7 +696,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
break;
#endif
default:
- netdev_WARN_ONCE(cq->channel->netdev,
+ netdev_WARN_ONCE(cq->netdev,
"Bad WQE type in ICOSQ WQE info: 0x%x\n",
wi->wqe_type);
}
@@ -713,9 +712,9 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
- struct mlx5e_icosq *sq = &rq->channel->icosq;
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
u8 umr_completed = rq->mpwqe.umr_completed;
+ struct mlx5e_icosq *sq = rq->icosq;
int alloc_err = 0;
u8 missing, i;
u16 head;
@@ -1218,11 +1217,12 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
+ struct mlx5e_priv *priv = rq->priv;
if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
!test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
- queue_work(rq->channel->priv->wq, &rq->recover_work);
+ queue_work(priv->wq, &rq->recover_work);
}
}
@@ -1771,8 +1771,9 @@ wq_free_wqe:
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
{
+ struct net_device *netdev = rq->netdev;
struct mlx5_core_dev *mdev = rq->mdev;
- struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = rq->priv;
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -1784,15 +1785,15 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
+ rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
if (MLX5_IPSEC_DEV(mdev)) {
- netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
+ netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
return -EINVAL;
}
#endif
if (!rq->handle_rx_cqe) {
- netdev_err(c->netdev, "RX handler of MPWQE RQ is not set\n");
+ netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
return -EINVAL;
}
break;
@@ -1807,13 +1808,13 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
#ifdef CONFIG_MLX5_EN_IPSEC
if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
- c->priv->ipsec)
+ priv->ipsec)
rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
else
#endif
- rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe;
+ rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
if (!rq->handle_rx_cqe) {
- netdev_err(c->netdev, "RX handler of RQ is not set\n");
+ netdev_err(netdev, "RX handler of RQ is not set\n");
return -EINVAL;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 78f6a6f0a7e0..2cf2042b37c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -248,6 +248,178 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
return idx;
}
+static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
+ struct mlx5e_xdpsq_stats *xdpsq_red_stats)
+{
+ s->tx_xdp_xmit += xdpsq_red_stats->xmit;
+ s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
+ s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
+ s->tx_xdp_nops += xdpsq_red_stats->nops;
+ s->tx_xdp_full += xdpsq_red_stats->full;
+ s->tx_xdp_err += xdpsq_red_stats->err;
+ s->tx_xdp_cqes += xdpsq_red_stats->cqes;
+}
+
+static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
+ struct mlx5e_xdpsq_stats *xdpsq_stats)
+{
+ s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
+ s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
+ s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
+ s->rx_xdp_tx_nops += xdpsq_stats->nops;
+ s->rx_xdp_tx_full += xdpsq_stats->full;
+ s->rx_xdp_tx_err += xdpsq_stats->err;
+ s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
+}
+
+static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
+ struct mlx5e_xdpsq_stats *xsksq_stats)
+{
+ s->tx_xsk_xmit += xsksq_stats->xmit;
+ s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
+ s->tx_xsk_inlnw += xsksq_stats->inlnw;
+ s->tx_xsk_full += xsksq_stats->full;
+ s->tx_xsk_err += xsksq_stats->err;
+ s->tx_xsk_cqes += xsksq_stats->cqes;
+}
+
+static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
+ struct mlx5e_rq_stats *xskrq_stats)
+{
+ s->rx_xsk_packets += xskrq_stats->packets;
+ s->rx_xsk_bytes += xskrq_stats->bytes;
+ s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
+ s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
+ s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
+ s->rx_xsk_csum_none += xskrq_stats->csum_none;
+ s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
+ s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
+ s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
+ s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
+ s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
+ s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
+ s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
+ s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
+ s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
+ s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
+ s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
+ s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
+ s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
+}
+
+static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
+ struct mlx5e_rq_stats *rq_stats)
+{
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
+ s->rx_lro_packets += rq_stats->lro_packets;
+ s->rx_lro_bytes += rq_stats->lro_bytes;
+ s->rx_ecn_mark += rq_stats->ecn_mark;
+ s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
+ s->rx_csum_none += rq_stats->csum_none;
+ s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+ s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
+ s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
+ s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
+ s->rx_xdp_drop += rq_stats->xdp_drop;
+ s->rx_xdp_redirect += rq_stats->xdp_redirect;
+ s->rx_wqe_err += rq_stats->wqe_err;
+ s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
+ s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
+ s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
+ s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+ s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+ s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+ s->rx_cache_reuse += rq_stats->cache_reuse;
+ s->rx_cache_full += rq_stats->cache_full;
+ s->rx_cache_empty += rq_stats->cache_empty;
+ s->rx_cache_busy += rq_stats->cache_busy;
+ s->rx_cache_waive += rq_stats->cache_waive;
+ s->rx_congst_umr += rq_stats->congst_umr;
+ s->rx_arfs_err += rq_stats->arfs_err;
+ s->rx_recover += rq_stats->recover;
+#ifdef CONFIG_MLX5_EN_TLS
+ s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
+ s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
+ s->rx_tls_ctx += rq_stats->tls_ctx;
+ s->rx_tls_del += rq_stats->tls_del;
+ s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
+ s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
+ s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
+ s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
+ s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
+ s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
+ s->rx_tls_err += rq_stats->tls_err;
+#endif
+}
+
+static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
+ struct mlx5e_ch_stats *ch_stats)
+{
+ s->ch_events += ch_stats->events;
+ s->ch_poll += ch_stats->poll;
+ s->ch_arm += ch_stats->arm;
+ s->ch_aff_change += ch_stats->aff_change;
+ s->ch_force_irq += ch_stats->force_irq;
+ s->ch_eq_rearm += ch_stats->eq_rearm;
+}
+
+static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
+ struct mlx5e_sq_stats *sq_stats)
+{
+ s->tx_packets += sq_stats->packets;
+ s->tx_bytes += sq_stats->bytes;
+ s->tx_tso_packets += sq_stats->tso_packets;
+ s->tx_tso_bytes += sq_stats->tso_bytes;
+ s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+ s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
+ s->tx_nop += sq_stats->nop;
+ s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
+ s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
+ s->tx_queue_stopped += sq_stats->stopped;
+ s->tx_queue_wake += sq_stats->wake;
+ s->tx_queue_dropped += sq_stats->dropped;
+ s->tx_cqe_err += sq_stats->cqe_err;
+ s->tx_recover += sq_stats->recover;
+ s->tx_xmit_more += sq_stats->xmit_more;
+ s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+ s->tx_csum_none += sq_stats->csum_none;
+ s->tx_csum_partial += sq_stats->csum_partial;
+#ifdef CONFIG_MLX5_EN_TLS
+ s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
+ s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
+ s->tx_tls_ctx += sq_stats->tls_ctx;
+ s->tx_tls_ooo += sq_stats->tls_ooo;
+ s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
+ s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
+ s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
+ s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
+ s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
+ s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
+#endif
+ s->tx_cqes += sq_stats->cqes;
+}
+
+static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
+ struct mlx5e_sw_stats *s)
+{
+ int i;
+
+ if (!priv->port_ptp_opened)
+ return;
+
+ mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->port_ptp_stats.ch);
+
+ for (i = 0; i < priv->max_opened_tc; i++) {
+ mlx5e_stats_grp_sw_update_stats_sq(s, &priv->port_ptp_stats.sq[i]);
+
+ /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
+ barrier();
+ }
+}
+
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -258,144 +430,25 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
for (i = 0; i < priv->max_nch; i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
- struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
- struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
- struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq;
- struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
- struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
- struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
int j;
- s->rx_packets += rq_stats->packets;
- s->rx_bytes += rq_stats->bytes;
- s->rx_lro_packets += rq_stats->lro_packets;
- s->rx_lro_bytes += rq_stats->lro_bytes;
- s->rx_ecn_mark += rq_stats->ecn_mark;
- s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
- s->rx_csum_none += rq_stats->csum_none;
- s->rx_csum_complete += rq_stats->csum_complete;
- s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
- s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
- s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
- s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
- s->rx_xdp_drop += rq_stats->xdp_drop;
- s->rx_xdp_redirect += rq_stats->xdp_redirect;
- s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
- s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
- s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
- s->rx_xdp_tx_nops += xdpsq_stats->nops;
- s->rx_xdp_tx_full += xdpsq_stats->full;
- s->rx_xdp_tx_err += xdpsq_stats->err;
- s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
- s->rx_wqe_err += rq_stats->wqe_err;
- s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
- s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
- s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
- s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
- s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
- s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
- s->rx_cache_reuse += rq_stats->cache_reuse;
- s->rx_cache_full += rq_stats->cache_full;
- s->rx_cache_empty += rq_stats->cache_empty;
- s->rx_cache_busy += rq_stats->cache_busy;
- s->rx_cache_waive += rq_stats->cache_waive;
- s->rx_congst_umr += rq_stats->congst_umr;
- s->rx_arfs_err += rq_stats->arfs_err;
- s->rx_recover += rq_stats->recover;
-#ifdef CONFIG_MLX5_EN_TLS
- s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
- s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
- s->rx_tls_ctx += rq_stats->tls_ctx;
- s->rx_tls_del += rq_stats->tls_del;
- s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
- s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
- s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
- s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
- s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
- s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
- s->rx_tls_err += rq_stats->tls_err;
-#endif
- s->ch_events += ch_stats->events;
- s->ch_poll += ch_stats->poll;
- s->ch_arm += ch_stats->arm;
- s->ch_aff_change += ch_stats->aff_change;
- s->ch_force_irq += ch_stats->force_irq;
- s->ch_eq_rearm += ch_stats->eq_rearm;
+ mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
+ mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
+ mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
/* xdp redirect */
- s->tx_xdp_xmit += xdpsq_red_stats->xmit;
- s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
- s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
- s->tx_xdp_nops += xdpsq_red_stats->nops;
- s->tx_xdp_full += xdpsq_red_stats->full;
- s->tx_xdp_err += xdpsq_red_stats->err;
- s->tx_xdp_cqes += xdpsq_red_stats->cqes;
+ mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
/* AF_XDP zero-copy */
- s->rx_xsk_packets += xskrq_stats->packets;
- s->rx_xsk_bytes += xskrq_stats->bytes;
- s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
- s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
- s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
- s->rx_xsk_csum_none += xskrq_stats->csum_none;
- s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
- s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
- s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
- s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
- s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
- s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
- s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
- s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
- s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
- s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
- s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
- s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
- s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
- s->tx_xsk_xmit += xsksq_stats->xmit;
- s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
- s->tx_xsk_inlnw += xsksq_stats->inlnw;
- s->tx_xsk_full += xsksq_stats->full;
- s->tx_xsk_err += xsksq_stats->err;
- s->tx_xsk_cqes += xsksq_stats->cqes;
+ mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
+ mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);
for (j = 0; j < priv->max_opened_tc; j++) {
- struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
-
- s->tx_packets += sq_stats->packets;
- s->tx_bytes += sq_stats->bytes;
- s->tx_tso_packets += sq_stats->tso_packets;
- s->tx_tso_bytes += sq_stats->tso_bytes;
- s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
- s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
- s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
- s->tx_nop += sq_stats->nop;
- s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
- s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
- s->tx_queue_stopped += sq_stats->stopped;
- s->tx_queue_wake += sq_stats->wake;
- s->tx_queue_dropped += sq_stats->dropped;
- s->tx_cqe_err += sq_stats->cqe_err;
- s->tx_recover += sq_stats->recover;
- s->tx_xmit_more += sq_stats->xmit_more;
- s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
- s->tx_csum_none += sq_stats->csum_none;
- s->tx_csum_partial += sq_stats->csum_partial;
-#ifdef CONFIG_MLX5_EN_TLS
- s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
- s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
- s->tx_tls_ctx += sq_stats->tls_ctx;
- s->tx_tls_ooo += sq_stats->tls_ooo;
- s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
- s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
- s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
- s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
- s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
- s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
-#endif
- s->tx_cqes += sq_stats->cqes;
+ mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);
/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
barrier();
}
}
+ mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
}
static const struct counter_desc q_stats_desc[] = {
@@ -1656,6 +1709,37 @@ static const struct counter_desc ch_stats_desc[] = {
{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
+static const struct counter_desc ptp_sq_stats_desc[] = {
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
+};
+
+static const struct counter_desc ptp_ch_stats_desc[] = {
+ { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
+ { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
+ { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
+ { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
+};
+
+static const struct counter_desc ptp_cq_stats_desc[] = {
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
+};
+
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
@@ -1663,6 +1747,69 @@ static const struct counter_desc ch_stats_desc[] = {
#define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
+#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
+#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
+#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
+{
+ return priv->port_ptp_opened ?
+ NUM_PTP_CH_STATS +
+ ((NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc) :
+ 0;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
+{
+ int i, tc;
+
+ if (!priv->port_ptp_opened)
+ return idx;
+
+ for (i = 0; i < NUM_PTP_CH_STATS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ ptp_ch_stats_desc[i].format);
+
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < NUM_PTP_SQ_STATS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ ptp_sq_stats_desc[i].format, tc);
+
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < NUM_PTP_CQ_STATS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ ptp_cq_stats_desc[i].format, tc);
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
+{
+ int i, tc;
+
+ if (!priv->port_ptp_opened)
+ return idx;
+
+ for (i = 0; i < NUM_PTP_CH_STATS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.ch,
+ ptp_ch_stats_desc, i);
+
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < NUM_PTP_SQ_STATS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.sq[tc],
+ ptp_sq_stats_desc, i);
+
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < NUM_PTP_CQ_STATS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.cq[tc],
+ ptp_cq_stats_desc, i);
+
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
@@ -1784,6 +1931,7 @@ MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
+static MLX5E_DEFINE_STATS_GRP(ptp, 0);
/* The stats groups order is opposite to the update_stats() order calls */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
@@ -1806,6 +1954,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(tls),
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
+ &MLX5E_STATS_GRP(ptp),
};
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 162daaadb0d8..e41fc11f2ce7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -51,6 +51,10 @@
#define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
+
struct counter_desc {
char format[ETH_GSTRING_LEN];
size_t offset; /* Byte offset */
@@ -398,6 +402,13 @@ struct mlx5e_ch_stats {
u64 eq_rearm;
};
+struct mlx5e_ptp_cq_stats {
+ u64 cqe;
+ u64 err_cqe;
+ u64 abort;
+ u64 abort_abs_diff_ns;
+};
+
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ce710f22b1ff..4cdf834fa74a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -271,8 +271,6 @@ mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
return 0;
}
-#define esw_offloads_mode(esw) (mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
-
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
@@ -280,7 +278,7 @@ get_ct_priv(struct mlx5e_priv *priv)
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
- if (esw_offloads_mode(esw)) {
+ if (is_mdev_switchdev_mode(priv->mdev)) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
@@ -297,7 +295,7 @@ mlx5_tc_rule_insert(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- if (esw_offloads_mode(esw))
+ if (is_mdev_switchdev_mode(priv->mdev))
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
@@ -310,7 +308,7 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- if (esw_offloads_mode(esw)) {
+ if (is_mdev_switchdev_mode(priv->mdev)) {
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
return;
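
Editor's note: the en_tc.c hunks above replace the local esw_offloads_mode() macro with is_mdev_switchdev_mode(), which works because the eswitch.c hunk further down lets mlx5_eswitch_mode() take the mlx5_core_dev directly. The presumed shape of that helper is sketched below; its actual definition lives in a header not shown in this diff, so treat it as an assumption.

	/* Presumed helper used in en_tc.c (definition not part of this diff). */
	static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
	{
		return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
	}
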
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index d97203cf6a00..e47e2a0059d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
+#include <linux/ptp_classify.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
@@ -39,6 +40,7 @@
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "lib/clock.h"
+#include "en/ptp.h"
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
@@ -66,14 +68,73 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
}
#endif
+static bool mlx5e_use_ptpsq(struct sk_buff *skb)
+{
+ struct flow_keys fk;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return false;
+
+ if (fk.basic.n_proto == htons(ETH_P_1588))
+ return true;
+
+ if (fk.basic.n_proto != htons(ETH_P_IP) &&
+ fk.basic.n_proto != htons(ETH_P_IPV6))
+ return false;
+
+ return (fk.basic.ip_proto == IPPROTO_UDP &&
+ fk.ports.dst == htons(PTP_EV_PORT));
+}
+
+static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int up = 0;
+
+ if (!netdev_get_num_tc(dev))
+ goto return_txq;
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
+ up = mlx5e_get_dscp_up(priv, skb);
+ else
+#endif
+ if (skb_vlan_tag_present(skb))
+ up = skb_vlan_tag_get_prio(skb);
+
+return_txq:
+ return priv->port_ptp_tc2realtxq[up];
+}
+
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
- int txq_ix = netdev_pick_tx(dev, skb, NULL);
struct mlx5e_priv *priv = netdev_priv(dev);
+ int txq_ix;
int up = 0;
int ch_ix;
+ if (unlikely(priv->channels.port_ptp)) {
+ int num_tc_x_num_ch;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ mlx5e_use_ptpsq(skb))
+ return mlx5e_select_ptpsq(dev, skb);
+
+ /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
+ num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+ /* netdev_pick_tx() may pick a txq belonging to the PTP channel.
+ * If it does, remap to a regular queue; the driver selects PTP
+ * txqs only in mlx5e_select_ptpsq().
+ */
+ if (unlikely(txq_ix >= num_tc_x_num_ch))
+ txq_ix %= num_tc_x_num_ch;
+ } else {
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+ }
+
if (!netdev_get_num_tc(dev))
return txq_ix;
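
Editor's note: to make the remap in the mlx5e_select_queue() hunk above concrete, here is a small worked example with hypothetical queue counts; the numeric values are illustrative, and only the `%` fold itself comes from the code.

	/* Hypothetical numbers: 8 channels x 2 TCs -> num_tc_x_num_ch = 16,
	 * so the PTP channel's txqs are 16 and 17 in the stack's view.
	 */
	int num_tc_x_num_ch = 16;
	int txq_ix = 17;		/* suppose netdev_pick_tx() picked a PTP txq */

	if (txq_ix >= num_tc_x_num_ch)
		txq_ix %= num_tc_x_num_ch;	/* 17 % 16 = 1: back on a regular txq */
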
@@ -402,6 +463,12 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_tx_check_stop(sq);
+ if (unlikely(sq->ptpsq)) {
+ mlx5e_skb_cb_hwtstamp_init(skb);
+ mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
+ skb_get(skb);
+ }
+
send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
if (send_doorbell)
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
@@ -579,7 +646,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
goto err_unmap;
mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
- mlx5e_skb_fifo_push(sq, skb);
+ mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
@@ -707,7 +774,11 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u64 ts = get_cqe_ts(cqe);
hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts);
- skb_tstamp_tx(skb, &hwts);
+ if (sq->ptpsq)
+ mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
+ hwts.hwtstamp, sq->ptpsq->cq_stats);
+ else
+ skb_tstamp_tx(skb, &hwts);
}
napi_consume_skb(skb, napi_budget);
@@ -719,7 +790,7 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t
int i;
for (i = 0; i < wi->num_fifo_pkts; i++) {
- struct sk_buff *skb = mlx5e_skb_fifo_pop(sq);
+ struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo);
mlx5e_consume_skb(sq, skb, cqe, napi_budget);
}
@@ -805,8 +876,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
- queue_work(cq->channel->priv->wq,
- &sq->recover_work);
+ queue_work(cq->priv->wq, &sq->recover_work);
}
stats->cqe_err++;
}
@@ -840,7 +910,7 @@ static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_
int i;
for (i = 0; i < wi->num_fifo_pkts; i++)
- dev_kfree_skb_any(mlx5e_skb_fifo_pop(sq));
+ dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo));
}
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index d5868670f8a5..1ec3d62f026d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -221,14 +221,13 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
napi_schedule(cq->napi);
cq->event_ctr++;
- cq->channel->stats->events++;
+ cq->ch_stats->events++;
}
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
- struct mlx5e_channel *c = cq->channel;
- struct net_device *netdev = c->netdev;
+ struct net_device *netdev = cq->netdev;
netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
__func__, mcq->cqn, event);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 4ea5d6ddf56a..fc0afa03d407 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -136,7 +136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
eqe = next_eqe_sw(eq);
if (!eqe)
- goto out;
+ return 0;
do {
struct mlx5_core_cq *cq;
@@ -161,8 +161,6 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
-
-out:
eq_update_ci(eq, 1);
if (cqn != -1)
@@ -250,9 +248,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+ eq_update_ci(eq, 1);
out:
- eq_update_ci(eq, 1);
mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
return unlikely(recovery) ? num_eqes : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index d46f8b225ebe..2b85d4777303 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -101,7 +101,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
vport->egress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
table_size);
- if (IS_ERR_OR_NULL(vport->egress.acl)) {
+ if (IS_ERR(vport->egress.acl)) {
err = PTR_ERR(vport->egress.acl);
vport->egress.acl = NULL;
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
index c3faae67e4d6..4c74e2690d57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
@@ -173,7 +173,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
table_size++;
vport->egress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
- if (IS_ERR_OR_NULL(vport->egress.acl)) {
+ if (IS_ERR(vport->egress.acl)) {
err = PTR_ERR(vport->egress.acl);
vport->egress.acl = NULL;
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index b68976b378b8..d64fad2823e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -180,7 +180,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
table_size);
- if (IS_ERR_OR_NULL(vport->ingress.acl)) {
+ if (IS_ERR(vport->ingress.acl)) {
err = PTR_ERR(vport->ingress.acl);
vport->ingress.acl = NULL;
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index 4e55d7225a26..548c005ea633 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -258,7 +258,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
num_ftes);
- if (IS_ERR_OR_NULL(vport->ingress.acl)) {
+ if (IS_ERR(vport->ingress.acl)) {
err = PTR_ERR(vport->ingress.acl);
vport->ingress.acl = NULL;
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7f8c4a957f72..da901e364656 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1641,8 +1641,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
if (mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
- mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
- mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_rescan_drivers(esw->dev);
err = esw_offloads_enable(esw);
}
@@ -1660,10 +1659,9 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
abort:
esw->mode = MLX5_ESWITCH_NONE;
- if (mode == MLX5_ESWITCH_OFFLOADS) {
- mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
- }
+ if (mode == MLX5_ESWITCH_OFFLOADS)
+ mlx5_rescan_drivers(esw->dev);
+
esw_destroy_tsar(esw);
return err;
}
@@ -1724,10 +1722,9 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
mlx5_lag_update(esw->dev);
- if (old_mode == MLX5_ESWITCH_OFFLOADS) {
- mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
- }
+ if (old_mode == MLX5_ESWITCH_OFFLOADS)
+ mlx5_rescan_drivers(esw->dev);
+
esw_destroy_tsar(esw);
if (clear_vf)
@@ -2466,8 +2463,10 @@ free_out:
return err;
}
-u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
+u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index c9c2962ad49f..2f6a0ae20650 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -484,7 +484,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
}
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = fwd_fdb,
+ dest[i].ft = fwd_fdb;
i++;
mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
@@ -1680,7 +1680,6 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
goto out_free;
}
- memset(flow_group_in, 0, inlen);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
match_criteria);
misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index cac8f085b16d..97d96fc38a65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -39,7 +39,7 @@ static void mlx5i_get_drvinfo(struct net_device *dev,
struct mlx5e_priv *priv = mlx5i_epriv(dev);
mlx5e_ethtool_get_drvinfo(priv, drvinfo);
- strlcpy(drvinfo->driver, DRIVER_NAME "[ib_ipoib]",
+ strlcpy(drvinfo->driver, KBUILD_MODNAME "[ib_ipoib]",
sizeof(drvinfo->driver));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 33081b24f10a..f3d45ef082cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -243,24 +243,30 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
#endif
}
-static void mlx5_lag_add_ib_devices(struct mlx5_lag *ldev)
+static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < MLX5_MAX_PORTS; i++)
- if (ldev->pf[i].dev)
- mlx5_add_dev_by_protocol(ldev->pf[i].dev,
- MLX5_INTERFACE_PROTOCOL_IB);
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ if (!ldev->pf[i].dev)
+ continue;
+
+ ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(ldev->pf[i].dev);
+ }
}
-static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev)
+static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < MLX5_MAX_PORTS; i++)
- if (ldev->pf[i].dev)
- mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
- MLX5_INTERFACE_PROTOCOL_IB);
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ if (!ldev->pf[i].dev)
+ continue;
+
+ ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(ldev->pf[i].dev);
+ }
}
static void mlx5_do_bond(struct mlx5_lag *ldev)
@@ -290,20 +296,21 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
#endif
if (roce_lag)
- mlx5_lag_remove_ib_devices(ldev);
+ mlx5_lag_remove_devices(ldev);
err = mlx5_activate_lag(ldev, &tracker,
roce_lag ? MLX5_LAG_FLAG_ROCE :
MLX5_LAG_FLAG_SRIOV);
if (err) {
if (roce_lag)
- mlx5_lag_add_ib_devices(ldev);
+ mlx5_lag_add_devices(ldev);
return;
}
if (roce_lag) {
- mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+ dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(dev0);
mlx5_nic_vport_enable_roce(dev1);
}
} else if (do_bond && __mlx5_lag_is_active(ldev)) {
@@ -312,7 +319,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
roce_lag = __mlx5_lag_is_roce(ldev);
if (roce_lag) {
- mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+ dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(dev0);
mlx5_nic_vport_disable_roce(dev1);
}
@@ -321,7 +329,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
return;
if (roce_lag)
- mlx5_lag_add_ib_devices(ldev);
+ mlx5_lag_add_devices(ldev);
}
}
@@ -596,6 +604,8 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (err)
mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
err);
+
+ return;
}
/* Must be called with intf_mutex held */
@@ -739,24 +749,6 @@ unlock:
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);
-bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
- priv);
- struct mlx5_lag *ldev;
-
- if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB)
- return true;
-
- ldev = mlx5_lag_dev_get(dev);
- if (!ldev || !__mlx5_lag_is_roce(ldev) ||
- ldev->pf[MLX5_LAG_P1].dev == dev)
- return true;
-
- /* If bonded, we do not add an IB device for PF1. */
- return false;
-}
-
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
int num_counters,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d86f06f14cd3..c08315b51fd3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -50,6 +50,7 @@
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
+#include <linux/version.h>
#include <net/devlink.h>
#include "mlx5_core.h"
#include "lib/eq.h"
@@ -76,7 +77,6 @@
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRIVER_VERSION);
unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
@@ -227,13 +227,16 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
strncat(string, ",", remaining_size);
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
- strncat(string, DRIVER_NAME, remaining_size);
+ strncat(string, KBUILD_MODNAME, remaining_size);
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
strncat(string, ",", remaining_size);
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
- strncat(string, DRIVER_VERSION, remaining_size);
+
+ snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
+ (u8)((LINUX_VERSION_CODE >> 16) & 0xff), (u8)((LINUX_VERSION_CODE >> 8) & 0xff),
+ (u16)(LINUX_VERSION_CODE & 0xffff));
/*Send the command*/
MLX5_SET(set_driver_version_in, in, opcode,
@@ -309,7 +312,7 @@ static int request_bar(struct pci_dev *pdev)
return -ENODEV;
}
- err = pci_request_regions(pdev, DRIVER_NAME);
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err)
dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
@@ -1219,14 +1222,21 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
if (err)
goto err_devlink_reg;
- mlx5_register_device(dev);
+
+ err = mlx5_register_device(dev);
} else {
- mlx5_attach_device(dev);
+ err = mlx5_attach_device(dev);
}
+ if (err)
+ goto err_register;
+
mutex_unlock(&dev->intf_state_mutex);
return 0;
+err_register:
+ if (boot)
+ mlx5_devlink_unregister(priv_to_devlink(dev));
err_devlink_reg:
clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
mlx5_unload(dev);
@@ -1303,8 +1313,14 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
if (err)
goto err_pagealloc_init;
+ err = mlx5_adev_init(dev);
+ if (err)
+ goto err_adev_init;
+
return 0;
+err_adev_init:
+ mlx5_pagealloc_cleanup(dev);
err_pagealloc_init:
mlx5_health_cleanup(dev);
err_health_init:
@@ -1321,6 +1337,7 @@ static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
+ mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
debugfs_remove_recursive(dev->priv.dbg_root);
@@ -1331,7 +1348,6 @@ static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&dev->intf_state_mutex);
}
-#define MLX5_IB_MOD "mlx5_ib"
static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mlx5_core_dev *dev;
@@ -1351,6 +1367,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ?
MLX5_COREDEV_VF : MLX5_COREDEV_PF;
+ dev->priv.adev_idx = mlx5_adev_idx_alloc();
+ if (dev->priv.adev_idx < 0)
+ return dev->priv.adev_idx;
+
err = mlx5_mdev_init(dev, prof_sel);
if (err)
goto mdev_init_err;
@@ -1369,8 +1389,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_load_one;
}
- request_module_nowait(MLX5_IB_MOD);
-
err = mlx5_crdump_enable(dev);
if (err)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
@@ -1384,6 +1402,7 @@ err_load_one:
pci_init_err:
mlx5_mdev_uninit(dev);
mdev_init_err:
+ mlx5_adev_idx_free(dev->priv.adev_idx);
mlx5_devlink_free(devlink);
return err;
@@ -1400,6 +1419,7 @@ static void remove_one(struct pci_dev *pdev)
mlx5_unload_one(dev, true);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
+ mlx5_adev_idx_free(dev->priv.adev_idx);
mlx5_devlink_free(devlink);
}
@@ -1614,7 +1634,7 @@ void mlx5_recover_device(struct mlx5_core_dev *dev)
}
static struct pci_driver mlx5_core_driver = {
- .name = DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.id_table = mlx5_core_pci_table,
.probe = init_one,
.remove = remove_one,
@@ -1640,6 +1660,9 @@ static int __init init(void)
{
int err;
+ WARN_ONCE(strcmp(MLX5_ADEV_NAME, KBUILD_MODNAME),
+ "mlx5_core name not in sync with kernel module name");
+
get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
mlx5_core_verify_params();
@@ -1651,7 +1674,11 @@ static int __init init(void)
goto err_debug;
#ifdef CONFIG_MLX5_CORE_EN
- mlx5e_init();
+ err = mlx5e_init();
+ if (err) {
+ pci_unregister_driver(&mlx5_core_driver);
+ goto err_debug;
+ }
#endif
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 9d00efa9e6bc..0a0302ce7144 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -42,9 +42,6 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
-#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "5.0-0"
-
extern uint mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \
@@ -185,22 +182,20 @@ void mlx5_events_cleanup(struct mlx5_core_dev *dev);
void mlx5_events_start(struct mlx5_core_dev *dev);
void mlx5_events_stop(struct mlx5_core_dev *dev);
-void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
-void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
-void mlx5_attach_device(struct mlx5_core_dev *dev);
+int mlx5_adev_idx_alloc(void);
+void mlx5_adev_idx_free(int idx);
+void mlx5_adev_cleanup(struct mlx5_core_dev *dev);
+int mlx5_adev_init(struct mlx5_core_dev *dev);
+
+int mlx5_attach_device(struct mlx5_core_dev *dev);
void mlx5_detach_device(struct mlx5_core_dev *dev);
-bool mlx5_device_registered(struct mlx5_core_dev *dev);
-void mlx5_register_device(struct mlx5_core_dev *dev);
+int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
-void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
-void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void);
-bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
-
int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
@@ -219,7 +214,7 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
int mlx5_fw_version_query(struct mlx5_core_dev *dev,
u32 *running_ver, u32 *stored_ver);
-void mlx5e_init(void);
+int mlx5e_init(void);
void mlx5e_cleanup(void);
static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
@@ -239,7 +234,17 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
MLX5_CAP_GEN(dev, lag_master);
}
-void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
+int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev);
+static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
+{
+ int ret;
+
+ mlx5_dev_list_lock();
+ ret = mlx5_rescan_drivers_locked(dev);
+ mlx5_dev_list_unlock();
+ return ret;
+}
+
void mlx5_lag_update(struct mlx5_core_dev *dev);
enum {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 630109f139a0..c67825a68a26 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -160,6 +160,7 @@ struct mlxsw_rx_listener_item {
struct mlxsw_event_listener_item {
struct list_head list;
+ struct mlxsw_core *mlxsw_core;
struct mlxsw_event_listener el;
void *priv;
};
@@ -2171,11 +2172,16 @@ static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
void *priv)
{
struct mlxsw_event_listener_item *event_listener_item = priv;
+ struct mlxsw_core *mlxsw_core;
struct mlxsw_reg_info reg;
char *payload;
char *reg_tlv;
char *op_tlv;
+ mlxsw_core = event_listener_item->mlxsw_core;
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
+ skb->data, skb->len);
+
mlxsw_emad_tlv_parse(skb);
op_tlv = mlxsw_emad_op_tlv(skb);
reg_tlv = mlxsw_emad_reg_tlv(skb);
@@ -2225,6 +2231,7 @@ int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
if (!el_item)
return -ENOMEM;
+ el_item->mlxsw_core = mlxsw_core;
el_item->el = *el;
el_item->priv = priv;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 9f6905fa6b47..f1b09c2f9eda 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -133,10 +133,8 @@ mlxsw_afk_key_info_find(struct mlxsw_afk *mlxsw_afk,
}
struct mlxsw_afk_picker {
- struct {
- DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX);
- unsigned int total;
- } hits[0];
+ DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX);
+ unsigned int total;
};
static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk,
@@ -154,8 +152,8 @@ static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk,
elinst = &block->instances[j];
if (elinst->element == element) {
- __set_bit(element, picker->hits[i].element);
- picker->hits[i].total++;
+ __set_bit(element, picker[i].element);
+ picker[i].total++;
}
}
}
@@ -169,13 +167,13 @@ static void mlxsw_afk_picker_subtract_hits(struct mlxsw_afk *mlxsw_afk,
int i;
int j;
- memcpy(&hits_element, &picker->hits[block_index].element,
+ memcpy(&hits_element, &picker[block_index].element,
sizeof(hits_element));
for (i = 0; i < mlxsw_afk->blocks_count; i++) {
for_each_set_bit(j, hits_element, MLXSW_AFK_ELEMENT_MAX) {
- if (__test_and_clear_bit(j, picker->hits[i].element))
- picker->hits[i].total--;
+ if (__test_and_clear_bit(j, picker[i].element))
+ picker[i].total--;
}
}
}
@@ -188,8 +186,8 @@ static int mlxsw_afk_picker_most_hits_get(struct mlxsw_afk *mlxsw_afk,
int i;
for (i = 0; i < mlxsw_afk->blocks_count; i++) {
- if (picker->hits[i].total > most_hits) {
- most_hits = picker->hits[i].total;
+ if (picker[i].total > most_hits) {
+ most_hits = picker[i].total;
most_index = i;
}
}
@@ -206,7 +204,7 @@ static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk,
if (key_info->blocks_count == mlxsw_afk->max_blocks)
return -EINVAL;
- for_each_set_bit(element, picker->hits[block_index].element,
+ for_each_set_bit(element, picker[block_index].element,
MLXSW_AFK_ELEMENT_MAX) {
key_info->element_to_block[element] = key_info->blocks_count;
mlxsw_afk_element_usage_add(&key_info->elusage, element);
@@ -224,11 +222,9 @@ static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk,
{
struct mlxsw_afk_picker *picker;
enum mlxsw_afk_element element;
- size_t alloc_size;
int err;
- alloc_size = sizeof(picker->hits[0]) * mlxsw_afk->blocks_count;
- picker = kzalloc(alloc_size, GFP_KERNEL);
+ picker = kcalloc(mlxsw_afk->blocks_count, sizeof(*picker), GFP_KERNEL);
if (!picker)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1077ed2046fe..2a89b3261f00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -581,6 +581,13 @@ mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index,
mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto);
}
+enum mlxsw_reg_tunnel_port {
+ MLXSW_REG_TUNNEL_PORT_NVE,
+ MLXSW_REG_TUNNEL_PORT_VPLS,
+ MLXSW_REG_TUNNEL_PORT_FLEX_TUNNEL0,
+ MLXSW_REG_TUNNEL_PORT_FLEX_TUNNEL1,
+};
+
/* SFN - Switch FDB Notification Register
* -------------------------------------------
* The switch provides notifications on newly learned FDB entries and
@@ -738,13 +745,6 @@ MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_protocol, MLXSW_REG_SFN_BASE_LEN, 27,
MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_lsb, MLXSW_REG_SFN_BASE_LEN, 0,
24, MLXSW_REG_SFN_REC_LEN, 0x0C, false);
-enum mlxsw_reg_sfn_tunnel_port {
- MLXSW_REG_SFN_TUNNEL_PORT_NVE,
- MLXSW_REG_SFN_TUNNEL_PORT_VPLS,
- MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL0,
- MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL1,
-};
-
/* reg_sfn_uc_tunnel_port
* Tunnel port.
* Reserved on Spectrum.
@@ -821,8 +821,16 @@ static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid,
MLXSW_REG_DEFINE(spvid, MLXSW_REG_SPVID_ID, MLXSW_REG_SPVID_LEN);
+/* reg_spvid_tport
+ * Port is tunnel port.
+ * Reserved when SwitchX/-2 or Spectrum-1.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, tport, 0x00, 24, 1);
+
/* reg_spvid_local_port
- * Local port number.
+ * When tport = 0: Local port number. Not supported for CPU port.
+ * When tport = 1: Tunnel port.
* Access: Index
*/
MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
@@ -1693,6 +1701,109 @@ static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port,
mlxsw_reg_svfa_vid_set(payload, vid);
}
+/* SPVTR - Switch Port VLAN Stacking Register
+ * ------------------------------------------
+ * The Switch Port VLAN Stacking register configures the VLAN mode of the port
+ * to enable VLAN stacking.
+ */
+#define MLXSW_REG_SPVTR_ID 0x201D
+#define MLXSW_REG_SPVTR_LEN 0x10
+
+MLXSW_REG_DEFINE(spvtr, MLXSW_REG_SPVTR_ID, MLXSW_REG_SPVTR_LEN);
+
+/* reg_spvtr_tport
+ * Port is tunnel port.
+ * Access: Index
+ *
+ * Note: Reserved when SwitchX/-2 or Spectrum-1.
+ */
+MLXSW_ITEM32(reg, spvtr, tport, 0x00, 24, 1);
+
+/* reg_spvtr_local_port
+ * When tport = 0: local port number (Not supported from/to CPU).
+ * When tport = 1: tunnel port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvtr, local_port, 0x00, 16, 8);
+
+/* reg_spvtr_ippe
+ * Ingress Port Prio Mode Update Enable.
+ * When set, the Port Prio Mode is updated with the provided ipprio_mode field.
+ * Reserved on Get operations.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvtr, ippe, 0x04, 31, 1);
+
+/* reg_spvtr_ipve
+ * Ingress Port VID Mode Update Enable.
+ * When set, the Ingress Port VID Mode is updated with the provided ipvid_mode
+ * field.
+ * Reserved on Get operations.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvtr, ipve, 0x04, 30, 1);
+
+/* reg_spvtr_epve
+ * Egress Port VID Mode Update Enable.
+ * When set, the Egress Port VID Mode is updated with the provided epvid_mode
+ * field.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvtr, epve, 0x04, 29, 1);
+
+/* reg_spvtr_ipprio_mode
+ * Ingress Port Priority Mode.
+ * This controls the PCP and DEI of the new outer VLAN.
+ * Note: for SwitchX/-2 the DEI is not affected.
+ * 0: use port default PCP and DEI (configured by QPDPC).
+ * 1: use C-VLAN PCP and DEI.
+ * Has no effect when ipvid_mode = 0.
+ * Reserved when tport = 1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvtr, ipprio_mode, 0x04, 20, 4);
+
+enum mlxsw_reg_spvtr_ipvid_mode {
+ /* IEEE Compliant PVID (default) */
+ MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID,
+ /* Push VLAN (for VLAN stacking, except prio tagged packets) */
+ MLXSW_REG_SPVTR_IPVID_MODE_PUSH_VLAN_FOR_UNTAGGED_PACKET,
+ /* Always push VLAN (also for prio tagged packets) */
+ MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN,
+};
+
+/* reg_spvtr_ipvid_mode
+ * Ingress Port VLAN-ID Mode.
+ * For Spectrum family, this affects the values of SPVM.i
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvtr, ipvid_mode, 0x04, 16, 4);
+
+enum mlxsw_reg_spvtr_epvid_mode {
+ /* IEEE Compliant VLAN membership */
+ MLXSW_REG_SPVTR_EPVID_MODE_IEEE_COMPLIANT_VLAN_MEMBERSHIP,
+ /* Pop VLAN (for VLAN stacking) */
+ MLXSW_REG_SPVTR_EPVID_MODE_POP_VLAN,
+};
+
+/* reg_spvtr_epvid_mode
+ * Egress Port VLAN-ID Mode.
+ * For Spectrum family, this affects the values of SPVM.e,u,pt.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, spvtr, epvid_mode, 0x04, 0, 4);
+
+static inline void mlxsw_reg_spvtr_pack(char *payload, bool tport,
+ u8 local_port,
+ enum mlxsw_reg_spvtr_ipvid_mode ipvid_mode)
+{
+ MLXSW_REG_ZERO(spvtr, payload);
+ mlxsw_reg_spvtr_tport_set(payload, tport);
+ mlxsw_reg_spvtr_local_port_set(payload, local_port);
+ mlxsw_reg_spvtr_ipvid_mode_set(payload, ipvid_mode);
+ mlxsw_reg_spvtr_ipve_set(payload, true);
+}
+
/* SVPE - Switch Virtual-Port Enabling Register
* --------------------------------------------
* Enables port virtualization.
@@ -10507,13 +10618,6 @@ enum mlxsw_reg_tnumt_record_type {
*/
MLXSW_ITEM32(reg, tnumt, record_type, 0x00, 28, 4);
-enum mlxsw_reg_tnumt_tunnel_port {
- MLXSW_REG_TNUMT_TUNNEL_PORT_NVE,
- MLXSW_REG_TNUMT_TUNNEL_PORT_VPLS,
- MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL0,
- MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL1,
-};
-
/* reg_tnumt_tunnel_port
* Tunnel port.
* Access: RW
@@ -10561,7 +10665,7 @@ MLXSW_ITEM32_INDEXED(reg, tnumt, udip_ptr, 0x0C, 0, 24, 0x04, 0x00, false);
static inline void mlxsw_reg_tnumt_pack(char *payload,
enum mlxsw_reg_tnumt_record_type type,
- enum mlxsw_reg_tnumt_tunnel_port tport,
+ enum mlxsw_reg_tunnel_port tport,
u32 underlay_mc_ptr, bool vnext,
u32 next_underlay_mc_ptr,
u8 record_size)
@@ -10725,13 +10829,6 @@ static inline void mlxsw_reg_tndem_pack(char *payload, u8 underlay_ecn,
MLXSW_REG_DEFINE(tnpc, MLXSW_REG_TNPC_ID, MLXSW_REG_TNPC_LEN);
-enum mlxsw_reg_tnpc_tunnel_port {
- MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
- MLXSW_REG_TNPC_TUNNEL_PORT_VPLS,
- MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL0,
- MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL1,
-};
-
/* reg_tnpc_tunnel_port
* Tunnel port.
* Access: Index
@@ -10751,7 +10848,7 @@ MLXSW_ITEM32(reg, tnpc, learn_enable_v6, 0x04, 1, 1);
MLXSW_ITEM32(reg, tnpc, learn_enable_v4, 0x04, 0, 1);
static inline void mlxsw_reg_tnpc_pack(char *payload,
- enum mlxsw_reg_tnpc_tunnel_port tport,
+ enum mlxsw_reg_tunnel_port tport,
bool learn_enable)
{
MLXSW_REG_ZERO(tnpc, payload);
@@ -11320,6 +11417,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(slcor),
MLXSW_REG(spmlr),
MLXSW_REG(svfa),
+ MLXSW_REG(spvtr),
MLXSW_REG(svpe),
MLXSW_REG(sfmr),
MLXSW_REG(spvmlr),
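
Editor's note: the new SPVTR register follows the usual mlxsw pack-then-write pattern. A hedged usage sketch is below: mlxsw_reg_spvtr_pack(), MLXSW_REG_SPVTR_LEN, the ipvid_mode enum and MLXSW_REG(spvtr) come from the hunk above, while the wrapper function, its tunnel-port argument and the lack of extra error handling are illustrative assumptions.

	/* Illustrative only: enable VLAN push on ingress for a tunnel port. */
	static int example_spvtr_vlan_push(struct mlxsw_sp *mlxsw_sp, u8 tunnel_port)
	{
		char spvtr_pl[MLXSW_REG_SPVTR_LEN];

		mlxsw_reg_spvtr_pack(spvtr_pl, true, tunnel_port,
				     MLXSW_REG_SPVTR_IPVID_MODE_PUSH_VLAN_FOR_UNTAGGED_PACKET);
		return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
	}
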
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 385eb3c3b362..df8175cd44ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -45,7 +45,7 @@
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
-#define MLXSW_SP1_FWREV_SUBMINOR 1310
+#define MLXSW_SP1_FWREV_SUBMINOR 2018
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -62,7 +62,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
-#define MLXSW_SP2_FWREV_SUBMINOR 1310
+#define MLXSW_SP2_FWREV_SUBMINOR 2018
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -77,7 +77,7 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
-#define MLXSW_SP3_FWREV_SUBMINOR 1310
+#define MLXSW_SP3_FWREV_SUBMINOR 2018
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
@@ -384,7 +384,7 @@ int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
return err;
}
-static int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
+int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
switch (ethtype) {
case ETH_P_8021Q:
@@ -3595,7 +3595,8 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *lag_dev)
+ struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_upper *lag;
@@ -3631,8 +3632,20 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
if (mlxsw_sp_port->default_vlan->fid)
mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
+ /* Join a router interface configured on the LAG, if one exists */
+ err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
+ lag_dev, extack);
+ if (err)
+ goto err_router_join;
+
return 0;
+err_router_join:
+ lag->ref_count--;
+ mlxsw_sp_port->lagged = 0;
+ mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
+ mlxsw_sp_port->local_port);
+ mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
if (!lag->ref_count)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
@@ -3997,7 +4010,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
} else if (netif_is_lag_master(upper_dev)) {
if (info->linking) {
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
- upper_dev);
+ upper_dev, extack);
} else {
mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
mlxsw_sp_port_lag_leave(mlxsw_sp_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index ce26cc41831f..a6956cfc9cb1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -584,6 +584,7 @@ int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
bool learn_enable);
+int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
u16 ethtype);
struct mlxsw_sp_port_vlan *
@@ -656,6 +657,10 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev,
unsigned long event,
struct netdev_notifier_info *info);
+int
+mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack);
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
@@ -1198,6 +1203,7 @@ struct mlxsw_sp_nve_params {
enum mlxsw_sp_nve_type type;
__be32 vni;
const struct net_device *dev;
+ u16 ethertype;
};
extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[];
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 089d99535f9e..6ccca39bae84 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -142,9 +142,9 @@ mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
}
static int
-mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
- u32 tunnel_index,
- struct mlxsw_sp_ipip_entry *ipip_entry)
+mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index)
{
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
@@ -180,43 +180,6 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
}
-static int
-mlxsw_sp_ipip_fib_entry_op_gre4_do(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 dip, u8 prefix_len, u16 ul_vr_id,
- enum mlxsw_sp_fib_entry_op op,
- u32 tunnel_index,
- struct mlxsw_sp_fib_entry_priv *priv)
-{
- ll_ops->fib_entry_pack(op_ctx, MLXSW_SP_L3_PROTO_IPV4, op, ul_vr_id,
- prefix_len, (unsigned char *) &dip, priv);
- ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx, tunnel_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
-}
-
-static int mlxsw_sp_ipip_fib_entry_op_gre4(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_ipip_entry *ipip_entry,
- enum mlxsw_sp_fib_entry_op op, u32 tunnel_index,
- struct mlxsw_sp_fib_entry_priv *priv)
-{
- u16 ul_vr_id = mlxsw_sp_ipip_lb_ul_vr_id(ipip_entry->ol_lb);
- __be32 dip;
- int err;
-
- err = mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(mlxsw_sp, tunnel_index,
- ipip_entry);
- if (err)
- return err;
-
- dip = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4,
- ipip_entry->ol_dev).addr4;
- return mlxsw_sp_ipip_fib_entry_op_gre4_do(mlxsw_sp, ll_ops, op_ctx, be32_to_cpu(dip),
- 32, ul_vr_id, op, tunnel_index, priv);
-}
-
static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev)
{
@@ -332,7 +295,7 @@ static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
.dev_type = ARPHRD_IPGRE,
.ul_proto = MLXSW_SP_L3_PROTO_IPV4,
.nexthop_update = mlxsw_sp_ipip_nexthop_update_gre4,
- .fib_entry_op = mlxsw_sp_ipip_fib_entry_op_gre4,
+ .decap_config = mlxsw_sp_ipip_decap_config_gre4,
.can_offload = mlxsw_sp_ipip_can_offload_gre4,
.ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4,
.ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index d32702cb6ab4..87bef9880e5e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -50,13 +50,9 @@ struct mlxsw_sp_ipip_ops {
(*ol_loopback_config)(struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev);
- int (*fib_entry_op)(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ int (*decap_config)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
- enum mlxsw_sp_fib_entry_op op,
- u32 tunnel_index,
- struct mlxsw_sp_fib_entry_priv *priv);
+ u32 tunnel_index);
int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 47eb751a2570..7846a21555ef 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -15,7 +15,7 @@ struct mlxsw_sp_mr {
struct list_head table_list;
struct mutex table_list_lock; /* Protects table_list */
#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 54d3e7dcd303..e5ec595593f4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -368,7 +368,7 @@ mlxsw_sp_nve_mc_record_refresh(struct mlxsw_sp_nve_mc_record *mc_record)
next_valid = true;
}
- mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TNUMT_TUNNEL_PORT_NVE,
+ mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TUNNEL_PORT_NVE,
mc_record->kvdl_index, next_valid,
next_kvdl_index, mc_record->num_entries);
@@ -798,11 +798,11 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
ops = nve->nve_ops_arr[params->type];
- if (!ops->can_offload(nve, params->dev, extack))
+ if (!ops->can_offload(nve, params, extack))
return -EINVAL;
memset(&config, 0, sizeof(config));
- ops->nve_config(nve, params->dev, &config);
+ ops->nve_config(nve, params, &config);
if (nve->num_nve_tunnels &&
memcmp(&config, &nve->config, sizeof(config))) {
NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 12f664f42f21..2796d3659979 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -18,6 +18,7 @@ struct mlxsw_sp_nve_config {
u32 ul_tb_id;
enum mlxsw_sp_l3proto ul_proto;
union mlxsw_sp_l3addr ul_sip;
+ u16 ethertype;
};
struct mlxsw_sp_nve {
@@ -35,10 +36,10 @@ struct mlxsw_sp_nve {
struct mlxsw_sp_nve_ops {
enum mlxsw_sp_nve_type type;
bool (*can_offload)(const struct mlxsw_sp_nve *nve,
- const struct net_device *dev,
+ const struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack);
void (*nve_config)(const struct mlxsw_sp_nve *nve,
- const struct net_device *dev,
+ const struct mlxsw_sp_nve_params *params,
struct mlxsw_sp_nve_config *config);
int (*init)(struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_config *config);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index 05517c7feaa5..3e2bb22e9ca6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -22,10 +22,10 @@
VXLAN_F_LEARN)
static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
- const struct net_device *dev,
+ const struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack)
{
- struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_dev *vxlan = netdev_priv(params->dev);
struct vxlan_config *cfg = &vxlan->cfg;
if (cfg->saddr.sa.sa_family != AF_INET) {
@@ -86,11 +86,23 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
return true;
}
+static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_params *params,
+ struct netlink_ext_ack *extack)
+{
+ if (params->ethertype == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: 802.1ad bridge is not supported with VxLAN");
+ return false;
+ }
+
+ return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack);
+}
+
static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
- const struct net_device *dev,
+ const struct mlxsw_sp_nve_params *params,
struct mlxsw_sp_nve_config *config)
{
- struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_dev *vxlan = netdev_priv(params->dev);
struct vxlan_config *cfg = &vxlan->cfg;
config->type = MLXSW_SP_NVE_TYPE_VXLAN;
@@ -101,6 +113,7 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
config->udp_dport = cfg->dst_port;
+ config->ethertype = params->ethertype;
}
static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
@@ -286,7 +299,7 @@ mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni)
const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
.type = MLXSW_SP_NVE_TYPE_VXLAN,
- .can_offload = mlxsw_sp_nve_vxlan_can_offload,
+ .can_offload = mlxsw_sp1_nve_vxlan_can_offload,
.nve_config = mlxsw_sp_nve_vxlan_config,
.init = mlxsw_sp1_nve_vxlan_init,
.fini = mlxsw_sp1_nve_vxlan_fini,
@@ -299,16 +312,35 @@ static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
{
char tnpc_pl[MLXSW_REG_TNPC_LEN];
- mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
+ mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TUNNEL_PORT_NVE,
learning_en);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl);
}
static int
+mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp, u16 ethertype)
+{
+ char spvid_pl[MLXSW_REG_SPVID_LEN] = {};
+ u8 sver_type;
+ int err;
+
+ mlxsw_reg_spvid_tport_set(spvid_pl, true);
+ mlxsw_reg_spvid_local_port_set(spvid_pl,
+ MLXSW_REG_TUNNEL_PORT_NVE);
+ err = mlxsw_sp_ethtype_to_sver_type(ethertype, &sver_type);
+ if (err)
+ return err;
+
+ mlxsw_reg_spvid_et_vlan_set(spvid_pl, sver_type);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+}
+
+static int
mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_nve_config *config)
{
char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+ char spvtr_pl[MLXSW_REG_SPVTR_LEN];
u16 ul_rif_index;
int err;
@@ -329,8 +361,25 @@ mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_tngcr_write;
+ mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
+ MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
+ if (err)
+ goto err_spvtr_write;
+
+ err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, config->ethertype);
+ if (err)
+ goto err_decap_ethertype_set;
+
return 0;
+err_decap_ethertype_set:
+ mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
+ MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
+err_spvtr_write:
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
err_tngcr_write:
mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
err_vxlan_learning_set:
@@ -340,8 +389,14 @@ err_vxlan_learning_set:
static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
{
+ char spvtr_pl[MLXSW_REG_SPVTR_LEN];
char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+ /* Set default EtherType */
+ mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, ETH_P_8021Q);
+ mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
+ MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
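The mlxsw_sp_port_lag_join() and mlxsw_sp2_nve_vxlan_config_set() hunks above both follow the kernel's goto-based unwind pattern: every step added to the setup path gets a matching error label that rolls back only what already succeeded, in reverse order. A minimal standalone sketch of that pattern; step_a, step_b and undo_a are illustrative stand-ins, not driver functions:

/* Standalone sketch (not driver code): goto-based error unwinding.
 * Each step that succeeded is undone in reverse order when a later
 * step fails, so the function leaves no partial configuration behind.
 */
#include <stdio.h>

static int step_a(void) { return 0; }		/* pretend this succeeds */
static int step_b(void) { return -1; }		/* pretend this fails */
static void undo_a(void) { puts("undo a"); }

static int configure(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto err_step_b;

	return 0;

err_step_b:
	undo_a();	/* roll back only what already succeeded */
	return err;
}

int main(void)
{
	printf("configure() = %d\n", configure());
	return 0;
}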
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 85223647fdb6..d671d961fc33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -5142,9 +5142,9 @@ static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
fib_entry->priv);
}
-int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- const struct mlxsw_sp_router_ll_ops *ll_ops)
+static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ const struct mlxsw_sp_router_ll_ops *ll_ops)
{
bool postponed_for_bulk = false;
int err;
@@ -5307,13 +5307,21 @@ mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
const struct mlxsw_sp_ipip_ops *ipip_ops;
+ int err;
if (WARN_ON(!ipip_entry))
return -EINVAL;
ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
- return ipip_ops->fib_entry_op(mlxsw_sp, ll_ops, op_ctx, ipip_entry, op,
- fib_entry->decap.tunnel_index, fib_entry->priv);
+ err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
+ fib_entry->decap.tunnel_index);
+ if (err)
+ return err;
+
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
@@ -7697,9 +7705,9 @@ static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
}
static int
-mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
- struct net_device *l3_dev,
- struct netlink_ext_ack *extack)
+__mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -7764,6 +7772,27 @@ __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
mlxsw_sp_rif_subport_put(rif);
}
+int
+mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (!rif)
+ goto out;
+
+ err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
+ extack);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
+}
+
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
@@ -7788,8 +7817,8 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
switch (event) {
case NETDEV_UP:
- return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
- l3_dev, extack);
+ return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
+ l3_dev, extack);
case NETDEV_DOWN:
__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 96d8bf7a9a67..d8aed866af21 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -118,10 +118,6 @@ struct mlxsw_sp_router_ll_ops {
bool (*fib_entry_is_committed)(struct mlxsw_sp_fib_entry_priv *priv);
};
-int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- const struct mlxsw_sp_router_ll_ops *ll_ops);
-
struct mlxsw_sp_rif_ipip_lb;
struct mlxsw_sp_rif_ipip_lb_config {
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 9c4e17607e6a..cea42f6ed89b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2053,9 +2053,10 @@ mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
}
static int
-mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev, u16 vid,
- struct netlink_ext_ack *extack)
+mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev,
+ u16 vid, u16 ethertype,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
@@ -2063,6 +2064,7 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
.type = MLXSW_SP_NVE_TYPE_VXLAN,
.vni = vxlan->cfg.vni,
.dev = vxlan_dev,
+ .ethertype = ethertype,
};
struct mlxsw_sp_fid *fid;
int err;
@@ -2101,6 +2103,15 @@ err_vni_exists:
return err;
}
+static int
+mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
+ vid, ETH_P_8021Q, extack);
+}
+
static struct net_device *
mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
{
@@ -2231,6 +2242,7 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
.type = MLXSW_SP_NVE_TYPE_VXLAN,
.vni = vxlan->cfg.vni,
.dev = vxlan_dev,
+ .ethertype = ETH_P_8021Q,
};
struct mlxsw_sp_fid *fid;
int err;
@@ -2335,8 +2347,8 @@ mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
const struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack)
{
- NL_SET_ERR_MSG_MOD(extack, "VXLAN is not supported with 802.1ad");
- return -EOPNOTSUPP;
+ return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
+ vid, ETH_P_8021AD, extack);
}
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = {
@@ -3308,8 +3320,8 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
if (!fid) {
if (!flag_untagged || !flag_pvid)
return 0;
- return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device,
- vxlan_dev, vid, extack);
+ return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev,
+ vid, extack);
}
/* Second case: FID is associated with the VNI and the VLAN associated
@@ -3348,16 +3360,14 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
if (!flag_untagged)
return 0;
- err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
- extack);
+ err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack);
if (err)
goto err_vxlan_join;
return 0;
err_vxlan_join:
- mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid,
- NULL);
+ bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL);
return err;
}
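The switchdev change above stops hard-coding mlxsw_sp_bridge_8021q_vxlan_join() at the call sites and dispatches through bridge_device->ops->vxlan_join() instead, so each bridge flavour passes its own EtherType to the shared VLAN-aware join helper. A minimal standalone sketch of that ops-table dispatch, using illustrative stand-in types rather than the mlxsw structures:

/* Standalone sketch (not driver code): per-bridge-type dispatch through an
 * ops table, so the caller no longer hard-codes the 802.1Q variant.
 */
#include <stdio.h>

#define ETH_P_8021Q	0x8100
#define ETH_P_8021AD	0x88A8

struct bridge_ops {
	int (*vxlan_join)(unsigned short vid);
};

static int vlan_aware_vxlan_join(unsigned short vid, unsigned short ethertype)
{
	printf("join vid %d with EtherType 0x%04x\n", vid, (unsigned int)ethertype);
	return 0;
}

static int bridge_8021q_vxlan_join(unsigned short vid)
{
	return vlan_aware_vxlan_join(vid, ETH_P_8021Q);
}

static int bridge_8021ad_vxlan_join(unsigned short vid)
{
	return vlan_aware_vxlan_join(vid, ETH_P_8021AD);
}

static const struct bridge_ops bridge_8021q_ops = { .vxlan_join = bridge_8021q_vxlan_join };
static const struct bridge_ops bridge_8021ad_ops = { .vxlan_join = bridge_8021ad_vxlan_join };

int main(void)
{
	bridge_8021q_ops.vxlan_join(10);
	bridge_8021ad_ops.vxlan_join(10);
	return 0;
}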
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index d65872172229..6fc7483aea03 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -1112,7 +1112,7 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
/* setup mii state */
ks->mii.dev = netdev;
- ks->mii.phy_id = 1,
+ ks->mii.phy_id = 1;
ks->mii.phy_id_mask = 1;
ks->mii.reg_num_mask = 0xf;
ks->mii.mdio_read = ks8851_phy_read;
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 31f9a82dc113..d0f6dfe0dcf3 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -47,6 +47,7 @@ config LAN743X
depends on PCI
select PHYLIB
select CRC16
+ select CRC32
help
Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 2632fe2d2448..abea8dd2b0cb 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1551,10 +1551,11 @@ int ocelot_init(struct ocelot *ocelot)
SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
/* Setup flooding PGIDs */
- ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
- ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
- ANA_FLOODING_FLD_UNICAST(PGID_UC),
- ANA_FLOODING, 0);
+ for (i = 0; i < ocelot->num_flooding_pgids; i++)
+ ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
+ ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST(PGID_UC),
+ ANA_FLOODING, i);
ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index dc00772950e5..1e7729421a82 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -1254,6 +1254,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
}
ocelot->num_phys_ports = of_get_child_count(ports);
+ ocelot->num_flooding_pgids = 1;
ocelot->vcap = vsc7514_vcap_props;
ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE;
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index d8b99d6a0356..b82758d5beed 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -22,6 +22,7 @@ config NFP
depends on VXLAN || VXLAN=n
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
+ select CRC32
help
This driver supports the Netronome(R) NFP4000/NFP6000 based
cards working as an advanced Ethernet NIC. It works with both
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/fw.h b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
index 8d1458896bcb..dcb67c2b5e5e 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
@@ -40,7 +40,7 @@ struct nfp_crypto_req_add_front {
__be16 ipver_vlan __packed;
u8 l4_proto;
#define NFP_NET_TLS_NON_ADDR_KEY_LEN 8
- u8 l3_addrs[0];
+ u8 l3_addrs[];
};
struct nfp_crypto_req_add_back {
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 9b32ae46011c..84d66d138c3d 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -492,7 +492,7 @@ int nfp_net_tls_rx_resync_req(struct net_device *netdev,
goto err_cnt_ign;
}
- switch (iph->version) {
+ switch (ipv6h->version) {
case 4:
sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
iph->saddr, th->source, iph->daddr,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index b4acf2f41e84..f21fb573ea3e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3562,9 +3562,6 @@ static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
struct nfp_net_dp *dp;
int err;
- if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
- return -EBUSY;
-
if (!prog == !nn->dp.xdp_prog) {
WRITE_ONCE(nn->dp.xdp_prog, prog);
xdp_attachment_setup(&nn->xdp, bpf);
@@ -3593,9 +3590,6 @@ static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
{
int err;
- if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
- return -EBUSY;
-
err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index f18e787fa9ad..10e7d8b21c46 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -1070,7 +1070,7 @@ int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
__le16 offset;
__le16 readlen;
u8 eth_index;
- u8 data[0];
+ u8 data[];
} __packed *buf;
int bufsz, ret;
diff --git a/drivers/net/ethernet/nxp/Kconfig b/drivers/net/ethernet/nxp/Kconfig
index ee83a71c2509..c84997db828c 100644
--- a/drivers/net/ethernet/nxp/Kconfig
+++ b/drivers/net/ethernet/nxp/Kconfig
@@ -3,6 +3,7 @@ config LPC_ENET
tristate "NXP ethernet MAC on LPC devices"
depends on ARCH_LPC32XX || COMPILE_TEST
select PHYLIB
+ select CRC32
help
Say Y or M here if you want to use the NXP ethernet MAC included on
some NXP LPC devices. You can safely enable this option for LPC32xx
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 35c72d4a78b3..0832bedcb3b4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -738,16 +738,11 @@ static int ionic_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct ionic_lif *lif = netdev_priv(netdev);
- int err;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- err = ionic_lif_rss_config(lif, lif->rss_types, key, indir);
- if (err)
- return err;
-
- return 0;
+ return ionic_lif_rss_config(lif, lif->rss_types, key, indir);
}
static int ionic_set_tunable(struct net_device *dev,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 30e52f969759..dd03be3fc82a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -2112,7 +2112,6 @@ static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- int retval;
netif_device_detach(netdev);
qlcnic_cancel_idc_work(adapter);
@@ -2125,11 +2124,7 @@ static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
qlcnic_83xx_disable_mbx_intr(adapter);
cancel_delayed_work_sync(&adapter->idc_aen_work);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- return 0;
+ return pci_save_state(pdev);
}
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index fcdecddb2812..8d51b0cb545c 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -26,7 +26,7 @@ static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
}
/* Needs rtnl lock */
-static struct rmnet_port*
+struct rmnet_port*
rmnet_get_port_rtnl(const struct net_device *real_dev)
{
return rtnl_dereference(real_dev->rx_handler_data);
@@ -253,7 +253,10 @@ static int rmnet_config_notify_cb(struct notifier_block *nb,
netdev_dbg(real_dev, "Kernel unregister\n");
rmnet_force_unassociate_device(real_dev);
break;
-
+ case NETDEV_CHANGEMTU:
+ if (rmnet_vnd_validate_real_dev_mtu(real_dev))
+ return NOTIFY_BAD;
+ break;
default:
break;
}
@@ -329,9 +332,17 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
if (data[IFLA_RMNET_FLAGS]) {
struct ifla_rmnet_flags *flags;
+ u32 old_data_format;
+ old_data_format = port->data_format;
flags = nla_data(data[IFLA_RMNET_FLAGS]);
port->data_format = flags->flags & flags->mask;
+
+ if (rmnet_vnd_update_dev_mtu(port, real_dev)) {
+ port->data_format = old_data_format;
+ NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
+ return -EINVAL;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index be515982d628..8d8d4690a074 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -73,4 +73,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
struct netlink_ext_ack *extack);
int rmnet_del_bridge(struct net_device *rmnet_dev,
struct net_device *slave_dev);
+struct rmnet_port*
+rmnet_get_port_rtnl(const struct net_device *real_dev);
#endif /* _RMNET_CONFIG_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index ca1535ebb6e7..41fbd2ceeede 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -59,9 +59,30 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int rmnet_vnd_headroom(struct rmnet_port *port)
+{
+ u32 headroom;
+
+ headroom = sizeof(struct rmnet_map_header);
+
+ if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
+ headroom += sizeof(struct rmnet_map_ul_csum_header);
+
+ return headroom;
+}
+
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
- if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
+ struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+ struct rmnet_port *port;
+ u32 headroom;
+
+ port = rmnet_get_port_rtnl(priv->real_dev);
+
+ headroom = rmnet_vnd_headroom(port);
+
+ if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE ||
+ new_mtu > (priv->real_dev->mtu - headroom))
return -EINVAL;
rmnet_dev->mtu = new_mtu;
@@ -230,6 +251,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
{
struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+ u32 headroom;
int rc;
if (rmnet_get_endpoint(port, id)) {
@@ -243,6 +265,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
priv->real_dev = real_dev;
+ headroom = rmnet_vnd_headroom(port);
+
+ if (rmnet_vnd_change_mtu(rmnet_dev, real_dev->mtu - headroom)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
+ return -EINVAL;
+ }
+
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;
@@ -284,3 +313,45 @@ int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
return 0;
}
+
+int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev)
+{
+ struct hlist_node *tmp_ep;
+ struct rmnet_endpoint *ep;
+ struct rmnet_port *port;
+ unsigned long bkt_ep;
+ u32 headroom;
+
+ port = rmnet_get_port_rtnl(real_dev);
+
+ headroom = rmnet_vnd_headroom(port);
+
+ hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+ if (ep->egress_dev->mtu > (real_dev->mtu - headroom))
+ return -1;
+ }
+
+ return 0;
+}
+
+int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
+ struct net_device *real_dev)
+{
+ struct hlist_node *tmp_ep;
+ struct rmnet_endpoint *ep;
+ unsigned long bkt_ep;
+ u32 headroom;
+
+ headroom = rmnet_vnd_headroom(port);
+
+ hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+ if (ep->egress_dev->mtu <= (real_dev->mtu - headroom))
+ continue;
+
+ if (rmnet_vnd_change_mtu(ep->egress_dev,
+ real_dev->mtu - headroom))
+ return -1;
+ }
+
+ return 0;
+}
\ No newline at end of file
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
index 4967f3461ed1..dc3a4443ef0a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
@@ -18,4 +18,7 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
void rmnet_vnd_setup(struct net_device *dev);
+int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev);
+int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
+ struct net_device *real_dev);
#endif /* _RMNET_VND_H_ */
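The rmnet changes above tie the virtual device's MTU to the real device's MTU minus the MAP headroom, both when an MTU change is requested and when a data-format change adds or removes the uplink checksum header. A minimal standalone sketch of that constraint, using placeholder header sizes and omitting the RMNET_MAX_PACKET_SIZE bound:

/* Standalone sketch (not driver code): the rmnet MTU/headroom relationship.
 * Header sizes are illustrative placeholders, not the real structure layouts.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAP_HDR_LEN	4	/* placeholder for sizeof(struct rmnet_map_header) */
#define UL_CSUM_HDR_LEN	4	/* placeholder for sizeof(struct rmnet_map_ul_csum_header) */

static unsigned int rmnet_headroom(bool csum_offload)
{
	unsigned int headroom = MAP_HDR_LEN;

	if (csum_offload)
		headroom += UL_CSUM_HDR_LEN;
	return headroom;
}

static bool rmnet_mtu_valid(int new_mtu, int real_dev_mtu, bool csum_offload)
{
	/* the rmnet MTU must leave room for the MAP headers on the real device */
	return new_mtu >= 0 &&
	       new_mtu <= real_dev_mtu - (int)rmnet_headroom(csum_offload);
}

int main(void)
{
	/* 1500-byte real device: the largest valid rmnet MTU is 1500 - 8 = 1492 */
	printf("%d\n", rmnet_mtu_valid(1492, 1500, true));	/* 1 */
	printf("%d\n", rmnet_mtu_valid(1496, 1500, true));	/* 0 */
	return 0;
}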
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 3ef1b31c95d1..46d8510b2fe2 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -68,7 +68,7 @@
#define R8169_REGS_SIZE 256
#define R8169_RX_BUF_SIZE (SZ_16K - 1)
#define NUM_TX_DESC 256 /* Number of Tx descriptor registers */
-#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
+#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
@@ -3844,7 +3844,7 @@ static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
static void rtl8169_rx_clear(struct rtl8169_private *tp)
{
- unsigned int i;
+ int i;
for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) {
dma_unmap_page(tp_to_dev(tp),
@@ -3859,7 +3859,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
- unsigned int i;
+ int i;
for (i = 0; i < NUM_RX_DESC; i++) {
struct page *data;
@@ -4415,15 +4415,13 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
skb_checksum_none_assert(skb);
}
-static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
+static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget)
{
- unsigned int cur_rx, rx_left, count;
struct device *d = tp_to_dev(tp);
+ int count;
- cur_rx = tp->cur_rx;
-
- for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
- unsigned int pkt_size, entry = cur_rx % NUM_RX_DESC;
+ for (count = 0; count < budget; count++, tp->cur_rx++) {
+ unsigned int pkt_size, entry = tp->cur_rx % NUM_RX_DESC;
struct RxDesc *desc = tp->RxDescArray + entry;
struct sk_buff *skb;
const void *rx_buf;
@@ -4500,9 +4498,6 @@ release_descriptor:
rtl8169_mark_to_asic(desc);
}
- count = cur_rx - tp->cur_rx;
- tp->cur_rx = cur_rx;
-
return count;
}
@@ -4561,7 +4556,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
struct net_device *dev = tp->dev;
int work_done;
- work_done = rtl_rx(dev, tp, (u32) budget);
+ work_done = rtl_rx(dev, tp, budget);
rtl_tx(dev, tp, budget);
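rtl_rx() above now walks the ring with a free-running tp->cur_rx that only ever increases, taking the ring slot as cur_rx % NUM_RX_DESC and returning the loop counter as the work done, instead of reconstructing the count from index arithmetic afterwards. A minimal standalone sketch of that indexing scheme; ring_ready stands in for the hardware ownership bit:

/* Standalone sketch (not driver code): descriptor-ring consumption with a
 * free-running index. The index wraps naturally through the modulo, and the
 * number of packets handled per poll is simply the loop counter.
 */
#include <stdio.h>

#define RING_SIZE 256

static unsigned int cur_rx;	/* free-running, never reset */

static int poll_ring(int budget, const int *ring_ready)
{
	int count;

	for (count = 0; count < budget; count++, cur_rx++) {
		unsigned int entry = cur_rx % RING_SIZE;

		if (!ring_ready[entry])	/* descriptor still owned by "hardware" */
			break;
		/* ... process the packet in slot 'entry' ... */
	}

	return count;	/* packets handled this poll */
}

int main(void)
{
	int ready[RING_SIZE] = { 0 };
	int i;

	for (i = 0; i < 10; i++)	/* pretend ten descriptors are ready */
		ready[i] = 1;

	printf("handled %d packets\n", poll_ring(64, ready));	/* 10 */
	return 0;
}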
diff --git a/drivers/net/ethernet/rocker/Kconfig b/drivers/net/ethernet/rocker/Kconfig
index 99e1290e0307..2318811ff75a 100644
--- a/drivers/net/ethernet/rocker/Kconfig
+++ b/drivers/net/ethernet/rocker/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_ROCKER
config ROCKER
tristate "Rocker switch driver (EXPERIMENTAL)"
depends on PCI && NET_SWITCHDEV && BRIDGE
+ select CRC32
help
This driver supports the Rocker switch device.
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index efef5476a577..223f69da7e95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -246,13 +246,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
goto err_parse_dt;
}
- ret = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(dwmac->ops->addr_width));
- if (ret) {
- dev_err(&pdev->dev, "DMA mask set failed\n");
- goto err_dma_mask;
- }
-
+ plat_dat->addr64 = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
@@ -272,7 +266,6 @@ static int imx_dwmac_probe(struct platform_device *pdev)
err_dwmac_init:
err_drv_probe:
imx_dwmac_exit(pdev, plat_dat->bsp_priv);
-err_dma_mask:
err_parse_dt:
err_match_data:
stmmac_remove_config_dt(pdev, plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index dc0b8b6d180d..459ae715b33d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -30,7 +30,6 @@
#define PRG_ETH0_EXT_RMII_MODE 4
/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
-#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
/* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns are exactly one
@@ -155,8 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
return -ENOMEM;
clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
- clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
- clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
+ clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK);
+ clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >>
+ clk_configs->m250_mux.shift;
clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parents,
ARRAY_SIZE(mux_parents), &clk_mux_ops,
&clk_configs->m250_mux.hw);
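The meson8b change above derives the mux shift from the mask with __ffs() and stores the mask relative to that shift, as the common clock framework expects, instead of keeping a separate *_SHIFT define that can drift out of sync with the mask. A minimal standalone sketch of that arithmetic, with __builtin_ctzl() standing in for the kernel's __ffs() and assuming a 64-bit unsigned long:

/* Standalone sketch (not kernel code): deriving a shift and a shift-relative
 * mask from a single absolute bitmask.
 */
#include <stdio.h>

#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

int main(void)
{
	unsigned long sel_mask = GENMASK(4, 4);			/* absolute mask, bit 4 */
	unsigned int shift = __builtin_ctzl(sel_mask);		/* 4, like __ffs() */
	unsigned long rel_mask = sel_mask >> shift;		/* 0x1, relative to shift */
	unsigned long reg = 0;

	/* set the selector the way a clk mux would: (value & rel_mask) << shift */
	reg |= (1UL & rel_mask) << shift;

	printf("shift=%u rel_mask=%#lx reg=%#lx\n", shift, rel_mask, reg);
	return 0;
}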
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 6e30d7eb4983..0b4ee2dbb691 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -22,7 +22,7 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
!(value & DMA_BUS_MODE_SFT_RESET),
- 10000, 100000);
+ 10000, 1000000);
}
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 67ba67ed0cb9..03e79a677c8b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -305,17 +305,13 @@ int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
static int dwmac5_rxp_disable(void __iomem *ioaddr)
{
u32 val;
- int ret;
val = readl(ioaddr + MTL_OPERATION_MODE);
val &= ~MTL_FRPE;
writel(val, ioaddr + MTL_OPERATION_MODE);
- ret = readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val,
+ return readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val,
val & RXPI, 1, 10000);
- if (ret)
- return ret;
- return 0;
}
static void dwmac5_rxp_enable(void __iomem *ioaddr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8c1ac75901ce..5b1c12ff98c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1558,6 +1558,19 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
}
/**
+ * stmmac_free_tx_skbufs - free TX skb buffers
+ * @priv: private structure
+ */
+static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
+{
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ dma_free_tx_skbufs(priv, queue);
+}
+
+/**
* free_dma_rx_desc_resources - free RX dma desc resources
* @priv: private structure
*/
@@ -2925,9 +2938,6 @@ static int stmmac_release(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
- if (priv->eee_enabled)
- del_timer_sync(&priv->eee_ctrl_timer);
-
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
@@ -2946,6 +2956,11 @@ static int stmmac_release(struct net_device *dev)
if (priv->lpi_irq > 0)
free_irq(priv->lpi_irq, dev);
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
/* Stop TX/RX DMA and clear the descriptors */
stmmac_stop_all_dma(priv);
@@ -4960,6 +4975,14 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "SPH feature enabled\n");
}
+ /* The current IP register MAC_HW_Feature1[ADDR64] only defines
+ * 32/40/64-bit widths, but some SoCs support other widths; the
+ * i.MX8MP, for example, supports 34 bits, which is reported as 40
+ * bits in MAC_HW_Feature1[ADDR64]. So overwrite dma_cap.addr64
+ * according to the real HW design.
+ */
+ if (priv->plat->addr64)
+ priv->dma_cap.addr64 = priv->plat->addr64;
+
if (priv->dma_cap.addr64) {
ret = dma_set_mask_and_coherent(device,
DMA_BIT_MASK(priv->dma_cap.addr64));
@@ -5172,6 +5195,11 @@ int stmmac_suspend(struct device *dev)
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
/* Stop TX/RX DMA */
stmmac_stop_all_dma(priv);
@@ -5277,11 +5305,20 @@ int stmmac_resume(struct device *dev)
return ret;
}
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ rtnl_lock();
+ phylink_start(priv->phylink);
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
+ rtnl_unlock();
+ }
+
rtnl_lock();
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
+ stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv);
stmmac_hw_setup(ndev, false);
@@ -5295,14 +5332,6 @@ int stmmac_resume(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_unlock();
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
- rtnl_lock();
- phylink_start(priv->phylink);
- /* We may have called phylink_speed_down before */
- phylink_speed_up(priv->phylink);
- rtnl_unlock();
- }
-
phylink_mac_change(priv->phylink, true);
netif_device_attach(ndev);
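With plat->addr64 now able to override dma_cap.addr64 (see the i.MX comment above), the DMA mask is still built the same way, via dma_set_mask_and_coherent(device, DMA_BIT_MASK(addr64)). A minimal standalone sketch of that mask arithmetic; the macro below is written to mirror the kernel's DMA_BIT_MASK(), so treat it as an assumption rather than the authoritative header:

/* Standalone sketch: the bit-mask arithmetic behind DMA_BIT_MASK(n). */
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("32-bit: %#llx\n", DMA_BIT_MASK(32));	/* 0xffffffff */
	printf("40-bit: %#llx\n", DMA_BIT_MASK(40));	/* 0xffffffffff */
	printf("64-bit: %#llx\n", DMA_BIT_MASK(64));	/* 0xffffffffffffffff */
	return 0;
}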
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index b2a707e2ef43..d64116e0543e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -365,6 +365,9 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->name = "stmmac";
+ if (priv->plat->has_gmac4)
+ new_bus->probe_capabilities = MDIOBUS_C22_C45;
+
if (priv->plat->has_xgmac) {
new_bus->read = &stmmac_xgmac2_mdio_read;
new_bus->write = &stmmac_xgmac2_mdio_write;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index cc27d660a818..f5bed4d26e80 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -209,17 +209,11 @@ err_unfill:
static int tc_delete_knode(struct stmmac_priv *priv,
struct tc_cls_u32_offload *cls)
{
- int ret;
-
/* Set entry and fragments as not used */
tc_unfill_entry(priv, cls);
- ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
- priv->tc_entries_max);
- if (ret)
- return ret;
-
- return 0;
+ return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
+ priv->tc_entries_max);
}
static int tc_setup_cls_u32(struct stmmac_priv *priv,
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 6dd73bd0f458..99f44563e10f 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1265,9 +1265,6 @@ static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
if (!priv->xdpi.prog && !prog)
return 0;
- if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
- return -EBUSY;
-
WRITE_ONCE(priv->xdp_prog, prog);
xdp_attachment_setup(&priv->xdpi, bpf);
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 702fdc393da0..cfff3d48807a 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -381,9 +381,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
}
data->bus->name = dev_name(dev);
- data->bus->read = davinci_mdio_read,
- data->bus->write = davinci_mdio_write,
- data->bus->reset = davinci_mdio_reset,
+ data->bus->read = davinci_mdio_read;
+ data->bus->write = davinci_mdio_write;
+ data->bus->reset = davinci_mdio_reset;
data->bus->parent = dev;
data->bus->priv = data;
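The ks8851 and davinci_mdio hunks above replace stray trailing commas with semicolons. Behaviour does not change, because the comma operator still evaluates the assignments left to right, but the chained form silently fuses several lines into one statement, which is fragile under later edits. A minimal standalone sketch of why the comma version still "works":

/* Standalone sketch (not driver code): comma operator vs. semicolons. */
#include <stdio.h>

struct bus { int read, write, reset; };

int main(void)
{
	struct bus b;

	b.read = 1,	/* comma operator: still assigns ...            */
	b.write = 2,	/* ... but all three lines are one statement    */
	b.reset = 3;

	printf("%d %d %d\n", b.read, b.write, b.reset);	/* 1 2 3 */
	return 0;
}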
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 60c199fcb91e..030185301014 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1351,7 +1351,6 @@ static int temac_probe(struct platform_device *pdev)
struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
struct temac_local *lp;
struct net_device *ndev;
- struct resource *res;
const void *addr;
__be32 *p;
bool little_endian;
@@ -1500,13 +1499,11 @@ static int temac_probe(struct platform_device *pdev)
of_node_put(dma_np);
} else if (pdata) {
/* 2nd memory resource specifies DMA registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!lp->sdma_regs) {
+ lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(lp->sdma_regs)) {
dev_err(&pdev->dev,
"could not map DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(lp->sdma_regs);
}
if (pdata->dma_little_endian) {
lp->dma_in = temac_dma_in32_le;