Diffstat (limited to 'drivers/net/ethernet/qlogic')
38 files changed, 6037 insertions, 1418 deletions
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 32f2a45f4ab2..3cfd10503446 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -110,4 +110,7 @@ config QEDE config QED_RDMA bool +config QED_ISCSI + bool + endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 2b10f1bcd151..a996801d442d 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -987,20 +987,8 @@ int netxen_send_lro_cleanup(struct netxen_adapter *adapter) int netxen_nic_change_mtu(struct net_device *netdev, int mtu) { struct netxen_adapter *adapter = netdev_priv(netdev); - int max_mtu; int rc = 0; - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - max_mtu = P3_MAX_MTU; - else - max_mtu = P2_MAX_MTU; - - if (mtu > max_mtu) { - printk(KERN_ERR "%s: mtu > %d bytes unsupported\n", - netdev->name, max_mtu); - return -EINVAL; - } - if (adapter->set_mtu) rc = adapter->set_mtu(adapter, mtu); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 7a0281a36c28..561fb94c7267 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1572,6 +1572,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->physical_port = i; } + /* MTU range: 0 - 8000 (P2) or 9600 (P3) */ + netdev->min_mtu = 0; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netdev->max_mtu = P3_MAX_MTU; + else + netdev->max_mtu = P2_MAX_MTU; + netxen_nic_clear_stats(adapter); err = netxen_setup_intr(adapter); diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 967acf322c09..729e43768e99 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -6,3 +6,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_LL2) += qed_ll2.o qed-$(CONFIG_QED_RDMA) += qed_roce.o +qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 653bb5735f0c..44c184ebe3b0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -35,6 +35,7 @@ extern const struct qed_common_ops qed_common_ops_pass; #define QED_WFQ_UNIT 100 +#define ISCSI_BDQ_ID(_port_id) (_port_id) #define QED_WID_SIZE (1024) #define QED_PF_DEMS_SIZE (4) @@ -154,7 +155,10 @@ struct qed_qm_iids { u32 tids; }; -enum QED_RESOURCES { +/* HW / FW resources, output of features supported below, most information + * is received from MFW. 
+ */ +enum qed_resources { QED_SB, QED_L2_QUEUE, QED_VPORT, @@ -166,6 +170,7 @@ enum QED_RESOURCES { QED_RDMA_CNQ_RAM, QED_ILT, QED_LL2_QUEUE, + QED_CMDQS_CQS, QED_RDMA_STATS_QUEUE, QED_MAX_RESC, }; @@ -174,6 +179,7 @@ enum QED_FEATURE { QED_PF_L2_QUE, QED_VF, QED_RDMA_CNQ, + QED_VF_L2_QUE, QED_MAX_FEATURES, }; @@ -195,6 +201,11 @@ enum qed_dev_cap { QED_DEV_CAP_ROCE, }; +enum qed_wol_support { + QED_WOL_SUPPORT_NONE, + QED_WOL_SUPPORT_PME, +}; + struct qed_hw_info { /* PCI personality */ enum qed_pci_personality personality; @@ -226,15 +237,9 @@ struct qed_hw_info { u32 port_mode; u32 hw_mode; unsigned long device_capabilities; -}; + u16 mtu; -struct qed_hw_cid_data { - u32 cid; - bool b_cid_allocated; - - /* Additional identifiers */ - u16 opaque_fid; - u8 vport_id; + enum qed_wol_support b_wol_support; }; /* maximun size of read/write commands (HW limit) */ @@ -378,7 +383,9 @@ struct qed_hwfn { /* Protocol related */ bool using_ll2; struct qed_ll2_info *p_ll2_info; + struct qed_ooo_info *p_ooo_info; struct qed_rdma_info *p_rdma_info; + struct qed_iscsi_info *p_iscsi_info; struct qed_pf_params pf_params; bool b_rdma_enabled_in_prs; @@ -403,9 +410,6 @@ struct qed_hwfn { struct qed_dcbx_info *p_dcbx_info; - struct qed_hw_cid_data *p_tx_cids; - struct qed_hw_cid_data *p_rx_cids; - struct qed_dmae_info dmae_info; /* QM init */ @@ -538,7 +542,9 @@ struct qed_dev { u8 mcp_rev; u8 boot_mode; - u8 wol; + /* WoL related configurations */ + u8 wol_config; + u8 wol_mac[ETH_ALEN]; u32 int_mode; enum qed_coalescing_mode int_coalescing_mode; @@ -578,6 +584,8 @@ struct qed_dev { /* Linux specific here */ struct qede_dev *edev; struct pci_dev *pdev; + u32 flags; +#define QED_FLAG_STORAGE_STARTED (BIT(0)) int msg_enable; struct pci_params pci_params; @@ -591,6 +599,7 @@ struct qed_dev { union { struct qed_common_cb_ops *common; struct qed_eth_cb_ops *eth; + struct qed_iscsi_cb_ops *iscsi; } protocol_ops; void *ops_cookie; @@ -600,7 +609,7 @@ struct qed_dev { struct qed_cb_ll2_info *ll2; u8 ll2_mac_address[ETH_ALEN]; #endif - + DECLARE_HASHTABLE(connections, 10); const struct firmware *firmware; u32 rdma_max_sge; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index edae5fc5fccd..3b2250021c5f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -29,8 +29,10 @@ #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" +#include "qed_iscsi.h" #include "qed_ll2.h" #include "qed_mcp.h" +#include "qed_ooo.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" @@ -137,15 +139,6 @@ void qed_resc_free(struct qed_dev *cdev) for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - kfree(p_hwfn->p_tx_cids); - p_hwfn->p_tx_cids = NULL; - kfree(p_hwfn->p_rx_cids); - p_hwfn->p_rx_cids = NULL; - } - - for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - qed_cxt_mngr_free(p_hwfn); qed_qm_info_free(p_hwfn); qed_spq_free(p_hwfn); @@ -155,6 +148,10 @@ void qed_resc_free(struct qed_dev *cdev) #ifdef CONFIG_QED_LL2 qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info); #endif + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { + qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info); + qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info); + } qed_iov_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info); @@ -411,6 +408,8 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) int qed_resc_alloc(struct qed_dev *cdev) { + struct qed_iscsi_info 
*p_iscsi_info; + struct qed_ooo_info *p_ooo_info; #ifdef CONFIG_QED_LL2 struct qed_ll2_info *p_ll2_info; #endif @@ -425,23 +424,6 @@ int qed_resc_alloc(struct qed_dev *cdev) if (!cdev->fw_data) return -ENOMEM; - /* Allocate Memory for the Queue->CID mapping */ - for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - int tx_size = sizeof(struct qed_hw_cid_data) * - RESC_NUM(p_hwfn, QED_L2_QUEUE); - int rx_size = sizeof(struct qed_hw_cid_data) * - RESC_NUM(p_hwfn, QED_L2_QUEUE); - - p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL); - if (!p_hwfn->p_tx_cids) - goto alloc_no_mem; - - p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL); - if (!p_hwfn->p_rx_cids) - goto alloc_no_mem; - } - for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; u32 n_eqes, num_cons; @@ -533,6 +515,16 @@ int qed_resc_alloc(struct qed_dev *cdev) p_hwfn->p_ll2_info = p_ll2_info; } #endif + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { + p_iscsi_info = qed_iscsi_alloc(p_hwfn); + if (!p_iscsi_info) + goto alloc_no_mem; + p_hwfn->p_iscsi_info = p_iscsi_info; + p_ooo_info = qed_ooo_alloc(p_hwfn); + if (!p_ooo_info) + goto alloc_no_mem; + p_hwfn->p_ooo_info = p_ooo_info; + } /* DMA info initialization */ rc = qed_dmae_info_alloc(p_hwfn); @@ -586,6 +578,10 @@ void qed_resc_setup(struct qed_dev *cdev) if (p_hwfn->using_ll2) qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info); #endif + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { + qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info); + qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info); + } } } @@ -1057,8 +1053,10 @@ int qed_hw_init(struct qed_dev *cdev, bool allow_npar_tx_switch, const u8 *bin_fw_data) { - u32 load_code, param; - int rc, mfw_rc, i; + u32 load_code, param, drv_mb_param; + bool b_default_mtu = true; + struct qed_hwfn *p_hwfn; + int rc = 0, mfw_rc, i; if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); @@ -1074,6 +1072,12 @@ int qed_hw_init(struct qed_dev *cdev, for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + /* If management didn't provide a default, set one of our own */ + if (!p_hwfn->hw_info.mtu) { + p_hwfn->hw_info.mtu = 1500; + b_default_mtu = false; + } + if (IS_VF(cdev)) { p_hwfn->b_int_enabled = 1; continue; @@ -1157,6 +1161,38 @@ int qed_hw_init(struct qed_dev *cdev, p_hwfn->hw_init_done = true; } + if (IS_PF(cdev)) { + p_hwfn = QED_LEADING_HWFN(cdev); + drv_mb_param = (FW_MAJOR_VERSION << 24) | + (FW_MINOR_VERSION << 16) | + (FW_REVISION_VERSION << 8) | + (FW_ENGINEERING_VERSION); + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, + drv_mb_param, &load_code, ¶m); + if (rc) + DP_INFO(p_hwfn, "Failed to update firmware version\n"); + + if (!b_default_mtu) { + rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->hw_info.mtu); + if (rc) + DP_INFO(p_hwfn, + "Failed to update default mtu\n"); + } + + rc = qed_mcp_ov_update_driver_state(p_hwfn, + p_hwfn->p_main_ptt, + QED_OV_DRIVER_STATE_DISABLED); + if (rc) + DP_INFO(p_hwfn, "Failed to update driver state\n"); + + rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, + QED_OV_ESWITCH_VEB); + if (rc) + DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); + } + return 0; } @@ -1324,8 +1360,24 @@ int qed_hw_reset(struct qed_dev *cdev) { int rc = 0; u32 unload_resp, unload_param; + u32 wol_param; int i; + switch (cdev->wol_config) { + case QED_OV_WOL_DISABLED: + wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED; + break; + case QED_OV_WOL_ENABLED: + 
wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED; + break; + default: + DP_NOTICE(cdev, + "Unknown WoL configuration %02x\n", cdev->wol_config); + /* Fallthrough */ + case QED_OV_WOL_DEFAULT: + wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; + } + for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -1354,8 +1406,7 @@ int qed_hw_reset(struct qed_dev *cdev) /* Send unload command to MCP */ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, - DRV_MSG_CODE_UNLOAD_REQ, - DRV_MB_PARAM_UNLOAD_WOL_MCP, + DRV_MSG_CODE_UNLOAD_REQ, wol_param, &unload_resp, &unload_param); if (rc) { DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n"); @@ -1421,6 +1472,7 @@ static void get_function_id(struct qed_hwfn *p_hwfn) static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) { u32 *feat_num = p_hwfn->hw_info.feat_num; + struct qed_sb_cnt_info sb_cnt_info; int num_features = 1; if (IS_ENABLED(CONFIG_QED_RDMA) && @@ -1439,53 +1491,257 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features, RESC_NUM(p_hwfn, QED_L2_QUEUE)); - DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, - "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n", - feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB), - num_features); + + memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); + feat_num[QED_VF_L2_QUE] = + min_t(u32, + RESC_NUM(p_hwfn, QED_L2_QUEUE) - + FEAT_NUM(p_hwfn, QED_PF_L2_QUE), sb_cnt_info.sb_iov_cnt); + + DP_VERBOSE(p_hwfn, + NETIF_MSG_PROBE, + "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n", + (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE), + (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), + (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), + RESC_NUM(p_hwfn, QED_SB), num_features); } -static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) +static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id) +{ + enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; + + switch (res_id) { + case QED_SB: + mfw_res_id = RESOURCE_NUM_SB_E; + break; + case QED_L2_QUEUE: + mfw_res_id = RESOURCE_NUM_L2_QUEUE_E; + break; + case QED_VPORT: + mfw_res_id = RESOURCE_NUM_VPORT_E; + break; + case QED_RSS_ENG: + mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E; + break; + case QED_PQ: + mfw_res_id = RESOURCE_NUM_PQ_E; + break; + case QED_RL: + mfw_res_id = RESOURCE_NUM_RL_E; + break; + case QED_MAC: + case QED_VLAN: + /* Each VFC resource can accommodate both a MAC and a VLAN */ + mfw_res_id = RESOURCE_VFC_FILTER_E; + break; + case QED_ILT: + mfw_res_id = RESOURCE_ILT_E; + break; + case QED_LL2_QUEUE: + mfw_res_id = RESOURCE_LL2_QUEUE_E; + break; + case QED_RDMA_CNQ_RAM: + case QED_CMDQS_CQS: + /* CNQ/CMDQS are the same resource */ + mfw_res_id = RESOURCE_CQS_E; + break; + case QED_RDMA_STATS_QUEUE: + mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E; + break; + default: + break; + } + + return mfw_res_id; +} + +static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn, + enum qed_resources res_id) { - u8 enabled_func_idx = p_hwfn->enabled_func_idx; - u32 *resc_start = p_hwfn->hw_info.resc_start; u8 num_funcs = p_hwfn->num_funcs_on_engine; - u32 *resc_num = p_hwfn->hw_info.resc_num; struct qed_sb_cnt_info sb_cnt_info; - int i, max_vf_vlan_filters; + u32 dflt_resc_num = 0; - memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + switch (res_id) { + case QED_SB: + memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); + dflt_resc_num = sb_cnt_info.sb_cnt; + break; + case QED_L2_QUEUE: + dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs; + 
break; + case QED_VPORT: + dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs; + break; + case QED_RSS_ENG: + dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs; + break; + case QED_PQ: + /* The granularity of the PQs is 8 */ + dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs; + dflt_resc_num &= ~0x7; + break; + case QED_RL: + dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; + break; + case QED_MAC: + case QED_VLAN: + /* Each VFC resource can accommodate both a MAC and a VLAN */ + dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; + break; + case QED_ILT: + dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs; + break; + case QED_LL2_QUEUE: + dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; + break; + case QED_RDMA_CNQ_RAM: + case QED_CMDQS_CQS: + /* CNQ/CMDQS are the same resource */ + dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs; + break; + case QED_RDMA_STATS_QUEUE: + dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs; + break; + default: + break; + } -#ifdef CONFIG_QED_SRIOV - max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS; -#else - max_vf_vlan_filters = 0; -#endif + return dflt_resc_num; +} + +static const char *qed_hw_get_resc_name(enum qed_resources res_id) +{ + switch (res_id) { + case QED_SB: + return "SB"; + case QED_L2_QUEUE: + return "L2_QUEUE"; + case QED_VPORT: + return "VPORT"; + case QED_RSS_ENG: + return "RSS_ENG"; + case QED_PQ: + return "PQ"; + case QED_RL: + return "RL"; + case QED_MAC: + return "MAC"; + case QED_VLAN: + return "VLAN"; + case QED_RDMA_CNQ_RAM: + return "RDMA_CNQ_RAM"; + case QED_ILT: + return "ILT"; + case QED_LL2_QUEUE: + return "LL2_QUEUE"; + case QED_CMDQS_CQS: + return "CMDQS_CQS"; + case QED_RDMA_STATS_QUEUE: + return "RDMA_STATS_QUEUE"; + default: + return "UNKNOWN_RESOURCE"; + } +} - qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); +static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, + enum qed_resources res_id) +{ + u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param; + u32 *p_resc_num, *p_resc_start; + struct resource_info resc_info; + int rc; + + p_resc_num = &RESC_NUM(p_hwfn, res_id); + p_resc_start = &RESC_START(p_hwfn, res_id); + + /* Default values assumes that each function received equal share */ + dflt_resc_num = qed_hw_get_dflt_resc_num(p_hwfn, res_id); + if (!dflt_resc_num) { + DP_ERR(p_hwfn, + "Failed to get default amount for resource %d [%s]\n", + res_id, qed_hw_get_resc_name(res_id)); + return -EINVAL; + } + dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx; + + memset(&resc_info, 0, sizeof(resc_info)); + resc_info.res_id = qed_hw_get_mfw_res_id(res_id); + if (resc_info.res_id == RESOURCE_NUM_INVALID) { + DP_ERR(p_hwfn, + "Failed to match resource %d [%s] with the MFW resources\n", + res_id, qed_hw_get_resc_name(res_id)); + return -EINVAL; + } + + rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info, + &mcp_resp, &mcp_param); + if (rc) { + DP_NOTICE(p_hwfn, + "MFW response failure for an allocation request for resource %d [%s]\n", + res_id, qed_hw_get_resc_name(res_id)); + return rc; + } - resc_num[QED_SB] = min_t(u32, - (MAX_SB_PER_PATH_BB / num_funcs), - sb_cnt_info.sb_cnt); - resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs; - resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs; - resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs; - resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs; - resc_num[QED_RL] = min_t(u32, 64, resc_num[QED_VPORT]); - resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs; - resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) / - num_funcs; - 
resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs; - resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs; - resc_num[QED_RDMA_CNQ_RAM] = NUM_OF_CMDQS_CQS / num_funcs; - resc_num[QED_RDMA_STATS_QUEUE] = RDMA_NUM_STATISTIC_COUNTERS_BB / - num_funcs; - - for (i = 0; i < QED_MAX_RESC; i++) - resc_start[i] = resc_num[i] * enabled_func_idx; + /* Default driver values are applied in the following cases: + * - The resource allocation MB command is not supported by the MFW + * - There is an internal error in the MFW while processing the request + * - The resource ID is unknown to the MFW + */ + if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK && + mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) { + DP_NOTICE(p_hwfn, + "Resource %d [%s]: No allocation info was received [mcp_resp 0x%x]. Applying default values [num %d, start %d].\n", + res_id, + qed_hw_get_resc_name(res_id), + mcp_resp, dflt_resc_num, dflt_resc_start); + *p_resc_num = dflt_resc_num; + *p_resc_start = dflt_resc_start; + goto out; + } + + /* Special handling for status blocks; Would be revised in future */ + if (res_id == QED_SB) { + resc_info.size -= 1; + resc_info.offset -= p_hwfn->enabled_func_idx; + } + + *p_resc_num = resc_info.size; + *p_resc_start = resc_info.offset; + +out: + /* PQs have to divide by 8 [that's the HW granularity]. + * Reduce number so it would fit. + */ + if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) { + DP_INFO(p_hwfn, + "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n", + *p_resc_num, + (*p_resc_num) & ~0x7, + *p_resc_start, (*p_resc_start) & ~0x7); + *p_resc_num &= ~0x7; + *p_resc_start &= ~0x7; + } + + return 0; +} + +static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) +{ + u8 res_id; + int rc; + + for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { + rc = qed_hw_set_resc_info(p_hwfn, res_id); + if (rc) + return rc; + } /* Sanity for ILT */ - if (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB) { + if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) { DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n", RESC_START(p_hwfn, QED_ILT), RESC_END(p_hwfn, QED_ILT) - 1); @@ -1495,34 +1751,12 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) qed_hw_set_feat(p_hwfn); DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, - "The numbers for each resource are:\n" - "SB = %d start = %d\n" - "L2_QUEUE = %d start = %d\n" - "VPORT = %d start = %d\n" - "PQ = %d start = %d\n" - "RL = %d start = %d\n" - "MAC = %d start = %d\n" - "VLAN = %d start = %d\n" - "ILT = %d start = %d\n" - "LL2_QUEUE = %d start = %d\n", - p_hwfn->hw_info.resc_num[QED_SB], - p_hwfn->hw_info.resc_start[QED_SB], - p_hwfn->hw_info.resc_num[QED_L2_QUEUE], - p_hwfn->hw_info.resc_start[QED_L2_QUEUE], - p_hwfn->hw_info.resc_num[QED_VPORT], - p_hwfn->hw_info.resc_start[QED_VPORT], - p_hwfn->hw_info.resc_num[QED_PQ], - p_hwfn->hw_info.resc_start[QED_PQ], - p_hwfn->hw_info.resc_num[QED_RL], - p_hwfn->hw_info.resc_start[QED_RL], - p_hwfn->hw_info.resc_num[QED_MAC], - p_hwfn->hw_info.resc_start[QED_MAC], - p_hwfn->hw_info.resc_num[QED_VLAN], - p_hwfn->hw_info.resc_start[QED_VLAN], - p_hwfn->hw_info.resc_num[QED_ILT], - p_hwfn->hw_info.resc_start[QED_ILT], - RESC_NUM(p_hwfn, QED_LL2_QUEUE), - RESC_START(p_hwfn, QED_LL2_QUEUE)); + "The numbers for each resource are:\n"); + for (res_id = 0; res_id < QED_MAX_RESC; res_id++) + DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n", + qed_hw_get_resc_name(res_id), + RESC_NUM(p_hwfn, res_id), + RESC_START(p_hwfn, res_id)); return 0; } @@ -1801,6 +2035,9 @@ 
qed_get_hw_info(struct qed_hwfn *p_hwfn, qed_get_num_funcs(p_hwfn, p_ptt); + if (qed_mcp_is_init(p_hwfn)) + p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; + return qed_hw_get_resc(p_hwfn); } @@ -1975,8 +2212,13 @@ int qed_hw_prepare(struct qed_dev *cdev, void qed_hw_remove(struct qed_dev *cdev) { + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); int i; + if (IS_PF(cdev)) + qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, + QED_OV_DRIVER_STATE_NOT_LOADED); + for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -2037,12 +2279,12 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) { void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; u32 page_cnt = p_chain->page_cnt, i, pbl_size; - u8 *p_pbl_virt = p_chain->pbl.p_virt_table; + u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table; if (!pp_virt_addr_tbl) return; - if (!p_chain->pbl.p_virt_table) + if (!p_pbl_virt) goto out; for (i = 0; i < page_cnt; i++) { @@ -2060,7 +2302,8 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; dma_free_coherent(&cdev->pdev->dev, pbl_size, - p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table); + p_chain->pbl_sp.p_virt_table, + p_chain->pbl_sp.p_phys_table); out: vfree(p_chain->pbl.pp_virt_addr_tbl); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 2777d5bb4380..785ab03683eb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -8526,6 +8526,41 @@ struct mdump_config_stc { u32 valid_logs; }; +enum resource_id_enum { + RESOURCE_NUM_SB_E = 0, + RESOURCE_NUM_L2_QUEUE_E = 1, + RESOURCE_NUM_VPORT_E = 2, + RESOURCE_NUM_VMQ_E = 3, + RESOURCE_FACTOR_NUM_RSS_PF_E = 4, + RESOURCE_FACTOR_RSS_PER_VF_E = 5, + RESOURCE_NUM_RL_E = 6, + RESOURCE_NUM_PQ_E = 7, + RESOURCE_NUM_VF_E = 8, + RESOURCE_VFC_FILTER_E = 9, + RESOURCE_ILT_E = 10, + RESOURCE_CQS_E = 11, + RESOURCE_GFT_PROFILES_E = 12, + RESOURCE_NUM_TC_E = 13, + RESOURCE_NUM_RSS_ENGINES_E = 14, + RESOURCE_LL2_QUEUE_E = 15, + RESOURCE_RDMA_STATS_QUEUE_E = 16, + RESOURCE_MAX_NUM, + RESOURCE_NUM_INVALID = 0xFFFFFFFF +}; + +/* Resource ID is to be filled by the driver in the MB request + * Size, offset & flags to be filled by the MFW in the MB response + */ +struct resource_info { + enum resource_id_enum res_id; + u32 size; /* number of allocated resources */ + u32 offset; /* Offset of the 1st resource */ + u32 vf_size; + u32 vf_offset; + u32 flags; +#define RESOURCE_ELEMENT_STRICT (1 << 0) +}; + union drv_union_data { u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; struct mcp_mac wol_mac; @@ -8543,9 +8578,9 @@ union drv_union_data { struct drv_version_stc drv_version; struct lan_stats_stc lan_stats; - u64 reserved_stats[11]; struct ocbb_data_stc ocbb_info; struct temperature_status_stc temp_info; + struct resource_info resource; struct bist_nvm_image_att nvm_image_att; struct mdump_config_stc mdump_config; }; @@ -8561,9 +8596,19 @@ struct public_drv_mb { #define DRV_MSG_CODE_INIT_PHY 0x22000000 #define DRV_MSG_CODE_LINK_RESET 0x23000000 #define DRV_MSG_CODE_SET_DCBX 0x25000000 +#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000 +#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000 +#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000 +#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000 +#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 +#define DRV_MSG_CODE_OV_UPDATE_MTU 
0x33000000 +#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 +#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 +#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 #define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 #define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 @@ -8571,6 +8616,13 @@ struct public_drv_mb { #define DRV_MSG_CODE_MCP_RESET 0x00090000 #define DRV_MSG_CODE_SET_VERSION 0x000f0000 #define DRV_MSG_CODE_MCP_HALT 0x00100000 +#define DRV_MSG_CODE_SET_VMAC 0x00110000 +#define DRV_MSG_CODE_GET_VMAC 0x00120000 +#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4 +#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30 +#define DRV_MSG_CODE_VMAC_TYPE_MAC 1 +#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2 +#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3 #define DRV_MSG_CODE_GET_STATS 0x00130000 #define DRV_MSG_CODE_STATS_TYPE_LAN 1 @@ -8582,11 +8634,16 @@ struct public_drv_mb { #define DRV_MSG_CODE_BIST_TEST 0x001e0000 #define DRV_MSG_CODE_SET_LED_MODE 0x00200000 +#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000 +#define DRV_MSG_CODE_OS_WOL 0x002e0000 #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 drv_mb_param; -#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 +#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 +#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 +#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 +#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 #define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF #define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 @@ -8599,13 +8656,59 @@ struct public_drv_mb { #define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 #define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 +#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0 +#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F +#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0 +#define DRV_MB_PARAM_OV_CURR_CFG_OS 1 +#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2 +#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3 + +#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF +#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00 +#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF + +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5 + +#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0 +#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF + +#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \ + DRV_MB_PARAM_WOL_DISABLED | \ + DRV_MB_PARAM_WOL_ENABLED) +#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP +#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED +#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED + +#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \ + DRV_MB_PARAM_ESWITCH_MODE_VEB | \ + DRV_MB_PARAM_ESWITCH_MODE_VEPA) +#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0 +#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 +#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 #define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 + /* Resource Allocation params - Driver 
version support */ +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 + #define DRV_MB_PARAM_BIST_REGISTER_TEST 1 #define DRV_MB_PARAM_BIST_CLOCK_TEST 2 +#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3 +#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4 #define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 #define DRV_MB_PARAM_BIST_RC_PASSED 1 @@ -8614,6 +8717,8 @@ struct public_drv_mb { #define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 #define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00 u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 @@ -8628,15 +8733,27 @@ struct public_drv_mb { #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000 #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 +#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000 +#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000 +#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000 #define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 #define FW_MSG_CODE_NVM_OK 0x00010000 #define FW_MSG_CODE_OK 0x00160000 +#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000 +#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000 + #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 fw_mb_param; + /* get pf rdma protocol command responce */ +#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 +#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 +#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 +#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 + u32 drv_pulse_mb; #define DRV_PULSE_SEQ_MASK 0x00007fff #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 2adedc6fb6cf..c68dbf7092b1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1377,7 +1377,7 @@ static const char *attn_master_to_str(u8 master) case 9: return "DBU"; case 10: return "DMAE"; default: - return "Unkown"; + return "Unknown"; } } @@ -1555,7 +1555,7 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) DORQ_REG_DB_DROP_DETAILS); DP_INFO(p_hwfn->cdev, - "DORQ db_drop: adress 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n", + "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n", qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_DETAILS_ADDRESS), (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK), @@ -3030,6 +3030,31 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) } } } + + /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect + * the number of VF SBs [especially for first VF on engine, as we can't + * diffrentiate between empty entries and its entries]. + * Since we don't really support more SBs than VFs today, prevent any + * such configuration by sanitizing the number of SBs to equal the + * number of VFs. 
+ */ + if (IS_PF_SRIOV(p_hwfn)) { + u16 total_vfs = p_hwfn->cdev->p_iov_info->total_vfs; + + if (total_vfs < p_igu_info->free_blks) { + DP_VERBOSE(p_hwfn, + (NETIF_MSG_INTR | QED_MSG_IOV), + "Limiting number of SBs for IOV - %04x --> %04x\n", + p_igu_info->free_blks, + p_hwfn->cdev->p_iov_info->total_vfs); + p_igu_info->free_blks = total_vfs; + } else if (total_vfs > p_igu_info->free_blks) { + DP_NOTICE(p_hwfn, + "IGU has only %04x SBs for VFs while the device has %04x VFs\n", + p_igu_info->free_blks, total_vfs); + return -EINVAL; + } + } p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks; DP_VERBOSE( @@ -3163,7 +3188,12 @@ u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) return sb_id - p_info->igu_base_sb; } else if ((sb_id >= p_info->igu_base_sb_iov) && (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) { - return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt; + /* We want the first VF queue to be adjacent to the + * last PF queue. Since L2 queues can be partial to + * SBs, we'll use the feature instead. + */ + return sb_id - p_info->igu_base_sb_iov + + FEAT_NUM(p_hwfn, QED_PF_L2_QUE); } else { DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id); return 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c new file mode 100644 index 000000000000..17a70122df05 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -0,0 +1,1277 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <asm/param.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/log2.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/string.h> +#include <linux/version.h> +#include <linux/workqueue.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/qed/qed_iscsi_if.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_iscsi.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_sp.h" +#include "qed_sriov.h" +#include "qed_reg_addr.h" + +struct qed_iscsi_conn { + struct list_head list_entry; + bool free_on_delete; + + u16 conn_id; + u32 icid; + u32 fw_cid; + + u8 layer_code; + u8 offl_flags; + u8 connect_mode; + u32 initial_ack; + dma_addr_t sq_pbl_addr; + struct qed_chain r2tq; + struct qed_chain xhq; + struct qed_chain uhq; + + struct tcp_upload_params *tcp_upload_params_virt_addr; + dma_addr_t tcp_upload_params_phys_addr; + struct scsi_terminate_extra_params *queue_cnts_virt_addr; + dma_addr_t queue_cnts_phys_addr; + dma_addr_t syn_phy_addr; + + u16 syn_ip_payload_length; + u8 local_mac[6]; + u8 remote_mac[6]; + u16 vlan_id; + u8 tcp_flags; + u8 ip_version; + u32 remote_ip[4]; + u32 local_ip[4]; + u8 ka_max_probe_cnt; + u8 dup_ack_theshold; + u32 rcv_next; + u32 snd_una; + u32 snd_next; + u32 snd_max; + u32 snd_wnd; + u32 rcv_wnd; + u32 snd_wl1; + u32 cwnd; + u32 ss_thresh; + u16 srtt; + u16 rtt_var; + u32 ts_time; + u32 ts_recent; + u32 ts_recent_age; + u32 total_rt; + u32 ka_timeout_delta; + u32 rt_timeout_delta; + 
u8 dup_ack_cnt; + u8 snd_wnd_probe_cnt; + u8 ka_probe_cnt; + u8 rt_cnt; + u32 flow_label; + u32 ka_timeout; + u32 ka_interval; + u32 max_rt_time; + u32 initial_rcv_wnd; + u8 ttl; + u8 tos_or_tc; + u16 remote_port; + u16 local_port; + u16 mss; + u8 snd_wnd_scale; + u8 rcv_wnd_scale; + u32 ts_ticks_per_second; + u16 da_timeout_value; + u8 ack_frequency; + + u8 update_flag; + u8 default_cq; + u32 max_seq_size; + u32 max_recv_pdu_length; + u32 max_send_pdu_length; + u32 first_seq_length; + u32 exp_stat_sn; + u32 stat_sn; + u16 physical_q0; + u16 physical_q1; + u8 abortive_dsconnect; +}; + +static int +qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr, + void *event_context, iscsi_event_cb_t async_event_cb) +{ + struct iscsi_init_ramrod_params *p_ramrod = NULL; + struct scsi_init_func_queues *p_queue = NULL; + struct qed_iscsi_pf_params *p_params = NULL; + struct iscsi_spe_func_init *p_init = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = 0; + u32 dval; + u16 val; + u8 i; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_INIT_FUNC, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_init; + p_init = &p_ramrod->iscsi_init_spe; + p_params = &p_hwfn->pf_params.iscsi_pf_params; + p_queue = &p_init->q_params; + + SET_FIELD(p_init->hdr.flags, + ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE); + p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC; + + val = p_params->half_way_close_timeout; + p_init->half_way_close_timeout = cpu_to_le16(val); + p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring; + p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring; + p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring; + p_init->func_params.log_page_size = p_params->log_page_size; + val = p_params->num_tasks; + p_init->func_params.num_tasks = cpu_to_le16(val); + p_init->debug_mode.flags = p_params->debug_mode; + + DMA_REGPAIR_LE(p_queue->glbl_q_params_addr, + p_params->glbl_q_params_addr); + + val = p_params->cq_num_entries; + p_queue->cq_num_entries = cpu_to_le16(val); + val = p_params->cmdq_num_entries; + p_queue->cmdq_num_entries = cpu_to_le16(val); + p_queue->num_queues = p_params->num_queues; + dval = (u8)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS]; + p_queue->queue_relative_offset = (u8)dval; + p_queue->cq_sb_pi = p_params->gl_rq_pi; + p_queue->cmdq_sb_pi = p_params->gl_cmd_pi; + + for (i = 0; i < p_params->num_queues; i++) { + val = p_hwfn->sbs_info[i]->igu_sb_id; + p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val); + } + + p_queue->bdq_resource_id = ISCSI_BDQ_ID(p_hwfn->port_id); + + DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ], + p_params->bdq_pbl_base_addr[BDQ_ID_RQ]); + p_queue->bdq_pbl_num_entries[BDQ_ID_RQ] = + p_params->bdq_pbl_num_entries[BDQ_ID_RQ]; + val = p_params->bdq_xoff_threshold[BDQ_ID_RQ]; + p_queue->bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(val); + val = p_params->bdq_xon_threshold[BDQ_ID_RQ]; + p_queue->bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(val); + + DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_IMM_DATA], + p_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]); + p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA] = + 
p_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]; + val = p_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]; + p_queue->bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val); + val = p_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]; + p_queue->bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val); + val = p_params->rq_buffer_size; + p_queue->rq_buffer_size = cpu_to_le16(val); + if (p_params->is_target) { + SET_FIELD(p_queue->q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + if (p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]) + SET_FIELD(p_queue->q_validity, + SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1); + SET_FIELD(p_queue->q_validity, + SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1); + } else { + SET_FIELD(p_queue->q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + } + p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer); + val = p_params->tx_sws_timer; + p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val); + p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt; + + p_hwfn->p_iscsi_info->event_context = event_context; + p_hwfn->p_iscsi_info->event_cb = async_event_cb; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_spe_conn_offload *p_ramrod = NULL; + struct tcp_offload_params_opt2 *p_tcp2 = NULL; + struct tcp_offload_params *p_tcp = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + union qed_qm_pq_params pq_params; + u16 pq0_id = 0, pq1_id = 0; + dma_addr_t r2tq_pbl_addr; + dma_addr_t xhq_pbl_addr; + dma_addr_t uhq_pbl_addr; + int rc = 0; + u32 dval; + u16 wval; + u8 i; + u16 *p; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_conn_offload; + + /* Transmission PQ is the first of the PF */ + memset(&pq_params, 0, sizeof(pq_params)); + pq0_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params); + p_conn->physical_q0 = cpu_to_le16(pq0_id); + p_ramrod->iscsi.physical_q0 = cpu_to_le16(pq0_id); + + /* iSCSI Pure-ACK PQ */ + pq_params.iscsi.q_idx = 1; + pq1_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params); + p_conn->physical_q1 = cpu_to_le16(pq1_id); + p_ramrod->iscsi.physical_q1 = cpu_to_le16(pq1_id); + + p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN; + SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE, + p_conn->layer_code); + + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + p_ramrod->fw_cid = cpu_to_le32(p_conn->icid); + + DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr); + + r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq); + DMA_REGPAIR_LE(p_ramrod->iscsi.r2tq_pbl_addr, r2tq_pbl_addr); + + xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq); + DMA_REGPAIR_LE(p_ramrod->iscsi.xhq_pbl_addr, xhq_pbl_addr); + + uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq); + DMA_REGPAIR_LE(p_ramrod->iscsi.uhq_pbl_addr, uhq_pbl_addr); + + p_ramrod->iscsi.initial_ack = cpu_to_le32(p_conn->initial_ack); + p_ramrod->iscsi.flags = p_conn->offl_flags; + p_ramrod->iscsi.default_cq = p_conn->default_cq; + p_ramrod->iscsi.stat_sn = cpu_to_le32(p_conn->stat_sn); + + if (!GET_FIELD(p_ramrod->iscsi.flags, 
+ ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B)) { + p_tcp = &p_ramrod->tcp; + + p = (u16 *)p_conn->local_mac; + p_tcp->local_mac_addr_hi = swab16(get_unaligned(p)); + p_tcp->local_mac_addr_mid = swab16(get_unaligned(p + 1)); + p_tcp->local_mac_addr_lo = swab16(get_unaligned(p + 2)); + + p = (u16 *)p_conn->remote_mac; + p_tcp->remote_mac_addr_hi = swab16(get_unaligned(p)); + p_tcp->remote_mac_addr_mid = swab16(get_unaligned(p + 1)); + p_tcp->remote_mac_addr_lo = swab16(get_unaligned(p + 2)); + + p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id); + + p_tcp->flags = p_conn->tcp_flags; + p_tcp->ip_version = p_conn->ip_version; + for (i = 0; i < 4; i++) { + dval = p_conn->remote_ip[i]; + p_tcp->remote_ip[i] = cpu_to_le32(dval); + dval = p_conn->local_ip[i]; + p_tcp->local_ip[i] = cpu_to_le32(dval); + } + p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt; + p_tcp->dup_ack_theshold = p_conn->dup_ack_theshold; + + p_tcp->rcv_next = cpu_to_le32(p_conn->rcv_next); + p_tcp->snd_una = cpu_to_le32(p_conn->snd_una); + p_tcp->snd_next = cpu_to_le32(p_conn->snd_next); + p_tcp->snd_max = cpu_to_le32(p_conn->snd_max); + p_tcp->snd_wnd = cpu_to_le32(p_conn->snd_wnd); + p_tcp->rcv_wnd = cpu_to_le32(p_conn->rcv_wnd); + p_tcp->snd_wl1 = cpu_to_le32(p_conn->snd_wl1); + p_tcp->cwnd = cpu_to_le32(p_conn->cwnd); + p_tcp->ss_thresh = cpu_to_le32(p_conn->ss_thresh); + p_tcp->srtt = cpu_to_le16(p_conn->srtt); + p_tcp->rtt_var = cpu_to_le16(p_conn->rtt_var); + p_tcp->ts_time = cpu_to_le32(p_conn->ts_time); + p_tcp->ts_recent = cpu_to_le32(p_conn->ts_recent); + p_tcp->ts_recent_age = cpu_to_le32(p_conn->ts_recent_age); + p_tcp->total_rt = cpu_to_le32(p_conn->total_rt); + dval = p_conn->ka_timeout_delta; + p_tcp->ka_timeout_delta = cpu_to_le32(dval); + dval = p_conn->rt_timeout_delta; + p_tcp->rt_timeout_delta = cpu_to_le32(dval); + p_tcp->dup_ack_cnt = p_conn->dup_ack_cnt; + p_tcp->snd_wnd_probe_cnt = p_conn->snd_wnd_probe_cnt; + p_tcp->ka_probe_cnt = p_conn->ka_probe_cnt; + p_tcp->rt_cnt = p_conn->rt_cnt; + p_tcp->flow_label = cpu_to_le32(p_conn->flow_label); + p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout); + p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval); + p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time); + dval = p_conn->initial_rcv_wnd; + p_tcp->initial_rcv_wnd = cpu_to_le32(dval); + p_tcp->ttl = p_conn->ttl; + p_tcp->tos_or_tc = p_conn->tos_or_tc; + p_tcp->remote_port = cpu_to_le16(p_conn->remote_port); + p_tcp->local_port = cpu_to_le16(p_conn->local_port); + p_tcp->mss = cpu_to_le16(p_conn->mss); + p_tcp->snd_wnd_scale = p_conn->snd_wnd_scale; + p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale; + dval = p_conn->ts_ticks_per_second; + p_tcp->ts_ticks_per_second = cpu_to_le32(dval); + wval = p_conn->da_timeout_value; + p_tcp->da_timeout_value = cpu_to_le16(wval); + p_tcp->ack_frequency = p_conn->ack_frequency; + p_tcp->connect_mode = p_conn->connect_mode; + } else { + p_tcp2 = + &((struct iscsi_spe_conn_offload_option2 *)p_ramrod)->tcp; + + p = (u16 *)p_conn->local_mac; + p_tcp2->local_mac_addr_hi = swab16(get_unaligned(p)); + p_tcp2->local_mac_addr_mid = swab16(get_unaligned(p + 1)); + p_tcp2->local_mac_addr_lo = swab16(get_unaligned(p + 2)); + + p = (u16 *)p_conn->remote_mac; + p_tcp2->remote_mac_addr_hi = swab16(get_unaligned(p)); + p_tcp2->remote_mac_addr_mid = swab16(get_unaligned(p + 1)); + p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2)); + + p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id); + p_tcp2->flags = p_conn->tcp_flags; + + p_tcp2->ip_version = p_conn->ip_version; + for (i = 0; i < 4; 
i++) { + dval = p_conn->remote_ip[i]; + p_tcp2->remote_ip[i] = cpu_to_le32(dval); + dval = p_conn->local_ip[i]; + p_tcp2->local_ip[i] = cpu_to_le32(dval); + } + + p_tcp2->flow_label = cpu_to_le32(p_conn->flow_label); + p_tcp2->ttl = p_conn->ttl; + p_tcp2->tos_or_tc = p_conn->tos_or_tc; + p_tcp2->remote_port = cpu_to_le16(p_conn->remote_port); + p_tcp2->local_port = cpu_to_le16(p_conn->local_port); + p_tcp2->mss = cpu_to_le16(p_conn->mss); + p_tcp2->rcv_wnd_scale = p_conn->rcv_wnd_scale; + p_tcp2->connect_mode = p_conn->connect_mode; + wval = p_conn->syn_ip_payload_length; + p_tcp2->syn_ip_payload_length = cpu_to_le16(wval); + p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr); + p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr); + } + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_conn_update_ramrod_params *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + u32 dval; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_UPDATE_CONN, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_conn_update; + p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_UPDATE_CONN; + SET_FIELD(p_ramrod->hdr.flags, + ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code); + + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + p_ramrod->fw_cid = cpu_to_le32(p_conn->icid); + p_ramrod->flags = p_conn->update_flag; + p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size); + dval = p_conn->max_recv_pdu_length; + p_ramrod->max_recv_pdu_length = cpu_to_le32(dval); + dval = p_conn->max_send_pdu_length; + p_ramrod->max_send_pdu_length = cpu_to_le32(dval); + dval = p_conn->first_seq_length; + p_ramrod->first_seq_length = cpu_to_le32(dval); + p_ramrod->exp_stat_sn = cpu_to_le32(p_conn->exp_stat_sn); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_spe_conn_termination *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_TERMINATION_CONN, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_conn_terminate; + p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_TERMINATION_CONN; + SET_FIELD(p_ramrod->hdr.flags, + ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code); + + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + p_ramrod->fw_cid = cpu_to_le32(p_conn->icid); + p_ramrod->abortive = p_conn->abortive_dsconnect; + + DMA_REGPAIR_LE(p_ramrod->query_params_addr, + p_conn->tcp_upload_params_phys_addr); + DMA_REGPAIR_LE(p_ramrod->queue_cnts_addr, p_conn->queue_cnts_phys_addr); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int 
qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_slow_path_hdr *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_CLEAR_SQ, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_empty; + p_ramrod->op_code = ISCSI_RAMROD_CMD_ID_CLEAR_SQ; + SET_FIELD(p_ramrod->flags, + ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_spe_func_dstry *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_DESTROY_FUNC, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_destroy; + p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) +{ + return (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr(cid, DQ_DEMS_LEGACY); +} + +static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id); + + return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, + bdq_id); +} + +static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id); + + return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM + + TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, + bdq_id); +} + +static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn) +{ + if (!p_conn->queue_cnts_virt_addr) + goto nomem; + memset(p_conn->queue_cnts_virt_addr, 0, + sizeof(*p_conn->queue_cnts_virt_addr)); + + if (!p_conn->tcp_upload_params_virt_addr) + goto nomem; + memset(p_conn->tcp_upload_params_virt_addr, 0, + sizeof(*p_conn->tcp_upload_params_virt_addr)); + + if (!p_conn->r2tq.p_virt_addr) + goto nomem; + qed_chain_pbl_zero_mem(&p_conn->r2tq); + + if (!p_conn->uhq.p_virt_addr) + goto nomem; + qed_chain_pbl_zero_mem(&p_conn->uhq); + + if (!p_conn->xhq.p_virt_addr) + goto nomem; + qed_chain_pbl_zero_mem(&p_conn->xhq); + + return 0; +nomem: + return -ENOMEM; +} + +static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn **p_out_conn) +{ + u16 uhq_num_elements = 0, xhq_num_elements = 0, r2tq_num_elements = 0; + struct scsi_terminate_extra_params *p_q_cnts = NULL; + struct qed_iscsi_pf_params *p_params = NULL; + struct tcp_upload_params *p_tcp = NULL; + struct qed_iscsi_conn *p_conn = NULL; + int rc = 0; + + /* Try finding a free connection that can be 
used */ + spin_lock_bh(&p_hwfn->p_iscsi_info->lock); + if (!list_empty(&p_hwfn->p_iscsi_info->free_list)) + p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list, + struct qed_iscsi_conn, list_entry); + if (p_conn) { + list_del(&p_conn->list_entry); + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); + *p_out_conn = p_conn; + return 0; + } + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); + + /* Need to allocate a new connection */ + p_params = &p_hwfn->pf_params.iscsi_pf_params; + + p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL); + if (!p_conn) + return -ENOMEM; + + p_q_cnts = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*p_q_cnts), + &p_conn->queue_cnts_phys_addr, + GFP_KERNEL); + if (!p_q_cnts) + goto nomem_queue_cnts_param; + p_conn->queue_cnts_virt_addr = p_q_cnts; + + p_tcp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*p_tcp), + &p_conn->tcp_upload_params_phys_addr, + GFP_KERNEL); + if (!p_tcp) + goto nomem_upload_param; + p_conn->tcp_upload_params_virt_addr = p_tcp; + + r2tq_num_elements = p_params->num_r2tq_pages_in_ring * + QED_CHAIN_PAGE_SIZE / 0x80; + rc = qed_chain_alloc(p_hwfn->cdev, + QED_CHAIN_USE_TO_CONSUME_PRODUCE, + QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, + r2tq_num_elements, 0x80, &p_conn->r2tq); + if (rc) + goto nomem_r2tq; + + uhq_num_elements = p_params->num_uhq_pages_in_ring * + QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe); + rc = qed_chain_alloc(p_hwfn->cdev, + QED_CHAIN_USE_TO_CONSUME_PRODUCE, + QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, + uhq_num_elements, + sizeof(struct iscsi_uhqe), &p_conn->uhq); + if (rc) + goto nomem_uhq; + + xhq_num_elements = uhq_num_elements; + rc = qed_chain_alloc(p_hwfn->cdev, + QED_CHAIN_USE_TO_CONSUME_PRODUCE, + QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, + xhq_num_elements, + sizeof(struct iscsi_xhqe), &p_conn->xhq); + if (rc) + goto nomem; + + p_conn->free_on_delete = true; + *p_out_conn = p_conn; + return 0; + +nomem: + qed_chain_free(p_hwfn->cdev, &p_conn->uhq); +nomem_uhq: + qed_chain_free(p_hwfn->cdev, &p_conn->r2tq); +nomem_r2tq: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct tcp_upload_params), + p_conn->tcp_upload_params_virt_addr, + p_conn->tcp_upload_params_phys_addr); +nomem_upload_param: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct scsi_terminate_extra_params), + p_conn->queue_cnts_virt_addr, + p_conn->queue_cnts_phys_addr); +nomem_queue_cnts_param: + kfree(p_conn); + + return -ENOMEM; +} + +static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_in_conn, + struct qed_iscsi_conn **p_out_conn) +{ + struct qed_iscsi_conn *p_conn = NULL; + int rc = 0; + u32 icid; + + spin_lock_bh(&p_hwfn->p_iscsi_info->lock); + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ISCSI, &icid); + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); + if (rc) + return rc; + + /* Use input connection or allocate a new one */ + if (p_in_conn) + p_conn = p_in_conn; + else + rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn); + + if (!rc) + rc = qed_iscsi_setup_connection(p_hwfn, p_conn); + + if (rc) { + spin_lock_bh(&p_hwfn->p_iscsi_info->lock); + qed_cxt_release_cid(p_hwfn, icid); + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); + return rc; + } + + p_conn->icid = icid; + p_conn->conn_id = (u16)icid; + p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid; + + *p_out_conn = p_conn; + + return rc; +} + +static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn) +{ + spin_lock_bh(&p_hwfn->p_iscsi_info->lock); + 
list_add_tail(&p_conn->list_entry, &p_hwfn->p_iscsi_info->free_list); + qed_cxt_release_cid(p_hwfn, p_conn->icid); + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); +} + +struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_iscsi_info *p_iscsi_info; + + p_iscsi_info = kzalloc(sizeof(*p_iscsi_info), GFP_KERNEL); + if (!p_iscsi_info) + return NULL; + + INIT_LIST_HEAD(&p_iscsi_info->free_list); + return p_iscsi_info; +} + +void qed_iscsi_setup(struct qed_hwfn *p_hwfn, + struct qed_iscsi_info *p_iscsi_info) +{ + spin_lock_init(&p_iscsi_info->lock); +} + +void qed_iscsi_free(struct qed_hwfn *p_hwfn, + struct qed_iscsi_info *p_iscsi_info) +{ + kfree(p_iscsi_info); +} + +static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct tstorm_iscsi_stats_drv tstats; + u32 tstats_addr; + + memset(&tstats, 0, sizeof(tstats)); + tstats_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats)); + + p_stats->iscsi_rx_bytes_cnt = + HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt); + p_stats->iscsi_rx_packet_cnt = + HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt); + p_stats->iscsi_cmdq_threshold_cnt = + le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt); + p_stats->iscsi_rq_threshold_cnt = + le32_to_cpu(tstats.iscsi_rq_threshold_cnt); + p_stats->iscsi_immq_threshold_cnt = + le32_to_cpu(tstats.iscsi_immq_threshold_cnt); +} + +static void _qed_iscsi_get_mstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct mstorm_iscsi_stats_drv mstats; + u32 mstats_addr; + + memset(&mstats, 0, sizeof(mstats)); + mstats_addr = BAR0_MAP_REG_MSDM_RAM + + MSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, sizeof(mstats)); + + p_stats->iscsi_rx_dropped_pdus_task_not_valid = + HILO_64_REGPAIR(mstats.iscsi_rx_dropped_pdus_task_not_valid); +} + +static void _qed_iscsi_get_ustats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct ustorm_iscsi_stats_drv ustats; + u32 ustats_addr; + + memset(&ustats, 0, sizeof(ustats)); + ustats_addr = BAR0_MAP_REG_USDM_RAM + + USTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats)); + + p_stats->iscsi_rx_data_pdu_cnt = + HILO_64_REGPAIR(ustats.iscsi_rx_data_pdu_cnt); + p_stats->iscsi_rx_r2t_pdu_cnt = + HILO_64_REGPAIR(ustats.iscsi_rx_r2t_pdu_cnt); + p_stats->iscsi_rx_total_pdu_cnt = + HILO_64_REGPAIR(ustats.iscsi_rx_total_pdu_cnt); +} + +static void _qed_iscsi_get_xstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct xstorm_iscsi_stats_drv xstats; + u32 xstats_addr; + + memset(&xstats, 0, sizeof(xstats)); + xstats_addr = BAR0_MAP_REG_XSDM_RAM + + XSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &xstats, xstats_addr, sizeof(xstats)); + + p_stats->iscsi_tx_go_to_slow_start_event_cnt = + HILO_64_REGPAIR(xstats.iscsi_tx_go_to_slow_start_event_cnt); + p_stats->iscsi_tx_fast_retransmit_event_cnt = + HILO_64_REGPAIR(xstats.iscsi_tx_fast_retransmit_event_cnt); +} + +static void _qed_iscsi_get_ystats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct ystorm_iscsi_stats_drv ystats; + u32 ystats_addr; + + memset(&ystats, 0, sizeof(ystats)); + ystats_addr = BAR0_MAP_REG_YSDM_RAM + + 
YSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &ystats, ystats_addr, sizeof(ystats)); + + p_stats->iscsi_tx_data_pdu_cnt = + HILO_64_REGPAIR(ystats.iscsi_tx_data_pdu_cnt); + p_stats->iscsi_tx_r2t_pdu_cnt = + HILO_64_REGPAIR(ystats.iscsi_tx_r2t_pdu_cnt); + p_stats->iscsi_tx_total_pdu_cnt = + HILO_64_REGPAIR(ystats.iscsi_tx_total_pdu_cnt); +} + +static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct pstorm_iscsi_stats_drv pstats; + u32 pstats_addr; + + memset(&pstats, 0, sizeof(pstats)); + pstats_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats)); + + p_stats->iscsi_tx_bytes_cnt = + HILO_64_REGPAIR(pstats.iscsi_tx_bytes_cnt); + p_stats->iscsi_tx_packet_cnt = + HILO_64_REGPAIR(pstats.iscsi_tx_packet_cnt); +} + +static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn, + struct qed_iscsi_stats *stats) +{ + struct qed_ptt *p_ptt; + + memset(stats, 0, sizeof(*stats)); + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + return -EAGAIN; + } + + _qed_iscsi_get_tstats(p_hwfn, p_ptt, stats); + _qed_iscsi_get_mstats(p_hwfn, p_ptt, stats); + _qed_iscsi_get_ustats(p_hwfn, p_ptt, stats); + + _qed_iscsi_get_xstats(p_hwfn, p_ptt, stats); + _qed_iscsi_get_ystats(p_hwfn, p_ptt, stats); + _qed_iscsi_get_pstats(p_hwfn, p_ptt, stats); + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +struct qed_hash_iscsi_con { + struct hlist_node node; + struct qed_iscsi_conn *con; +}; + +static int qed_fill_iscsi_dev_info(struct qed_dev *cdev, + struct qed_dev_iscsi_info *info) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + + int rc; + + memset(info, 0, sizeof(*info)); + rc = qed_fill_dev_info(cdev, &info->common); + + info->primary_dbq_rq_addr = + qed_iscsi_get_primary_bdq_prod(hwfn, BDQ_ID_RQ); + info->secondary_bdq_rq_addr = + qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ); + + return rc; +} + +static void qed_register_iscsi_ops(struct qed_dev *cdev, + struct qed_iscsi_cb_ops *ops, void *cookie) +{ + cdev->protocol_ops.iscsi = ops; + cdev->ops_cookie = cookie; +} + +static struct qed_hash_iscsi_con *qed_iscsi_get_hash(struct qed_dev *cdev, + u32 handle) +{ + struct qed_hash_iscsi_con *hash_con = NULL; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) + return NULL; + + hash_for_each_possible(cdev->connections, hash_con, node, handle) { + if (hash_con->con->icid == handle) + break; + } + + if (!hash_con || (hash_con->con->icid != handle)) + return NULL; + + return hash_con; +} + +static int qed_iscsi_stop(struct qed_dev *cdev) +{ + int rc; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) { + DP_NOTICE(cdev, "iscsi already stopped\n"); + return 0; + } + + if (!hash_empty(cdev->connections)) { + DP_NOTICE(cdev, + "Can't stop iscsi - not all connections were returned\n"); + return -EINVAL; + } + + /* Stop the iscsi */ + rc = qed_sp_iscsi_func_stop(QED_LEADING_HWFN(cdev), + QED_SPQ_MODE_EBLOCK, NULL); + cdev->flags &= ~QED_FLAG_STORAGE_STARTED; + + return rc; +} + +static int qed_iscsi_start(struct qed_dev *cdev, + struct qed_iscsi_tid *tasks, + void *event_context, + iscsi_event_cb_t async_event_cb) +{ + int rc; + struct qed_tid_mem *tid_info; + + if (cdev->flags & QED_FLAG_STORAGE_STARTED) { + DP_NOTICE(cdev, "iscsi already started;\n"); + return 0; + } + + rc = qed_sp_iscsi_func_start(QED_LEADING_HWFN(cdev), + QED_SPQ_MODE_EBLOCK, NULL, event_context, 
+ async_event_cb); + if (rc) { + DP_NOTICE(cdev, "Failed to start iscsi\n"); + return rc; + } + + cdev->flags |= QED_FLAG_STORAGE_STARTED; + hash_init(cdev->connections); + + if (!tasks) + return 0; + + tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL); + + if (!tid_info) { + qed_iscsi_stop(cdev); + return -ENOMEM; + } + + rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), + tid_info); + if (rc) { + DP_NOTICE(cdev, "Failed to gather task information\n"); + qed_iscsi_stop(cdev); + kfree(tid_info); + return rc; + } + + /* Fill task information */ + tasks->size = tid_info->tid_size; + tasks->num_tids_per_block = tid_info->num_tids_per_block; + memcpy(tasks->blocks, tid_info->blocks, + MAX_TID_BLOCKS_ISCSI * sizeof(u8 *)); + + kfree(tid_info); + + return 0; +} + +static int qed_iscsi_acquire_conn(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell) +{ + struct qed_hash_iscsi_con *hash_con; + int rc; + + /* Allocate a hashed connection */ + hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC); + if (!hash_con) + return -ENOMEM; + + /* Acquire the connection */ + rc = qed_iscsi_acquire_connection(QED_LEADING_HWFN(cdev), NULL, + &hash_con->con); + if (rc) { + DP_NOTICE(cdev, "Failed to acquire Connection\n"); + kfree(hash_con); + return rc; + } + + /* Added the connection to hash table */ + *handle = hash_con->con->icid; + *fw_cid = hash_con->con->fw_cid; + hash_add(cdev->connections, &hash_con->node, *handle); + + if (p_doorbell) + *p_doorbell = qed_iscsi_get_db_addr(QED_LEADING_HWFN(cdev), + *handle); + + return 0; +} + +static int qed_iscsi_release_conn(struct qed_dev *cdev, u32 handle) +{ + struct qed_hash_iscsi_con *hash_con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + hlist_del(&hash_con->node); + qed_iscsi_release_connection(QED_LEADING_HWFN(cdev), hash_con->con); + kfree(hash_con); + + return 0; +} + +static int qed_iscsi_offload_conn(struct qed_dev *cdev, + u32 handle, + struct qed_iscsi_params_offload *conn_info) +{ + struct qed_hash_iscsi_con *hash_con; + struct qed_iscsi_conn *con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + + ether_addr_copy(con->local_mac, conn_info->src.mac); + ether_addr_copy(con->remote_mac, conn_info->dst.mac); + memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip)); + memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip)); + con->local_port = conn_info->src.port; + con->remote_port = conn_info->dst.port; + + con->layer_code = conn_info->layer_code; + con->sq_pbl_addr = conn_info->sq_pbl_addr; + con->initial_ack = conn_info->initial_ack; + con->vlan_id = conn_info->vlan_id; + con->tcp_flags = conn_info->tcp_flags; + con->ip_version = conn_info->ip_version; + con->default_cq = conn_info->default_cq; + con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt; + con->dup_ack_theshold = conn_info->dup_ack_theshold; + con->rcv_next = conn_info->rcv_next; + con->snd_una = conn_info->snd_una; + con->snd_next = conn_info->snd_next; + con->snd_max = conn_info->snd_max; + con->snd_wnd = conn_info->snd_wnd; + con->rcv_wnd = conn_info->rcv_wnd; + con->snd_wl1 = conn_info->snd_wl1; + con->cwnd = conn_info->cwnd; + con->ss_thresh = conn_info->ss_thresh; + con->srtt = conn_info->srtt; + con->rtt_var = 
conn_info->rtt_var; + con->ts_time = conn_info->ts_time; + con->ts_recent = conn_info->ts_recent; + con->ts_recent_age = conn_info->ts_recent_age; + con->total_rt = conn_info->total_rt; + con->ka_timeout_delta = conn_info->ka_timeout_delta; + con->rt_timeout_delta = conn_info->rt_timeout_delta; + con->dup_ack_cnt = conn_info->dup_ack_cnt; + con->snd_wnd_probe_cnt = conn_info->snd_wnd_probe_cnt; + con->ka_probe_cnt = conn_info->ka_probe_cnt; + con->rt_cnt = conn_info->rt_cnt; + con->flow_label = conn_info->flow_label; + con->ka_timeout = conn_info->ka_timeout; + con->ka_interval = conn_info->ka_interval; + con->max_rt_time = conn_info->max_rt_time; + con->initial_rcv_wnd = conn_info->initial_rcv_wnd; + con->ttl = conn_info->ttl; + con->tos_or_tc = conn_info->tos_or_tc; + con->remote_port = conn_info->remote_port; + con->local_port = conn_info->local_port; + con->mss = conn_info->mss; + con->snd_wnd_scale = conn_info->snd_wnd_scale; + con->rcv_wnd_scale = conn_info->rcv_wnd_scale; + con->ts_ticks_per_second = conn_info->ts_ticks_per_second; + con->da_timeout_value = conn_info->da_timeout_value; + con->ack_frequency = conn_info->ack_frequency; + + /* Set default values on other connection fields */ + con->offl_flags = 0x1; + + return qed_sp_iscsi_conn_offload(QED_LEADING_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_iscsi_update_conn(struct qed_dev *cdev, + u32 handle, + struct qed_iscsi_params_update *conn_info) +{ + struct qed_hash_iscsi_con *hash_con; + struct qed_iscsi_conn *con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + con->update_flag = conn_info->update_flag; + con->max_seq_size = conn_info->max_seq_size; + con->max_recv_pdu_length = conn_info->max_recv_pdu_length; + con->max_send_pdu_length = conn_info->max_send_pdu_length; + con->first_seq_length = conn_info->first_seq_length; + con->exp_stat_sn = conn_info->exp_stat_sn; + + return qed_sp_iscsi_conn_update(QED_LEADING_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_iscsi_clear_conn_sq(struct qed_dev *cdev, u32 handle) +{ + struct qed_hash_iscsi_con *hash_con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + return qed_sp_iscsi_conn_clear_sq(QED_LEADING_HWFN(cdev), + hash_con->con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_iscsi_destroy_conn(struct qed_dev *cdev, + u32 handle, u8 abrt_conn) +{ + struct qed_hash_iscsi_con *hash_con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + hash_con->con->abortive_dsconnect = abrt_conn; + + return qed_sp_iscsi_conn_terminate(QED_LEADING_HWFN(cdev), + hash_con->con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats) +{ + return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats); +} + +static const struct qed_iscsi_ops qed_iscsi_ops_pass = { + .common = &qed_common_ops_pass, + .ll2 = &qed_ll2_ops_pass, + .fill_dev_info = &qed_fill_iscsi_dev_info, + .register_ops = &qed_register_iscsi_ops, + .start = &qed_iscsi_start, + .stop = &qed_iscsi_stop, + .acquire_conn = &qed_iscsi_acquire_conn, + .release_conn = &qed_iscsi_release_conn, + 
.offload_conn = &qed_iscsi_offload_conn, + .update_conn = &qed_iscsi_update_conn, + .destroy_conn = &qed_iscsi_destroy_conn, + .clear_sq = &qed_iscsi_clear_conn_sq, + .get_stats = &qed_iscsi_stats, +}; + +const struct qed_iscsi_ops *qed_get_iscsi_ops(void) +{ + return &qed_iscsi_ops_pass; +} +EXPORT_SYMBOL(qed_get_iscsi_ops); + +void qed_put_iscsi_ops(void) +{ +} +EXPORT_SYMBOL(qed_put_iscsi_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h new file mode 100644 index 000000000000..67c25f3db4d5 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -0,0 +1,52 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_ISCSI_H +#define _QED_ISCSI_H +#include <linux/types.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/qed/tcp_common.h> +#include <linux/qed/qed_iscsi_if.h> +#include <linux/qed/qed_chain.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_mcp.h" +#include "qed_sp.h" + +struct qed_iscsi_info { + spinlock_t lock; /* Connection resources. */ + struct list_head free_list; + u16 max_num_outstanding_tasks; + void *event_context; + iscsi_event_cb_t event_cb; +}; + +#ifdef CONFIG_QED_LL2 +extern const struct qed_ll2_ops qed_ll2_ops_pass; +#endif + +#if IS_ENABLED(CONFIG_QED_ISCSI) +struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn); + +void qed_iscsi_setup(struct qed_hwfn *p_hwfn, + struct qed_iscsi_info *p_iscsi_info); + +void qed_iscsi_free(struct qed_hwfn *p_hwfn, + struct qed_iscsi_info *p_iscsi_info); +#else /* IS_ENABLED(CONFIG_QED_ISCSI) */ +static inline struct qed_iscsi_info *qed_iscsi_alloc( + struct qed_hwfn *p_hwfn) { return NULL; } +static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn, + struct qed_iscsi_info *p_iscsi_info) {} +static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn, + struct qed_iscsi_info *p_iscsi_info) {} +#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index ddd410a91e13..6a3727c4c0c6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -23,6 +23,7 @@ #include <linux/workqueue.h> #include <linux/bitops.h> #include <linux/bug.h> +#include <linux/vmalloc.h> #include "qed.h" #include <linux/qed/qed_chain.h> #include "qed_cxt.h" @@ -41,6 +42,124 @@ #define QED_MAX_SGES_NUM 16 #define CRC32_POLY 0x1edc6f41 +void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) +{ + /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */ + if (!p_cid->is_vf && IS_PF(p_hwfn->cdev)) + qed_cxt_release_cid(p_hwfn, p_cid->cid); + vfree(p_cid); +} + +/* The internal is only meant to be directly called by PFs initializeing CIDs + * for their VFs. 
+ */ +struct qed_queue_cid * +_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + u8 vf_qid, + struct qed_queue_start_common_params *p_params) +{ + bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid); + struct qed_queue_cid *p_cid; + int rc; + + p_cid = vmalloc(sizeof(*p_cid)); + if (!p_cid) + return NULL; + memset(p_cid, 0, sizeof(*p_cid)); + + p_cid->opaque_fid = opaque_fid; + p_cid->cid = cid; + p_cid->vf_qid = vf_qid; + p_cid->rel = *p_params; + + /* Don't try calculating the absolute indices for VFs */ + if (IS_VF(p_hwfn->cdev)) { + p_cid->abs = p_cid->rel; + goto out; + } + + /* Calculate the engine-absolute indices of the resources. + * This would guarantee they're valid later on. + * In some cases [SBs] we already have the right values. + */ + rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id); + if (rc) + goto fail; + + rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id); + if (rc) + goto fail; + + /* In case of a PF configuring its VF's queues, the stats-id is already + * absolute [since there's a single index that's suitable per-VF]. + */ + if (b_is_same) { + rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id, + &p_cid->abs.stats_id); + if (rc) + goto fail; + } else { + p_cid->abs.stats_id = p_cid->rel.stats_id; + } + + /* SBs relevant information was already provided as absolute */ + p_cid->abs.sb = p_cid->rel.sb; + p_cid->abs.sb_idx = p_cid->rel.sb_idx; + + /* This is tricky - we're actually interested in whehter this is a PF + * entry meant for the VF. + */ + if (!b_is_same) + p_cid->is_vf = true; +out: + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n", + p_cid->opaque_fid, + p_cid->cid, + p_cid->rel.vport_id, + p_cid->abs.vport_id, + p_cid->rel.queue_id, + p_cid->abs.queue_id, + p_cid->rel.stats_id, + p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx); + + return p_cid; + +fail: + vfree(p_cid); + return NULL; +} + +static struct qed_queue_cid *qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, + u16 opaque_fid, struct + qed_queue_start_common_params + *p_params) +{ + struct qed_queue_cid *p_cid; + u32 cid = 0; + + /* Get a unique firmware CID for this queue, in case it's a PF. + * VF's don't need a CID as the queue configuration will be done + * by PF. 
+ */ + if (IS_PF(p_hwfn->cdev)) { + if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) { + DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); + return NULL; + } + } + + p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params); + if (!p_cid && IS_PF(p_hwfn->cdev)) + qed_cxt_release_cid(p_hwfn, cid); + + return p_cid; +} + int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params) { @@ -496,61 +615,26 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev, return 0; } -static int qed_sp_release_queue_cid( - struct qed_hwfn *p_hwfn, - struct qed_hw_cid_data *p_cid_data) -{ - if (!p_cid_data->b_cid_allocated) - return 0; - - qed_cxt_release_cid(p_hwfn, p_cid_data->cid); - - p_cid_data->b_cid_allocated = false; - - return 0; -} - -int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u32 cid, - struct qed_queue_start_common_params *p_params, - u8 stats_id, - u16 bd_max_bytes, - dma_addr_t bd_chain_phys_addr, - dma_addr_t cqe_pbl_addr, - u16 cqe_pbl_size, bool b_use_zone_a_prod) +int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size) { struct rx_queue_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - struct qed_hw_cid_data *p_rx_cid; - u16 abs_rx_q_id = 0; - u8 abs_vport_id = 0; int rc = -EINVAL; - /* Store information for the stop */ - p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id]; - p_rx_cid->cid = cid; - p_rx_cid->opaque_fid = opaque_fid; - p_rx_cid->vport_id = p_params->vport_id; - - rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); - if (rc) - return rc; - - rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id); - if (rc) - return rc; - DP_VERBOSE(p_hwfn, QED_MSG_SP, - "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n", - opaque_fid, - cid, p_params->queue_id, p_params->vport_id, p_params->sb); + "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n", + p_cid->opaque_fid, p_cid->cid, + p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); - init_data.cid = cid; - init_data.opaque_fid = opaque_fid; + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, @@ -561,11 +645,11 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod = &p_ent->ramrod.rx_queue_start; - p_ramrod->sb_id = cpu_to_le16(p_params->sb); - p_ramrod->sb_index = p_params->sb_idx; - p_ramrod->vport_id = abs_vport_id; - p_ramrod->stats_counter_id = stats_id; - p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb); + p_ramrod->sb_index = p_cid->abs.sb_idx; + p_ramrod->vport_id = p_cid->abs.vport_id; + p_ramrod->stats_counter_id = p_cid->abs.stats_id; + p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); p_ramrod->complete_cqe_flg = 0; p_ramrod->complete_event_flg = 1; @@ -575,85 +659,85 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); - if (p_params->vf_qid || b_use_zone_a_prod) { - p_ramrod->vf_rx_prod_index = p_params->vf_qid; + if (p_cid->is_vf) { + p_ramrod->vf_rx_prod_index = p_cid->vf_qid; DP_VERBOSE(p_hwfn, QED_MSG_SP, "Queue%s is meant for VF rxq[%02x]\n", - 
b_use_zone_a_prod ? " [legacy]" : "", - p_params->vf_qid); - p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod; + !!p_cid->b_legacy_vf ? " [legacy]" : "", + p_cid->vf_qid); + p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf; } return qed_spq_post(p_hwfn, p_ent, NULL); } static int -qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - struct qed_queue_start_common_params *p_params, +qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, void __iomem **pp_prod) { - struct qed_hw_cid_data *p_rx_cid; u32 init_prod_val = 0; - u16 abs_l2_queue = 0; - u8 abs_stats_id = 0; - int rc; - if (IS_VF(p_hwfn->cdev)) { - return qed_vf_pf_rxq_start(p_hwfn, - p_params->queue_id, - p_params->sb, - (u8)p_params->sb_idx, - bd_max_bytes, - bd_chain_phys_addr, - cqe_pbl_addr, cqe_pbl_size, pp_prod); - } - - rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue); - if (rc) - return rc; - - rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); - if (rc) - return rc; - - *pp_prod = (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue); + *pp_prod = p_hwfn->regview + + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id); /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)(&init_prod_val)); + return qed_eth_rxq_start_ramrod(p_hwfn, p_cid, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size); +} + +static int +qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *p_params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + struct qed_rxq_start_ret_params *p_ret_params) +{ + struct qed_queue_cid *p_cid; + int rc; + /* Allocate a CID for the queue */ - p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id]; - rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid); - if (rc) { - DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); - return rc; - } - p_rx_cid->b_cid_allocated = true; + p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params); + if (!p_cid) + return -ENOMEM; - rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, - opaque_fid, - p_rx_cid->cid, - p_params, - abs_stats_id, + if (IS_PF(p_hwfn->cdev)) { + rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size, + &p_ret_params->p_prod); + } else { + rc = qed_vf_pf_rxq_start(p_hwfn, p_cid, bd_max_bytes, bd_chain_phys_addr, - cqe_pbl_addr, cqe_pbl_size, false); + cqe_pbl_addr, + cqe_pbl_size, &p_ret_params->p_prod); + } + /* Provide the caller with a reference to as handler */ if (rc) - qed_sp_release_queue_cid(p_hwfn, p_rx_cid); + qed_eth_queue_cid_release(p_hwfn, p_cid); + else + p_ret_params->p_handle = (void *)p_cid; return rc; } int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, - u16 rx_queue_id, + void **pp_rxq_handles, u8 num_rxqs, u8 complete_cqe_flg, u8 complete_event_flg, @@ -663,8 +747,7 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, struct rx_queue_update_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - struct qed_hw_cid_data *p_rx_cid; - u16 qid, abs_rx_q_id = 0; + struct qed_queue_cid *p_cid; int rc = -EINVAL; u8 i; @@ -673,12 +756,11 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, init_data.p_comp_data 
= p_comp_data; for (i = 0; i < num_rxqs; i++) { - qid = rx_queue_id + i; - p_rx_cid = &p_hwfn->p_rx_cids[qid]; + p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i]; /* Get SPQ entry */ - init_data.cid = p_rx_cid->cid; - init_data.opaque_fid = p_rx_cid->opaque_fid; + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_RX_QUEUE_UPDATE, @@ -687,10 +769,9 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, return rc; p_ramrod = &p_ent->ramrod.rx_queue_update; + p_ramrod->vport_id = p_cid->abs.vport_id; - qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id); - qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id); - p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); p_ramrod->complete_cqe_flg = complete_cqe_flg; p_ramrod->complete_event_flg = complete_event_flg; @@ -702,24 +783,19 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, return rc; } -int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, - u16 rx_queue_id, - bool eq_completion_only, bool cqe_completion) +static int +qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + bool b_eq_completion_only, bool b_cqe_completion) { - struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; struct rx_queue_stop_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - u16 abs_rx_q_id = 0; - int rc = -EINVAL; - - if (IS_VF(p_hwfn->cdev)) - return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion); + int rc; - /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); - init_data.cid = p_rx_cid->cid; - init_data.opaque_fid = p_rx_cid->opaque_fid; + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, @@ -729,62 +805,53 @@ int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, return rc; p_ramrod = &p_ent->ramrod.rx_queue_stop; - - qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id); - qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id); - p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + p_ramrod->vport_id = p_cid->abs.vport_id; + p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); /* Cleaning the queue requires the completion to arrive there. * In addition, VFs require the answer to come as eqe to PF. 
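/* Illustrative sketch, not part of the patch: the reworked Rx-queue stop path
 * replaces the old opaque_fid comparison with the cached p_cid->is_vf bit when
 * choosing between CQE-ring and event-ring completion. A minimal standalone
 * userspace version of that flag selection, with a simplified stand-in struct
 * (field names here are illustrative, not the driver's):
 */
#include <stdbool.h>
#include <stdio.h>

struct cid_sketch {
        bool is_vf;
};

static void rx_stop_flags(const struct cid_sketch *cid,
                          bool eq_completion_only, bool cqe_completion,
                          bool *complete_cqe, bool *complete_event)
{
        /* PF-owned queues complete on the CQE ring unless the caller forces
         * EQ-only completion; VF-owned queues always answer on the event ring.
         */
        *complete_cqe = (!cid->is_vf && !eq_completion_only) || cqe_completion;
        *complete_event = cid->is_vf || eq_completion_only;
}

int main(void)
{
        struct cid_sketch pf = { .is_vf = false }, vf = { .is_vf = true };
        bool cqe, eq;

        rx_stop_flags(&pf, false, false, &cqe, &eq);
        printf("PF default: cqe=%d eq=%d\n", cqe, eq);  /* cqe=1 eq=0 */
        rx_stop_flags(&vf, false, false, &cqe, &eq);
        printf("VF default: cqe=%d eq=%d\n", cqe, eq);  /* cqe=0 eq=1 */
        return 0;
}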
*/ - p_ramrod->complete_cqe_flg = - (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) && - !eq_completion_only) || cqe_completion; - p_ramrod->complete_event_flg = - !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) || - eq_completion_only; + p_ramrod->complete_cqe_flg = (!p_cid->is_vf && + !b_eq_completion_only) || + b_cqe_completion; + p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only; - rc = qed_spq_post(p_hwfn, p_ent, NULL); - if (rc) - return rc; + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, + void *p_rxq, + bool eq_completion_only, bool cqe_completion) +{ + struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq; + int rc = -EINVAL; + + if (IS_PF(p_hwfn->cdev)) + rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid, + eq_completion_only, + cqe_completion); + else + rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion); - return qed_sp_release_queue_cid(p_hwfn, p_rx_cid); + if (!rc) + qed_eth_queue_cid_release(p_hwfn, p_cid); + return rc; } -int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u32 cid, - struct qed_queue_start_common_params *p_params, - u8 stats_id, - dma_addr_t pbl_addr, - u16 pbl_size, - union qed_qm_pq_params *p_pq_params) +int +qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id) { struct tx_queue_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - struct qed_hw_cid_data *p_tx_cid; - u16 pq_id, abs_tx_q_id = 0; int rc = -EINVAL; - u8 abs_vport_id; - - /* Store information for the stop */ - p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; - p_tx_cid->cid = cid; - p_tx_cid->opaque_fid = opaque_fid; - - rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); - if (rc) - return rc; - - rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id); - if (rc) - return rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); - init_data.cid = cid; - init_data.opaque_fid = opaque_fid; + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, @@ -794,96 +861,92 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, return rc; p_ramrod = &p_ent->ramrod.tx_queue_start; - p_ramrod->vport_id = abs_vport_id; + p_ramrod->vport_id = p_cid->abs.vport_id; - p_ramrod->sb_id = cpu_to_le16(p_params->sb); - p_ramrod->sb_index = p_params->sb_idx; - p_ramrod->stats_counter_id = stats_id; + p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb); + p_ramrod->sb_index = p_cid->abs.sb_idx; + p_ramrod->stats_counter_id = p_cid->abs.stats_id; - p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id); + p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id); + p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id); p_ramrod->pbl_size = cpu_to_le16(pbl_size); DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); - pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params); p_ramrod->qm_pq_id = cpu_to_le16(pq_id); return qed_spq_post(p_hwfn, p_ent, NULL); } static int -qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - struct qed_queue_start_common_params *p_params, +qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u8 tc, dma_addr_t pbl_addr, u16 pbl_size, void __iomem **pp_doorbell) { - struct qed_hw_cid_data *p_tx_cid; union qed_qm_pq_params pq_params; - u8 abs_stats_id = 0; int rc; - if 
(IS_VF(p_hwfn->cdev)) { - return qed_vf_pf_txq_start(p_hwfn, - p_params->queue_id, - p_params->sb, - p_params->sb_idx, - pbl_addr, pbl_size, pp_doorbell); - } + memset(&pq_params, 0, sizeof(pq_params)); - rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); + rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, + pbl_addr, pbl_size, + qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, + &pq_params)); if (rc) return rc; - p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; - memset(p_tx_cid, 0, sizeof(*p_tx_cid)); - memset(&pq_params, 0, sizeof(pq_params)); + /* Provide the caller with the necessary return values */ + *pp_doorbell = p_hwfn->doorbells + + qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY); - /* Allocate a CID for the queue */ - rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid); - if (rc) { - DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); - return rc; - } - p_tx_cid->b_cid_allocated = true; + return 0; +} - DP_VERBOSE(p_hwfn, QED_MSG_SP, - "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n", - opaque_fid, p_tx_cid->cid, - p_params->queue_id, p_params->vport_id, p_params->sb); - - rc = qed_sp_eth_txq_start_ramrod(p_hwfn, - opaque_fid, - p_tx_cid->cid, - p_params, - abs_stats_id, - pbl_addr, - pbl_size, - &pq_params); - - *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + - qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY); +static int +qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *p_params, + u8 tc, + dma_addr_t pbl_addr, + u16 pbl_size, + struct qed_txq_start_ret_params *p_ret_params) +{ + struct qed_queue_cid *p_cid; + int rc; + + p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params); + if (!p_cid) + return -EINVAL; + + if (IS_PF(p_hwfn->cdev)) + rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc, + pbl_addr, pbl_size, + &p_ret_params->p_doorbell); + else + rc = qed_vf_pf_txq_start(p_hwfn, p_cid, + pbl_addr, pbl_size, + &p_ret_params->p_doorbell); if (rc) - qed_sp_release_queue_cid(p_hwfn, p_tx_cid); + qed_eth_queue_cid_release(p_hwfn, p_cid); + else + p_ret_params->p_handle = (void *)p_cid; return rc; } -int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id) +static int +qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) { - struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - int rc = -EINVAL; - - if (IS_VF(p_hwfn->cdev)) - return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id); + int rc; - /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); - init_data.cid = p_tx_cid->cid; - init_data.opaque_fid = p_tx_cid->opaque_fid; + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, @@ -892,11 +955,22 @@ int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id) if (rc) return rc; - rc = qed_spq_post(p_hwfn, p_ent, NULL); - if (rc) - return rc; + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle) +{ + struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle; + int rc; + + if (IS_PF(p_hwfn->cdev)) + rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid); + else + rc = qed_vf_pf_txq_stop(p_hwfn, p_cid); - return qed_sp_release_queue_cid(p_hwfn, p_tx_cid); + if (!rc) + qed_eth_queue_cid_release(p_hwfn, p_cid); + return rc; } static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode) @@ -1652,6 +1726,7 
@@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, if (IS_PF(cdev)) { int max_vf_vlan_filters = 0; + int max_vf_mac_filters = 0; if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { for_each_hwfn(cdev, i) @@ -1665,11 +1740,18 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, info->num_queues = cdev->num_hwfns; } - if (IS_QED_SRIOV(cdev)) + if (IS_QED_SRIOV(cdev)) { max_vf_vlan_filters = cdev->p_iov_info->total_vfs * QED_ETH_VF_NUM_VLAN_FILTERS; - info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) - + max_vf_mac_filters = cdev->p_iov_info->total_vfs * + QED_ETH_VF_NUM_MAC_FILTERS; + } + info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev), + QED_VLAN) - max_vf_vlan_filters; + info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev), + QED_MAC) - + max_vf_mac_filters; ether_addr_copy(info->port_mac, cdev->hwfns[0].hw_info.hw_mac_addr); @@ -1683,7 +1765,9 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, } qed_vf_get_num_vlan_filters(&cdev->hwfns[0], - &info->num_vlan_filters); + (u8 *)&info->num_vlan_filters); + qed_vf_get_num_mac_filters(&cdev->hwfns[0], + (u8 *)&info->num_mac_filters); qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac); info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi; @@ -1870,58 +1954,53 @@ static int qed_update_vport(struct qed_dev *cdev, } static int qed_start_rxq(struct qed_dev *cdev, - struct qed_queue_start_common_params *params, + u8 rss_num, + struct qed_queue_start_common_params *p_params, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, - void __iomem **pp_prod) + struct qed_rxq_start_ret_params *ret_params) { struct qed_hwfn *p_hwfn; int rc, hwfn_index; - hwfn_index = params->rss_id % cdev->num_hwfns; + hwfn_index = rss_num % cdev->num_hwfns; p_hwfn = &cdev->hwfns[hwfn_index]; - /* Fix queue ID in 100g mode */ - params->queue_id /= cdev->num_hwfns; - - rc = qed_sp_eth_rx_queue_start(p_hwfn, - p_hwfn->hw_info.opaque_fid, - params, - bd_max_bytes, - bd_chain_phys_addr, - cqe_pbl_addr, - cqe_pbl_size, - pp_prod); + p_params->queue_id = p_params->queue_id / cdev->num_hwfns; + p_params->stats_id = p_params->vport_id; + rc = qed_eth_rx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + p_params, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size, ret_params); if (rc) { - DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id); + DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id); return rc; } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), - "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n", - params->queue_id, params->rss_id, params->vport_id, - params->sb); + "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n", + p_params->queue_id, rss_num, p_params->vport_id, + p_params->sb); return 0; } -static int qed_stop_rxq(struct qed_dev *cdev, - struct qed_stop_rxq_params *params) +static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle) { int rc, hwfn_index; struct qed_hwfn *p_hwfn; - hwfn_index = params->rss_id % cdev->num_hwfns; - p_hwfn = &cdev->hwfns[hwfn_index]; + hwfn_index = rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; - rc = qed_sp_eth_rx_queue_stop(p_hwfn, - params->rx_queue_id / cdev->num_hwfns, - params->eq_completion_only, false); + rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false); if (rc) { - DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id); + DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id); return rc; } @@ -1929,26 +2008,24 @@ static int qed_stop_rxq(struct qed_dev *cdev, 
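/* Illustrative sketch, not part of the patch: qed_start_rxq()/qed_start_txq()
 * now take an explicit rss_num and derive both the engine (hwfn) and the
 * per-engine queue id from it on multi-function (100G/CMT) devices. A
 * standalone userspace version of that mapping, assuming two hwfns and that
 * the caller passes the same global index as rss_num and queue_id:
 */
#include <stdio.h>

int main(void)
{
        unsigned int num_hwfns = 2;     /* CMT / 100G device: two engines */
        unsigned int global_qid;

        for (global_qid = 0; global_qid < 8; global_qid++) {
                unsigned int hwfn_index = global_qid % num_hwfns;
                unsigned int local_qid = global_qid / num_hwfns;

                printf("global queue %u -> hwfn %u, per-hwfn queue %u\n",
                       global_qid, hwfn_index, local_qid);
        }
        return 0;
}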
} static int qed_start_txq(struct qed_dev *cdev, + u8 rss_num, struct qed_queue_start_common_params *p_params, dma_addr_t pbl_addr, u16 pbl_size, - void __iomem **pp_doorbell) + struct qed_txq_start_ret_params *ret_params) { struct qed_hwfn *p_hwfn; int rc, hwfn_index; - hwfn_index = p_params->rss_id % cdev->num_hwfns; - p_hwfn = &cdev->hwfns[hwfn_index]; - - /* Fix queue ID in 100g mode */ - p_params->queue_id /= cdev->num_hwfns; + hwfn_index = rss_num % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + p_params->queue_id = p_params->queue_id / cdev->num_hwfns; + p_params->stats_id = p_params->vport_id; - rc = qed_sp_eth_tx_queue_start(p_hwfn, - p_hwfn->hw_info.opaque_fid, - p_params, - pbl_addr, - pbl_size, - pp_doorbell); + rc = qed_eth_tx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + p_params, 0, + pbl_addr, pbl_size, ret_params); if (rc) { DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id); @@ -1956,8 +2033,8 @@ static int qed_start_txq(struct qed_dev *cdev, } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), - "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n", - p_params->queue_id, p_params->rss_id, p_params->vport_id, + "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n", + p_params->queue_id, rss_num, p_params->vport_id, p_params->sb); return 0; @@ -1971,19 +2048,17 @@ static int qed_fastpath_stop(struct qed_dev *cdev) return 0; } -static int qed_stop_txq(struct qed_dev *cdev, - struct qed_stop_txq_params *params) +static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle) { struct qed_hwfn *p_hwfn; int rc, hwfn_index; - hwfn_index = params->rss_id % cdev->num_hwfns; - p_hwfn = &cdev->hwfns[hwfn_index]; + hwfn_index = rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; - rc = qed_sp_eth_tx_queue_stop(p_hwfn, - params->tx_queue_id / cdev->num_hwfns); + rc = qed_eth_tx_queue_stop(p_hwfn, handle); if (rc) { - DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id); + DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index e495d62fcc03..48c9bfc28140 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -78,11 +78,34 @@ struct qed_filter_mcast { unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN]; }; -int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, - u16 rx_queue_id, - bool eq_completion_only, bool cqe_completion); +/** + * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue + * + * @param p_hwfn + * @param p_rxq Handler of queue to close + * @param eq_completion_only If True completion will be on + * EQe, if False completion will be + * on EQe if p_hwfn opaque + * different from the RXQ opaque + * otherwise on CQe. + * @param cqe_completion If True completion will be + * receive on CQe. + * @return int + */ +int +qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, + void *p_rxq, + bool eq_completion_only, bool cqe_completion); -int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id); +/** + * @brief qed_eth_tx_queue_stop - closes a Tx queue + * + * @param p_hwfn + * @param p_txq - handle to Tx queue needed to be closed + * + * @return int + */ +int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq); enum qed_tpa_mode { QED_TPA_MODE_NONE, @@ -196,19 +219,19 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, * @note At the moment - only used by non-linux VFs. 
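/* Illustrative sketch, not part of the patch: the queue API now hands callers
 * an opaque handle (p_ret_params->p_handle, really a struct qed_queue_cid *)
 * instead of a queue index, and the stop routines take that handle back and
 * release it. A minimal standalone userspace version of the pattern; the
 * struct fields and values below are illustrative only:
 */
#include <stdlib.h>
#include <stdio.h>

struct queue_cid_sketch {
        unsigned int cid;
        unsigned short opaque_fid;
};

static int queue_start(unsigned int cid, unsigned short opaque_fid,
                       void **pp_handle)
{
        struct queue_cid_sketch *p = malloc(sizeof(*p));

        if (!p)
                return -1;
        p->cid = cid;
        p->opaque_fid = opaque_fid;
        *pp_handle = p;         /* caller stores this instead of a queue id */
        return 0;
}

static int queue_stop(void *handle)
{
        struct queue_cid_sketch *p = handle;

        printf("stopping cid 0x%x (fid 0x%x)\n", p->cid,
               (unsigned int)p->opaque_fid);
        free(p);
        return 0;
}

int main(void)
{
        void *h;

        if (!queue_start(0x1234, 0x10, &h))
                queue_stop(h);
        return 0;
}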
* * @param p_hwfn - * @param rx_queue_id RX Queue ID - * @param num_rxqs Allow to update multiple rx - * queues, from rx_queue_id to - * (rx_queue_id + num_rxqs) + * @param pp_rxq_handlers An array of queue handlers to be updated. + * @param num_rxqs number of queues to update. * @param complete_cqe_flg Post completion to the CQE Ring if set * @param complete_event_flg Post completion to the Event Ring if set + * @param comp_mode + * @param p_comp_data * * @return int */ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, - u16 rx_queue_id, + void **pp_rxq_handlers, u8 num_rxqs, u8 complete_cqe_flg, u8 complete_event_flg, @@ -217,27 +240,79 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); -int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, - struct qed_sp_vport_start_params *p_params); +void qed_reset_vport_stats(struct qed_dev *cdev); + +struct qed_queue_cid { + /* 'Relative' is a relative term ;-). Usually the indices [not counting + * SBs] would be PF-relative, but there are some cases where that isn't + * the case - specifically for a PF configuring its VF indices it's + * possible some fields [E.g., stats-id] in 'rel' would already be abs. + */ + struct qed_queue_start_common_params rel; + struct qed_queue_start_common_params abs; + u32 cid; + u16 opaque_fid; + + /* VFs queues are mapped differently, so we need to know the + * relative queue associated with them [0-based]. + * Notice this is relevant on the *PF* queue-cid of its VF's queues, + * and not on the VF itself. + */ + bool is_vf; + u8 vf_qid; + + /* Legacy VFs might have Rx producer located elsewhere */ + bool b_legacy_vf; +}; -int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u32 cid, - struct qed_queue_start_common_params *params, - u8 stats_id, - u16 bd_max_bytes, - dma_addr_t bd_chain_phys_addr, - dma_addr_t cqe_pbl_addr, - u16 cqe_pbl_size, bool b_use_zone_a_prod); - -int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u32 cid, - struct qed_queue_start_common_params *p_params, - u8 stats_id, - dma_addr_t pbl_addr, - u16 pbl_size, - union qed_qm_pq_params *p_pq_params); +void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid); + +struct qed_queue_cid *_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + u8 vf_qid, + struct qed_queue_start_common_params + *p_params); + +int +qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params); + +/** + * @brief - Starts an Rx queue, when queue_cid is already prepared + * + * @param p_hwfn + * @param p_cid + * @param bd_max_bytes + * @param bd_chain_phys_addr + * @param cqe_pbl_addr + * @param cqe_pbl_size + * + * @return int + */ +int +qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size); + +/** + * @brief - Starts a Tx queue, where queue_cid is already prepared + * + * @param p_hwfn + * @param p_cid + * @param pbl_addr + * @param pbl_size + * @param p_pq_params - parameters for choosing the PQ for this Tx queue + * + * @return int + */ +int +qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id); u8 qed_mcast_bin_from_mac(u8 *mac); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 
f95385cbbd40..8e5cb7605b0f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -36,6 +36,7 @@ #include "qed_int.h" #include "qed_ll2.h" #include "qed_mcp.h" +#include "qed_ooo.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_roce.h" @@ -296,25 +297,34 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) list_del(&p_pkt->list_entry); b_last_packet = list_empty(&p_tx->active_descq); list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); - p_tx->cur_completing_packet = *p_pkt; - p_tx->cur_completing_bd_idx = 1; - b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; - tx_frag = p_pkt->bds_set[0].tx_frag; - if (p_ll2_conn->gsi_enable) - qed_ll2b_release_tx_gsi_packet(p_hwfn, - p_ll2_conn->my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, - b_last_packet); - else - qed_ll2b_complete_tx_packet(p_hwfn, - p_ll2_conn->my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, - b_last_packet); + if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { + struct qed_ooo_buffer *p_buffer; + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer); + } else { + p_tx->cur_completing_packet = *p_pkt; + p_tx->cur_completing_bd_idx = 1; + b_last_frag = + p_tx->cur_completing_bd_idx == p_pkt->bd_used; + tx_frag = p_pkt->bds_set[0].tx_frag; + if (p_ll2_conn->gsi_enable) + qed_ll2b_release_tx_gsi_packet(p_hwfn, + p_ll2_conn-> + my_id, + p_pkt->cookie, + tx_frag, + b_last_frag, + b_last_packet); + else + qed_ll2b_complete_tx_packet(p_hwfn, + p_ll2_conn->my_id, + p_pkt->cookie, + tx_frag, + b_last_frag, + b_last_packet); + } } } @@ -540,12 +550,457 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); - rx_buf_addr = p_pkt->rx_buf_addr; - cookie = p_pkt->cookie; + if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { + struct qed_ooo_buffer *p_buffer; + + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer); + } else { + rx_buf_addr = p_pkt->rx_buf_addr; + cookie = p_pkt->cookie; + + b_last = list_empty(&p_rx->active_descq); + } + } +} + +#if IS_ENABLED(CONFIG_QED_ISCSI) +static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags) +{ + u8 bd_flags = 0; + + if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST)) + SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1); + + return bd_flags; +} + +static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; + u16 packet_length = 0, parse_flags = 0, vlan = 0; + struct qed_ll2_rx_packet *p_pkt = NULL; + u32 num_ooo_add_to_peninsula = 0, cid; + union core_rx_cqe_union *cqe = NULL; + u16 cq_new_idx = 0, cq_old_idx = 0; + struct qed_ooo_buffer *p_buffer; + struct ooo_opaque *iscsi_ooo; + u8 placement_offset = 0; + u8 cqe_type; + + cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); + cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); + if (cq_new_idx == cq_old_idx) + return 0; + + while (cq_new_idx != cq_old_idx) { + struct core_rx_fast_path_cqe *p_cqe_fp; + + cqe = qed_chain_consume(&p_rx->rcq_chain); + cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); + cqe_type = cqe->rx_cqe_sp.type; + + if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { + DP_NOTICE(p_hwfn, + "Got a non-regular LB LL2 completion [type 0x%02x]\n", + cqe_type); + return -EINVAL; + } + p_cqe_fp = &cqe->rx_cqe_fp; 
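/* Illustrative sketch, not part of the patch: the OOO Rx and Tx completion
 * handlers below walk a 16-bit firmware producer index against the driver's
 * consumer index, so wraparound is handled with modular (s16-cast) arithmetic,
 * as in the num_bds computation of qed_ll2_lb_txq_completion(). Standalone
 * userspace version of that index math:
 */
#include <stdio.h>
#include <stdint.h>

/* How many ring entries still need processing, with 16-bit wraparound. */
static uint16_t ring_pending(uint16_t fw_producer, uint16_t driver_consumer)
{
        return (uint16_t)((int16_t)fw_producer - (int16_t)driver_consumer);
}

int main(void)
{
        /* no wrap */
        printf("%u\n", (unsigned int)ring_pending(110, 100));    /* 10 */
        /* producer wrapped past 0xffff while the consumer did not */
        printf("%u\n", (unsigned int)ring_pending(5, 0xfffb));   /* 10 */
        return 0;
}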
+ + placement_offset = p_cqe_fp->placement_offset; + parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags); + packet_length = le16_to_cpu(p_cqe_fp->packet_length); + vlan = le16_to_cpu(p_cqe_fp->vlan); + iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data; + qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, + iscsi_ooo); + cid = le32_to_cpu(iscsi_ooo->cid); + + /* Process delete isle first */ + if (iscsi_ooo->drop_size) + qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid, + iscsi_ooo->drop_isle, + iscsi_ooo->drop_size); + + if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP) + continue; + + /* Now process create/add/join isles */ + if (list_empty(&p_rx->active_descq)) { + DP_NOTICE(p_hwfn, + "LL2 OOO RX chain has no submitted buffers\n" + ); + return -EIO; + } + + p_pkt = list_first_entry(&p_rx->active_descq, + struct qed_ll2_rx_packet, list_entry); + + if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) || + (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) || + (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) || + (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) || + (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) { + if (!p_pkt) { + DP_NOTICE(p_hwfn, + "LL2 OOO RX packet is not valid\n"); + return -EIO; + } + list_del(&p_pkt->list_entry); + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + p_buffer->packet_length = packet_length; + p_buffer->parse_flags = parse_flags; + p_buffer->vlan = vlan; + p_buffer->placement_offset = placement_offset; + qed_chain_consume(&p_rx->rxq_chain); + list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); + + switch (iscsi_ooo->ooo_opcode) { + case TCP_EVENT_ADD_NEW_ISLE: + qed_ooo_add_new_isle(p_hwfn, + p_hwfn->p_ooo_info, + cid, + iscsi_ooo->ooo_isle, + p_buffer); + break; + case TCP_EVENT_ADD_ISLE_RIGHT: + qed_ooo_add_new_buffer(p_hwfn, + p_hwfn->p_ooo_info, + cid, + iscsi_ooo->ooo_isle, + p_buffer, + QED_OOO_RIGHT_BUF); + break; + case TCP_EVENT_ADD_ISLE_LEFT: + qed_ooo_add_new_buffer(p_hwfn, + p_hwfn->p_ooo_info, + cid, + iscsi_ooo->ooo_isle, + p_buffer, + QED_OOO_LEFT_BUF); + break; + case TCP_EVENT_JOIN: + qed_ooo_add_new_buffer(p_hwfn, + p_hwfn->p_ooo_info, + cid, + iscsi_ooo->ooo_isle + + 1, + p_buffer, + QED_OOO_LEFT_BUF); + qed_ooo_join_isles(p_hwfn, + p_hwfn->p_ooo_info, + cid, iscsi_ooo->ooo_isle); + break; + case TCP_EVENT_ADD_PEN: + num_ooo_add_to_peninsula++; + qed_ooo_put_ready_buffer(p_hwfn, + p_hwfn->p_ooo_info, + p_buffer, true); + break; + } + } else { + DP_NOTICE(p_hwfn, + "Unexpected event (%d) TX OOO completion\n", + iscsi_ooo->ooo_opcode); + } + } + + return 0; +} + +static void +qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ooo_buffer *p_buffer; + int rc; + u16 l4_hdr_offset_w; + dma_addr_t first_frag; + u16 parse_flags; + u8 bd_flags; + + /* Submit Tx buffers here */ + while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn, + p_hwfn->p_ooo_info))) { + l4_hdr_offset_w = 0; + bd_flags = 0; + + first_frag = p_buffer->rx_buffer_phys_addr + + p_buffer->placement_offset; + parse_flags = p_buffer->parse_flags; + bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags); + SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1); + SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1); + + rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1, + p_buffer->vlan, bd_flags, + l4_hdr_offset_w, + p_ll2_conn->tx_dest, 0, + first_frag, + p_buffer->packet_length, + p_buffer, true); + if (rc) { + qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer, false); + break; + } + } 
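/* Illustrative sketch, not part of the patch: qed_ll2_convert_rx_parse_to_tx_flags()
 * and qed_ooo_submit_tx_buffers() use mask-and-shift helpers to turn Rx parse
 * flags into Tx BD flags before reposting an OOO buffer. The masks, shifts and
 * macros below are made up for the sketch and only approximate the kernel's
 * GET_FIELD()/SET_FIELD(); the real PARSING_AND_ERR_FLAGS_* and
 * CORE_TX_BD_FLAGS_* values come from the qed HSI headers:
 */
#include <stdio.h>
#include <stdint.h>

#define RX_FLAG_VLAN_EXIST_MASK    0x1
#define RX_FLAG_VLAN_EXIST_SHIFT   5
#define TX_BD_VLAN_INSERTION_MASK  0x1
#define TX_BD_VLAN_INSERTION_SHIFT 2

#define GET_FIELD(val, name)  (((val) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(var, name, v) \
        ((var) = ((var) & ~(name##_MASK << name##_SHIFT)) | \
                 (((v) & name##_MASK) << name##_SHIFT))

static uint8_t rx_parse_to_tx_flags(uint16_t parse_flags)
{
        uint8_t bd_flags = 0;

        /* If the received frame carried a VLAN tag, ask the Tx BD to
         * re-insert one when the buffer is forwarded back out.
         */
        if (GET_FIELD(parse_flags, RX_FLAG_VLAN_EXIST))
                SET_FIELD(bd_flags, TX_BD_VLAN_INSERTION, 1);

        return bd_flags;
}

int main(void)
{
        printf("0x%02x\n", rx_parse_to_tx_flags(1u << 5)); /* 0x04 */
        printf("0x%02x\n", rx_parse_to_tx_flags(0));       /* 0x00 */
        return 0;
}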
+} + +static void +qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ooo_buffer *p_buffer; + int rc; + + while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, + p_hwfn->p_ooo_info))) { + rc = qed_ll2_post_rx_buffer(p_hwfn, + p_ll2_conn->my_id, + p_buffer->rx_buffer_phys_addr, + 0, p_buffer, true); + if (rc) { + qed_ooo_put_free_buffer(p_hwfn, + p_hwfn->p_ooo_info, p_buffer); + break; + } + } +} + +static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) +{ + struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; + int rc; + + rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); + if (rc) + return rc; + + qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); + qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); + + return 0; +} + +static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) +{ + struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; + struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; + struct qed_ll2_tx_packet *p_pkt = NULL; + struct qed_ooo_buffer *p_buffer; + bool b_dont_submit_rx = false; + u16 new_idx = 0, num_bds = 0; + int rc; + + new_idx = le16_to_cpu(*p_tx->p_fw_cons); + num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); + + if (!num_bds) + return 0; + + while (num_bds) { + if (list_empty(&p_tx->active_descq)) + return -EINVAL; + + p_pkt = list_first_entry(&p_tx->active_descq, + struct qed_ll2_tx_packet, list_entry); + if (!p_pkt) + return -EINVAL; + + if (p_pkt->bd_used != 1) { + DP_NOTICE(p_hwfn, + "Unexpectedly many BDs(%d) in TX OOO completion\n", + p_pkt->bd_used); + return -EINVAL; + } + + list_del(&p_pkt->list_entry); + + num_bds--; + p_tx->bds_idx++; + qed_chain_consume(&p_tx->txq_chain); + + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); + + if (b_dont_submit_rx) { + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer); + continue; + } + + rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id, + p_buffer->rx_buffer_phys_addr, 0, + p_buffer, true); + if (rc != 0) { + qed_ooo_put_free_buffer(p_hwfn, + p_hwfn->p_ooo_info, p_buffer); + b_dont_submit_rx = true; + } + } + + qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); + + return 0; +} + +static int +qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_info, + u16 rx_num_ooo_buffers, u16 mtu) +{ + struct qed_ooo_buffer *p_buf = NULL; + void *p_virt; + u16 buf_idx; + int rc = 0; + + if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO) + return rc; + + if (!rx_num_ooo_buffers) + return -EINVAL; + + for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) { + p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL); + if (!p_buf) { + rc = -ENOMEM; + goto out; + } + + p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE; + p_buf->rx_buffer_size = (p_buf->rx_buffer_size + + ETH_CACHE_LINE_SIZE - 1) & + ~(ETH_CACHE_LINE_SIZE - 1); + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_buf->rx_buffer_size, + &p_buf->rx_buffer_phys_addr, + GFP_KERNEL); + if (!p_virt) { + kfree(p_buf); + rc = -ENOMEM; + goto out; + } + + p_buf->rx_buffer_virt_addr = p_virt; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf); + } + + DP_VERBOSE(p_hwfn, QED_MSG_LL2, + "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n", + rx_num_ooo_buffers, p_buf->rx_buffer_size); + +out: + return rc; +} + +static void +qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + if (p_ll2_conn->conn_type != 
QED_LL2_TYPE_ISCSI_OOO) + return; + + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); +} + +static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ooo_buffer *p_buffer; + + if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO) + return; + + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, + p_hwfn->p_ooo_info))) { + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_buffer->rx_buffer_size, + p_buffer->rx_buffer_virt_addr, + p_buffer->rx_buffer_phys_addr); + kfree(p_buffer); + } +} + +static void qed_ll2_stop_ooo(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; + + DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n", + *handle); + + qed_ll2_terminate_connection(hwfn, *handle); + qed_ll2_release_connection(hwfn, *handle); + *handle = QED_LL2_UNUSED_HANDLE; +} + +static int qed_ll2_start_ooo(struct qed_dev *cdev, + struct qed_ll2_params *params) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; + struct qed_ll2_info *ll2_info; + int rc; + + ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL); + if (!ll2_info) + return -ENOMEM; + ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO; + ll2_info->mtu = params->mtu; + ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets; + ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping; + ll2_info->tx_tc = OOO_LB_TC; + ll2_info->tx_dest = CORE_TX_DEST_LB; + + rc = qed_ll2_acquire_connection(hwfn, ll2_info, + QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, + handle); + kfree(ll2_info); + if (rc) { + DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n"); + goto out; + } - b_last = list_empty(&p_rx->active_descq); + rc = qed_ll2_establish_connection(hwfn, *handle); + if (rc) { + DP_INFO(cdev, "Failed to establist LL2 OOO connection\n"); + goto fail; } + + return 0; + +fail: + qed_ll2_release_connection(hwfn, *handle); +out: + *handle = QED_LL2_UNUSED_HANDLE; + return rc; } +#else /* IS_ENABLED(CONFIG_QED_ISCSI) */ +static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, + void *p_cookie) { return -EINVAL; } +static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, + void *p_cookie) { return -EINVAL; } +static inline int +qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_info, + u16 rx_num_ooo_buffers, u16 mtu) { return 0; } +static inline void +qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) { return; } +static inline void +qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) { return; } +static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; } +static inline int qed_ll2_start_ooo(struct qed_dev *cdev, + struct qed_ll2_params *params) + { return -EINVAL; } +#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn, @@ -588,7 +1043,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg; p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en; p_ramrod->queue_id = p_ll2_conn->queue_id; - p_ramrod->main_func_queue = 1; + p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 
0 + : 1; if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) { @@ -619,6 +1075,11 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) return 0; + if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) + p_ll2_conn->tx_stats_en = 0; + else + p_ll2_conn->tx_stats_en = 1; + /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_ll2_conn->cid; @@ -636,7 +1097,6 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); p_ramrod->sb_index = p_tx->tx_sb_index; p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu); - p_ll2_conn->tx_stats_en = 1; p_ramrod->stats_en = p_ll2_conn->tx_stats_en; p_ramrod->stats_id = p_ll2_conn->tx_stats_id; @@ -860,9 +1320,19 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, if (rc) goto q_allocate_fail; + rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info, + rx_num_desc * 2, p_params->mtu); + if (rc) + goto q_allocate_fail; + /* Register callbacks for the Rx/Tx queues */ - comp_rx_cb = qed_ll2_rxq_completion; - comp_tx_cb = qed_ll2_txq_completion; + if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) { + comp_rx_cb = qed_ll2_lb_rxq_completion; + comp_tx_cb = qed_ll2_lb_txq_completion; + } else { + comp_rx_cb = qed_ll2_rxq_completion; + comp_tx_cb = qed_ll2_txq_completion; + } if (rx_num_desc) { qed_int_register_cb(p_hwfn, comp_rx_cb, @@ -975,6 +1445,8 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1); + qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); + return rc; } @@ -1213,6 +1685,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, u16 vlan, u8 bd_flags, u16 l4_hdr_offset_w, + enum qed_ll2_tx_dest e_tx_dest, enum qed_ll2_roce_flavor_type qed_roce_flavor, dma_addr_t first_frag, u16 first_frag_len, void *cookie, u8 notify_fw) @@ -1222,6 +1695,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, enum core_roce_flavor_type roce_flavor; struct qed_ll2_tx_queue *p_tx; struct qed_chain *p_tx_chain; + enum core_tx_dest tx_dest; unsigned long flags; int rc = 0; @@ -1252,6 +1726,8 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, goto out; } + tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? 
CORE_TX_DEST_NW : + CORE_TX_DEST_LB; if (qed_roce_flavor == QED_LL2_ROCE) { roce_flavor = CORE_ROCE; } else if (qed_roce_flavor == QED_LL2_RROCE) { @@ -1266,7 +1742,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, num_of_bds, first_frag, first_frag_len, cookie, notify_fw); qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, - num_of_bds, CORE_TX_DEST_NW, + num_of_bds, tx_dest, vlan, bd_flags, l4_hdr_offset_w, roce_flavor, first_frag, first_frag_len); @@ -1341,6 +1817,9 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) qed_ll2_rxq_flush(p_hwfn, connection_handle); } + if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + return rc; } @@ -1371,6 +1850,8 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid); + qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn); + mutex_lock(&p_ll2_conn->mutex); p_ll2_conn->b_active = false; mutex_unlock(&p_ll2_conn->mutex); @@ -1517,6 +1998,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) enum qed_ll2_conn_type conn_type; struct qed_ptt *p_ptt; int rc, i; + u8 gsi_enable = 1; /* Initialize LL2 locks & lists */ INIT_LIST_HEAD(&cdev->ll2->list); @@ -1548,6 +2030,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) switch (QED_LEADING_HWFN(cdev)->hw_info.personality) { case QED_PCI_ISCSI: conn_type = QED_LL2_TYPE_ISCSI; + gsi_enable = 0; break; case QED_PCI_ETH_ROCE: conn_type = QED_LL2_TYPE_ROCE; @@ -1564,7 +2047,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping; ll2_info.tx_tc = 0; ll2_info.tx_dest = CORE_TX_DEST_NW; - ll2_info.gsi_enable = 1; + ll2_info.gsi_enable = gsi_enable; rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info, QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, @@ -1611,6 +2094,17 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) goto release_terminate; } + if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI && + cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) { + DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n"); + rc = qed_ll2_start_ooo(cdev, params); + if (rc) { + DP_INFO(cdev, + "Failed to initialize the OOO LL2 queue\n"); + goto release_terminate; + } + } + p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); if (!p_ptt) { DP_INFO(cdev, "Failed to acquire PTT\n"); @@ -1660,6 +2154,10 @@ static int qed_ll2_stop(struct qed_dev *cdev) qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt); eth_zero_addr(cdev->ll2_mac_address); + if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI && + cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) + qed_ll2_stop_ooo(cdev); + rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle); if (rc) @@ -1714,7 +2212,8 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), cdev->ll2->handle, 1 + skb_shinfo(skb)->nr_frags, - vlan, flags, 0, 0 /* RoCE FLAVOR */, + vlan, flags, 0, QED_LL2_TX_DEST_NW, + 0 /* RoCE FLAVOR */, mapping, skb->len, skb, 1); if (rc) goto err; @@ -1730,6 +2229,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) mapping))) { DP_NOTICE(cdev, "Unable to map frag - dropping packet\n"); + rc = -ENOMEM; goto err; } } else { diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h 
b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 4e3d62a16cab..6625a3ae5a33 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -41,6 +41,12 @@ enum qed_ll2_conn_type { MAX_QED_LL2_RX_CONN_TYPE }; +enum qed_ll2_tx_dest { + QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */ + QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */ + QED_LL2_TX_DEST_MAX +}; + struct qed_ll2_rx_packet { struct list_head list_entry; struct core_rx_bd_with_buff_len *rxq_bd; @@ -192,6 +198,8 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, * @param l4_hdr_offset_w L4 Header Offset from start of packet * (in words). This is needed if both l4_csum * and ipv6_ext are set + * @param e_tx_dest indicates if the packet is to be transmitted via + * loopback or to the network * @param first_frag * @param first_frag_len * @param cookie @@ -206,6 +214,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, u16 vlan, u8 bd_flags, u16 l4_hdr_offset_w, + enum qed_ll2_tx_dest e_tx_dest, enum qed_ll2_roce_flavor_type qed_roce_flavor, dma_addr_t first_frag, u16 first_frag_len, void *cookie, u8 notify_fw); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 333c7442e48a..aeb98d8c5626 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -221,6 +221,10 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->fw_eng = FW_ENGINEERING_VERSION; dev_info->mf_mode = cdev->mf_mode; dev_info->tx_switching = true; + + if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support == + QED_WOL_SUPPORT_PME) + dev_info->wol_support = true; } else { qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, &dev_info->fw_minor, &dev_info->fw_rev, @@ -243,6 +247,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, &dev_info->mfw_rev, NULL); } + dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu; + return 0; } @@ -1430,11 +1436,106 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) return status; } +static int qed_update_wol(struct qed_dev *cdev, bool enabled) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED + : QED_OV_WOL_DISABLED); + if (rc) + goto out; + rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); + +out: + qed_ptt_release(hwfn, ptt); + return rc; +} + +static int qed_update_drv_state(struct qed_dev *cdev, bool active) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int status = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? 
+ QED_OV_DRIVER_STATE_ACTIVE : + QED_OV_DRIVER_STATE_DISABLED); + + qed_ptt_release(hwfn, ptt); + + return status; +} + +static int qed_update_mac(struct qed_dev *cdev, u8 *mac) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int status = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_mcp_ov_update_mac(hwfn, ptt, mac); + if (status) + goto out; + + status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); + +out: + qed_ptt_release(hwfn, ptt); + return status; +} + +static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int status = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu); + if (status) + goto out; + + status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); + +out: + qed_ptt_release(hwfn, ptt); + return status; +} + static struct qed_selftest_ops qed_selftest_ops_pass = { .selftest_memory = &qed_selftest_memory, .selftest_interrupt = &qed_selftest_interrupt, .selftest_register = &qed_selftest_register, .selftest_clock = &qed_selftest_clock, + .selftest_nvram = &qed_selftest_nvram, }; const struct qed_common_ops qed_common_ops_pass = { @@ -1464,6 +1565,10 @@ const struct qed_common_ops qed_common_ops_pass = { .get_coalesce = &qed_get_coalesce, .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, + .update_drv_state = &qed_update_drv_state, + .update_mac = &qed_update_mac, + .update_mtu = &qed_update_mtu, + .update_wol = &qed_update_wol, }; void qed_get_protocol_stats(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index bdc9ba92f6d4..6dd3ce443484 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> +#include <linux/etherdevice.h> #include "qed.h" #include "qed_dcbx.h" #include "qed_hsi.h" @@ -329,6 +330,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_mcp_mb_params *p_mb_params) { u32 union_data_addr; + int rc; /* MCP not initialized */ @@ -374,11 +376,32 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_param) { struct qed_mcp_mb_params mb_params; + union drv_union_data data_src; int rc; memset(&mb_params, 0, sizeof(mb_params)); + memset(&data_src, 0, sizeof(data_src)); mb_params.cmd = cmd; mb_params.param = param; + + /* In case of UNLOAD_DONE, set the primary MAC */ + if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) && + (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) { + u8 *p_mac = p_hwfn->cdev->wol_mac; + + data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1]; + data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 | + p_mac[4] << 8 | p_mac[5]; + + DP_VERBOSE(p_hwfn, + (QED_MSG_SP | NETIF_MSG_IFDOWN), + "Setting WoL MAC: %pM --> [%08x,%08x]\n", + p_mac, data_src.wol_mac.mac_upper, + data_src.wol_mac.mac_lower); + + mb_params.p_data_src = &data_src; + } + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc; @@ -1001,28 +1024,89 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) return 0; } +/* Old MFW has a global configuration for all PFs regarding RDMA support */ +static void +qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn, + enum qed_pci_personality *p_proto) +{ + /* 
There wasn't ever a legacy MFW that published iwarp. + * So at this point, this is either plain l2 or RoCE. + */ + if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities)) + *p_proto = QED_PCI_ETH_ROCE; + else + *p_proto = QED_PCI_ETH; + + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "According to Legacy capabilities, L2 personality is %08x\n", + (u32) *p_proto); +} + +static int +qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_pci_personality *p_proto) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, ¶m); + if (rc) + return rc; + if (resp != FW_MSG_CODE_OK) { + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "MFW lacks support for command; Returns %08x\n", + resp); + return -EINVAL; + } + + switch (param) { + case FW_MB_PARAM_GET_PF_RDMA_NONE: + *p_proto = QED_PCI_ETH; + break; + case FW_MB_PARAM_GET_PF_RDMA_ROCE: + *p_proto = QED_PCI_ETH_ROCE; + break; + case FW_MB_PARAM_GET_PF_RDMA_BOTH: + DP_NOTICE(p_hwfn, + "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n"); + *p_proto = QED_PCI_ETH_ROCE; + break; + case FW_MB_PARAM_GET_PF_RDMA_IWARP: + default: + DP_NOTICE(p_hwfn, + "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n", + param); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, + NETIF_MSG_IFUP, + "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n", + (u32) *p_proto, resp, param); + return 0; +} + static int qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, struct public_func *p_info, + struct qed_ptt *p_ptt, enum qed_pci_personality *p_proto) { int rc = 0; switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { case FUNC_MF_CFG_PROTOCOL_ETHERNET: - if (test_bit(QED_DEV_CAP_ROCE, - &p_hwfn->hw_info.device_capabilities)) - *p_proto = QED_PCI_ETH_ROCE; - else - *p_proto = QED_PCI_ETH; + if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto)) + qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); break; case FUNC_MF_CFG_PROTOCOL_ISCSI: *p_proto = QED_PCI_ISCSI; break; case FUNC_MF_CFG_PROTOCOL_ROCE: DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); - rc = -EINVAL; - break; + /* Fallthrough */ default: rc = -EINVAL; } @@ -1042,7 +1126,8 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, info->pause_on_host = (shmem_info.config & FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 
1 : 0; - if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) { + if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, + &info->protocol)) { DP_ERR(p_hwfn, "Unknown personality %08x\n", (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); return -EINVAL; @@ -1057,6 +1142,9 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, info->mac[3] = (u8)(shmem_info.mac_lower >> 16); info->mac[4] = (u8)(shmem_info.mac_lower >> 8); info->mac[5] = (u8)(shmem_info.mac_lower); + + /* Store primary MAC for later possible WoL */ + memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN); } else { DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n"); } @@ -1068,13 +1156,30 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); + info->mtu = (u16)shmem_info.mtu_size; + + p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE; + p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT; + if (qed_mcp_is_init(p_hwfn)) { + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_OS_WOL, 0, &resp, ¶m); + if (rc) + return rc; + if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED) + p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME; + } + DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP), - "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n", + "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n", info->pause_on_host, info->protocol, info->bandwidth_min, info->bandwidth_max, info->mac[0], info->mac[1], info->mac[2], info->mac[3], info->mac[4], info->mac[5], - info->wwn_port, info->wwn_node, info->ovlan); + info->wwn_port, info->wwn_node, + info->ovlan, (u8)p_hwfn->hw_info.b_wol_support); return 0; } @@ -1223,6 +1328,178 @@ int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? 
-EAGAIN : 0; } +int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_client client) +{ + u32 resp = 0, param = 0; + u32 drv_mb_param; + int rc; + + switch (client) { + case QED_OV_CLIENT_DRV: + drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS; + break; + case QED_OV_CLIENT_USER: + drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER; + break; + case QED_OV_CLIENT_VENDOR_SPEC: + drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC; + break; + default: + DP_NOTICE(p_hwfn, "Invalid client type %d\n", client); + return -EINVAL; + } + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG, + drv_mb_param, &resp, ¶m); + if (rc) + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + + return rc; +} + +int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_driver_state drv_state) +{ + u32 resp = 0, param = 0; + u32 drv_mb_param; + int rc; + + switch (drv_state) { + case QED_OV_DRIVER_STATE_NOT_LOADED: + drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED; + break; + case QED_OV_DRIVER_STATE_DISABLED: + drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED; + break; + case QED_OV_DRIVER_STATE_ACTIVE: + drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE; + break; + default: + DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state); + return -EINVAL; + } + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE, + drv_mb_param, &resp, ¶m); + if (rc) + DP_ERR(p_hwfn, "Failed to send driver state\n"); + + return rc; +} + +int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 mtu) +{ + u32 resp = 0, param = 0; + u32 drv_mb_param; + int rc; + + drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT; + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU, + drv_mb_param, &resp, ¶m); + if (rc) + DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc); + + return rc; +} + +int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 *mac) +{ + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; + int rc; + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_SET_VMAC; + mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC << + DRV_MSG_CODE_VMAC_TYPE_SHIFT; + mb_params.param |= MCP_PF_ID(p_hwfn); + ether_addr_copy(&union_data.raw_data[0], mac); + mb_params.p_data_src = &union_data; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc); + + /* Store primary MAC for later possible WoL */ + memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN); + + return rc; +} + +int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_ov_wol wol) +{ + u32 resp = 0, param = 0; + u32 drv_mb_param; + int rc; + + if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Can't change WoL configuration when WoL isn't supported\n"); + return -EINVAL; + } + + switch (wol) { + case QED_OV_WOL_DEFAULT: + drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT; + break; + case QED_OV_WOL_DISABLED: + drv_mb_param = DRV_MB_PARAM_WOL_DISABLED; + break; + case QED_OV_WOL_ENABLED: + drv_mb_param = DRV_MB_PARAM_WOL_ENABLED; + break; + default: + DP_ERR(p_hwfn, "Invalid wol state %d\n", wol); + return -EINVAL; + } + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL, + drv_mb_param, &resp, ¶m); + if (rc) + DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc); + + /* Store the WoL update for a future 
unload */ + p_hwfn->cdev->wol_config = (u8)wol; + + return rc; +} + +int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_eswitch eswitch) +{ + u32 resp = 0, param = 0; + u32 drv_mb_param; + int rc; + + switch (eswitch) { + case QED_OV_ESWITCH_NONE: + drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE; + break; + case QED_OV_ESWITCH_VEB: + drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB; + break; + case QED_OV_ESWITCH_VEPA: + drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA; + break; + default: + DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch); + return -EINVAL; + } + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE, + drv_mb_param, &resp, ¶m); + if (rc) + DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc); + + return rc; +} + int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_led_mode mode) { @@ -1271,6 +1548,52 @@ int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, return rc; } +int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) +{ + u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0; + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + u32 resp = 0, resp_param = 0; + struct qed_ptt *p_ptt; + int rc = 0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + while (bytes_left > 0) { + bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN); + + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_NVM_READ_NVRAM, + addr + offset + + (bytes_to_copy << + DRV_MB_PARAM_NVM_LEN_SHIFT), + &resp, &resp_param, + &read_len, + (u32 *)(p_buf + offset)); + + if (rc || (resp != FW_MSG_CODE_NVM_OK)) { + DP_NOTICE(cdev, "MCP command rc = %d\n", rc); + break; + } + + /* This can be a lengthy process, and it's possible scheduler + * isn't preemptable. Sleep a bit to prevent CPU hogging. 
+ */ + if (bytes_left % 0x1000 < + (bytes_left - read_len) % 0x1000) + usleep_range(1000, 2000); + + offset += read_len; + bytes_left -= read_len; + } + + cdev->mcp_nvm_resp = resp; + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 drv_mb_param = 0, rsp, param; @@ -1312,3 +1635,101 @@ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return rc; } + +int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *num_images) +{ + u32 drv_mb_param = 0, rsp; + int rc = 0; + + drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES << + DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, + drv_mb_param, &rsp, num_images); + if (rc) + return rc; + + if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) + rc = -EINVAL; + + return rc; +} + +int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct bist_nvm_image_att *p_image_att, + u32 image_index) +{ + u32 buf_size = 0, param, resp = 0, resp_param = 0; + int rc; + + param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX << + DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT; + param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT; + + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_BIST_TEST, param, + &resp, &resp_param, + &buf_size, + (u32 *)p_image_att); + if (rc) + return rc; + + if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || + (p_image_att->return_code != 1)) + rc = -EINVAL; + + return rc; +} + +#define QED_RESC_ALLOC_VERSION_MAJOR 1 +#define QED_RESC_ALLOC_VERSION_MINOR 0 +#define QED_RESC_ALLOC_VERSION \ + ((QED_RESC_ALLOC_VERSION_MAJOR << \ + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \ + (QED_RESC_ALLOC_VERSION_MINOR << \ + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT)) +int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct resource_info *p_resc_info, + u32 *p_mcp_resp, u32 *p_mcp_param) +{ + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; + int rc; + + memset(&mb_params, 0, sizeof(mb_params)); + memset(&union_data, 0, sizeof(union_data)); + mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; + mb_params.param = QED_RESC_ALLOC_VERSION; + + /* Need to have a sufficient large struct, as the cmd_and_union + * is going to do memcpy from and to it. 
+ */ + memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info)); + + mb_params.p_data_src = &union_data; + mb_params.p_data_dst = &union_data; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + /* Copy the data back */ + memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info)); + *p_mcp_resp = mb_params.mcp_resp; + *p_mcp_param = mb_params.mcp_param; + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n", + *p_mcp_param, + p_resc_info->res_id, + p_resc_info->size, + p_resc_info->offset, + p_resc_info->vf_size, + p_resc_info->vf_offset, p_resc_info->flags); + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index dff520ed069b..407a2c1830fb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -92,6 +92,8 @@ struct qed_mcp_function_info { #define QED_MCP_VLAN_UNSET (0xffff) u16 ovlan; + + u16 mtu; }; struct qed_mcp_nvm_common { @@ -147,6 +149,30 @@ union qed_mcp_protocol_stats { struct qed_mcp_rdma_stats rdma_stats; }; +enum qed_ov_eswitch { + QED_OV_ESWITCH_NONE, + QED_OV_ESWITCH_VEB, + QED_OV_ESWITCH_VEPA +}; + +enum qed_ov_client { + QED_OV_CLIENT_DRV, + QED_OV_CLIENT_USER, + QED_OV_CLIENT_VENDOR_SPEC +}; + +enum qed_ov_driver_state { + QED_OV_DRIVER_STATE_NOT_LOADED, + QED_OV_DRIVER_STATE_DISABLED, + QED_OV_DRIVER_STATE_ACTIVE +}; + +enum qed_ov_wol { + QED_OV_WOL_DEFAULT, + QED_OV_WOL_DISABLED, + QED_OV_WOL_ENABLED +}; + /** * @brief - returns the link params of the hw function * @@ -278,6 +304,69 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_mcp_drv_version *p_ver); /** + * @brief Notify MFW about the change in base device properties + * + * @param p_hwfn + * @param p_ptt + * @param client - qed client type + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_client client); + +/** + * @brief Notify MFW about the driver state + * + * @param p_hwfn + * @param p_ptt + * @param drv_state - Driver state + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_driver_state drv_state); + +/** + * @brief Send MTU size to MFW + * + * @param p_hwfn + * @param p_ptt + * @param mtu - MTU size + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 mtu); + +/** + * @brief Send MAC address to MFW + * + * @param p_hwfn + * @param p_ptt + * @param mac - MAC address + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 *mac); + +/** + * @brief Send WOL mode to MFW + * + * @param p_hwfn + * @param p_ptt + * @param wol - WOL mode + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_wol wol); + +/** * @brief Set LED status * * @param p_hwfn @@ -291,6 +380,18 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, enum qed_led_mode mode); /** + * @brief Read from nvm + * + * @param cdev + * @param addr - nvm offset + * @param p_buf - nvm read buffer + * @param len - buffer len + * + * @return int - 0 - operation was successful. 
+ */ +int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len); + +/** * @brief Bist register test * * @param p_hwfn - hw function @@ -312,6 +413,35 @@ int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief Bist nvm test - get number of images + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param num_images - number of images if operation was + * successful. 0 if not. + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *num_images); + +/** + * @brief Bist nvm test - get image attributes by index + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param p_image_att - Attributes of image + * @param image_index - Index of image to get information for + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct bist_nvm_image_att *p_image_att, + u32 image_index); + /* Using hwfn number (and not pf_num) is required since in CMT mode, * same pf_num may be used by two different hwfn * TODO - this shouldn't really be in .h file, but until all fields @@ -546,4 +676,32 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 mask_parities); +/** + * @brief Send eswitch mode to MFW + * + * @param p_hwfn + * @param p_ptt + * @param eswitch - eswitch mode + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_eswitch eswitch); + +/** + * @brief - Gets the MFW allocation info for the given resource + * + * @param p_hwfn + * @param p_ptt + * @param p_resc_info - descriptor of requested resource + * @param p_mcp_resp + * @param p_mcp_param + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct resource_info *p_resc_info, + u32 *p_mcp_resp, u32 *p_mcp_param); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c new file mode 100644 index 000000000000..155abcb507fd --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -0,0 +1,501 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#include <linux/types.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_iscsi.h" +#include "qed_ll2.h" +#include "qed_ooo.h" + +static struct qed_ooo_archipelago +*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn, + struct qed_ooo_info + *p_ooo_info, + u32 cid) +{ + struct qed_ooo_archipelago *p_archipelago = NULL; + + list_for_each_entry(p_archipelago, + &p_ooo_info->archipelagos_list, list_entry) { + if (p_archipelago->cid == cid) + return p_archipelago; + } + + return NULL; +} + +static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 isle) +{ + struct qed_ooo_archipelago *p_archipelago = NULL; + struct qed_ooo_isle *p_isle = NULL; + u8 the_num_of_isle = 1; + + p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); + if (!p_archipelago) { + DP_NOTICE(p_hwfn, + "Connection %d is not found in OOO list\n", cid); + return NULL; + } + + list_for_each_entry(p_isle, &p_archipelago->isles_list, list_entry) { + if (the_num_of_isle == isle) + return p_isle; + the_num_of_isle++; + } + + return NULL; +} + +void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct ooo_opaque *p_cqe) +{ + struct qed_ooo_history *p_history = &p_ooo_info->ooo_history; + + if (p_history->head_idx == p_history->num_of_cqes) + p_history->head_idx = 0; + p_history->p_cqes[p_history->head_idx] = *p_cqe; + p_history->head_idx++; +} + +struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_ooo_info *p_ooo_info; + u16 max_num_archipelagos = 0; + u16 max_num_isles = 0; + u32 i; + + if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) { + DP_NOTICE(p_hwfn, + "Failed to allocate qed_ooo_info: unknown personality\n"); + return NULL; + } + + max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons; + max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos; + + if (!max_num_archipelagos) { + DP_NOTICE(p_hwfn, + "Failed to allocate qed_ooo_info: unknown amount of connections\n"); + return NULL; + } + + p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL); + if (!p_ooo_info) + return NULL; + + INIT_LIST_HEAD(&p_ooo_info->free_buffers_list); + INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list); + INIT_LIST_HEAD(&p_ooo_info->free_isles_list); + INIT_LIST_HEAD(&p_ooo_info->free_archipelagos_list); + INIT_LIST_HEAD(&p_ooo_info->archipelagos_list); + + p_ooo_info->p_isles_mem = kcalloc(max_num_isles, + sizeof(struct qed_ooo_isle), + GFP_KERNEL); + if (!p_ooo_info->p_isles_mem) + goto no_isles_mem; + + for (i = 0; i < max_num_isles; i++) { + INIT_LIST_HEAD(&p_ooo_info->p_isles_mem[i].buffers_list); + list_add_tail(&p_ooo_info->p_isles_mem[i].list_entry, + &p_ooo_info->free_isles_list); + } + + p_ooo_info->p_archipelagos_mem = + kcalloc(max_num_archipelagos, + sizeof(struct qed_ooo_archipelago), + GFP_KERNEL); + if (!p_ooo_info->p_archipelagos_mem) + goto no_archipelagos_mem; + + for (i = 0; i < max_num_archipelagos; i++) { + INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list); + list_add_tail(&p_ooo_info->p_archipelagos_mem[i].list_entry, + &p_ooo_info->free_archipelagos_list); + } + + p_ooo_info->ooo_history.p_cqes = + kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES, + sizeof(struct ooo_opaque), + GFP_KERNEL); + if (!p_ooo_info->ooo_history.p_cqes) + goto no_history_mem; + + return p_ooo_info; + +no_history_mem: + 
kfree(p_ooo_info->p_archipelagos_mem); +no_archipelagos_mem: + kfree(p_ooo_info->p_isles_mem); +no_isles_mem: + kfree(p_ooo_info); + return NULL; +} + +void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, u32 cid) +{ + struct qed_ooo_archipelago *p_archipelago; + struct qed_ooo_buffer *p_buffer; + struct qed_ooo_isle *p_isle; + bool b_found = false; + + if (list_empty(&p_ooo_info->archipelagos_list)) + return; + + list_for_each_entry(p_archipelago, + &p_ooo_info->archipelagos_list, list_entry) { + if (p_archipelago->cid == cid) { + list_del(&p_archipelago->list_entry); + b_found = true; + break; + } + } + + if (!b_found) + return; + + while (!list_empty(&p_archipelago->isles_list)) { + p_isle = list_first_entry(&p_archipelago->isles_list, + struct qed_ooo_isle, list_entry); + + list_del(&p_isle->list_entry); + + while (!list_empty(&p_isle->buffers_list)) { + p_buffer = list_first_entry(&p_isle->buffers_list, + struct qed_ooo_buffer, + list_entry); + + if (!p_buffer) + break; + + list_del(&p_buffer->list_entry); + list_add_tail(&p_buffer->list_entry, + &p_ooo_info->free_buffers_list); + } + list_add_tail(&p_isle->list_entry, + &p_ooo_info->free_isles_list); + } + + list_add_tail(&p_archipelago->list_entry, + &p_ooo_info->free_archipelagos_list); +} + +void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) +{ + struct qed_ooo_archipelago *p_arch; + struct qed_ooo_buffer *p_buffer; + struct qed_ooo_isle *p_isle; + + while (!list_empty(&p_ooo_info->archipelagos_list)) { + p_arch = list_first_entry(&p_ooo_info->archipelagos_list, + struct qed_ooo_archipelago, + list_entry); + + list_del(&p_arch->list_entry); + + while (!list_empty(&p_arch->isles_list)) { + p_isle = list_first_entry(&p_arch->isles_list, + struct qed_ooo_isle, + list_entry); + + list_del(&p_isle->list_entry); + + while (!list_empty(&p_isle->buffers_list)) { + p_buffer = + list_first_entry(&p_isle->buffers_list, + struct qed_ooo_buffer, + list_entry); + + if (!p_buffer) + break; + + list_del(&p_buffer->list_entry); + list_add_tail(&p_buffer->list_entry, + &p_ooo_info->free_buffers_list); + } + list_add_tail(&p_isle->list_entry, + &p_ooo_info->free_isles_list); + } + list_add_tail(&p_arch->list_entry, + &p_ooo_info->free_archipelagos_list); + } + if (!list_empty(&p_ooo_info->ready_buffers_list)) + list_splice_tail_init(&p_ooo_info->ready_buffers_list, + &p_ooo_info->free_buffers_list); +} + +void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info) +{ + qed_ooo_release_all_isles(p_hwfn, p_ooo_info); + memset(p_ooo_info->ooo_history.p_cqes, 0, + p_ooo_info->ooo_history.num_of_cqes * + sizeof(struct ooo_opaque)); + p_ooo_info->ooo_history.head_idx = 0; +} + +void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info) +{ + struct qed_ooo_buffer *p_buffer; + + qed_ooo_release_all_isles(p_hwfn, p_ooo_info); + while (!list_empty(&p_ooo_info->free_buffers_list)) { + p_buffer = list_first_entry(&p_ooo_info->free_buffers_list, + struct qed_ooo_buffer, list_entry); + + if (!p_buffer) + break; + + list_del(&p_buffer->list_entry); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_buffer->rx_buffer_size, + p_buffer->rx_buffer_virt_addr, + p_buffer->rx_buffer_phys_addr); + kfree(p_buffer); + } + + kfree(p_ooo_info->p_isles_mem); + kfree(p_ooo_info->p_archipelagos_mem); + kfree(p_ooo_info->ooo_history.p_cqes); + kfree(p_ooo_info); +} + +void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct 
qed_ooo_buffer *p_buffer) +{ + list_add_tail(&p_buffer->list_entry, &p_ooo_info->free_buffers_list); +} + +struct qed_ooo_buffer *qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) +{ + struct qed_ooo_buffer *p_buffer = NULL; + + if (!list_empty(&p_ooo_info->free_buffers_list)) { + p_buffer = list_first_entry(&p_ooo_info->free_buffers_list, + struct qed_ooo_buffer, list_entry); + + list_del(&p_buffer->list_entry); + } + + return p_buffer; +} + +void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer, u8 on_tail) +{ + if (on_tail) + list_add_tail(&p_buffer->list_entry, + &p_ooo_info->ready_buffers_list); + else + list_add(&p_buffer->list_entry, + &p_ooo_info->ready_buffers_list); +} + +struct qed_ooo_buffer *qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) +{ + struct qed_ooo_buffer *p_buffer = NULL; + + if (!list_empty(&p_ooo_info->ready_buffers_list)) { + p_buffer = list_first_entry(&p_ooo_info->ready_buffers_list, + struct qed_ooo_buffer, list_entry); + + list_del(&p_buffer->list_entry); + } + + return p_buffer; +} + +void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 drop_isle, u8 drop_size) +{ + struct qed_ooo_archipelago *p_archipelago = NULL; + struct qed_ooo_isle *p_isle = NULL; + u8 isle_idx; + + p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); + for (isle_idx = 0; isle_idx < drop_size; isle_idx++) { + p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle); + if (!p_isle) { + DP_NOTICE(p_hwfn, + "Isle %d is not found(cid %d)\n", + drop_isle, cid); + return; + } + if (list_empty(&p_isle->buffers_list)) + DP_NOTICE(p_hwfn, + "Isle %d is empty(cid %d)\n", drop_isle, cid); + else + list_splice_tail_init(&p_isle->buffers_list, + &p_ooo_info->free_buffers_list); + + list_del(&p_isle->list_entry); + p_ooo_info->cur_isles_number--; + list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list); + } + + if (list_empty(&p_archipelago->isles_list)) { + list_del(&p_archipelago->list_entry); + list_add(&p_archipelago->list_entry, + &p_ooo_info->free_archipelagos_list); + } +} + +void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 ooo_isle, + struct qed_ooo_buffer *p_buffer) +{ + struct qed_ooo_archipelago *p_archipelago = NULL; + struct qed_ooo_isle *p_prev_isle = NULL; + struct qed_ooo_isle *p_isle = NULL; + + if (ooo_isle > 1) { + p_prev_isle = qed_ooo_seek_isle(p_hwfn, + p_ooo_info, cid, ooo_isle - 1); + if (!p_prev_isle) { + DP_NOTICE(p_hwfn, + "Isle %d is not found(cid %d)\n", + ooo_isle - 1, cid); + return; + } + } + p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); + if (!p_archipelago && (ooo_isle != 1)) { + DP_NOTICE(p_hwfn, + "Connection %d is not found in OOO list\n", cid); + return; + } + + if (!list_empty(&p_ooo_info->free_isles_list)) { + p_isle = list_first_entry(&p_ooo_info->free_isles_list, + struct qed_ooo_isle, list_entry); + + list_del(&p_isle->list_entry); + if (!list_empty(&p_isle->buffers_list)) { + DP_NOTICE(p_hwfn, "Free isle is not empty\n"); + INIT_LIST_HEAD(&p_isle->buffers_list); + } + } else { + DP_NOTICE(p_hwfn, "No more free isles\n"); + return; + } + + if (!p_archipelago && + !list_empty(&p_ooo_info->free_archipelagos_list)) { + p_archipelago = + list_first_entry(&p_ooo_info->free_archipelagos_list, + struct qed_ooo_archipelago, list_entry); + + list_del(&p_archipelago->list_entry); + if 
(!list_empty(&p_archipelago->isles_list)) { + DP_NOTICE(p_hwfn, + "Free OOO connection is not empty\n"); + INIT_LIST_HEAD(&p_archipelago->isles_list); + } + p_archipelago->cid = cid; + list_add(&p_archipelago->list_entry, + &p_ooo_info->archipelagos_list); + } else if (!p_archipelago) { + DP_NOTICE(p_hwfn, "No more free OOO connections\n"); + list_add(&p_isle->list_entry, + &p_ooo_info->free_isles_list); + list_add(&p_buffer->list_entry, + &p_ooo_info->free_buffers_list); + return; + } + + list_add(&p_buffer->list_entry, &p_isle->buffers_list); + p_ooo_info->cur_isles_number++; + p_ooo_info->gen_isles_number++; + + if (p_ooo_info->cur_isles_number > p_ooo_info->max_isles_number) + p_ooo_info->max_isles_number = p_ooo_info->cur_isles_number; + + if (!p_prev_isle) + list_add(&p_isle->list_entry, &p_archipelago->isles_list); + else + list_add(&p_isle->list_entry, &p_prev_isle->list_entry); +} + +void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, + u8 ooo_isle, + struct qed_ooo_buffer *p_buffer, u8 buffer_side) +{ + struct qed_ooo_isle *p_isle = NULL; + + p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle); + if (!p_isle) { + DP_NOTICE(p_hwfn, + "Isle %d is not found(cid %d)\n", ooo_isle, cid); + return; + } + + if (buffer_side == QED_OOO_LEFT_BUF) + list_add(&p_buffer->list_entry, &p_isle->buffers_list); + else + list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list); +} + +void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle) +{ + struct qed_ooo_archipelago *p_archipelago = NULL; + struct qed_ooo_isle *p_right_isle = NULL; + struct qed_ooo_isle *p_left_isle = NULL; + + p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, + left_isle + 1); + if (!p_right_isle) { + DP_NOTICE(p_hwfn, + "Right isle %d is not found(cid %d)\n", + left_isle + 1, cid); + return; + } + + p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); + list_del(&p_right_isle->list_entry); + p_ooo_info->cur_isles_number--; + if (left_isle) { + p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, + left_isle); + if (!p_left_isle) { + DP_NOTICE(p_hwfn, + "Left isle %d is not found(cid %d)\n", + left_isle, cid); + return; + } + list_splice_tail_init(&p_right_isle->buffers_list, + &p_left_isle->buffers_list); + } else { + list_splice_tail_init(&p_right_isle->buffers_list, + &p_ooo_info->ready_buffers_list); + if (list_empty(&p_archipelago->isles_list)) { + list_del(&p_archipelago->list_entry); + list_add(&p_archipelago->list_entry, + &p_ooo_info->free_archipelagos_list); + } + } + list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h new file mode 100644 index 000000000000..7a0670a9a074 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h @@ -0,0 +1,176 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef _QED_OOO_H +#define _QED_OOO_H +#include <linux/types.h> +#include <linux/list.h> +#include <linux/slab.h> +#include "qed.h" + +#define QED_MAX_NUM_ISLES 256 +#define QED_MAX_NUM_OOO_HISTORY_ENTRIES 512 + +#define QED_OOO_LEFT_BUF 0 +#define QED_OOO_RIGHT_BUF 1 + +struct qed_ooo_buffer { + struct list_head list_entry; + void *rx_buffer_virt_addr; + dma_addr_t rx_buffer_phys_addr; + u32 rx_buffer_size; + u16 packet_length; + u16 parse_flags; + u16 vlan; + u8 placement_offset; +}; + +struct qed_ooo_isle { + struct list_head list_entry; + struct list_head buffers_list; +}; + +struct qed_ooo_archipelago { + struct list_head list_entry; + struct list_head isles_list; + u32 cid; +}; + +struct qed_ooo_history { + struct ooo_opaque *p_cqes; + u32 head_idx; + u32 num_of_cqes; +}; + +struct qed_ooo_info { + struct list_head free_buffers_list; + struct list_head ready_buffers_list; + struct list_head free_isles_list; + struct list_head free_archipelagos_list; + struct list_head archipelagos_list; + struct qed_ooo_archipelago *p_archipelagos_mem; + struct qed_ooo_isle *p_isles_mem; + struct qed_ooo_history ooo_history; + u32 cur_isles_number; + u32 max_isles_number; + u32 gen_isles_number; +}; + +#if IS_ENABLED(CONFIG_QED_ISCSI) +void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct ooo_opaque *p_cqe); + +struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn); + +void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid); + +void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info); + +void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info); + +void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info); + +void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer); + +struct qed_ooo_buffer * +qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info); + +void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer, u8 on_tail); + +struct qed_ooo_buffer * +qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info); + +void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 drop_isle, u8 drop_size); + +void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, + u8 ooo_isle, struct qed_ooo_buffer *p_buffer); + +void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, + u8 ooo_isle, + struct qed_ooo_buffer *p_buffer, u8 buffer_side); + +void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, u32 cid, + u8 left_isle); +#else /* IS_ENABLED(CONFIG_QED_ISCSI) */ +static inline void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct ooo_opaque *p_cqe) {} + +static inline struct qed_ooo_info *qed_ooo_alloc( + struct qed_hwfn *p_hwfn) { return NULL; } + +static inline void +qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid) {} + +static inline void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) + {} + +static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) {} + +static inline void qed_ooo_free(struct qed_hwfn *p_hwfn, + struct 
qed_ooo_info *p_ooo_info) {} + +static inline void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer) {} + +static inline struct qed_ooo_buffer * +qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) { return NULL; } + +static inline void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer, + u8 on_tail) {} + +static inline struct qed_ooo_buffer * +qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) { return NULL; } + +static inline void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 drop_isle, u8 drop_size) {} + +static inline void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 ooo_isle, + struct qed_ooo_buffer *p_buffer) {} + +static inline void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 ooo_isle, + struct qed_ooo_buffer *p_buffer, + u8 buffer_side) {} + +static inline void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, u32 cid, + u8 left_isle) {} +#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index b414a0542177..97544205a8c1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -82,6 +82,8 @@ 0x1c80000UL #define BAR0_MAP_REG_XSDM_RAM \ 0x1e00000UL +#define BAR0_MAP_REG_YSDM_RAM \ + 0x1e80000UL #define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \ 0x5011f4UL #define PRS_REG_SEARCH_TCP \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index f3a825a8f8d5..2a16547c8966 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -2658,7 +2658,6 @@ static int qed_roce_ll2_start(struct qed_dev *cdev, DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n"); return -ENOMEM; } - memset(roce_ll2, 0, sizeof(*roce_ll2)); roce_ll2->handle = QED_LL2_UNUSED_HANDLE; roce_ll2->cbs = params->cbs; roce_ll2->cb_cookie = params->cb_cookie; @@ -2772,6 +2771,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev, /* Tx header */ rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle, 1 + pkt->n_seg, 0, flags, 0, + QED_LL2_TX_DEST_NW, qed_roce_flavor, pkt->header.baddr, pkt->header.len, pkt, 1); if (rc) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c index 9b7678f26909..48bfaecaf6dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c @@ -1,3 +1,4 @@ +#include <linux/crc32.h> #include "qed.h" #include "qed_dev_api.h" #include "qed_mcp.h" @@ -75,3 +76,103 @@ int qed_selftest_clock(struct qed_dev *cdev) return rc; } + +int qed_selftest_nvram(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + u32 num_images, i, j, nvm_crc, calc_crc; + struct bist_nvm_image_att image_att; + u8 *buf = NULL; + __be32 val; + int rc; + + if (!p_ptt) { + DP_ERR(p_hwfn, "failed to acquire ptt\n"); + return -EBUSY; + } + + /* Acquire from MFW the amount of available images */ + rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images); + if (rc || !num_images) { + 
DP_ERR(p_hwfn, "Failed getting number of images\n"); + return -EINVAL; + } + + /* Iterate over images and validate CRC */ + for (i = 0; i < num_images; i++) { + /* This mailbox returns information about the image required for + * reading it. + */ + rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt, + &image_att, i); + if (rc) { + DP_ERR(p_hwfn, + "Failed getting image index %d attributes\n", + i); + goto err0; + } + + /* After MFW crash dump is collected - the image's CRC stops + * being valid. + */ + if (image_att.image_type == NVM_TYPE_MDUMP) + continue; + + DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", + i, image_att.len); + + /* Allocate a buffer for holding the nvram image */ + buf = kzalloc(image_att.len, GFP_KERNEL); + if (!buf) { + rc = -ENOMEM; + goto err0; + } + + /* Read image into buffer */ + rc = qed_mcp_nvm_read(p_hwfn->cdev, image_att.nvm_start_addr, + buf, image_att.len); + if (rc) { + DP_ERR(p_hwfn, + "Failed reading image index %d from nvm.\n", i); + goto err1; + } + + /* Convert the buffer into big-endian format (excluding the + * closing 4 bytes of CRC). + */ + for (j = 0; j < image_att.len - 4; j += 4) { + val = cpu_to_be32(*(u32 *)&buf[j]); + *(u32 *)&buf[j] = (__force u32)val; + } + + /* Calc CRC for the "actual" image buffer, i.e. not including + * the last 4 CRC bytes. + */ + nvm_crc = *(u32 *)(buf + image_att.len - 4); + calc_crc = crc32(0xffffffff, buf, image_att.len - 4); + calc_crc = (__force u32)~cpu_to_be32(calc_crc); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "nvm crc 0x%x, calc_crc 0x%x\n", nvm_crc, calc_crc); + + if (calc_crc != nvm_crc) { + rc = -EINVAL; + goto err1; + } + + /* Done with this image; Free to prevent double release + * on subsequent failure. + */ + kfree(buf); + buf = NULL; + } + + qed_ptt_release(p_hwfn, p_ptt); + return 0; + +err1: + kfree(buf); +err0: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h index 50eb0b49950f..739ddb730967 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -37,4 +37,14 @@ int qed_selftest_register(struct qed_dev *cdev); * @return int */ int qed_selftest_clock(struct qed_dev *cdev); + +/** + * @brief qed_selftest_nvram - Perform nvram test + * + * @param cdev + * + * @return int + */ +int qed_selftest_nvram(struct qed_dev *cdev); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index b2c08e4d2a9b..9c897bc68d05 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -110,8 +110,8 @@ union qed_spq_req_comp { }; struct qed_spq_comp_done { - u64 done; - u8 fw_return_code; + unsigned int done; + u8 fw_return_code; }; struct qed_spq_entry { diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 2888eb0628f8..a39ef2e7a9a6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -347,11 +347,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, - p_hwfn->p_eq->chain.pbl.p_phys_table); + p_hwfn->p_eq->chain.pbl_sp.p_phys_table); page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); p_ramrod->event_ring_num_pages = page_cnt; DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, - p_hwfn->p_consq->chain.pbl.p_phys_table); + 
p_hwfn->p_consq->chain.pbl_sp.p_phys_table); qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); @@ -369,7 +369,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; break; default: - DP_NOTICE(p_hwfn, "Unkown personality %d\n", + DP_NOTICE(p_hwfn, "Unknown personality %d\n", p_hwfn->hw_info.personality); p_ramrod->personality = PERSONALITY_ETH; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 9fbaf9429fd0..f022469bdcf8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -24,7 +24,9 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" +#include "qed_iscsi.h" #include "qed_mcp.h" +#include "qed_ooo.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" @@ -35,7 +37,11 @@ ***************************************************************************/ #define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) -#define SPQ_BLOCK_SLEEP_LENGTH (1000) + +#define SPQ_BLOCK_DELAY_MAX_ITER (10) +#define SPQ_BLOCK_DELAY_US (10) +#define SPQ_BLOCK_SLEEP_MAX_ITER (1000) +#define SPQ_BLOCK_SLEEP_MS (5) /*************************************************************************** * Blocking Imp. (BLOCK/EBLOCK mode) @@ -48,60 +54,88 @@ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, comp_done = (struct qed_spq_comp_done *)cookie; - comp_done->done = 0x1; - comp_done->fw_return_code = fw_return_code; + comp_done->fw_return_code = fw_return_code; - /* make update visible to waiting thread */ - smp_wmb(); + /* Make sure completion done is visible on waiting thread */ + smp_store_release(&comp_done->done, 0x1); } -static int qed_spq_block(struct qed_hwfn *p_hwfn, - struct qed_spq_entry *p_ent, - u8 *p_fw_ret) +static int __qed_spq_block(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + u8 *p_fw_ret, bool sleep_between_iter) { - int sleep_count = SPQ_BLOCK_SLEEP_LENGTH; struct qed_spq_comp_done *comp_done; - int rc; + u32 iter_cnt; comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; - while (sleep_count) { - /* validate we receive completion update */ - smp_rmb(); - if (comp_done->done == 1) { + iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER + : SPQ_BLOCK_DELAY_MAX_ITER; + + while (iter_cnt--) { + /* Validate we receive completion update */ + if (READ_ONCE(comp_done->done) == 1) { + /* Read updated FW return value */ + smp_read_barrier_depends(); if (p_fw_ret) *p_fw_ret = comp_done->fw_return_code; return 0; } - usleep_range(5000, 10000); - sleep_count--; + + if (sleep_between_iter) + msleep(SPQ_BLOCK_SLEEP_MS); + else + udelay(SPQ_BLOCK_DELAY_US); + } + + return -EBUSY; +} + +static int qed_spq_block(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + u8 *p_fw_ret, bool skip_quick_poll) +{ + struct qed_spq_comp_done *comp_done; + int rc; + + /* A relatively short polling period w/o sleeping, to allow the FW to + * complete the ramrod and thus possibly to avoid the following sleeps. 
+ */ + if (!skip_quick_poll) { + rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false); + if (!rc) + return 0; } + /* Move to polling with a sleeping period between iterations */ + rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); + if (!rc) + return 0; + DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt); - if (rc != 0) + if (rc) { DP_NOTICE(p_hwfn, "MCP drain failed\n"); + goto err; + } /* Retry after drain */ - sleep_count = SPQ_BLOCK_SLEEP_LENGTH; - while (sleep_count) { - /* validate we receive completion update */ - smp_rmb(); - if (comp_done->done == 1) { - if (p_fw_ret) - *p_fw_ret = comp_done->fw_return_code; - return 0; - } - usleep_range(5000, 10000); - sleep_count--; - } + rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); + if (!rc) + return 0; + comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; if (comp_done->done == 1) { if (p_fw_ret) *p_fw_ret = comp_done->fw_return_code; return 0; } - - DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n"); +err: + DP_NOTICE(p_hwfn, + "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n", + le32_to_cpu(p_ent->elem.hdr.cid), + p_ent->elem.hdr.cmd_id, + p_ent->elem.hdr.protocol_id, + le16_to_cpu(p_ent->elem.hdr.echo)); return -EBUSY; } @@ -245,6 +279,28 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, return qed_sriov_eqe_event(p_hwfn, p_eqe->opcode, p_eqe->echo, &p_eqe->data); + case PROTOCOLID_ISCSI: + if (!IS_ENABLED(CONFIG_QED_ISCSI)) + return -EINVAL; + if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) { + u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid); + + qed_ooo_release_connection_isles(p_hwfn, + p_hwfn->p_ooo_info, + cid); + return 0; + } + + if (p_hwfn->p_iscsi_info->event_cb) { + struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info; + + return p_iscsi->event_cb(p_iscsi->event_context, + p_eqe->opcode, &p_eqe->data); + } else { + DP_NOTICE(p_hwfn, + "iSCSI async completion is not set\n"); + return -EINVAL; + } default: DP_NOTICE(p_hwfn, "Unknown Async completion for protocol: %d\n", @@ -725,7 +781,8 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, * access p_ent here to see whether it's successful or not. * Thus, after gaining the answer perform the cleanup here. 
*/ - rc = qed_spq_block(p_hwfn, p_ent, fw_return_code); + rc = qed_spq_block(p_hwfn, p_ent, fw_return_code, + p_ent->queue == &p_spq->unlimited_pending); if (p_ent->queue == &p_spq->unlimited_pending) { /* This is an allocated p_ent which does not need to diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index d2d6621fe0e5..85b09dd1787a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -109,7 +109,8 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, } static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, - int rel_vf_id, bool b_enabled_only) + int rel_vf_id, + bool b_enabled_only, bool b_non_malicious) { if (!p_hwfn->pf_iov_info) { DP_NOTICE(p_hwfn->cdev, "No iov info\n"); @@ -124,6 +125,10 @@ static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, b_enabled_only) return false; + if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) && + b_non_malicious) + return false; + return true; } @@ -138,7 +143,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, return NULL; } - if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only)) + if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, + b_enabled_only, false)) vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; else DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", @@ -542,7 +548,8 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn) return 0; } -static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) +bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, + int vfid, bool b_fail_malicious) { /* Check PF supports sriov */ if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || @@ -550,12 +557,17 @@ static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) return false; /* Check VF validity */ - if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true)) + if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious)) return false; return true; } +bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) +{ + return _qed_iov_pf_sanity_check(p_hwfn, vfid, true); +} + static void qed_iov_set_vf_to_disable(struct qed_dev *cdev, u16 rel_vf_id, u8 to_disable) { @@ -652,6 +664,9 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); + /* It's possible VF was previously considered malicious */ + vf->b_malicious = false; + rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs); if (rc) return rc; @@ -793,37 +808,70 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 rel_vf_id, u16 num_rx_queues) + struct qed_iov_vf_init_params *p_params) { u8 num_of_vf_avaiable_chains = 0; struct qed_vf_info *vf = NULL; + u16 qid, num_irqs; int rc = 0; u32 cids; u8 i; - vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); + vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); if (!vf) { DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); return -EINVAL; } if (vf->b_init) { - DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id); + DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", + p_params->rel_vf_id); return -EINVAL; } + /* Perform sanity checking on the requested queue_id */ + for (i = 0; i < p_params->num_queues; i++) { + u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE); + u16 max_vf_qzone = min_vf_qzone + + FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1; + + qid = p_params->req_rx_queue[i]; + if 
(qid < min_vf_qzone || qid > max_vf_qzone) { + DP_NOTICE(p_hwfn, + "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n", + qid, + p_params->rel_vf_id, + min_vf_qzone, max_vf_qzone); + return -EINVAL; + } + + qid = p_params->req_tx_queue[i]; + if (qid > max_vf_qzone) { + DP_NOTICE(p_hwfn, + "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n", + qid, p_params->rel_vf_id, max_vf_qzone); + return -EINVAL; + } + + /* If client *really* wants, Tx qid can be shared with PF */ + if (qid < min_vf_qzone) + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n", + p_params->rel_vf_id, qid, i); + } + /* Limit number of queues according to number of CIDs */ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids); DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n", - vf->relative_vf_id, num_rx_queues, (u16) cids); - num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids)); + vf->relative_vf_id, p_params->num_queues, (u16)cids); + num_irqs = min_t(u16, p_params->num_queues, ((u16)cids)); num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn, p_ptt, - vf, - num_rx_queues); + vf, num_irqs); if (!num_of_vf_avaiable_chains) { DP_ERR(p_hwfn, "no available igu sbs\n"); return -ENOMEM; @@ -834,25 +882,22 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, vf->num_txqs = num_of_vf_avaiable_chains; for (i = 0; i < vf->num_rxqs; i++) { - u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn, - vf->igu_sbs[i]); + struct qed_vf_q_info *p_queue = &vf->vf_queues[i]; - if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) { - DP_NOTICE(p_hwfn, - "VF[%d] will require utilizing of out-of-bounds queues - %04x\n", - vf->relative_vf_id, queue_id); - return -EINVAL; - } + p_queue->fw_rx_qid = p_params->req_rx_queue[i]; + p_queue->fw_tx_qid = p_params->req_tx_queue[i]; /* CIDs are per-VF, so no problem having them 0-based. 
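Illustration, not part of the patch: the requested-queue sanity check above reduces to a range test against the PF/VF queue-zone split. A small stand-alone sketch; pf_l2_que and vf_l2_que are invented stand-ins for FEAT_NUM(hwfn, QED_PF_L2_QUE) and FEAT_NUM(hwfn, QED_VF_L2_QUE):

#include <stdbool.h>
#include <stdint.h>

static bool vf_rx_qid_valid(uint16_t qid, uint16_t pf_l2_que, uint16_t vf_l2_que)
{
        uint16_t min_vf_qzone = pf_l2_que;
        uint16_t max_vf_qzone = pf_l2_que + vf_l2_que - 1;

        /* Rx must stay inside the VF queue-zone window. */
        return qid >= min_vf_qzone && qid <= max_vf_qzone;
}

/* Tx may dip into the PF's own range (with only a verbose note above),
 * so just the upper bound is enforced.
 */
static bool vf_tx_qid_valid(uint16_t qid, uint16_t pf_l2_que, uint16_t vf_l2_que)
{
        return qid <= (uint16_t)(pf_l2_que + vf_l2_que - 1);
}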
*/ - vf->vf_queues[i].fw_rx_qid = queue_id; - vf->vf_queues[i].fw_tx_qid = queue_id; - vf->vf_queues[i].fw_cid = i; + p_queue->fw_cid = i; DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n", - vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i); + "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n", + vf->relative_vf_id, + i, vf->igu_sbs[i], + p_queue->fw_rx_qid, + p_queue->fw_tx_qid, p_queue->fw_cid); } + rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); if (!rc) { vf->b_init = true; @@ -1172,8 +1217,19 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, p_vf->num_active_rxqs = 0; - for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) - p_vf->vf_queues[i].rxq_active = 0; + for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { + struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i]; + + if (p_queue->p_rx_cid) { + qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid); + p_queue->p_rx_cid = NULL; + } + + if (p_queue->p_tx_cid) { + qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid); + p_queue->p_tx_cid = NULL; + } + } memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); @@ -1579,21 +1635,21 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, /* Update all the Rx queues */ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { - u16 qid; + struct qed_queue_cid *p_cid; - if (!p_vf->vf_queues[i].rxq_active) + p_cid = p_vf->vf_queues[i].p_rx_cid; + if (!p_cid) continue; - qid = p_vf->vf_queues[i].fw_rx_qid; - - rc = qed_sp_eth_rx_queues_update(p_hwfn, qid, + rc = qed_sp_eth_rx_queues_update(p_hwfn, + (void **)&p_cid, 1, 0, 1, QED_SPQ_MODE_EBLOCK, NULL); if (rc) { DP_NOTICE(p_hwfn, "Failed to send Rx update fo queue[0x%04x]\n", - qid); + p_cid->rel.queue_id); return rc; } } @@ -1767,23 +1823,34 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, struct qed_queue_start_common_params params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; u8 status = PFVF_STATUS_NO_RESOURCE; + struct qed_vf_q_info *p_queue; struct vfpf_start_rxq_tlv *req; bool b_legacy_vf = false; int rc; - memset(¶ms, 0, sizeof(params)); req = &mbx->req_virt->start_rxq; if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) || !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) goto out; - params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid; - params.vf_qid = req->rx_qid; + /* Acquire a new queue-cid */ + p_queue = &vf->vf_queues[req->rx_qid]; + + memset(¶ms, 0, sizeof(params)); + params.queue_id = p_queue->fw_rx_qid; params.vport_id = vf->vport_id; + params.stats_id = vf->abs_vf_id + 0x10; params.sb = req->hw_sb; params.sb_idx = req->sb_index; + p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn, + vf->opaque_fid, + p_queue->fw_cid, + req->rx_qid, ¶ms); + if (!p_queue->p_rx_cid) + goto out; + /* Legacy VFs have their Producers in a different location, which they * calculate on their own and clean the producer prior to this. 
*/ @@ -1796,21 +1863,19 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), 0); } + p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf; - rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid, - vf->vf_queues[req->rx_qid].fw_cid, - ¶ms, - vf->abs_vf_id + 0x10, - req->bd_max_bytes, - req->rxq_addr, - req->cqe_pbl_addr, req->cqe_pbl_size, - b_legacy_vf); - + rc = qed_eth_rxq_start_ramrod(p_hwfn, + p_queue->p_rx_cid, + req->bd_max_bytes, + req->rxq_addr, + req->cqe_pbl_addr, req->cqe_pbl_size); if (rc) { status = PFVF_STATUS_FAILURE; + qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid); + p_queue->p_rx_cid = NULL; } else { status = PFVF_STATUS_SUCCESS; - vf->vf_queues[req->rx_qid].rxq_active = true; vf->num_active_rxqs++; } @@ -1867,7 +1932,9 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, u8 status = PFVF_STATUS_NO_RESOURCE; union qed_qm_pq_params pq_params; struct vfpf_start_txq_tlv *req; + struct qed_vf_q_info *p_queue; int rc; + u16 pq; /* Prepare the parameters which would choose the right PQ */ memset(&pq_params, 0, sizeof(pq_params)); @@ -1881,24 +1948,31 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) goto out; - params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid; + /* Acquire a new queue-cid */ + p_queue = &vf->vf_queues[req->tx_qid]; + + params.queue_id = p_queue->fw_tx_qid; params.vport_id = vf->vport_id; + params.stats_id = vf->abs_vf_id + 0x10; params.sb = req->hw_sb; params.sb_idx = req->sb_index; - rc = qed_sp_eth_txq_start_ramrod(p_hwfn, - vf->opaque_fid, - vf->vf_queues[req->tx_qid].fw_cid, - ¶ms, - vf->abs_vf_id + 0x10, - req->pbl_addr, - req->pbl_size, &pq_params); + p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn, + vf->opaque_fid, + p_queue->fw_cid, + req->tx_qid, ¶ms); + if (!p_queue->p_tx_cid) + goto out; + pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params); + rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid, + req->pbl_addr, req->pbl_size, pq); if (rc) { status = PFVF_STATUS_FAILURE; + qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid); + p_queue->p_tx_cid = NULL; } else { status = PFVF_STATUS_SUCCESS; - vf->vf_queues[req->tx_qid].txq_active = true; } out: @@ -1909,6 +1983,7 @@ static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf, u16 rxq_id, u8 num_rxqs, bool cqe_completion) { + struct qed_vf_q_info *p_queue; int rc = 0; int qid; @@ -1916,16 +1991,18 @@ static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, return -EINVAL; for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) { - if (vf->vf_queues[qid].rxq_active) { - rc = qed_sp_eth_rx_queue_stop(p_hwfn, - vf->vf_queues[qid]. - fw_rx_qid, false, - cqe_completion); + p_queue = &vf->vf_queues[qid]; - if (rc) - return rc; - } - vf->vf_queues[qid].rxq_active = false; + if (!p_queue->p_rx_cid) + continue; + + rc = qed_eth_rx_queue_stop(p_hwfn, + p_queue->p_rx_cid, + false, cqe_completion); + if (rc) + return rc; + + vf->vf_queues[qid].p_rx_cid = NULL; vf->num_active_rxqs--; } @@ -1936,22 +2013,24 @@ static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf, u16 txq_id, u8 num_txqs) { int rc = 0; + struct qed_vf_q_info *p_queue; int qid; if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues)) return -EINVAL; for (qid = txq_id; qid < txq_id + num_txqs; qid++) { - if (vf->vf_queues[qid].txq_active) { - rc = qed_sp_eth_tx_queue_stop(p_hwfn, - vf->vf_queues[qid]. 
- fw_tx_qid); + p_queue = &vf->vf_queues[qid]; + if (!p_queue->p_tx_cid) + continue; - if (rc) - return rc; - } - vf->vf_queues[qid].txq_active = false; + rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid); + if (rc) + return rc; + + p_queue->p_tx_cid = NULL; } + return rc; } @@ -2006,10 +2085,11 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { + struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF]; u16 length = sizeof(struct pfvf_def_resp_tlv); struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct vfpf_update_rxq_tlv *req; - u8 status = PFVF_STATUS_SUCCESS; + u8 status = PFVF_STATUS_FAILURE; u8 complete_event_flg; u8 complete_cqe_flg; u16 qid; @@ -2020,29 +2100,36 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); + /* Validate inputs */ + if (req->num_rxqs + req->rx_qid > QED_MAX_VF_CHAINS_PER_PF || + !qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) { + DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", + vf->relative_vf_id, req->rx_qid, req->num_rxqs); + goto out; + } + for (i = 0; i < req->num_rxqs; i++) { qid = req->rx_qid + i; - - if (!vf->vf_queues[qid].rxq_active) { - DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n", - qid); - status = PFVF_STATUS_FAILURE; - break; + if (!vf->vf_queues[qid].p_rx_cid) { + DP_INFO(p_hwfn, + "VF[%d] rx_qid = %d isn`t active!\n", + vf->relative_vf_id, qid); + goto out; } - rc = qed_sp_eth_rx_queues_update(p_hwfn, - vf->vf_queues[qid].fw_rx_qid, - 1, - complete_cqe_flg, - complete_event_flg, - QED_SPQ_MODE_EBLOCK, NULL); - - if (rc) { - status = PFVF_STATUS_FAILURE; - break; - } + handlers[i] = vf->vf_queues[qid].p_rx_cid; } + rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers, + req->num_rxqs, + complete_cqe_flg, + complete_event_flg, + QED_SPQ_MODE_EBLOCK, NULL); + if (rc) + goto out; + + status = PFVF_STATUS_SUCCESS; +out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, length, status); } @@ -2253,7 +2340,7 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "rss_ind_table[%d] = %d, rxq is out of range\n", i, q_idx); - else if (!vf->vf_queues[q_idx].rxq_active) + else if (!vf->vf_queues[q_idx].p_rx_cid) DP_NOTICE(p_hwfn, "rss_ind_table[%d] = %d, rxq is not active\n", i, q_idx); @@ -2804,6 +2891,13 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, return rc; } + /* Workaround to make VF-PF channel ready, as FW + * doesn't do that as a part of FLR. + */ + REG_WR(p_hwfn, + GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1); + /* VF_STOPPED has to be set only after final cleanup * but prior to re-enabling the VF. 
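Illustration only: the reworked update-rxqs handler above validates the whole requested range and collects the per-queue handles before issuing a single batched update. A rough stand-alone sketch of that validate-then-collect step, with invented types and a placeholder array size:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_VF_CHAINS 16        /* placeholder for QED_MAX_VF_CHAINS_PER_PF */

/* queues[] mimics vf->vf_queues[].p_rx_cid: a NULL entry means the Rx
 * queue was never started, which fails the whole request up front.
 */
static bool collect_rx_handles(void *const queues[MAX_VF_CHAINS],
                               uint16_t first, uint8_t count,
                               const void *out[MAX_VF_CHAINS])
{
        size_t i;

        if ((unsigned int)first + count > MAX_VF_CHAINS)
                return false;   /* range spills past the per-VF array */

        for (i = 0; i < count; i++) {
                if (!queues[first + i])
                        return false;
                out[i] = queues[first + i];
        }

        return true;    /* caller now issues one batched queue update */
}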
*/ @@ -2942,7 +3036,8 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, mbx->first_tlv = mbx->req_virt->first_tlv; /* check if tlv type is known */ - if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { + if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) && + !p_vf->b_malicious) { switch (mbx->first_tlv.tl.type) { case CHANNEL_TLV_ACQUIRE: qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); @@ -2984,6 +3079,15 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); break; } + } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n", + p_vf->abs_vf_id, mbx->first_tlv.tl.type); + + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + mbx->first_tlv.tl.type, + sizeof(struct pfvf_def_resp_tlv), + PFVF_STATUS_MALICIOUS); } else { /* unknown TLV - this may belong to a VF driver from the future * - a version written after this PF driver was written, which @@ -3033,20 +3137,30 @@ static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn, memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); } -static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, - u16 abs_vfid, struct regpair *vf_msg) +static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, + u16 abs_vfid) { - u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf; - struct qed_vf_info *p_vf; + u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf; - if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) { + if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n", + "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n", abs_vfid); - return 0; + return NULL; } - p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; + + return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min]; +} + +static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, + u16 abs_vfid, struct regpair *vf_msg) +{ + struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn, + abs_vfid); + + if (!p_vf) + return 0; /* List the physical address of the request so that handler * could later on copy the message from it. 
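Illustration only: event-queue entries carry absolute VF ids, which the new helper above maps back to a PF-relative index before touching vfs_array[]. A minimal sketch of that translation; first_vf_in_pf and total_vfs are invented stand-ins for the p_iov_info fields:

#include <stdint.h>

static int abs_to_rel_vfid(uint16_t abs_vfid, uint8_t first_vf_in_pf,
                           uint16_t total_vfs)
{
        int rel = (int)abs_vfid - first_vf_in_pf;

        if (rel < 0 || rel >= total_vfs)
                return -1;      /* not a VF of this PF; caller bails out */

        return rel;
}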
@@ -3060,6 +3174,23 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, return 0; } +static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct malicious_vf_eqe_data *p_data) +{ + struct qed_vf_info *p_vf; + + p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); + + if (!p_vf) + return; + + DP_INFO(p_hwfn, + "VF [%d] - Malicious behavior [%02x]\n", + p_vf->abs_vf_id, p_data->err_id); + + p_vf->b_malicious = true; +} + int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, union event_ring_data *data) { @@ -3067,6 +3198,9 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, case COMMON_EVENT_VF_PF_CHANNEL: return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), &data->vf_pf_channel.msg_addr); + case COMMON_EVENT_MALICIOUS_VF: + qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf); + return 0; default: DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", opcode); @@ -3083,7 +3217,7 @@ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) goto out; for (i = rel_vf_id; i < p_iov->total_vfs; i++) - if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true)) + if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false)) return i; out: @@ -3130,6 +3264,12 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, return; } + if (vf_info->b_malicious) { + DP_NOTICE(p_hwfn->cdev, + "Can't set forced MAC to malicious VF [%d]\n", vfid); + return; + } + feature = 1 << MAC_ADDR_FORCED; memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); @@ -3153,6 +3293,12 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, return; } + if (vf_info->b_malicious) { + DP_NOTICE(p_hwfn->cdev, + "Can't set forced vlan to malicious VF [%d]\n", vfid); + return; + } + feature = 1 << VLAN_ADDR_FORCED; vf_info->bulletin.p_virt->pvid = pvid; if (pvid) @@ -3367,7 +3513,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) qed_for_each_vf(hwfn, j) { int k; - if (!qed_iov_is_valid_vfid(hwfn, j, true)) + if (!qed_iov_is_valid_vfid(hwfn, j, true, false)) continue; /* Wait until VF is disabled before releasing */ @@ -3394,9 +3540,28 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) return 0; } +static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, + u16 vfid, + struct qed_iov_vf_init_params *params) +{ + u16 base, i; + + /* Since we have an equal resource distribution per-VF, and we assume + * PF has acquired the QED_PF_L2_QUE first queues, we start setting + * sequentially from there. 
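Illustration only: with the equal per-VF split described above, VF i receives a contiguous block of queue zones starting right after the PF's own. A small worked example (all numbers invented); pf_l2_que and vf_l2_que stand in for FEAT_NUM(hwfn, QED_PF_L2_QUE) and FEAT_NUM(hwfn, QED_VF_L2_QUE):

#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES_PER_VF 16    /* same cap as the enable path above */

static void print_vf_qzones(uint16_t pf_l2_que, uint16_t vf_l2_que,
                            int num_vfs, int vfid)
{
        uint16_t num_queues = vf_l2_que / num_vfs;
        uint16_t base, i;

        if (num_queues > MAX_QUEUES_PER_VF)
                num_queues = MAX_QUEUES_PER_VF;

        /* Sequential assignment: PF zones first, then VF0, VF1, ... */
        base = pf_l2_que + vfid * num_queues;
        for (i = 0; i < num_queues; i++)
                printf("VF%d queue %u -> qzone %u\n", vfid, i, base + i);
}

int main(void)
{
        /* PF owns qzones 0..15; 8 VFs share 112 zones, 14 each, so VF2
         * is assigned qzones 44..57 for both Rx and Tx.
         */
        print_vf_qzones(16, 112, 8, 2);
        return 0;
}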
+ */ + base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues; + + params->rel_vf_id = vfid; + for (i = 0; i < params->num_queues; i++) { + params->req_rx_queue[i] = base + i; + params->req_tx_queue[i] = base + i; + } +} + static int qed_sriov_enable(struct qed_dev *cdev, int num) { - struct qed_sb_cnt_info sb_cnt_info; + struct qed_iov_vf_init_params params; int i, j, rc; if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { @@ -3405,11 +3570,17 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) return -EINVAL; } + memset(¶ms, 0, sizeof(params)); + /* Initialize HW for VF access */ for_each_hwfn(cdev, j) { struct qed_hwfn *hwfn = &cdev->hwfns[j]; struct qed_ptt *ptt = qed_ptt_acquire(hwfn); - int num_sbs = 0, limit = 16; + + /* Make sure not to use more than 16 queues per VF */ + params.num_queues = min_t(int, + FEAT_NUM(hwfn, QED_VF_L2_QUE) / num, + 16); if (!ptt) { DP_ERR(hwfn, "Failed to acquire ptt\n"); @@ -3417,19 +3588,12 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) goto err; } - if (IS_MF_DEFAULT(hwfn)) - limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine; - - memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); - qed_int_get_num_sbs(hwfn, &sb_cnt_info); - num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit); - for (i = 0; i < num; i++) { - if (!qed_iov_is_valid_vfid(hwfn, i, false)) + if (!qed_iov_is_valid_vfid(hwfn, i, false, true)) continue; - rc = qed_iov_init_hw_for_vf(hwfn, - ptt, i, num_sbs / num); + qed_sriov_enable_qid_config(hwfn, i, ¶ms); + rc = qed_iov_init_hw_for_vf(hwfn, ptt, ¶ms); if (rc) { DP_ERR(cdev, "Failed to enable VF[%d]\n", i); qed_ptt_release(hwfn, ptt); @@ -3477,7 +3641,7 @@ static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) return -EINVAL; } - if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) { + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { DP_VERBOSE(cdev, QED_MSG_IOV, "Cannot set VF[%d] MAC (VF is not active)\n", vfid); return -EINVAL; @@ -3509,7 +3673,7 @@ static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) return -EINVAL; } - if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) { + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { DP_VERBOSE(cdev, QED_MSG_IOV, "Cannot set VF[%d] MAC (VF is not active)\n", vfid); return -EINVAL; @@ -3543,7 +3707,7 @@ static int qed_get_vf_config(struct qed_dev *cdev, if (IS_VF(cdev)) return -EINVAL; - if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) { + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) { DP_VERBOSE(cdev, QED_MSG_IOV, "VF index [%d] isn't active\n", vf_id); return -EINVAL; @@ -3647,7 +3811,7 @@ static int qed_set_vf_link_state(struct qed_dev *cdev, if (IS_VF(cdev)) return -EINVAL; - if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) { + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) { DP_VERBOSE(cdev, QED_MSG_IOV, "VF index [%d] isn't active\n", vf_id); return -EINVAL; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 0dd23e409b3f..509c02b4772e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -58,6 +58,23 @@ struct qed_public_vf_info { int tx_rate; }; +struct qed_iov_vf_init_params { + u16 rel_vf_id; + + /* Number of requested Queues; Currently, don't support different + * number of Rx/Tx queues. 
+ */ + + u16 num_queues; + + /* Allow the client to choose which qzones to use for Rx/Tx, + * and which queue_base to use for Tx queues on a per-queue basis. + * Notice values should be relative to the PF resources. + */ + u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF]; + u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF]; +}; + /* This struct is part of qed_dev and contains data relevant to all hwfns; * Initialized only if SR-IOV cpabability is exposed in PCIe config space. */ @@ -99,10 +116,10 @@ struct qed_iov_vf_mbx { struct qed_vf_q_info { u16 fw_rx_qid; + struct qed_queue_cid *p_rx_cid; u16 fw_tx_qid; + struct qed_queue_cid *p_tx_cid; u8 fw_cid; - u8 rxq_active; - u8 txq_active; }; enum vf_state { @@ -132,6 +149,7 @@ struct qed_vf_info { struct qed_iov_vf_mbx vf_mbx; enum vf_state state; bool b_init; + bool b_malicious; u8 to_disable; struct qed_bulletin bulletin; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index abf5bf11f865..60b31a8ede73 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -388,18 +388,18 @@ free_p_iov: #define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) -int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, - u8 rx_qid, - u16 sb, - u8 sb_index, - u16 bd_max_bytes, - dma_addr_t bd_chain_phys_addr, - dma_addr_t cqe_pbl_addr, - u16 cqe_pbl_size, void __iomem **pp_prod) +int +qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, void __iomem **pp_prod) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_start_queue_resp_tlv *resp; struct vfpf_start_rxq_tlv *req; + u8 rx_qid = p_cid->rel.queue_id; int rc; /* clear mailbox and prep first tlv */ @@ -409,21 +409,22 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, req->cqe_pbl_addr = cqe_pbl_addr; req->cqe_pbl_size = cqe_pbl_size; req->rxq_addr = bd_chain_phys_addr; - req->hw_sb = sb; - req->sb_index = sb_index; + req->hw_sb = p_cid->rel.sb; + req->sb_index = p_cid->rel.sb_idx; req->bd_max_bytes = bd_max_bytes; req->stat_id = -1; /* If PF is legacy, we'll need to calculate producers ourselves * as well as clean them. 
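Illustration only: a pre-fast-path-HSI VF computes the Rx producer address itself from fixed per-queue-zone strides, as in the MSTORM_QZONE_START() arithmetic above. A sketch with placeholder constants (not the real HSI values):

#include <stdint.h>

#define EX_TSTORM_QZONE_START   0x1000u /* placeholder for TSTORM_QZONE_START */
#define EX_TSTORM_QZONE_SIZE    0x80u   /* placeholder for TSTORM_QZONE_SIZE  */
#define EX_MSTORM_QZONE_SIZE    0x80u   /* placeholder for MSTORM_QZONE_SIZE  */

static uintptr_t legacy_rx_prod_addr(uintptr_t regview,
                                     uint32_t num_l2_queues, uint8_t hw_qid)
{
        /* MSTORM zones start right after the TSTORM zones ... */
        uintptr_t mstorm_qzone_start = EX_TSTORM_QZONE_START +
                                       (uintptr_t)EX_TSTORM_QZONE_SIZE *
                                       num_l2_queues;

        /* ... and each queue zone is a fixed stride within that region. */
        return regview + mstorm_qzone_start +
               (uintptr_t)hw_qid * EX_MSTORM_QZONE_SIZE;
}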
*/ - if (pp_prod && p_iov->b_pre_fp_hsi) { + if (p_iov->b_pre_fp_hsi) { u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid]; u32 init_prod_val = 0; - *pp_prod = (u8 __iomem *)p_hwfn->regview + - MSTORM_QZONE_START(p_hwfn->cdev) + - hw_qid * MSTORM_QZONE_SIZE; + *pp_prod = (u8 __iomem *) + p_hwfn->regview + + MSTORM_QZONE_START(p_hwfn->cdev) + + hw_qid * MSTORM_QZONE_SIZE; /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), @@ -444,7 +445,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, } /* Learn the address of the producer from the response */ - if (pp_prod && !p_iov->b_pre_fp_hsi) { + if (!p_iov->b_pre_fp_hsi) { u32 init_prod_val = 0; *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset; @@ -462,7 +463,8 @@ exit: return rc; } -int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion) +int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, bool cqe_completion) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_stop_rxqs_tlv *req; @@ -472,7 +474,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion) /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req)); - req->rx_qid = rx_qid; + req->rx_qid = p_cid->rel.queue_id; req->num_rxqs = 1; req->cqe_completion = cqe_completion; @@ -496,28 +498,28 @@ exit: return rc; } -int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, - u16 tx_queue_id, - u16 sb, - u8 sb_index, - dma_addr_t pbl_addr, - u16 pbl_size, void __iomem **pp_doorbell) +int +qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_start_queue_resp_tlv *resp; struct vfpf_start_txq_tlv *req; + u16 qid = p_cid->rel.queue_id; int rc; /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req)); - req->tx_qid = tx_queue_id; + req->tx_qid = qid; /* Tx */ req->pbl_addr = pbl_addr; req->pbl_size = pbl_size; - req->hw_sb = sb; - req->sb_index = sb_index; + req->hw_sb = p_cid->rel.sb; + req->sb_index = p_cid->rel.sb_idx; /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, @@ -533,33 +535,29 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, goto exit; } - if (pp_doorbell) { - /* Modern PFs provide the actual offsets, while legacy - * provided only the queue id. - */ - if (!p_iov->b_pre_fp_hsi) { - *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + - resp->offset; - } else { - u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; - u32 db_addr; - - db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY); - *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + - db_addr; - } + /* Modern PFs provide the actual offsets, while legacy + * provided only the queue id. 
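Illustration only: the Tx doorbell address is either the offset returned by a modern PF or derived locally from the CID on legacy PFs, as in the branch above. A compact sketch of that selection; db_addr_for_legacy_cid() is an invented stand-in for qed_db_addr_vf(cid, DQ_DEMS_LEGACY):

#include <stdbool.h>
#include <stdint.h>

static uint32_t db_addr_for_legacy_cid(uint8_t cid)
{
        return (uint32_t)cid * 0x80u;   /* placeholder stride */
}

static uintptr_t vf_txq_doorbell(uintptr_t doorbells, bool pre_fp_hsi,
                                 uint32_t resp_offset, uint8_t legacy_cid)
{
        if (!pre_fp_hsi)
                return doorbells + resp_offset;         /* modern PF reply */

        return doorbells + db_addr_for_legacy_cid(legacy_cid);
}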
+ */ + if (!p_iov->b_pre_fp_hsi) { + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset; + } else { + u8 cid = p_iov->acquire_resp.resc.cid[qid]; - DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", - tx_queue_id, *pp_doorbell, resp->offset); + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr_vf(cid, + DQ_DEMS_LEGACY); } + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", + qid, *pp_doorbell, resp->offset); exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } -int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) +int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_stop_txqs_tlv *req; @@ -569,7 +567,7 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req)); - req->tx_qid = tx_qid; + req->tx_qid = p_cid->rel.queue_id; req->num_txqs = 1; /* add list termination tlv */ @@ -1171,6 +1169,13 @@ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters) *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters; } +void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters) +{ + struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info; + + *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters; +} + bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac) { struct qed_bulletin_content *bulletin; @@ -1230,8 +1235,8 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn) is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac, &is_mac_forced); - if (is_mac_exist && is_mac_forced && cookie) - ops->force_mac(cookie, mac); + if (is_mac_exist && cookie) + ops->force_mac(cookie, mac, !!is_mac_forced); /* Always update link configuration according to bulletin */ qed_link_update(hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 35db7a28aa13..11eb3854e6f2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -40,6 +40,7 @@ enum { PFVF_STATUS_NOT_SUPPORTED, PFVF_STATUS_NO_RESOURCE, PFVF_STATUS_FORCED, + PFVF_STATUS_MALICIOUS, }; /* vf pf channel tlvs */ @@ -622,6 +623,14 @@ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters); /** + * @brief Get number of MAC filters allocated for VF by qed + * + * @param p_hwfn + * @param num_rxqs - allocated MAC filters + */ +void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters); + +/** * @brief Check if VF can set a MAC address * * @param p_hwfn @@ -657,10 +666,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); /** * @brief VF - start the RX Queue by sending a message to the PF * @param p_hwfn - * @param cid - zero based within the VF - * @param rx_queue_id - zero based within the VF - * @param sb - VF status block for this queue - * @param sb_index - Index within the status block + * @param p_cid - Only relative fields are relevant * @param bd_max_bytes - maximum number of bytes per bd * @param bd_chain_phys_addr - physical address of bd chain * @param cqe_pbl_addr - physical address of pbl @@ -671,9 +677,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); * @return int */ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, - u8 rx_queue_id, - u16 sb, - u8 sb_index, + struct qed_queue_cid *p_cid, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, @@ -693,24 
+697,23 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, * * @return int */ -int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, - u16 tx_queue_id, - u16 sb, - u8 sb_index, - dma_addr_t pbl_addr, - u16 pbl_size, void __iomem **pp_doorbell); +int +qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell); /** * @brief VF - stop the RX queue by sending a message to the PF * * @param p_hwfn - * @param rx_qid + * @param p_cid * @param cqe_completion * * @return int */ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, - u16 rx_qid, bool cqe_completion); + struct qed_queue_cid *p_cid, bool cqe_completion); /** * @brief VF - stop the TX queue by sending a message to the PF @@ -720,7 +723,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, * * @return int */ -int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid); +int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid); /** * @brief VF - send a vport update command @@ -871,6 +874,11 @@ static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, { } +static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, + u8 *num_mac_filters) +{ +} + static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac) { return false; @@ -888,9 +896,7 @@ static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) } static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, - u8 rx_queue_id, - u16 sb, - u8 sb_index, + struct qed_queue_cid *p_cid, u16 bd_max_bytes, dma_addr_t bd_chain_phys_adr, dma_addr_t cqe_pbl_addr, @@ -900,9 +906,7 @@ static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, } static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, - u16 tx_queue_id, - u16 sb, - u8 sb_index, + struct qed_queue_cid *p_cid, dma_addr_t pbl_addr, u16 pbl_size, void __iomem **pp_doorbell) { @@ -910,12 +914,14 @@ static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, } static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, - u16 rx_qid, bool cqe_completion) + struct qed_queue_cid *p_cid, + bool cqe_completion) { return -EINVAL; } -static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) +static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) { return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 974689a13337..c79dc78746fc 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -16,6 +16,7 @@ #include <linux/bitmap.h> #include <linux/kernel.h> #include <linux/mutex.h> +#include <linux/bpf.h> #include <linux/io.h> #include <linux/qed/common_hsi.h> #include <linux/qed/eth_common.h> @@ -127,10 +128,9 @@ struct qede_dev { const struct qed_eth_ops *ops; - struct qed_dev_eth_info dev_info; + struct qed_dev_eth_info dev_info; #define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues) -#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \ - (edev)->dev_info.num_tc) +#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues) struct qede_fastpath *fp_array; u8 req_num_tx; @@ -139,17 +139,9 @@ struct qede_dev { u8 fp_num_rx; u16 req_queues; u16 num_queues; - u8 num_tc; #define QEDE_QUEUE_CNT(edev) ((edev)->num_queues) #define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx) -#define QEDE_TSS_COUNT(edev) (((edev)->num_queues - (edev)->fp_num_rx) * \ - (edev)->num_tc) -#define QEDE_TX_IDX(edev, txqidx) ((edev)->fp_num_rx + 
(txqidx) % \ - QEDE_TSS_COUNT(edev)) -#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / QEDE_TSS_COUNT(edev)) -#define QEDE_TX_QUEUE(edev, txqidx) \ - (&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX(\ - (edev), (txqidx))]) +#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx) struct qed_int_info int_info; unsigned char primary_mac[ETH_ALEN]; @@ -193,7 +185,11 @@ struct qede_dev { u16 vxlan_dst_port; u16 geneve_dst_port; + bool wol_enabled; + struct qede_rdma_dev rdma_info; + + struct bpf_prog *xdp_prog; }; enum QEDE_STATE { @@ -223,39 +219,67 @@ enum qede_agg_state { }; struct qede_agg_info { - struct sw_rx_data replace_buf; - dma_addr_t replace_buf_mapping; - struct sw_rx_data start_buf; - dma_addr_t start_buf_mapping; - struct eth_fast_path_rx_tpa_start_cqe start_cqe; - enum qede_agg_state agg_state; + /* rx_buf is a data buffer that can be placed / consumed from rx bd + * chain. It has two purposes: We will preallocate the data buffer + * for each aggregation when we open the interface and will place this + * buffer on the rx-bd-ring when we receive TPA_START. We don't want + * to be in a state where allocation fails, as we can't reuse the + * consumer buffer in the rx-chain since FW may still be writing to it + * (since header needs to be modified for TPA). + * The second purpose is to keep a pointer to the bd buffer during + * aggregation. + */ + struct sw_rx_data buffer; + dma_addr_t buffer_mapping; + struct sk_buff *skb; - int frag_id; + + /* We need some structs from the start cookie until termination */ u16 vlan_tag; + u16 start_cqe_bd_len; + u8 start_cqe_placement_offset; + + u8 state; + u8 frag_id; + + u8 tunnel_type; }; struct qede_rx_queue { - __le16 *hw_cons_ptr; - struct sw_rx_data *sw_rx_ring; - u16 sw_rx_cons; - u16 sw_rx_prod; - struct qed_chain rx_bd_ring; - struct qed_chain rx_comp_ring; - void __iomem *hw_rxq_prod_addr; + __le16 *hw_cons_ptr; + void __iomem *hw_rxq_prod_addr; + + /* Required for the allocation of replacement buffers */ + struct device *dev; + + struct bpf_prog *xdp_prog; + + u16 sw_rx_cons; + u16 sw_rx_prod; + + u16 num_rx_buffers; /* Slowpath */ + u8 data_direction; + u8 rxq_id; + + u32 rx_buf_size; + u32 rx_buf_seg_size; + + u64 rcv_pkts; + + struct sw_rx_data *sw_rx_ring; + struct qed_chain rx_bd_ring; + struct qed_chain rx_comp_ring ____cacheline_aligned; /* GRO */ - struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM]; + struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM]; - int rx_buf_size; - unsigned int rx_buf_seg_size; + u64 rx_hw_errors; + u64 rx_alloc_errors; + u64 rx_ip_frags; - u16 num_rx_buffers; - u16 rxq_id; + u64 xdp_no_pass; - u64 rcv_pkts; - u64 rx_hw_errors; - u64 rx_alloc_errors; - u64 rx_ip_frags; + void *handle; }; union db_prod { @@ -271,20 +295,39 @@ struct sw_tx_bd { }; struct qede_tx_queue { - int index; /* Queue index */ - __le16 *hw_cons_ptr; - struct sw_tx_bd *sw_tx_ring; - u16 sw_tx_cons; - u16 sw_tx_prod; - struct qed_chain tx_pbl; - void __iomem *doorbell_addr; - union db_prod tx_db; - - u16 num_tx_buffers; - u64 xmit_pkts; - u64 stopped_cnt; - - bool is_legacy; + u8 is_xdp; + bool is_legacy; + u16 sw_tx_cons; + u16 sw_tx_prod; + u16 num_tx_buffers; /* Slowpath only */ + + u64 xmit_pkts; + u64 stopped_cnt; + + __le16 *hw_cons_ptr; + + /* Needed for the mapping of packets */ + struct device *dev; + + void __iomem *doorbell_addr; + union db_prod tx_db; + int index; /* Slowpath only */ +#define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \ + QEDE_MAX_TSS_CNT(edev)) +#define 
QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev)) + + /* Regular Tx requires skb + metadata for release purpose, + * while XDP requires only the pages themselves. + */ + union { + struct sw_tx_bd *skbs; + struct page **pages; + } sw_tx_ring; + + struct qed_chain tx_pbl; + + /* Slowpath; Should be kept in end [unless missing padding] */ + void *handle; }; #define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \ @@ -301,13 +344,16 @@ struct qede_fastpath { struct qede_dev *edev; #define QEDE_FASTPATH_TX BIT(0) #define QEDE_FASTPATH_RX BIT(1) +#define QEDE_FASTPATH_XDP BIT(2) #define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX) u8 type; u8 id; + u8 xdp_xmit; struct napi_struct napi; struct qed_sb_info *sb_info; struct qede_rx_queue *rxq; - struct qede_tx_queue *txqs; + struct qede_tx_queue *txq; + struct qede_tx_queue *xdp_tx; #define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) char name[VEC_NAME_SIZE]; @@ -320,6 +366,7 @@ struct qede_fastpath { #define XMIT_L4_CSUM BIT(0) #define XMIT_LSO BIT(1) #define XMIT_ENC BIT(2) +#define XMIT_ENC_GSO_L4_CSUM BIT(3) #define QEDE_CSUM_ERROR BIT(0) #define QEDE_CSUM_UNNECESSARY BIT(1) @@ -329,8 +376,13 @@ struct qede_fastpath { #define QEDE_SP_VXLAN_PORT_CONFIG 2 #define QEDE_SP_GENEVE_PORT_CONFIG 3 -union qede_reload_args { - u16 mtu; +struct qede_reload_args { + void (*func)(struct qede_dev *edev, struct qede_reload_args *args); + union { + netdev_features_t features; + struct bpf_prog *new_prog; + u16 mtu; + } u; }; #ifdef CONFIG_DCB @@ -339,15 +391,14 @@ void qede_set_dcbnl_ops(struct net_device *ndev); void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level); void qede_set_ethtool_ops(struct net_device *netdev); void qede_reload(struct qede_dev *edev, - void (*func)(struct qede_dev *edev, - union qede_reload_args *args), - union qede_reload_args *args); + struct qede_reload_args *args, bool is_locked); int qede_change_mtu(struct net_device *dev, int new_mtu); void qede_fill_by_demand_stats(struct qede_dev *edev); +void __qede_lock(struct qede_dev *edev); +void __qede_unlock(struct qede_dev *edev); bool qede_has_rx_work(struct qede_rx_queue *rxq); int qede_txq_has_work(struct qede_tx_queue *txq); -void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, - u8 count); +void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count); void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq); #define RX_RING_SIZE_POW 13 @@ -362,8 +413,9 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq); #define NUM_TX_BDS_MIN 128 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX -#define QEDE_MIN_PKT_LEN 64 -#define QEDE_RX_HDR_SIZE 256 +#define QEDE_MIN_PKT_LEN 64 +#define QEDE_RX_HDR_SIZE 256 +#define QEDE_MAX_JUMBO_PACKET_SIZE 9600 #define for_each_queue(i) for (i = 0; i < edev->num_queues; i++) #endif /* _QEDE_H_ */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 7567cc464b88..1c48f445c93b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -16,13 +16,6 @@ #include <linux/capability.h> #include "qede.h" -#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name)) -#define QEDE_STAT_STRING(stat_name) (#stat_name) -#define _QEDE_STAT(stat_name, pf_only) \ - {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only} -#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true) -#define 
QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false) - #define QEDE_RQSTAT_OFFSET(stat_name) \ (offsetof(struct qede_rx_queue, stat_name)) #define QEDE_RQSTAT_STRING(stat_name) (#stat_name) @@ -39,12 +32,10 @@ static const struct { QEDE_RQSTAT(rx_hw_errors), QEDE_RQSTAT(rx_alloc_errors), QEDE_RQSTAT(rx_ip_frags), + QEDE_RQSTAT(xdp_no_pass), }; #define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr) -#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \ - (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\ - qede_rqstats_arr[(sindex)].offset))) #define QEDE_TQSTAT_OFFSET(stat_name) \ (offsetof(struct qede_tx_queue, stat_name)) #define QEDE_TQSTAT_STRING(stat_name) (#stat_name) @@ -59,10 +50,12 @@ static const struct { QEDE_TQSTAT(stopped_cnt), }; -#define QEDE_TQSTATS_DATA(dev, sindex, tssid, tcid) \ - (*((u64 *)(((void *)(&dev->fp_array[tssid].txqs[tcid])) +\ - qede_tqstats_arr[(sindex)].offset))) - +#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name)) +#define QEDE_STAT_STRING(stat_name) (#stat_name) +#define _QEDE_STAT(stat_name, pf_only) \ + {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only} +#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true) +#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false) static const struct { u64 offset; char string[ETH_GSTRING_LEN]; @@ -136,10 +129,6 @@ static const struct { QEDE_STAT(coalesced_bytes), }; -#define QEDE_STATS_DATA(dev, index) \ - (*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \ - + qede_stats_arr[(index)].offset))) - #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr) enum { @@ -157,6 +146,7 @@ enum qede_ethtool_tests { QEDE_ETHTOOL_MEMORY_TEST, QEDE_ETHTOOL_REGISTER_TEST, QEDE_ETHTOOL_CLOCK_TEST, + QEDE_ETHTOOL_NVRAM_TEST, QEDE_ETHTOOL_TEST_MAX }; @@ -166,41 +156,63 @@ static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = { "Memory (online)\t\t", "Register (online)\t", "Clock (online)\t\t", + "Nvram (online)\t\t", }; +static void qede_get_strings_stats_txq(struct qede_dev *edev, + struct qede_tx_queue *txq, u8 **buf) +{ + int i; + + for (i = 0; i < QEDE_NUM_TQSTATS; i++) { + if (txq->is_xdp) + sprintf(*buf, "%d [XDP]: %s", + QEDE_TXQ_XDP_TO_IDX(edev, txq), + qede_tqstats_arr[i].string); + else + sprintf(*buf, "%d: %s", txq->index, + qede_tqstats_arr[i].string); + *buf += ETH_GSTRING_LEN; + } +} + +static void qede_get_strings_stats_rxq(struct qede_dev *edev, + struct qede_rx_queue *rxq, u8 **buf) +{ + int i; + + for (i = 0; i < QEDE_NUM_RQSTATS; i++) { + sprintf(*buf, "%d: %s", rxq->rxq_id, + qede_rqstats_arr[i].string); + *buf += ETH_GSTRING_LEN; + } +} + static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) { - int i, j, k; + struct qede_fastpath *fp; + int i; - for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { - int tc; + /* Account for queue statistics */ + for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) { + fp = &edev->fp_array[i]; - if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { - for (j = 0; j < QEDE_NUM_RQSTATS; j++) - sprintf(buf + (k + j) * ETH_GSTRING_LEN, - "%d: %s", i, - qede_rqstats_arr[j].string); - k += QEDE_NUM_RQSTATS; - } + if (fp->type & QEDE_FASTPATH_RX) + qede_get_strings_stats_rxq(edev, fp->rxq, &buf); - if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { - for (tc = 0; tc < edev->num_tc; tc++) { - for (j = 0; j < QEDE_NUM_TQSTATS; j++) - sprintf(buf + (k + j) * - ETH_GSTRING_LEN, - "%d.%d: %s", i, tc, - qede_tqstats_arr[j].string); - k += QEDE_NUM_TQSTATS; - } - } + if (fp->type & QEDE_FASTPATH_XDP) + 
qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf); + + if (fp->type & QEDE_FASTPATH_TX) + qede_get_strings_stats_txq(edev, fp->txq, &buf); } - for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) { + /* Account for non-queue statistics */ + for (i = 0; i < QEDE_NUM_STATS; i++) { if (IS_VF(edev) && qede_stats_arr[i].pf_only) continue; - strcpy(buf + (k + j) * ETH_GSTRING_LEN, - qede_stats_arr[i].string); - j++; + strcpy(buf, qede_stats_arr[i].string); + buf += ETH_GSTRING_LEN; } } @@ -226,42 +238,61 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf) } } +static void qede_get_ethtool_stats_txq(struct qede_tx_queue *txq, u64 **buf) +{ + int i; + + for (i = 0; i < QEDE_NUM_TQSTATS; i++) { + **buf = *((u64 *)(((void *)txq) + qede_tqstats_arr[i].offset)); + (*buf)++; + } +} + +static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf) +{ + int i; + + for (i = 0; i < QEDE_NUM_RQSTATS; i++) { + **buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset)); + (*buf)++; + } +} + static void qede_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *buf) { struct qede_dev *edev = netdev_priv(dev); - int sidx, cnt = 0; - int qid; + struct qede_fastpath *fp; + int i; qede_fill_by_demand_stats(edev); - mutex_lock(&edev->qede_lock); + /* Need to protect the access to the fastpath array */ + __qede_lock(edev); - for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) { - int tc; + for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) { + fp = &edev->fp_array[i]; - if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) { - for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) - buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid); - } + if (fp->type & QEDE_FASTPATH_RX) + qede_get_ethtool_stats_rxq(fp->rxq, &buf); - if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) { - for (tc = 0; tc < edev->num_tc; tc++) { - for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++) - buf[cnt++] = QEDE_TQSTATS_DATA(edev, - sidx, - qid, tc); - } - } + if (fp->type & QEDE_FASTPATH_XDP) + qede_get_ethtool_stats_txq(fp->xdp_tx, &buf); + + if (fp->type & QEDE_FASTPATH_TX) + qede_get_ethtool_stats_txq(fp->txq, &buf); } - for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) { - if (IS_VF(edev) && qede_stats_arr[sidx].pf_only) + for (i = 0; i < QEDE_NUM_STATS; i++) { + if (IS_VF(edev) && qede_stats_arr[i].pf_only) continue; - buf[cnt++] = QEDE_STATS_DATA(edev, sidx); + *buf = *((u64 *)(((void *)&edev->stats) + + qede_stats_arr[i].offset)); + + buf++; } - mutex_unlock(&edev->qede_lock); + __qede_unlock(edev); } static int qede_get_sset_count(struct net_device *dev, int stringset) @@ -278,8 +309,18 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) if (qede_stats_arr[i].pf_only) num_stats--; } - return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS + - QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc; + + /* Account for the Regular Tx statistics */ + num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS; + + /* Account for the Regular Rx statistics */ + num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS; + + /* Account for XDP statistics [if needed] */ + if (edev->xdp_prog) + num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_TQSTATS; + return num_stats; + case ETH_SS_PRIV_FLAGS: return QEDE_PRI_FLAG_LEN; case ETH_SS_TEST: @@ -325,7 +366,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = { { \ int i; \ \ - for (i = 0; i < QED_LM_COUNT; i++) { \ + for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \ if ((caps) & (qed_lm_map[i].qed_link_mode)) \ __set_bit(qed_lm_map[i].ethtool_link_mode,\ 
lk_ksettings->link_modes.name); \ @@ -336,7 +377,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = { { \ int i; \ \ - for (i = 0; i < QED_LM_COUNT; i++) { \ + for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \ if (test_bit(qed_lm_map[i].ethtool_link_mode, \ lk_ksettings->link_modes.name)) \ caps |= qed_lm_map[i].qed_link_mode; \ @@ -350,6 +391,8 @@ static int qede_get_link_ksettings(struct net_device *dev, struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; + __qede_lock(edev); + memset(¤t_link, 0, sizeof(current_link)); edev->ops->common->get_link(edev->cdev, ¤t_link); @@ -369,6 +412,9 @@ static int qede_get_link_ksettings(struct net_device *dev, base->speed = SPEED_UNKNOWN; base->duplex = DUPLEX_UNKNOWN; } + + __qede_unlock(edev); + base->port = current_link.port; base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; @@ -488,6 +534,45 @@ static void qede_get_drvinfo(struct net_device *ndev, strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info)); } +static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct qede_dev *edev = netdev_priv(ndev); + + if (edev->dev_info.common.wol_support) { + wol->supported = WAKE_MAGIC; + wol->wolopts = edev->wol_enabled ? WAKE_MAGIC : 0; + } +} + +static int qede_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct qede_dev *edev = netdev_priv(ndev); + bool wol_requested; + int rc; + + if (wol->wolopts & ~WAKE_MAGIC) { + DP_INFO(edev, + "Can't support WoL options other than magic-packet\n"); + return -EINVAL; + } + + wol_requested = !!(wol->wolopts & WAKE_MAGIC); + if (wol_requested == edev->wol_enabled) + return 0; + + /* Need to actually change configuration */ + if (!edev->dev_info.common.wol_support) { + DP_INFO(edev, "Device doesn't support WoL\n"); + return -EINVAL; + } + + rc = edev->ops->common->update_wol(edev->cdev, wol_requested); + if (!rc) + edev->wol_enabled = wol_requested; + + return rc; +} + static u32 qede_get_msglevel(struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); @@ -638,8 +723,7 @@ static int qede_set_ringparam(struct net_device *dev, edev->q_num_rx_buffers = ering->rx_pending; edev->q_num_tx_buffers = ering->tx_pending; - if (netif_running(edev->ndev)) - qede_reload(edev, NULL, NULL); + qede_reload(edev, NULL, false); return 0; } @@ -724,35 +808,27 @@ static int qede_get_regs_len(struct net_device *ndev) return -EINVAL; } -static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args) +static void qede_update_mtu(struct qede_dev *edev, + struct qede_reload_args *args) { - edev->ndev->mtu = args->mtu; + edev->ndev->mtu = args->u.mtu; } /* Netdevice NDOs */ -#define ETH_MAX_JUMBO_PACKET_SIZE 9600 -#define ETH_MIN_PACKET_SIZE 60 int qede_change_mtu(struct net_device *ndev, int new_mtu) { struct qede_dev *edev = netdev_priv(ndev); - union qede_reload_args args; - - if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || - ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) { - DP_ERR(edev, "Can't support requested MTU size\n"); - return -EINVAL; - } + struct qede_reload_args args; DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "Configuring MTU size of %d\n", new_mtu); - /* Set the mtu field and re-start the interface if needed*/ - args.mtu = new_mtu; - - if (netif_running(edev->ndev)) - qede_reload(edev, &qede_update_mtu, &args); + /* Set the mtu field and re-start the interface if needed */ + args.u.mtu = new_mtu; + args.func = &qede_update_mtu; + qede_reload(edev, &args, false); - 
qede_update_mtu(edev, &args); + edev->ops->common->update_mtu(edev->cdev, new_mtu); return 0; } @@ -836,8 +912,7 @@ static int qede_set_channels(struct net_device *dev, sizeof(edev->rss_params.rss_ind_table)); } - if (netif_running(dev)) - qede_reload(edev, NULL, NULL); + qede_reload(edev, NULL, false); return 0; } @@ -1143,7 +1218,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev, for_each_queue(i) { if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { - txq = edev->fp_array[i].txqs; + txq = edev->fp_array[i].txq; break; } } @@ -1155,7 +1230,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev, /* Fill the entry in the SW ring and the BDs in the FW ring */ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; - txq->sw_tx_ring[idx].skb = skb; + txq->sw_tx_ring.skbs[idx].skb = skb; first_bd = qed_chain_produce(&txq->tx_pbl); memset(first_bd, 0, sizeof(*first_bd)); val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; @@ -1209,7 +1284,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev, dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE); txq->sw_tx_cons++; - txq->sw_tx_ring[idx].skb = NULL; + txq->sw_tx_ring.skbs[idx].skb = NULL; return 0; } @@ -1277,13 +1352,13 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev) break; } - qede_recycle_rx_bd_ring(rxq, edev, 1); + qede_recycle_rx_bd_ring(rxq, 1); qed_chain_recycle_consumed(&rxq->rx_comp_ring); break; } DP_INFO(edev, "Not the transmitted packet\n"); - qede_recycle_rx_bd_ring(rxq, edev, 1); + qede_recycle_rx_bd_ring(rxq, 1); qed_chain_recycle_consumed(&rxq->rx_comp_ring); } @@ -1405,6 +1480,11 @@ static void qede_self_test(struct net_device *dev, buf[QEDE_ETHTOOL_CLOCK_TEST] = 1; etest->flags |= ETH_TEST_FL_FAILED; } + + if (edev->ops->common->selftest->selftest_nvram(edev->cdev)) { + buf[QEDE_ETHTOOL_NVRAM_TEST] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } } static int qede_set_tunable(struct net_device *dev, @@ -1455,6 +1535,8 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_drvinfo = qede_get_drvinfo, .get_regs_len = qede_get_regs_len, .get_regs = qede_get_regs, + .get_wol = qede_get_wol, + .set_wol = qede_set_wol, .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, .nway_reset = qede_nway_reset, diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 85f46dbecd5b..aecdd1c5c0ea 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -94,11 +94,26 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id); #define TX_TIMEOUT (5 * HZ) +/* Utilize last protocol index for XDP */ +#define XDP_PI 11 + static void qede_remove(struct pci_dev *pdev); -static int qede_alloc_rx_buffer(struct qede_dev *edev, - struct qede_rx_queue *rxq); +static void qede_shutdown(struct pci_dev *pdev); static void qede_link_update(void *dev, struct qed_link_output *link); +/* The qede lock is used to protect driver state change and driver flows that + * are not reentrant. 
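Illustration only, not part of the patch: callers are expected to package their change into the new struct qede_reload_args and let qede_reload() apply it under the qede lock, mirroring the MTU path above. example_apply() and example_set_mtu() are invented names; the sketch assumes the surrounding driver headers:

/* Callback invoked by qede_reload() while the device is safe to touch;
 * qede_update_mtu() above is the real instance of this shape.
 */
static void example_apply(struct qede_dev *edev, struct qede_reload_args *args)
{
        edev->ndev->mtu = args->u.mtu;
}

static void example_set_mtu(struct qede_dev *edev, int new_mtu)
{
        struct qede_reload_args args;

        args.u.mtu = new_mtu;
        args.func = &example_apply;

        /* qede_reload() runs args.func() as part of the reload flow;
         * is_locked=false means the qede lock is not already held here.
         */
        qede_reload(edev, &args, false);
}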
+ */ +void __qede_lock(struct qede_dev *edev) +{ + mutex_lock(&edev->qede_lock); +} + +void __qede_unlock(struct qede_dev *edev) +{ + mutex_unlock(&edev->qede_lock); +} + #ifdef CONFIG_QED_SRIOV static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) @@ -166,15 +181,20 @@ static struct pci_driver qede_pci_driver = { .id_table = qede_pci_tbl, .probe = qede_probe, .remove = qede_remove, + .shutdown = qede_shutdown, #ifdef CONFIG_QED_SRIOV .sriov_configure = qede_sriov_configure, #endif }; -static void qede_force_mac(void *dev, u8 *mac) +static void qede_force_mac(void *dev, u8 *mac, bool forced) { struct qede_dev *edev = dev; + /* MAC hints take effect only if we haven't set one already */ + if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) + return; + ether_addr_copy(edev->ndev->dev_addr, mac); ether_addr_copy(edev->primary_mac, mac); } @@ -284,12 +304,12 @@ static int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len) { u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX; - struct sk_buff *skb = txq->sw_tx_ring[idx].skb; + struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; struct eth_tx_1st_bd *first_bd; struct eth_tx_bd *tx_data_bd; int bds_consumed = 0; int nbds; - bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD; + bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; int i, split_bd_len = 0; if (unlikely(!skb)) { @@ -329,20 +349,19 @@ static int qede_free_tx_pkt(struct qede_dev *edev, /* Free skb */ dev_kfree_skb_any(skb); - txq->sw_tx_ring[idx].skb = NULL; - txq->sw_tx_ring[idx].flags = 0; + txq->sw_tx_ring.skbs[idx].skb = NULL; + txq->sw_tx_ring.skbs[idx].flags = 0; return 0; } /* Unmap the data and free skb when mapping failed during start_xmit */ -static void qede_free_failed_tx_pkt(struct qede_dev *edev, - struct qede_tx_queue *txq, +static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq, struct eth_tx_1st_bd *first_bd, int nbd, bool data_split) { u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; - struct sk_buff *skb = txq->sw_tx_ring[idx].skb; + struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; struct eth_tx_bd *tx_data_bd; int i, split_bd_len = 0; @@ -359,7 +378,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, nbd--; } - dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); /* Unmap the data of the skb frags */ @@ -367,7 +386,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, tx_data_bd = (struct eth_tx_bd *) qed_chain_produce(&txq->tx_pbl); if (tx_data_bd->nbytes) - dma_unmap_page(&edev->pdev->dev, + dma_unmap_page(txq->dev, BD_UNMAP_ADDR(tx_data_bd), BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); } @@ -378,12 +397,11 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, /* Free skb */ dev_kfree_skb_any(skb); - txq->sw_tx_ring[idx].skb = NULL; - txq->sw_tx_ring[idx].flags = 0; + txq->sw_tx_ring.skbs[idx].skb = NULL; + txq->sw_tx_ring.skbs[idx].flags = 0; } -static u32 qede_xmit_type(struct qede_dev *edev, - struct sk_buff *skb, int *ipv6_ext) +static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext) { u32 rc = XMIT_L4_CSUM; __be16 l3_proto; @@ -396,8 +414,19 @@ static u32 qede_xmit_type(struct qede_dev *edev, (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) *ipv6_ext = 1; - if (skb->encapsulation) + if (skb->encapsulation) { rc |= XMIT_ENC; + if (skb_is_gso(skb)) { + unsigned short gso_type = skb_shinfo(skb)->gso_type; + + if 
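
The MAC-hint change in qede_force_mac() above keys off is_valid_ether_addr(). As a reminder of what that test accepts, here is a rough userspace equivalent (reject the all-zero address and any multicast/group address); it is an illustration only, not the kernel helper.

#include <stdbool.h>
#include <stdio.h>

static bool mac_is_valid(const unsigned char a[6])
{
    bool all_zero = !(a[0] | a[1] | a[2] | a[3] | a[4] | a[5]);
    bool group = a[0] & 0x01;           /* multicast/broadcast bit */

    return !all_zero && !group;
}

int main(void)
{
    unsigned char unicast[6]   = { 0x02, 0x00, 0x11, 0x22, 0x33, 0x44 };
    unsigned char broadcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

    printf("%d %d\n", mac_is_valid(unicast), mac_is_valid(broadcast));
    /* prints: 1 0 */
    return 0;
}
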
((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) || + (gso_type & SKB_GSO_GRE_CSUM)) + rc |= XMIT_ENC_GSO_L4_CSUM; + + rc |= XMIT_LSO; + return rc; + } + } if (skb_is_gso(skb)) rc |= XMIT_LSO; @@ -439,18 +468,16 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); } -static int map_frag_to_bd(struct qede_dev *edev, +static int map_frag_to_bd(struct qede_tx_queue *txq, skb_frag_t *frag, struct eth_tx_bd *bd) { dma_addr_t mapping; /* Map skb non-linear frag data for DMA */ - mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0, + mapping = skb_frag_dma_map(txq->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { - DP_NOTICE(edev, "Unable to map frag - dropping packet\n"); + if (unlikely(dma_mapping_error(txq->dev, mapping))) return -ENOMEM; - } /* Setup the data pointer of the frag data */ BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); @@ -470,8 +497,7 @@ static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt) /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */ #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) -static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb, - u8 xmit_type) +static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type) { int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; @@ -507,6 +533,47 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq) mmiowb(); } +static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, + struct sw_rx_data *metadata, u16 padding, u16 length) +{ + struct qede_tx_queue *txq = fp->xdp_tx; + u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + struct eth_tx_1st_bd *first_bd; + + if (!qed_chain_get_elem_left(&txq->tx_pbl)) { + txq->stopped_cnt++; + return -ENOMEM; + } + + first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); + + memset(first_bd, 0, sizeof(*first_bd)); + first_bd->data.bd_flags.bitfields = + BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); + first_bd->data.bitfields |= + (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << + ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; + first_bd->data.nbds = 1; + + /* We can safely ignore the offset, as it's 0 for XDP */ + BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length); + + /* Synchronize the buffer back to device, as program [probably] + * has changed it. 
+ */ + dma_sync_single_for_device(&edev->pdev->dev, + metadata->mapping + padding, + length, PCI_DMA_TODEVICE); + + txq->sw_tx_ring.pages[idx] = metadata->data; + txq->sw_tx_prod++; + + /* Mark the fastpath for future XDP doorbell */ + fp->xdp_xmit = 1; + + return 0; +} + /* Main transmit function */ static netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) @@ -530,15 +597,15 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* Get tx-queue context and netdev index */ txq_index = skb_get_queue_mapping(skb); WARN_ON(txq_index >= QEDE_TSS_COUNT(edev)); - txq = QEDE_TX_QUEUE(edev, txq_index); + txq = edev->fp_array[edev->fp_num_rx + txq_index].txq; netdev_txq = netdev_get_tx_queue(ndev, txq_index); WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); - xmit_type = qede_xmit_type(edev, skb, &ipv6_ext); + xmit_type = qede_xmit_type(skb, &ipv6_ext); #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) - if (qede_pkt_req_lin(edev, skb, xmit_type)) { + if (qede_pkt_req_lin(skb, xmit_type)) { if (skb_linearize(skb)) { DP_NOTICE(edev, "SKB linearization failed - silently dropping this SKB\n"); @@ -550,7 +617,7 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* Fill the entry in the SW ring and the BDs in the FW ring */ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; - txq->sw_tx_ring[idx].skb = skb; + txq->sw_tx_ring.skbs[idx].skb = skb; first_bd = (struct eth_tx_1st_bd *) qed_chain_produce(&txq->tx_pbl); memset(first_bd, 0, sizeof(*first_bd)); @@ -558,11 +625,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb, 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; /* Map skb linear data for DMA and set in the first BD */ - mapping = dma_map_single(&edev->pdev->dev, skb->data, + mapping = dma_map_single(txq->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { + if (unlikely(dma_mapping_error(txq->dev, mapping))) { DP_NOTICE(edev, "SKB mapping failed\n"); - qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false); + qede_free_failed_tx_pkt(txq, first_bd, 0, false); qede_update_tx_producer(txq); return NETDEV_TX_OK; } @@ -633,6 +700,12 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb, if (unlikely(xmit_type & XMIT_ENC)) { first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; + + if (xmit_type & XMIT_ENC_GSO_L4_CSUM) { + u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; + + first_bd->data.bd_flags.bitfields |= 1 << tmp; + } hlen = qede_get_skb_hlen(skb, true); } else { first_bd->data.bd_flags.bitfields |= @@ -664,7 +737,7 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* this marks the BD as one that has no * individual mapping */ - txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD; + txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; first_bd->nbytes = cpu_to_le16(hlen); @@ -680,12 +753,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* Handle fragmented skb */ /* special handle for frags inside 2nd and 3rd bds.. 
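
The sw_tx_prod/sw_tx_cons handling in qede_xdp_xmit() and qede_xdp_tx_int() above (like the Rx ring code) relies on free-running 16-bit counters masked with a power-of-two-minus-one constant such as NUM_TX_BDS_MAX. A self-contained sketch of that convention follows; RING_SIZE is an arbitrary example, not a driver constant.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8                     /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)       /* plays the NUM_*_BDS_MAX role */

struct ring {
    int slots[RING_SIZE];
    uint16_t prod;                      /* only ever incremented */
    uint16_t cons;                      /* only ever incremented */
};

static int ring_put(struct ring *r, int v)
{
    if ((uint16_t)(r->prod - r->cons) == RING_SIZE)
        return -1;                      /* full */
    r->slots[r->prod++ & RING_MASK] = v;
    return 0;
}

static int ring_get(struct ring *r, int *v)
{
    if (r->prod == r->cons)
        return -1;                      /* empty */
    *v = r->slots[r->cons++ & RING_MASK];
    return 0;
}

int main(void)
{
    struct ring r = { .prod = 0, .cons = 0 };
    int v;

    ring_put(&r, 42);
    ring_get(&r, &v);
    printf("%d\n", v);                  /* 42 */
    return 0;
}
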
*/ while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { - rc = map_frag_to_bd(edev, + rc = map_frag_to_bd(txq, &skb_shinfo(skb)->frags[frag_idx], tx_data_bd); if (rc) { - qede_free_failed_tx_pkt(edev, txq, first_bd, nbd, - data_split); + qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); qede_update_tx_producer(txq); return NETDEV_TX_OK; } @@ -705,12 +777,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb, memset(tx_data_bd, 0, sizeof(*tx_data_bd)); - rc = map_frag_to_bd(edev, + rc = map_frag_to_bd(txq, &skb_shinfo(skb)->frags[frag_idx], tx_data_bd); if (rc) { - qede_free_failed_tx_pkt(edev, txq, first_bd, nbd, - data_split); + qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); qede_update_tx_producer(txq); return NETDEV_TX_OK; } @@ -775,6 +846,27 @@ int qede_txq_has_work(struct qede_tx_queue *txq) return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); } +static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) +{ + struct eth_tx_1st_bd *bd; + u16 hw_bd_cons; + + hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); + barrier(); + + while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { + bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); + + dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd), + PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons & + NUM_TX_BDS_MAX]); + + txq->sw_tx_cons++; + txq->xmit_pkts++; + } +} + static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) { struct netdev_queue *netdev_txq; @@ -858,16 +950,6 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq) return hw_comp_cons != sw_comp_cons; } -static bool qede_has_tx_work(struct qede_fastpath *fp) -{ - u8 tc; - - for (tc = 0; tc < fp->edev->num_tc; tc++) - if (qede_txq_has_work(&fp->txqs[tc])) - return true; - return false; -} - static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) { qed_chain_consume(&rxq->rx_bd_ring); @@ -877,8 +959,7 @@ static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) /* This function reuses the buffer(from an offset) from * consumer index to producer index in the bd ring */ -static inline void qede_reuse_page(struct qede_dev *edev, - struct qede_rx_queue *rxq, +static inline void qede_reuse_page(struct qede_rx_queue *rxq, struct sw_rx_data *curr_cons) { struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); @@ -900,27 +981,62 @@ static inline void qede_reuse_page(struct qede_dev *edev, /* In case of allocation failures reuse buffers * from consumer index to produce buffers for firmware */ -void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, - struct qede_dev *edev, u8 count) +void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count) { struct sw_rx_data *curr_cons; for (; count > 0; count--) { curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; - qede_reuse_page(edev, rxq, curr_cons); + qede_reuse_page(rxq, curr_cons); qede_rx_bd_ring_consume(rxq); } } -static inline int qede_realloc_rx_buffer(struct qede_dev *edev, - struct qede_rx_queue *rxq, +static int qede_alloc_rx_buffer(struct qede_rx_queue *rxq) +{ + struct sw_rx_data *sw_rx_data; + struct eth_rx_bd *rx_bd; + dma_addr_t mapping; + struct page *data; + + data = alloc_pages(GFP_ATOMIC, 0); + if (unlikely(!data)) + return -ENOMEM; + + /* Map the entire page as it would be used + * for multiple RX buffer segment size mapping. 
+ */ + mapping = dma_map_page(rxq->dev, data, 0, + PAGE_SIZE, rxq->data_direction); + if (unlikely(dma_mapping_error(rxq->dev, mapping))) { + __free_page(data); + return -ENOMEM; + } + + sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + sw_rx_data->page_offset = 0; + sw_rx_data->data = data; + sw_rx_data->mapping = mapping; + + /* Advance PROD and get BD pointer */ + rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); + WARN_ON(!rx_bd); + rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); + rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping)); + + rxq->sw_rx_prod++; + + return 0; +} + +static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq, struct sw_rx_data *curr_cons) { /* Move to the next segment in the page */ curr_cons->page_offset += rxq->rx_buf_seg_size; if (curr_cons->page_offset == PAGE_SIZE) { - if (unlikely(qede_alloc_rx_buffer(edev, rxq))) { + if (unlikely(qede_alloc_rx_buffer(rxq))) { /* Since we failed to allocate new buffer * current buffer can be used again. */ @@ -929,15 +1045,15 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev, return -ENOMEM; } - dma_unmap_page(&edev->pdev->dev, curr_cons->mapping, - PAGE_SIZE, DMA_FROM_DEVICE); + dma_unmap_page(rxq->dev, curr_cons->mapping, + PAGE_SIZE, rxq->data_direction); } else { /* Increment refcount of the page as we don't want * network stack to take the ownership of the page * which can be recycled multiple times by the driver. */ page_ref_inc(curr_cons->data); - qede_reuse_page(edev, rxq, curr_cons); + qede_reuse_page(rxq, curr_cons); } return 0; @@ -971,22 +1087,20 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) mmiowb(); } -static u32 qede_get_rxhash(struct qede_dev *edev, - u8 bitfields, - __le32 rss_hash, enum pkt_hash_types *rxhash_type) +static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash) { + enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE; enum rss_hash_type htype; + u32 hash = 0; htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE); - - if ((edev->ndev->features & NETIF_F_RXHASH) && htype) { - *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) || - (htype == RSS_HASH_TYPE_IPV6)) ? - PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4; - return le32_to_cpu(rss_hash); + if (htype) { + hash_type = ((htype == RSS_HASH_TYPE_IPV4) || + (htype == RSS_HASH_TYPE_IPV6)) ? 
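
A back-of-the-envelope view of the page reuse done above: the whole page is DMA-mapped once and handed out in rx_buf_seg_size slices, and it is only unmapped when page_offset walks past PAGE_SIZE. The 4 KB page and 1536-byte buffer below are example values, and roundup_pow_of_two() here is a local stand-in for the kernel helper of the same name.

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int v)
{
    unsigned int r = 1;

    while (r < v)
        r <<= 1;
    return r;
}

int main(void)
{
    unsigned int page_size = 4096, rx_buf_size = 1536;
    unsigned int seg = roundup_pow_of_two(rx_buf_size);     /* 2048 */
    unsigned int offset, users = 0;

    for (offset = 0; offset + seg <= page_size; offset += seg)
        users++;                        /* page_ref_inc() + reuse per slice */

    printf("%u buffers per page, seg=%u\n", users, seg);    /* 2, 2048 */
    return 0;
}
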
+ PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4; + hash = le32_to_cpu(rss_hash); } - *rxhash_type = PKT_HASH_TYPE_NONE; - return 0; + skb_set_hash(skb, hash, hash_type); } static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) @@ -1002,12 +1116,14 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) static inline void qede_skb_receive(struct qede_dev *edev, struct qede_fastpath *fp, + struct qede_rx_queue *rxq, struct sk_buff *skb, u16 vlan_tag) { if (vlan_tag) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); napi_gro_receive(&fp->napi, skb); + fp->rxq->rcv_pkts++; } static void qede_set_gro_params(struct qede_dev *edev, @@ -1035,7 +1151,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev, struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; struct sk_buff *skb = tpa_info->skb; - if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START)) + if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) goto out; /* Add one frag and update the appropriate fields in the skb */ @@ -1043,7 +1159,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev, current_bd->data, current_bd->page_offset, len_on_bd); - if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) { + if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) { /* Incr page ref count to reuse on allocation failure * so that it doesn't get freed while freeing SKB. */ @@ -1061,8 +1177,9 @@ static int qede_fill_frag_skb(struct qede_dev *edev, return 0; out: - tpa_info->agg_state = QEDE_AGG_STATE_ERROR; - qede_recycle_rx_bd_ring(rxq, edev, 1); + tpa_info->state = QEDE_AGG_STATE_ERROR; + qede_recycle_rx_bd_ring(rxq, 1); + return -ENOMEM; } @@ -1073,12 +1190,10 @@ static void qede_tpa_start(struct qede_dev *edev, struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring); struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); - struct sw_rx_data *replace_buf = &tpa_info->replace_buf; - dma_addr_t mapping = tpa_info->replace_buf_mapping; + struct sw_rx_data *replace_buf = &tpa_info->buffer; + dma_addr_t mapping = tpa_info->buffer_mapping; struct sw_rx_data *sw_rx_data_cons; struct sw_rx_data *sw_rx_data_prod; - enum pkt_hash_types rxhash_type; - u32 rxhash; sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; @@ -1099,11 +1214,11 @@ static void qede_tpa_start(struct qede_dev *edev, /* move partial skb from cons to pool (don't unmap yet) * save mapping, incase we drop the packet later on. 
*/ - tpa_info->start_buf = *sw_rx_data_cons; + tpa_info->buffer = *sw_rx_data_cons; mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi), le32_to_cpu(rx_bd_cons->addr.lo)); - tpa_info->start_buf_mapping = mapping; + tpa_info->buffer_mapping = mapping; rxq->sw_rx_cons++; /* set tpa state to start only if we are able to allocate skb @@ -1114,27 +1229,27 @@ static void qede_tpa_start(struct qede_dev *edev, le16_to_cpu(cqe->len_on_first_bd)); if (unlikely(!tpa_info->skb)) { DP_NOTICE(edev, "Failed to allocate SKB for gro\n"); - tpa_info->agg_state = QEDE_AGG_STATE_ERROR; + tpa_info->state = QEDE_AGG_STATE_ERROR; goto cons_buf; } - skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); - memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe)); - /* Start filling in the aggregation info */ + skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); tpa_info->frag_id = 0; - tpa_info->agg_state = QEDE_AGG_STATE_START; + tpa_info->state = QEDE_AGG_STATE_START; - rxhash = qede_get_rxhash(edev, cqe->bitfields, - cqe->rss_hash, &rxhash_type); - skb_set_hash(tpa_info->skb, rxhash, rxhash_type); + /* Store some information from first CQE */ + tpa_info->start_cqe_placement_offset = cqe->placement_offset; + tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd); if ((le16_to_cpu(cqe->pars_flags.flags) >> PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) & - PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) + PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); else tpa_info->vlan_tag = 0; + qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); + /* This is needed in order to enable forwarding support */ qede_set_gro_params(edev, tpa_info->skb, cqe); @@ -1146,7 +1261,7 @@ cons_buf: /* We still need to handle bd_len_list to consume buffers */ if (unlikely(cqe->ext_bd_len_list[1])) { DP_ERR(edev, "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n"); - tpa_info->agg_state = QEDE_AGG_STATE_ERROR; + tpa_info->state = QEDE_AGG_STATE_ERROR; } } @@ -1197,7 +1312,7 @@ static void qede_gro_receive(struct qede_dev *edev, #ifdef CONFIG_INET if (skb_shinfo(skb)->gso_size) { - skb_set_network_header(skb, 0); + skb_reset_network_header(skb); switch (skb->protocol) { case htons(ETH_P_IP): @@ -1216,7 +1331,7 @@ static void qede_gro_receive(struct qede_dev *edev, send_skb: skb_record_rx_queue(skb, fp->rxq->rxq_id); - qede_skb_receive(edev, fp, skb, vlan_tag); + qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag); } static inline void qede_tpa_cont(struct qede_dev *edev, @@ -1253,7 +1368,7 @@ static void qede_tpa_end(struct qede_dev *edev, DP_ERR(edev, "Strange - TPA emd with more than a single len_list entry\n"); - if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START)) + if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) goto err; /* Sanity */ @@ -1267,14 +1382,9 @@ static void qede_tpa_end(struct qede_dev *edev, le16_to_cpu(cqe->total_packet_len), skb->len); memcpy(skb->data, - page_address(tpa_info->start_buf.data) + - tpa_info->start_cqe.placement_offset + - tpa_info->start_buf.page_offset, - le16_to_cpu(tpa_info->start_cqe.len_on_first_bd)); - - /* Recycle [mapped] start buffer for the next replacement */ - tpa_info->replace_buf = tpa_info->start_buf; - tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping; + page_address(tpa_info->buffer.data) + + tpa_info->start_cqe_placement_offset + + tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len); /* Finalize the SKB */ skb->protocol = eth_type_trans(skb, 
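
A toy model of the per-aggregation life cycle that the renamed tpa_info->state field tracks in the TPA code around here. The state names mirror QEDE_AGG_STATE_*, but everything else (types, the skb_ok flag) is simplified for illustration; the driver's real transitions also depend on replacement-buffer and SKB allocation.

#include <stdio.h>

enum agg_state { AGG_NONE, AGG_START, AGG_ERROR };

struct agg { enum agg_state state; int frags; };

static void tpa_start(struct agg *a, int skb_ok)
{
    a->state = skb_ok ? AGG_START : AGG_ERROR;
    a->frags = 0;
}

static void tpa_cont(struct agg *a)
{
    if (a->state == AGG_START)
        a->frags++;                     /* qede_fill_frag_skb() adds a page frag */
}

static void tpa_end(struct agg *a)
{
    if (a->state == AGG_START)
        printf("deliver aggregated skb, %d extra frags\n", a->frags);
    else
        printf("drop aggregation\n");
    a->state = AGG_NONE;
}

int main(void)
{
    struct agg a = { AGG_NONE, 0 };

    tpa_start(&a, 1);
    tpa_cont(&a);
    tpa_cont(&a);
    tpa_end(&a);                        /* deliver ..., 2 extra frags */
    return 0;
}
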
edev->ndev); @@ -1287,18 +1397,11 @@ static void qede_tpa_end(struct qede_dev *edev, qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag); - tpa_info->agg_state = QEDE_AGG_STATE_NONE; + tpa_info->state = QEDE_AGG_STATE_NONE; return; err: - /* The BD starting the aggregation is still mapped; Re-use it for - * future aggregations [as replacement buffer] - */ - memcpy(&tpa_info->replace_buf, &tpa_info->start_buf, - sizeof(struct sw_rx_data)); - tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping; - tpa_info->start_buf.data = NULL; - tpa_info->agg_state = QEDE_AGG_STATE_NONE; + tpa_info->state = QEDE_AGG_STATE_NONE; dev_kfree_skb_any(tpa_info->skb); tpa_info->skb = NULL; } @@ -1380,238 +1483,364 @@ static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe, return false; } -static int qede_rx_int(struct qede_fastpath *fp, int budget) +/* Return true iff packet is to be passed to stack */ +static bool qede_rx_xdp(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_rx_queue *rxq, + struct bpf_prog *prog, + struct sw_rx_data *bd, + struct eth_fast_path_rx_reg_cqe *cqe) { - struct qede_dev *edev = fp->edev; - struct qede_rx_queue *rxq = fp->rxq; - - u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag; - int rx_pkt = 0; - u8 csum_flag; + u16 len = le16_to_cpu(cqe->len_on_first_bd); + struct xdp_buff xdp; + enum xdp_action act; - hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); - sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + xdp.data = page_address(bd->data) + cqe->placement_offset; + xdp.data_end = xdp.data + len; - /* Memory barrier to prevent the CPU from doing speculative reads of CQE - * / BD in the while-loop before reading hw_comp_cons. If the CQE is - * read before it is written by FW, then FW writes CQE and SB, and then - * the CPU reads the hw_comp_cons, it will use an old CQE. + /* Queues always have a full reset currently, so for the time + * being until there's atomic program replace just mark read + * side for map helpers. */ - rmb(); + rcu_read_lock(); + act = bpf_prog_run_xdp(prog, &xdp); + rcu_read_unlock(); - /* Loop to complete all indicated BDs */ - while (sw_comp_cons != hw_comp_cons) { - struct eth_fast_path_rx_reg_cqe *fp_cqe; - enum pkt_hash_types rxhash_type; - enum eth_rx_cqe_type cqe_type; - struct sw_rx_data *sw_rx_data; - union eth_rx_cqe *cqe; - struct sk_buff *skb; - struct page *data; - __le16 flags; - u16 len, pad; - u32 rx_hash; - - /* Get the CQE from the completion ring */ - cqe = (union eth_rx_cqe *) - qed_chain_consume(&rxq->rx_comp_ring); - cqe_type = cqe->fast_path_regular.type; - - if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { - edev->ops->eth_cqe_completion( - edev->cdev, fp->id, - (struct eth_slow_path_rx_cqe *)cqe); - goto next_cqe; + if (act == XDP_PASS) + return true; + + /* Count number of packets not to be passed to stack */ + rxq->xdp_no_pass++; + + switch (act) { + case XDP_TX: + /* We need the replacement buffer before transmit. 
*/ + if (qede_alloc_rx_buffer(rxq)) { + qede_recycle_rx_bd_ring(rxq, 1); + return false; } - if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { - switch (cqe_type) { - case ETH_RX_CQE_TYPE_TPA_START: - qede_tpa_start(edev, rxq, - &cqe->fast_path_tpa_start); - goto next_cqe; - case ETH_RX_CQE_TYPE_TPA_CONT: - qede_tpa_cont(edev, rxq, - &cqe->fast_path_tpa_cont); - goto next_cqe; - case ETH_RX_CQE_TYPE_TPA_END: - qede_tpa_end(edev, fp, - &cqe->fast_path_tpa_end); - goto next_rx_only; - default: - break; - } + /* Now if there's a transmission problem, we'd still have to + * throw current buffer, as replacement was already allocated. + */ + if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) { + dma_unmap_page(rxq->dev, bd->mapping, + PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(bd->data); } - /* Get the data from the SW ring */ - sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; - sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; - data = sw_rx_data->data; - - fp_cqe = &cqe->fast_path_regular; - len = le16_to_cpu(fp_cqe->len_on_first_bd); - pad = fp_cqe->placement_offset; - flags = cqe->fast_path_regular.pars_flags.flags; - - /* If this is an error packet then drop it */ - parse_flag = le16_to_cpu(flags); - - csum_flag = qede_check_csum(parse_flag); - if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { - if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular, - parse_flag)) { - rxq->rx_ip_frags++; - goto alloc_skb; - } + /* Regardless, we've consumed an Rx BD */ + qede_rx_bd_ring_consume(rxq); + return false; - DP_NOTICE(edev, - "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", - sw_comp_cons, parse_flag); - rxq->rx_hw_errors++; - qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num); - goto next_cqe; - } + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + case XDP_DROP: + qede_recycle_rx_bd_ring(rxq, cqe->bd_num); + } -alloc_skb: - skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); - if (unlikely(!skb)) { - DP_NOTICE(edev, - "skb allocation failed, dropping incoming packet\n"); - qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num); - rxq->rx_alloc_errors++; - goto next_cqe; + return false; +} + +static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sw_rx_data *bd, u16 len, + u16 pad) +{ + unsigned int offset = bd->page_offset; + struct skb_frag_struct *frag; + struct page *page = bd->data; + unsigned int pull_len; + struct sk_buff *skb; + unsigned char *va; + + /* Allocate a new SKB with a sufficient large header len */ + skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + /* Copy data into SKB - if it's small, we can simply copy it and + * re-use the already allcoated & mapped memory. 
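
For context, this is the kind of program that ends up in rxq->xdp_prog and drives the XDP_PASS/XDP_TX/XDP_DROP cases handled above: a minimal XDP program that drops runt frames and reflects everything else back out the same port. It is a generic example (built with clang -target bpf; SEC() is defined locally instead of pulling in libbpf headers), not something shipped with this patch.

#include <linux/bpf.h>
#include <linux/if_ether.h>

#define SEC(name) __attribute__((section(name), used))

SEC("xdp")
int xdp_reflect(struct xdp_md *ctx)
{
    void *data = (void *)(long)ctx->data;
    void *data_end = (void *)(long)ctx->data_end;

    if (data + ETH_HLEN > data_end)
        return XDP_DROP;                /* runt frame */

    return XDP_TX;                      /* bounced via qede_xdp_xmit() */
}

char _license[] SEC("license") = "GPL";
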
+ */ + if (len + pad <= edev->rx_copybreak) { + memcpy(skb_put(skb, len), + page_address(page) + pad + offset, len); + qede_reuse_page(rxq, bd); + goto out; + } + + frag = &skb_shinfo(skb)->frags[0]; + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + page, pad + offset, len, rxq->rx_buf_seg_size); + + va = skb_frag_address(frag); + pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE); + + /* Align the pull_len to optimize memcpy */ + memcpy(skb->data, va, ALIGN(pull_len, sizeof(long))); + + /* Correct the skb & frag sizes offset after the pull */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; + + if (unlikely(qede_realloc_rx_buffer(rxq, bd))) { + /* Incr page ref count to reuse on allocation failure so + * that it doesn't get freed while freeing SKB [as its + * already mapped there]. + */ + page_ref_inc(page); + dev_kfree_skb_any(skb); + return NULL; + } + +out: + /* We've consumed the first BD and prepared an SKB */ + qede_rx_bd_ring_consume(rxq); + return skb; +} + +static int qede_rx_build_jumbo(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sk_buff *skb, + struct eth_fast_path_rx_reg_cqe *cqe, + u16 first_bd_len) +{ + u16 pkt_len = le16_to_cpu(cqe->pkt_len); + struct sw_rx_data *bd; + u16 bd_cons_idx; + u8 num_frags; + + pkt_len -= first_bd_len; + + /* We've already used one BD for the SKB. Now take care of the rest */ + for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { + u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : + pkt_len; + + if (unlikely(!cur_size)) { + DP_ERR(edev, + "Still got %d BDs for mapping jumbo, but length became 0\n", + num_frags); + goto out; } - /* Copy data into SKB */ - if (len + pad <= edev->rx_copybreak) { - memcpy(skb_put(skb, len), - page_address(data) + pad + - sw_rx_data->page_offset, len); - qede_reuse_page(edev, rxq, sw_rx_data); + /* We need a replacement buffer for each BD */ + if (unlikely(qede_alloc_rx_buffer(rxq))) + goto out; + + /* Now that we've allocated the replacement buffer, + * we can safely consume the next BD and map it to the SKB. 
+ */ + bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + bd = &rxq->sw_rx_ring[bd_cons_idx]; + qede_rx_bd_ring_consume(rxq); + + dma_unmap_page(rxq->dev, bd->mapping, + PAGE_SIZE, DMA_FROM_DEVICE); + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, + bd->data, 0, cur_size); + + skb->truesize += PAGE_SIZE; + skb->data_len += cur_size; + skb->len += cur_size; + pkt_len -= cur_size; + } + + if (unlikely(pkt_len)) + DP_ERR(edev, + "Mapped all BDs of jumbo, but still have %d bytes\n", + pkt_len); + +out: + return num_frags; +} + +static int qede_rx_process_tpa_cqe(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_rx_queue *rxq, + union eth_rx_cqe *cqe, + enum eth_rx_cqe_type type) +{ + switch (type) { + case ETH_RX_CQE_TYPE_TPA_START: + qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); + return 0; + case ETH_RX_CQE_TYPE_TPA_CONT: + qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); + return 0; + case ETH_RX_CQE_TYPE_TPA_END: + qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); + return 1; + default: + return 0; + } +} + +static int qede_rx_process_cqe(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_rx_queue *rxq) +{ + struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); + struct eth_fast_path_rx_reg_cqe *fp_cqe; + u16 len, pad, bd_cons_idx, parse_flag; + enum eth_rx_cqe_type cqe_type; + union eth_rx_cqe *cqe; + struct sw_rx_data *bd; + struct sk_buff *skb; + __le16 flags; + u8 csum_flag; + + /* Get the CQE from the completion ring */ + cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); + cqe_type = cqe->fast_path_regular.type; + + /* Process an unlikely slowpath event */ + if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { + struct eth_slow_path_rx_cqe *sp_cqe; + + sp_cqe = (struct eth_slow_path_rx_cqe *)cqe; + edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe); + return 0; + } + + /* Handle TPA cqes */ + if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) + return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type); + + /* Get the data from the SW ring; Consume it only after it's evident + * we wouldn't recycle it. + */ + bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + bd = &rxq->sw_rx_ring[bd_cons_idx]; + + fp_cqe = &cqe->fast_path_regular; + len = le16_to_cpu(fp_cqe->len_on_first_bd); + pad = fp_cqe->placement_offset; + + /* Run eBPF program if one is attached */ + if (xdp_prog) + if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe)) + return 1; + + /* If this is an error packet then drop it */ + flags = cqe->fast_path_regular.pars_flags.flags; + parse_flag = le16_to_cpu(flags); + + csum_flag = qede_check_csum(parse_flag); + if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { + if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) { + rxq->rx_ip_frags++; } else { - struct skb_frag_struct *frag; - unsigned int pull_len; - unsigned char *va; - - frag = &skb_shinfo(skb)->frags[0]; - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data, - pad + sw_rx_data->page_offset, - len, rxq->rx_buf_seg_size); - - va = skb_frag_address(frag); - pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE); - - /* Align the pull_len to optimize memcpy */ - memcpy(skb->data, va, ALIGN(pull_len, sizeof(long))); - - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; - - if (unlikely(qede_realloc_rx_buffer(edev, rxq, - sw_rx_data))) { - DP_ERR(edev, "Failed to allocate rx buffer\n"); - /* Incr page ref count to reuse on allocation - * failure so that it doesn't get freed while - * freeing SKB. 
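
The arithmetic behind qede_rx_build_jumbo() above, in isolation: after the first BD, the remaining bytes are spread over additional BDs of at most rx_buf_size each. The packet and buffer sizes below are made-up examples.

#include <stdio.h>

int main(void)
{
    unsigned int pkt_len = 9000, first_bd_len = 2048, rx_buf_size = 2048;
    unsigned int left = pkt_len - first_bd_len, frags = 0;

    while (left) {
        unsigned int cur = left > rx_buf_size ? rx_buf_size : left;

        frags++;                        /* one replacement buffer + one page frag */
        left -= cur;
    }

    printf("first BD + %u extra BDs\n", frags);     /* 4 extra BDs */
    return 0;
}
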
- */ - - page_ref_inc(sw_rx_data->data); - rxq->rx_alloc_errors++; - qede_recycle_rx_bd_ring(rxq, edev, - fp_cqe->bd_num); - dev_kfree_skb_any(skb); - goto next_cqe; - } + DP_NOTICE(edev, + "CQE has error, flags = %x, dropping incoming packet\n", + parse_flag); + rxq->rx_hw_errors++; + qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); + return 0; } + } - qede_rx_bd_ring_consume(rxq); + /* Basic validation passed; Need to prepare an SKB. This would also + * guarantee to finally consume the first BD upon success. + */ + skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad); + if (!skb) { + rxq->rx_alloc_errors++; + qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); + return 0; + } - if (fp_cqe->bd_num != 1) { - u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len); - u8 num_frags; - - pkt_len -= len; - - for (num_frags = fp_cqe->bd_num - 1; num_frags > 0; - num_frags--) { - u16 cur_size = pkt_len > rxq->rx_buf_size ? - rxq->rx_buf_size : pkt_len; - if (unlikely(!cur_size)) { - DP_ERR(edev, - "Still got %d BDs for mapping jumbo, but length became 0\n", - num_frags); - qede_recycle_rx_bd_ring(rxq, edev, - num_frags); - dev_kfree_skb_any(skb); - goto next_cqe; - } - - if (unlikely(qede_alloc_rx_buffer(edev, rxq))) { - qede_recycle_rx_bd_ring(rxq, edev, - num_frags); - dev_kfree_skb_any(skb); - goto next_cqe; - } - - sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; - sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; - qede_rx_bd_ring_consume(rxq); - - dma_unmap_page(&edev->pdev->dev, - sw_rx_data->mapping, - PAGE_SIZE, DMA_FROM_DEVICE); - - skb_fill_page_desc(skb, - skb_shinfo(skb)->nr_frags++, - sw_rx_data->data, 0, - cur_size); - - skb->truesize += PAGE_SIZE; - skb->data_len += cur_size; - skb->len += cur_size; - pkt_len -= cur_size; - } + /* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed + * by a single cqe. + */ + if (fp_cqe->bd_num > 1) { + u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb, + fp_cqe, len); - if (unlikely(pkt_len)) - DP_ERR(edev, - "Mapped all BDs of jumbo, but still have %d bytes\n", - pkt_len); + if (unlikely(unmapped_frags > 0)) { + qede_recycle_rx_bd_ring(rxq, unmapped_frags); + dev_kfree_skb_any(skb); + return 0; } + } - skb->protocol = eth_type_trans(skb, edev->ndev); + /* The SKB contains all the data. Now prepare meta-magic */ + skb->protocol = eth_type_trans(skb, edev->ndev); + qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash); + qede_set_skb_csum(skb, csum_flag); + skb_record_rx_queue(skb, rxq->rxq_id); - rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields, - fp_cqe->rss_hash, &rxhash_type); + /* SKB is prepared - pass it to stack */ + qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); - skb_set_hash(skb, rx_hash, rxhash_type); + return 1; +} - qede_set_skb_csum(skb, csum_flag); +static int qede_rx_int(struct qede_fastpath *fp, int budget) +{ + struct qede_rx_queue *rxq = fp->rxq; + struct qede_dev *edev = fp->edev; + u16 hw_comp_cons, sw_comp_cons; + int work_done = 0; - skb_record_rx_queue(skb, fp->rxq->rxq_id); + hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); - qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); -next_rx_only: - rx_pkt++; + /* Memory barrier to prevent the CPU from doing speculative reads of CQE + * / BD in the while-loop before reading hw_comp_cons. If the CQE is + * read before it is written by FW, then FW writes CQE and SB, and then + * the CPU reads the hw_comp_cons, it will use an old CQE. 
+ */ + rmb(); -next_cqe: /* don't consume bd rx buffer */ + /* Loop to complete all indicated BDs */ + while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) { + qede_rx_process_cqe(edev, fp, rxq); qed_chain_recycle_consumed(&rxq->rx_comp_ring); sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); - /* CR TPA - revisit how to handle budget in TPA perhaps - * increase on "end" - */ - if (rx_pkt == budget) - break; - } /* repeat while sw_comp_cons != hw_comp_cons... */ + work_done++; + } /* Update producers */ qede_update_rx_prod(edev, rxq); - rxq->rcv_pkts += rx_pkt; + return work_done; +} + +static bool qede_poll_is_more_work(struct qede_fastpath *fp) +{ + qed_sb_update_sb_idx(fp->sb_info); + + /* *_has_*_work() reads the status block, thus we need to ensure that + * status block indices have been actually read (qed_sb_update_sb_idx) + * prior to this check (*_has_*_work) so that we won't write the + * "newer" value of the status block to HW (if there was a DMA right + * after qede_has_rx_work and if there is no rmb, the memory reading + * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb). + * In this case there will never be another interrupt until there is + * another update of the status block, while there is still unhandled + * work. + */ + rmb(); + + if (likely(fp->type & QEDE_FASTPATH_RX)) + if (qede_has_rx_work(fp->rxq)) + return true; - return rx_pkt; + if (fp->type & QEDE_FASTPATH_XDP) + if (qede_txq_has_work(fp->xdp_tx)) + return true; + + if (likely(fp->type & QEDE_FASTPATH_TX)) + if (qede_txq_has_work(fp->txq)) + return true; + + return false; } static int qede_poll(struct napi_struct *napi, int budget) @@ -1620,48 +1849,35 @@ static int qede_poll(struct napi_struct *napi, int budget) napi); struct qede_dev *edev = fp->edev; int rx_work_done = 0; - u8 tc; - for (tc = 0; tc < edev->num_tc; tc++) - if (likely(fp->type & QEDE_FASTPATH_TX) && - qede_txq_has_work(&fp->txqs[tc])) - qede_tx_int(edev, &fp->txqs[tc]); + if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq)) + qede_tx_int(edev, fp->txq); + + if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx)) + qede_xdp_tx_int(edev, fp->xdp_tx); rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && qede_has_rx_work(fp->rxq)) ? qede_rx_int(fp, budget) : 0; if (rx_work_done < budget) { - qed_sb_update_sb_idx(fp->sb_info); - /* *_has_*_work() reads the status block, - * thus we need to ensure that status block indices - * have been actually read (qed_sb_update_sb_idx) - * prior to this check (*_has_*_work) so that - * we won't write the "newer" value of the status block - * to HW (if there was a DMA right after - * qede_has_rx_work and if there is no rmb, the memory - * reading (qed_sb_update_sb_idx) may be postponed - * to right before *_ack_sb). In this case there - * will never be another interrupt until there is - * another update of the status block, while there - * is still unhandled work. 
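
A userspace-only toy of the budget contract the reworked qede_rx_int()/qede_poll() follow: handle at most 'budget' completions per poll, and only a result below the budget lets the core complete NAPI and re-arm interrupts. The TX/XDP re-check done by qede_poll_is_more_work() is collapsed into a single pending counter here.

#include <stdbool.h>
#include <stdio.h>

static int pending = 300;               /* completions waiting on the "ring" */

static bool has_more_work(void)
{
    return pending > 0;
}

static int fake_poll(int budget)
{
    int done = 0;

    while (has_more_work() && done < budget) {
        pending--;                      /* qede_rx_process_cqe() stand-in */
        done++;
    }

    return done;                        /* == budget keeps us in polled mode */
}

int main(void)
{
    int work;

    do {
        work = fake_poll(64);
        printf("poll handled %d\n", work);
    } while (work == 64);               /* prints 64 four times, then 44 */

    return 0;
}
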
- */ - rmb(); - - /* Fall out from the NAPI loop if needed */ - if (!((likely(fp->type & QEDE_FASTPATH_RX) && - qede_has_rx_work(fp->rxq)) || - (likely(fp->type & QEDE_FASTPATH_TX) && - qede_has_tx_work(fp)))) { + if (!qede_poll_is_more_work(fp)) { napi_complete(napi); /* Update and reenable interrupts */ - qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, - 1 /*update*/); + qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); } else { rx_work_done = budget; } } + if (fp->xdp_xmit) { + u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); + + fp->xdp_xmit = 0; + fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); + qede_update_tx_producer(fp->xdp_tx); + } + return rx_work_done; } @@ -1912,7 +2128,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct qede_dev *edev = netdev_priv(dev); struct qede_vlan *vlan, *tmp; - int rc; + int rc = 0; DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid); @@ -1936,6 +2152,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) } /* If interface is down, cache this VLAN ID and return */ + __qede_lock(edev); if (edev->state != QEDE_STATE_OPEN) { DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Interface is down, VLAN %d will be configured when interface is up\n", @@ -1943,8 +2160,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) if (vid != 0) edev->non_configured_vlans++; list_add(&vlan->list, &edev->vlan_list); - - return 0; + goto out; } /* Check for the filter limit. @@ -1960,7 +2176,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) DP_ERR(edev, "Failed to configure VLAN %d\n", vlan->vid); kfree(vlan); - return -EINVAL; + goto out; } vlan->configured = true; @@ -1977,7 +2193,9 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) list_add(&vlan->list, &edev->vlan_list); - return 0; +out: + __qede_unlock(edev); + return rc; } static void qede_del_vlan_from_list(struct qede_dev *edev, @@ -2054,11 +2272,12 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct qede_dev *edev = netdev_priv(dev); struct qede_vlan *vlan = NULL; - int rc; + int rc = 0; DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid); /* Find whether entry exists */ + __qede_lock(edev); list_for_each_entry(vlan, &edev->vlan_list, list) if (vlan->vid == vid) break; @@ -2066,7 +2285,7 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) if (!vlan || (vlan->vid != vid)) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "Vlan isn't configured\n"); - return 0; + goto out; } if (edev->state != QEDE_STATE_OPEN) { @@ -2076,7 +2295,7 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Interface is down, removing VLAN from list only\n"); qede_del_vlan_from_list(edev, vlan); - return 0; + goto out; } /* Remove vlan */ @@ -2085,7 +2304,7 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) vid); if (rc) { DP_ERR(edev, "Failed to remove VLAN %d\n", vid); - return -EINVAL; + goto out; } } @@ -2096,6 +2315,8 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) */ rc = qede_configure_vlan_filters(edev); +out: + __qede_unlock(edev); return rc; } @@ -2125,7 +2346,13 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) edev->accept_any_vlan = false; } -static int qede_set_features(struct net_device *dev, netdev_features_t features) +static 
void qede_set_features_reload(struct qede_dev *edev, + struct qede_reload_args *args) +{ + edev->ndev->features = args->u.features; +} + +int qede_set_features(struct net_device *dev, netdev_features_t features) { struct qede_dev *edev = netdev_priv(dev); netdev_features_t changes = features ^ dev->features; @@ -2139,9 +2366,23 @@ static int qede_set_features(struct net_device *dev, netdev_features_t features) need_reload = edev->gro_disable; } - if (need_reload && netif_running(edev->ndev)) { - dev->features = features; - qede_reload(edev, NULL, NULL); + if (need_reload) { + struct qede_reload_args args; + + args.u.features = features; + args.func = &qede_set_features_reload; + + /* Make sure that we definitely need to reload. + * In case of an eBPF attached program, there will be no FW + * aggregations, so no need to actually reload. + */ + __qede_lock(edev); + if (edev->xdp_prog) + args.func(edev, &args); + else + qede_reload(edev, &args, true); + __qede_unlock(edev); + return 1; } @@ -2218,6 +2459,82 @@ static void qede_udp_tunnel_del(struct net_device *dev, schedule_delayed_work(&edev->sp_task, 0); } +/* 8B udp header + 8B base tunnel header + 32B option length */ +#define QEDE_MAX_TUN_HDR_LEN 48 + +static netdev_features_t qede_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (skb->encapsulation) { + u8 l4_proto = 0; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + return features; + } + + /* Disable offloads for geneve tunnels, as HW can't parse + * the geneve header which has option length greater than 32B. + */ + if ((l4_proto == IPPROTO_UDP) && + ((skb_inner_mac_header(skb) - + skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN)) + return features & ~(NETIF_F_CSUM_MASK | + NETIF_F_GSO_MASK); + } + + return features; +} + +static void qede_xdp_reload_func(struct qede_dev *edev, + struct qede_reload_args *args) +{ + struct bpf_prog *old; + + old = xchg(&edev->xdp_prog, args->u.new_prog); + if (old) + bpf_prog_put(old); +} + +static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog) +{ + struct qede_reload_args args; + + if (prog && prog->xdp_adjust_head) { + DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n"); + return -EOPNOTSUPP; + } + + /* If we're called, there was already a bpf reference increment */ + args.func = &qede_xdp_reload_func; + args.u.new_prog = prog; + qede_reload(edev, &args, false); + + return 0; +} + +static int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp) +{ + struct qede_dev *edev = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return qede_xdp_set(edev, xdp->prog); + case XDP_QUERY_PROG: + xdp->prog_attached = !!edev->xdp_prog; + return 0; + default: + return -EINVAL; + } +} + static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, @@ -2242,6 +2559,8 @@ static const struct net_device_ops qede_netdev_ops = { #endif .ndo_udp_tunnel_add = qede_udp_tunnel_add, .ndo_udp_tunnel_del = qede_udp_tunnel_del, + .ndo_features_check = qede_features_check, + .ndo_xdp = qede_xdp, }; /* ------------------------------------------------------------------------- @@ -2282,8 +2601,6 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, memset(&edev->stats, 0, sizeof(edev->stats)); memcpy(&edev->dev_info, info, sizeof(*info)); - edev->num_tc = edev->dev_info.num_tc; - 
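
The MTU, feature and XDP updates above all reuse one shape: put the new value plus an 'apply' callback into the reload args, quiesce the device, apply, restart. A stripped-down sketch of that pattern with simplified, made-up types follows (struct dev here is not the driver's qede_dev).

#include <stdio.h>

struct dev;

struct reload_args {
    void (*func)(struct dev *d, struct reload_args *a);
    union {
        int mtu;
        unsigned long features;
    } u;
};

struct dev { int mtu; unsigned long features; int running; };

static void dev_stop(struct dev *d)  { d->running = 0; }
static void dev_start(struct dev *d) { d->running = 1; }

/* Apply the requested change while the datapath is quiesced, then restart */
static void reload(struct dev *d, struct reload_args *args)
{
    dev_stop(d);
    if (args && args->func)
        args->func(d, args);
    dev_start(d);
}

static void update_mtu(struct dev *d, struct reload_args *a)
{
    d->mtu = a->u.mtu;
}

int main(void)
{
    struct dev d = { .mtu = 1500, .running = 1 };
    struct reload_args args = { .func = update_mtu, .u.mtu = 9000 };

    reload(&d, &args);
    printf("mtu=%d running=%d\n", d.mtu, d.running);    /* mtu=9000 running=1 */
    return 0;
}
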
INIT_LIST_HEAD(&edev->vlan_list); return edev; @@ -2308,6 +2625,8 @@ static void qede_init_ndev(struct qede_dev *edev) qede_set_ethtool_ops(ndev); + ndev->priv_flags |= IFF_UNICAST_FLT; + /* user-changeble features */ hw_features = NETIF_F_GRO | NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -2315,11 +2634,14 @@ static void qede_init_ndev(struct qede_dev *edev) /* Encap features*/ hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_TSO_ECN; + NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE_CSUM; ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM; + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE_CSUM; ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_HIGHDMA; @@ -2329,8 +2651,14 @@ static void qede_init_ndev(struct qede_dev *edev) ndev->hw_features = hw_features; + /* MTU range: 46 - 9600 */ + ndev->min_mtu = ETH_ZLEN - ETH_HLEN; + ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE; + /* Set network device HW mac */ ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac); + + ndev->mtu = edev->dev_info.common.mtu; } /* This function converts from 32b param to two params of level and module @@ -2370,7 +2698,8 @@ static void qede_free_fp_array(struct qede_dev *edev) kfree(fp->sb_info); kfree(fp->rxq); - kfree(fp->txqs); + kfree(fp->xdp_tx); + kfree(fp->txq); } kfree(edev->fp_array); } @@ -2403,7 +2732,7 @@ static int qede_alloc_fp_array(struct qede_dev *edev) for_each_queue(i) { fp = &edev->fp_array[i]; - fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL); + fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL); if (!fp->sb_info) { DP_NOTICE(edev, "sb info struct allocation failed\n"); goto err; @@ -2420,21 +2749,22 @@ static int qede_alloc_fp_array(struct qede_dev *edev) } if (fp->type & QEDE_FASTPATH_TX) { - fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), - GFP_KERNEL); - if (!fp->txqs) { - DP_NOTICE(edev, - "TXQ array allocation failed\n"); + fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL); + if (!fp->txq) goto err; - } } if (fp->type & QEDE_FASTPATH_RX) { - fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL); - if (!fp->rxq) { - DP_NOTICE(edev, - "RXQ struct allocation failed\n"); + fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL); + if (!fp->rxq) goto err; + + if (edev->xdp_prog) { + fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx), + GFP_KERNEL); + if (!fp->xdp_tx) + goto err; + fp->type |= QEDE_FASTPATH_XDP; } } } @@ -2451,12 +2781,11 @@ static void qede_sp_task(struct work_struct *work) sp_task.work); struct qed_dev *cdev = edev->cdev; - mutex_lock(&edev->qede_lock); + __qede_lock(edev); - if (edev->state == QEDE_STATE_OPEN) { - if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags)) + if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags)) + if (edev->state == QEDE_STATE_OPEN) qede_config_rx_mode(edev->ndev); - } if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) { struct qed_tunn_params tunn_params; @@ -2476,16 +2805,16 @@ static void qede_sp_task(struct work_struct *work) qed_ops->tunn_config(cdev, &tunn_params); } - mutex_unlock(&edev->qede_lock); + __qede_unlock(edev); } static void qede_update_pf_params(struct qed_dev *cdev) { struct qed_pf_params pf_params; - /* 64 rx + 64 tx */ + /* 64 rx + 64 tx + 64 XDP */ memset(&pf_params, 0, sizeof(struct qed_pf_params)); - pf_params.eth_pf_params.num_cons = 128; + 
pf_params.eth_pf_params.num_cons = 192; qed_ops->common->update_pf_params(cdev, &pf_params); } @@ -2634,10 +2963,16 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) pci_set_drvdata(pdev, NULL); + /* Release edev's reference to XDP's bpf if such exist */ + if (edev->xdp_prog) + bpf_prog_put(edev->xdp_prog); + free_netdev(ndev); /* Use global ops since we've freed edev */ qed_ops->common->slowpath_stop(cdev); + if (system_state == SYSTEM_POWER_OFF) + return; qed_ops->common->remove(cdev); dev_info(&pdev->dev, "Ending qede_remove successfully\n"); @@ -2648,6 +2983,11 @@ static void qede_remove(struct pci_dev *pdev) __qede_remove(pdev, QEDE_REMOVE_NORMAL); } +static void qede_shutdown(struct pci_dev *pdev) +{ + __qede_remove(pdev, QEDE_REMOVE_NORMAL); +} + /* ------------------------------------------------------------------------- * START OF LOAD / UNLOAD * ------------------------------------------------------------------------- @@ -2731,7 +3071,7 @@ static void qede_free_rx_buffers(struct qede_dev *edev, data = rx_buf->data; dma_unmap_page(&edev->pdev->dev, - rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE); + rx_buf->mapping, PAGE_SIZE, rxq->data_direction); rx_buf->data = NULL; __free_page(data); @@ -2747,7 +3087,7 @@ static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; - struct sw_rx_data *replace_buf = &tpa_info->replace_buf; + struct sw_rx_data *replace_buf = &tpa_info->buffer; if (replace_buf->data) { dma_unmap_page(&edev->pdev->dev, @@ -2773,52 +3113,15 @@ static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring); } -static int qede_alloc_rx_buffer(struct qede_dev *edev, - struct qede_rx_queue *rxq) -{ - struct sw_rx_data *sw_rx_data; - struct eth_rx_bd *rx_bd; - dma_addr_t mapping; - struct page *data; - - data = alloc_pages(GFP_ATOMIC, 0); - if (unlikely(!data)) { - DP_NOTICE(edev, "Failed to allocate Rx data [page]\n"); - return -ENOMEM; - } - - /* Map the entire page as it would be used - * for multiple RX buffer segment size mapping. 
- */ - mapping = dma_map_page(&edev->pdev->dev, data, 0, - PAGE_SIZE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { - __free_page(data); - DP_NOTICE(edev, "Failed to map Rx buffer\n"); - return -ENOMEM; - } - - sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; - sw_rx_data->page_offset = 0; - sw_rx_data->data = data; - sw_rx_data->mapping = mapping; - - /* Advance PROD and get BD pointer */ - rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); - WARN_ON(!rx_bd); - rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); - rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping)); - - rxq->sw_rx_prod++; - - return 0; -} - static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) { dma_addr_t mapping; int i; + /* Don't perform FW aggregations in case of XDP */ + if (edev->xdp_prog) + edev->gro_disable = 1; + if (edev->gro_disable) return 0; @@ -2829,7 +3132,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; - struct sw_rx_data *replace_buf = &tpa_info->replace_buf; + struct sw_rx_data *replace_buf = &tpa_info->buffer; replace_buf->data = alloc_pages(GFP_ATOMIC, 0); if (unlikely(!replace_buf->data)) { @@ -2847,10 +3150,9 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) } replace_buf->mapping = mapping; - tpa_info->replace_buf.page_offset = 0; - - tpa_info->replace_buf_mapping = mapping; - tpa_info->agg_state = QEDE_AGG_STATE_NONE; + tpa_info->buffer.page_offset = 0; + tpa_info->buffer_mapping = mapping; + tpa_info->state = QEDE_AGG_STATE_NONE; } return 0; @@ -2872,8 +3174,13 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) if (rxq->rx_buf_size > PAGE_SIZE) rxq->rx_buf_size = PAGE_SIZE; - /* Segment size to spilt a page in multiple equal parts */ - rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size); + /* Segment size to spilt a page in multiple equal parts, + * unless XDP is used in which case we'd use the entire page. 
+ */ + if (!edev->xdp_prog) + rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size); + else + rxq->rx_buf_seg_size = PAGE_SIZE; /* Allocate the parallel driver ring for Rx buffers */ size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE; @@ -2909,7 +3216,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) /* Allocate buffers for the Rx ring */ for (i = 0; i < rxq->num_rx_buffers; i++) { - rc = qede_alloc_rx_buffer(edev, rxq); + rc = qede_alloc_rx_buffer(rxq); if (rc) { DP_ERR(edev, "Rx buffers allocation failed at index %d\n", i); @@ -2925,7 +3232,10 @@ err: static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { /* Free the parallel SW ring */ - kfree(txq->sw_tx_ring); + if (txq->is_xdp) + kfree(txq->sw_tx_ring.pages); + else + kfree(txq->sw_tx_ring.skbs); /* Free the real RQ ring used by FW */ edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl); @@ -2934,17 +3244,22 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) /* This function allocates all memory needed per Tx queue */ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { - int size, rc; union eth_tx_bd_types *p_virt; + int size, rc; txq->num_tx_buffers = edev->q_num_tx_buffers; /* Allocate the parallel driver ring for Tx buffers */ - size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE; - txq->sw_tx_ring = kzalloc(size, GFP_KERNEL); - if (!txq->sw_tx_ring) { - DP_NOTICE(edev, "Tx buffers ring allocation failed\n"); - goto err; + if (txq->is_xdp) { + size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE; + txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL); + if (!txq->sw_tx_ring.pages) + goto err; + } else { + size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE; + txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL); + if (!txq->sw_tx_ring.skbs) + goto err; } rc = edev->ops->common->chain_alloc(edev->cdev, @@ -2966,16 +3281,13 @@ err: /* This function frees all memory of a single fp */ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) { - int tc; - qede_free_mem_sb(edev, fp->sb_info); if (fp->type & QEDE_FASTPATH_RX) qede_free_mem_rxq(edev, fp->rxq); if (fp->type & QEDE_FASTPATH_TX) - for (tc = 0; tc < edev->num_tc; tc++) - qede_free_mem_txq(edev, &fp->txqs[tc]); + qede_free_mem_txq(edev, fp->txq); } /* This function allocates all memory needed for a single fp (i.e. 
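
The sw_tx_ring union used in qede_alloc_mem_txq()/qede_free_mem_txq() above stores only what each queue type needs: page pointers for XDP forwarding queues, skb-plus-flags entries for regular Tx queues. Roughly, with simplified types that are not the driver's:

#include <stdlib.h>

struct sw_tx_bd { void *skb; unsigned char flags; };

struct tx_queue {
    int is_xdp;
    union {
        struct sw_tx_bd *skbs;          /* regular L2 Tx */
        void **pages;                   /* XDP Tx: only the backing page */
    } sw_tx_ring;
};

static int alloc_sw_ring(struct tx_queue *txq, size_t ring_size)
{
    void *p;

    if (txq->is_xdp)
        p = txq->sw_tx_ring.pages =
            calloc(ring_size, sizeof(*txq->sw_tx_ring.pages));
    else
        p = txq->sw_tx_ring.skbs =
            calloc(ring_size, sizeof(*txq->sw_tx_ring.skbs));

    return p ? 0 : -1;
}

int main(void)
{
    struct tx_queue txq = { .is_xdp = 1 };

    return alloc_sw_ring(&txq, 8192);
}
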
an entity @@ -2983,28 +3295,31 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) */ static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) { - int rc, tc; + int rc = 0; rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id); if (rc) - goto err; + goto out; if (fp->type & QEDE_FASTPATH_RX) { rc = qede_alloc_mem_rxq(edev, fp->rxq); if (rc) - goto err; + goto out; + } + + if (fp->type & QEDE_FASTPATH_XDP) { + rc = qede_alloc_mem_txq(edev, fp->xdp_tx); + if (rc) + goto out; } if (fp->type & QEDE_FASTPATH_TX) { - for (tc = 0; tc < edev->num_tc; tc++) { - rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]); - if (rc) - goto err; - } + rc = qede_alloc_mem_txq(edev, fp->txq); + if (rc) + goto out; } - return 0; -err: +out: return rc; } @@ -3043,7 +3358,7 @@ static int qede_alloc_mem_load(struct qede_dev *edev) /* This function inits fp content and resets the SB, RXQ and TXQ structures */ static void qede_init_fp(struct qede_dev *edev) { - int queue_id, rxq_index = 0, txq_index = 0, tc; + int queue_id, rxq_index = 0, txq_index = 0; struct qede_fastpath *fp; for_each_queue(queue_id) { @@ -3052,25 +3367,28 @@ static void qede_init_fp(struct qede_dev *edev) fp->edev = edev; fp->id = queue_id; - memset((void *)&fp->napi, 0, sizeof(fp->napi)); - - memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info)); + if (fp->type & QEDE_FASTPATH_XDP) { + fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev, + rxq_index); + fp->xdp_tx->is_xdp = 1; + } if (fp->type & QEDE_FASTPATH_RX) { - memset((void *)fp->rxq, 0, sizeof(*fp->rxq)); fp->rxq->rxq_id = rxq_index++; + + /* Determine how to map buffers for this queue */ + if (fp->type & QEDE_FASTPATH_XDP) + fp->rxq->data_direction = DMA_BIDIRECTIONAL; + else + fp->rxq->data_direction = DMA_FROM_DEVICE; + fp->rxq->dev = &edev->pdev->dev; } if (fp->type & QEDE_FASTPATH_TX) { - memset((void *)fp->txqs, 0, - (edev->num_tc * sizeof(*fp->txqs))); - for (tc = 0; tc < edev->num_tc; tc++) { - fp->txqs[tc].index = txq_index + - tc * QEDE_TSS_COUNT(edev); - if (edev->dev_info.is_legacy) - fp->txqs[tc].is_legacy = true; - } - txq_index++; + fp->txq->index = txq_index++; + if (edev->dev_info.is_legacy) + fp->txq->is_legacy = 1; + fp->txq->dev = &edev->pdev->dev; } snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", @@ -3238,11 +3556,18 @@ static int qede_drain_txq(struct qede_dev *edev, return 0; } +static int qede_stop_txq(struct qede_dev *edev, + struct qede_tx_queue *txq, int rss_id) +{ + return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle); +} + static int qede_stop_queues(struct qede_dev *edev) { struct qed_update_vport_params vport_update_params; struct qed_dev *cdev = edev->cdev; - int rc, tc, i; + struct qede_fastpath *fp; + int rc, i; /* Disable the vport */ memset(&vport_update_params, 0, sizeof(vport_update_params)); @@ -3259,53 +3584,49 @@ static int qede_stop_queues(struct qede_dev *edev) /* Flush Tx queues. 
If needed, request drain from MCP */ for_each_queue(i) { - struct qede_fastpath *fp = &edev->fp_array[i]; + fp = &edev->fp_array[i]; if (fp->type & QEDE_FASTPATH_TX) { - for (tc = 0; tc < edev->num_tc; tc++) { - struct qede_tx_queue *txq = &fp->txqs[tc]; + rc = qede_drain_txq(edev, fp->txq, true); + if (rc) + return rc; + } - rc = qede_drain_txq(edev, txq, true); - if (rc) - return rc; - } + if (fp->type & QEDE_FASTPATH_XDP) { + rc = qede_drain_txq(edev, fp->xdp_tx, true); + if (rc) + return rc; } } /* Stop all Queues in reverse order */ for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) { - struct qed_stop_rxq_params rx_params; + fp = &edev->fp_array[i]; /* Stop the Tx Queue(s) */ - if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { - for (tc = 0; tc < edev->num_tc; tc++) { - struct qed_stop_txq_params tx_params; - u8 val; - - tx_params.rss_id = i; - val = edev->fp_array[i].txqs[tc].index; - tx_params.tx_queue_id = val; - rc = edev->ops->q_tx_stop(cdev, &tx_params); - if (rc) { - DP_ERR(edev, "Failed to stop TXQ #%d\n", - tx_params.tx_queue_id); - return rc; - } - } + if (fp->type & QEDE_FASTPATH_TX) { + rc = qede_stop_txq(edev, fp->txq, i); + if (rc) + return rc; } /* Stop the Rx Queue */ - if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { - memset(&rx_params, 0, sizeof(rx_params)); - rx_params.rss_id = i; - rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id; - - rc = edev->ops->q_rx_stop(cdev, &rx_params); + if (fp->type & QEDE_FASTPATH_RX) { + rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle); if (rc) { DP_ERR(edev, "Failed to stop RXQ #%d\n", i); return rc; } } + + /* Stop the XDP forwarding queue */ + if (fp->type & QEDE_FASTPATH_XDP) { + rc = qede_stop_txq(edev, fp->xdp_tx, i); + if (rc) + return rc; + + bpf_prog_put(fp->rxq->xdp_prog); + } } /* Stop the vport */ @@ -3316,9 +3637,55 @@ static int qede_stop_queues(struct qede_dev *edev) return rc; } +static int qede_start_txq(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx) +{ + dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl); + u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl); + struct qed_queue_start_common_params params; + struct qed_txq_start_ret_params ret_params; + int rc; + + memset(¶ms, 0, sizeof(params)); + memset(&ret_params, 0, sizeof(ret_params)); + + /* Let the XDP queue share the queue-zone with one of the regular txq. + * We don't really care about its coalescing. 
+ */ + if (txq->is_xdp) + params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq); + else + params.queue_id = txq->index; + + params.sb = fp->sb_info->igu_sb_id; + params.sb_idx = sb_idx; + + rc = edev->ops->q_tx_start(edev->cdev, rss_id, ¶ms, phys_table, + page_cnt, &ret_params); + if (rc) { + DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc); + return rc; + } + + txq->doorbell_addr = ret_params.p_doorbell; + txq->handle = ret_params.p_handle; + + /* Determine the FW consumer address associated */ + txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx]; + + /* Prepare the doorbell parameters */ + SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); + SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL, + DQ_XCM_ETH_TX_BD_PROD_CMD); + txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; + + return rc; +} + static int qede_start_queues(struct qede_dev *edev, bool clear_stats) { - int rc, tc, i; int vlan_removal_en = 1; struct qed_dev *cdev = edev->cdev; struct qed_update_vport_params vport_update_params; @@ -3326,6 +3693,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) struct qed_dev_info *qed_info = &edev->dev_info.common; struct qed_start_vport_params start = {0}; bool reset_rss_indir = false; + int rc, i; if (!edev->num_queues) { DP_ERR(edev, @@ -3357,11 +3725,12 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) u32 page_cnt; if (fp->type & QEDE_FASTPATH_RX) { + struct qed_rxq_start_ret_params ret_params; struct qede_rx_queue *rxq = fp->rxq; __le16 *val; + memset(&ret_params, 0, sizeof(ret_params)); memset(&q_params, 0, sizeof(q_params)); - q_params.rss_id = i; q_params.queue_id = rxq->rxq_id; q_params.vport_id = 0; q_params.sb = fp->sb_info->igu_sb_id; @@ -3371,60 +3740,44 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) qed_chain_get_pbl_phys(&rxq->rx_comp_ring); page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring); - rc = edev->ops->q_rx_start(cdev, &q_params, + rc = edev->ops->q_rx_start(cdev, i, &q_params, rxq->rx_buf_size, rxq->rx_bd_ring.p_phys_addr, p_phys_table, - page_cnt, - &rxq->hw_rxq_prod_addr); + page_cnt, &ret_params); if (rc) { DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc); return rc; } + /* Use the return parameters */ + rxq->hw_rxq_prod_addr = ret_params.p_prod; + rxq->handle = ret_params.p_handle; + val = &fp->sb_info->sb_virt->pi_array[RX_PI]; rxq->hw_cons_ptr = val; qede_update_rx_prod(edev, rxq); } - if (!(fp->type & QEDE_FASTPATH_TX)) - continue; - - for (tc = 0; tc < edev->num_tc; tc++) { - struct qede_tx_queue *txq = &fp->txqs[tc]; - - p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl); - page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl); - - memset(&q_params, 0, sizeof(q_params)); - q_params.rss_id = i; - q_params.queue_id = txq->index; - q_params.vport_id = 0; - q_params.sb = fp->sb_info->igu_sb_id; - q_params.sb_idx = TX_PI(tc); + if (fp->type & QEDE_FASTPATH_XDP) { + rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI); + if (rc) + return rc; - rc = edev->ops->q_tx_start(cdev, &q_params, - p_phys_table, page_cnt, - &txq->doorbell_addr); - if (rc) { - DP_ERR(edev, "Start TXQ #%d failed %d\n", - txq->index, rc); + fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1); + if (IS_ERR(fp->rxq->xdp_prog)) { + rc = PTR_ERR(fp->rxq->xdp_prog); + fp->rxq->xdp_prog = NULL; return rc; } + } - txq->hw_cons_ptr = - &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; - SET_FIELD(txq->tx_db.data.params, - 
ETH_DB_DATA_DEST, DB_DEST_XCM); - SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, - DB_AGG_CMD_SET); - SET_FIELD(txq->tx_db.data.params, - ETH_DB_DATA_AGG_VAL_SEL, - DQ_XCM_ETH_TX_BD_PROD_CMD); - - txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; + if (fp->type & QEDE_FASTPATH_TX) { + rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0)); + if (rc) + return rc; } } @@ -3519,15 +3872,18 @@ enum qede_unload_mode { QEDE_UNLOAD_NORMAL, }; -static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode) +static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, + bool is_locked) { struct qed_link_params link_params; int rc; DP_INFO(edev, "Starting qede unload\n"); + if (!is_locked) + __qede_lock(edev); + qede_roce_dev_event_close(edev); - mutex_lock(&edev->qede_lock); edev->state = QEDE_STATE_CLOSED; /* Close OS Tx */ @@ -3559,7 +3915,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode) qede_free_fp_array(edev); out: - mutex_unlock(&edev->qede_lock); + if (!is_locked) + __qede_unlock(edev); DP_INFO(edev, "Ending qede unload\n"); } @@ -3568,7 +3925,8 @@ enum qede_load_mode { QEDE_LOAD_RELOAD, }; -static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) +static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, + bool is_locked) { struct qed_link_params link_params; struct qed_link_output link_output; @@ -3576,21 +3934,24 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) DP_INFO(edev, "Starting qede load\n"); + if (!is_locked) + __qede_lock(edev); + rc = qede_set_num_queues(edev); if (rc) - goto err0; + goto out; rc = qede_alloc_fp_array(edev); if (rc) - goto err0; + goto out; qede_init_fp(edev); rc = qede_alloc_mem_load(edev); if (rc) goto err1; - DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n", - QEDE_QUEUE_CNT(edev), edev->num_tc); + DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n", + QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev)); rc = qede_set_real_num_queues(edev); if (rc) @@ -3612,10 +3973,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) /* Add primary mac and set Rx filters */ ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr); - mutex_lock(&edev->qede_lock); - edev->state = QEDE_STATE_OPEN; - mutex_unlock(&edev->qede_lock); - /* Program un-configured VLANs */ qede_configure_vlan_filters(edev); @@ -3630,10 +3987,12 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) qede_roce_dev_event_open(edev); qede_link_update(edev, &link_output); + edev->state = QEDE_STATE_OPEN; + DP_INFO(edev, "Ending successfully qede load\n"); - return 0; + goto out; err4: qede_sync_free_irqs(edev); memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)); @@ -3647,26 +4006,40 @@ err1: edev->num_queues = 0; edev->fp_num_tx = 0; edev->fp_num_rx = 0; -err0: +out: + if (!is_locked) + __qede_unlock(edev); + return rc; } +/* 'func' should be able to run between unload and reload assuming interface + * is actually running, or afterwards in case it's currently DOWN. + */ void qede_reload(struct qede_dev *edev, - void (*func)(struct qede_dev *, union qede_reload_args *), - union qede_reload_args *args) + struct qede_reload_args *args, bool is_locked) { - qede_unload(edev, QEDE_UNLOAD_NORMAL); - /* Call function handler to update parameters - * needed for function load. 
- */ - if (func) - func(edev, args); + if (!is_locked) + __qede_lock(edev); - qede_load(edev, QEDE_LOAD_RELOAD); + /* Since qede_lock is held, internal state wouldn't change even + * if netdev state would start transitioning. Check whether current + * internal configuration indicates device is up, then reload. + */ + if (edev->state == QEDE_STATE_OPEN) { + qede_unload(edev, QEDE_UNLOAD_NORMAL, true); + if (args) + args->func(edev, args); + qede_load(edev, QEDE_LOAD_RELOAD, true); + + /* Since no one is going to do it for us, re-configure */ + qede_config_rx_mode(edev->ndev); + } else if (args) { + args->func(edev, args); + } - mutex_lock(&edev->qede_lock); - qede_config_rx_mode(edev->ndev); - mutex_unlock(&edev->qede_lock); + if (!is_locked) + __qede_unlock(edev); } /* called with rtnl_lock */ @@ -3679,13 +4052,14 @@ static int qede_open(struct net_device *ndev) edev->ops->common->set_power_state(edev->cdev, PCI_D0); - rc = qede_load(edev, QEDE_LOAD_NORMAL); - + rc = qede_load(edev, QEDE_LOAD_NORMAL, false); if (rc) return rc; udp_tunnel_get_rx_info(ndev); + edev->ops->common->update_drv_state(edev->cdev, true); + return 0; } @@ -3693,7 +4067,9 @@ static int qede_close(struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); - qede_unload(edev, QEDE_UNLOAD_NORMAL); + qede_unload(edev, QEDE_UNLOAD_NORMAL, false); + + edev->ops->common->update_drv_state(edev->cdev, false); return 0; } @@ -3755,6 +4131,8 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p) if (rc) return rc; + edev->ops->common->update_mac(edev->cdev, addr->sa_data); + /* Add MAC filter according to the new unicast HW MAC address */ ether_addr_copy(edev->primary_mac, ndev->dev_addr); return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, @@ -3821,15 +4199,8 @@ static void qede_set_rx_mode(struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); - DP_INFO(edev, "qede_set_rx_mode called\n"); - - if (edev->state != QEDE_STATE_OPEN) { - DP_INFO(edev, - "qede_set_rx_mode called while interface is down\n"); - } else { - set_bit(QEDE_SP_RX_MODE, &edev->sp_flags); - schedule_delayed_work(&edev->sp_task, 0); - } + set_bit(QEDE_SP_RX_MODE, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); } /* Must be called with qede_lock held */ @@ -3877,7 +4248,7 @@ static void qede_config_rx_mode(struct net_device *ndev) /* Check for promiscuous */ if ((ndev->flags & IFF_PROMISC) || - (uc_count > 15)) { /* @@@TBD resource allocation - 1 */ + (uc_count > edev->dev_info.num_mac_filters - 1)) { accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; } else { /* Add MAC filters according to the unicast secondary macs */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c index 9867f960b063..49272716a7c4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_roce.c +++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c @@ -191,8 +191,8 @@ int qede_roce_register_driver(struct qedr_driver *drv) } mutex_unlock(&qedr_dev_list_lock); - DP_INFO(edev, "qedr: discovered and registered %d RoCE funcs\n", - qedr_counter); + pr_notice("qedr: discovered and registered %d RoCE funcs\n", + qedr_counter); return 0; } diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index b09a6b80d107..5c100ab86c00 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -3755,7 +3755,6 @@ static const struct net_device_ops ql3xxx_netdev_ops = { .ndo_open = ql3xxx_open, .ndo_start_xmit = ql3xxx_send, .ndo_stop = 
ql3xxx_close, - .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ql3xxx_set_mac_address, .ndo_tx_timeout = ql3xxx_tx_timeout, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 509b596cf1e8..838cc0ceafd8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -1024,12 +1024,6 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu) struct qlcnic_adapter *adapter = netdev_priv(netdev); int rc = 0; - if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) { - dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes" - " not supported\n", P3P_MAX_MTU, P3P_MIN_MTU); - return -EINVAL; - } - rc = qlcnic_fw_cmd_set_mtu(adapter, mtu); if (!rc) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 3ae3968b0edf..4c0cce962585 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2342,6 +2342,10 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->irq = adapter->msix_entries[0].vector; + /* MTU range: 68 - 9600 */ + netdev->min_mtu = P3P_MIN_MTU; + netdev->max_mtu = P3P_MAX_MTU; + err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings, adapter->drv_sds_rings); if (err) diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index fd4a8e473f11..1409412ab39d 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4788,6 +4788,13 @@ static int qlge_probe(struct pci_dev *pdev, ndev->ethtool_ops = &qlge_ethtool_ops; ndev->watchdog_timeo = 10 * HZ; + /* MTU range: this driver only supports 1500 or 9000, so this only + * filters out values above or below, and we'll rely on + * qlge_change_mtu to make sure only 1500 or 9000 are allowed + */ + ndev->min_mtu = ETH_DATA_LEN; + ndev->max_mtu = 9000; + err = register_netdev(ndev); if (err) { dev_err(&pdev->dev, "net device registration failed.\n"); |
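
A recurring theme in the netxen, qlcnic, and qlge hunks above is that per-driver MTU range checks (and the eth_change_mtu helper) are dropped in favour of the core's netdev->min_mtu / netdev->max_mtu fields, leaving ndo_change_mtu to enforce only constraints the range alone cannot express. Below is a minimal sketch of that convention for a hypothetical driver; the foo_* names are invented for illustration and are not part of this patch, and the sketch assumes a kernel where dev_set_mtu() already validates against min_mtu/max_mtu:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical ndo_change_mtu: the range check against min_mtu/max_mtu has
 * already been done by the core, so only device-specific rules remain here,
 * e.g. "1500 or 9000 only" in the spirit of qlge_change_mtu.
 */
static int foo_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (new_mtu != ETH_DATA_LEN && new_mtu != 9000)
		return -EINVAL;

	ndev->mtu = new_mtu;
	return 0;
}

/* Hypothetical probe-time setup: advertise the supported range instead of
 * open-coding the bounds check inside ndo_change_mtu.
 */
static void foo_set_mtu_limits(struct net_device *ndev)
{
	ndev->min_mtu = ETH_DATA_LEN;	/* 1500 */
	ndev->max_mtu = 9000;		/* jumbo frames */
}

When no extra restriction is needed at all, a driver can simply leave .ndo_change_mtu out of its net_device_ops, which is what the qla3xxx hunk does by deleting the eth_change_mtu assignment.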