Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_main.c')
 drivers/net/ethernet/qlogic/qed/qed_main.c | 322
 1 file changed, 252 insertions(+), 70 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index aeb98d8c5626..b11399606990 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1,16 +1,39 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
@@ -21,6 +44,7 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
@@ -29,9 +53,13 @@
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
+#include "qed_fcoe.h"
+#include "qed_iscsi.h"
+
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
+#include "qed_debug.h"
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
@@ -94,7 +122,7 @@ static void qed_free_pci(struct qed_dev *cdev)
{
struct pci_dev *pdev = cdev->pdev;
- if (cdev->doorbells)
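+ /* db_size stays zero when the doorbell BAR was never mapped,
+ * e.g., on VFs that expose no doorbell BAR.
+ */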
+ if (cdev->doorbells && cdev->db_size)
iounmap(cdev->doorbells);
if (cdev->regview)
iounmap(cdev->regview);
@@ -178,16 +206,24 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
goto err2;
}
- if (IS_PF(cdev)) {
- cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
- cdev->db_size = pci_resource_len(cdev->pdev, 2);
- cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
- if (!cdev->doorbells) {
- DP_NOTICE(cdev, "Cannot map doorbell space\n");
- return -ENOMEM;
+ cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+ cdev->db_size = pci_resource_len(cdev->pdev, 2);
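+ /* A zero-sized doorbell BAR is fatal for a PF but acceptable for a VF */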
+ if (!cdev->db_size) {
+ if (IS_PF(cdev)) {
+ DP_NOTICE(cdev, "No Doorbell bar available\n");
+ return -EINVAL;
+ } else {
+ return 0;
}
}
+ cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+
+ if (!cdev->doorbells) {
+ DP_NOTICE(cdev, "Cannot map doorbell space\n");
+ return -ENOMEM;
+ }
+
return 0;
err2:
@@ -201,18 +237,35 @@ err0:
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info)
{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_hw_info *hw_info = &p_hwfn->hw_info;
+ struct qed_tunnel_info *tun = &cdev->tunnel;
struct qed_ptt *ptt;
memset(dev_info, 0, sizeof(struct qed_dev_info));
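+ /* A tunnel offload is reported as enabled only when its mode is
+ * active and its classification is done per MAC/VLAN.
+ */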
+ if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->vxlan.b_mode_enabled)
+ dev_info->vxlan_enable = true;
+
+ if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+ tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+ dev_info->gre_enable = true;
+
+ if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+ tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+ dev_info->geneve_enable = true;
+
dev_info->num_hwfns = cdev->num_hwfns;
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
- dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
- QED_PCI_ETH_ROCE);
+ dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
- ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
+ dev_info->dev_type = cdev->type;
+ ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
if (IS_PF(cdev)) {
dev_info->fw_major = FW_MAJOR_VERSION;
@@ -222,9 +275,10 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->mf_mode = cdev->mf_mode;
dev_info->tx_switching = true;
- if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
- QED_WOL_SUPPORT_PME)
+ if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
dev_info->wol_support = true;
+
+ dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
} else {
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
&dev_info->fw_minor, &dev_info->fw_rev,
@@ -237,6 +291,9 @@ int qed_fill_dev_info(struct qed_dev *cdev,
qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
&dev_info->mfw_rev, NULL);
+ qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->mbi_version);
+
qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
&dev_info->flash_size);
@@ -247,7 +304,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
&dev_info->mfw_rev, NULL);
}
- dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;
+ dev_info->mtu = hw_info->mtu;
return 0;
}
@@ -291,6 +348,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
if (!cdev)
goto err0;
+ cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
cdev->protocol = params->protocol;
if (params->is_vf)
@@ -562,6 +620,33 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
return rc;
}
+static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
+{
+ /* tasklet_disable() waits for any currently-running tasklet to
+ * complete. Re-enabling it right away makes this pair of calls act
+ * as a flush-like operation.
+ */
+ if (p_hwfn->b_sp_dpc_enabled) {
+ tasklet_disable(p_hwfn->sp_dpc);
+ tasklet_enable(p_hwfn->sp_dpc);
+ }
+}
+
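+/* Wait until the slowpath IRQ handler and the slowpath DPC tasklet have
+ * finished any in-flight run.
+ */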
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 id = p_hwfn->my_id;
+ u32 int_mode;
+
+ int_mode = cdev->int_params.out.int_mode;
+ if (int_mode == QED_INT_MODE_MSIX)
+ synchronize_irq(cdev->int_params.msix_table[id].vector);
+ else
+ synchronize_irq(cdev->pdev->irq);
+
+ qed_slowpath_tasklet_flush(p_hwfn);
+}
+
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
int i;
@@ -604,19 +689,6 @@ static int qed_nic_stop(struct qed_dev *cdev)
return rc;
}
-static int qed_nic_reset(struct qed_dev *cdev)
-{
- int rc;
-
- rc = qed_hw_reset(cdev);
- if (rc)
- return rc;
-
- qed_resc_free(cdev);
-
- return 0;
-}
-
static int qed_nic_setup(struct qed_dev *cdev)
{
int rc, i;
@@ -700,7 +772,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
for_each_hwfn(cdev, i) {
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
- cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
+ cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
cdev->int_params.in.num_vectors++; /* slowpath */
}
@@ -717,7 +789,8 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
- if (!IS_ENABLED(CONFIG_QED_RDMA))
+ if (!IS_ENABLED(CONFIG_QED_RDMA) ||
+ !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
return 0;
for_each_hwfn(cdev, i)
@@ -849,10 +922,22 @@ static void qed_update_pf_params(struct qed_dev *cdev,
params->rdma_pf_params.num_qps = QED_ROCE_QPS;
params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
/* divide by 3 the MRs to avoid MF ILT overflow */
- params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
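+ /* aRFS filters are not supported on multi-hwfn devices or VFs */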
+ if (cdev->num_hwfns > 1 || IS_VF(cdev))
+ params->eth_pf_params.num_arfs_filters = 0;
+
+ /* If we might support RDMA, don't let qede be greedy with the L2
+ * contexts: allow for 64 queues [rx, tx, xdp] per hwfn, i.e.
+ * 3 * 64 = 192 connections.
+ */
+ if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
+ u16 *num_cons;
+
+ num_cons = &params->eth_pf_params.num_cons;
+ *num_cons = min_t(u16, *num_cons, 192);
+ }
+
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -863,10 +948,15 @@ static void qed_update_pf_params(struct qed_dev *cdev,
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
- struct qed_tunn_start_params tunn_info;
+ struct qed_drv_load_params drv_load_params;
+ struct qed_hw_init_params hw_init_params;
struct qed_mcp_drv_version drv_version;
+ struct qed_tunnel_info tunn_info;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
+#ifdef CONFIG_RFS_ACCEL
+ struct qed_ptt *p_ptt;
+#endif
int rc = -EINVAL;
if (qed_iov_wq_start(cdev))
@@ -881,6 +971,19 @@ static int qed_slowpath_start(struct qed_dev *cdev,
QED_FW_FILE_NAME);
goto err;
}
+
+#ifdef CONFIG_RFS_ACCEL
+ if (cdev->num_hwfns == 1) {
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (p_ptt) {
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
+ } else {
+ DP_NOTICE(cdev,
+ "Failed to acquire PTT for aRFS\n");
+ goto err;
+ }
+ }
+#endif
}
cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
@@ -901,33 +1004,53 @@ static int qed_slowpath_start(struct qed_dev *cdev,
if (rc)
goto err2;
- /* First Dword used to diffrentiate between various sources */
+ /* First Dword used to differentiate between various sources */
data = cdev->firmware->data + sizeof(u32);
qed_dbg_pf_init(cdev);
}
- memset(&tunn_info, 0, sizeof(tunn_info));
- tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
- 1 << QED_MODE_L2GRE_TUNN |
- 1 << QED_MODE_IPGRE_TUNN |
- 1 << QED_MODE_L2GENEVE_TUNN |
- 1 << QED_MODE_IPGENEVE_TUNN;
-
- tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
-
/* Start the slowpath */
- rc = qed_hw_init(cdev, &tunn_info, true,
- cdev->int_params.out.int_mode,
- true, data);
+ memset(&hw_init_params, 0, sizeof(hw_init_params));
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ tunn_info.vxlan.b_mode_enabled = true;
+ tunn_info.l2_gre.b_mode_enabled = true;
+ tunn_info.ip_gre.b_mode_enabled = true;
+ tunn_info.l2_geneve.b_mode_enabled = true;
+ tunn_info.ip_geneve.b_mode_enabled = true;
+ tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ hw_init_params.p_tunn = &tunn_info;
+ hw_init_params.b_hw_start = true;
+ hw_init_params.int_mode = cdev->int_params.out.int_mode;
+ hw_init_params.allow_npar_tx_switch = true;
+ hw_init_params.bin_fw_data = data;
+
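+ /* Parameters controlling how the MFW handles this driver's load request */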
+ memset(&drv_load_params, 0, sizeof(drv_load_params));
+ drv_load_params.is_crash_kernel = is_kdump_kernel();
+ drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
+ drv_load_params.avoid_eng_reset = false;
+ drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
+ hw_init_params.p_drv_load_params = &drv_load_params;
+
+ rc = qed_hw_init(cdev, &hw_init_params);
if (rc)
goto err2;
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
+ if (IS_PF(cdev)) {
+ cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
+ BIT(QED_MODE_L2GENEVE_TUNN) |
+ BIT(QED_MODE_IPGENEVE_TUNN) |
+ BIT(QED_MODE_L2GRE_TUNN) |
+ BIT(QED_MODE_IPGRE_TUNN));
+ }
+
/* Allocate LL2 interface if needed */
if (QED_LEADING_HWFN(cdev)->using_ll2) {
rc = qed_ll2_alloc_if(cdev);
@@ -968,6 +1091,13 @@ err:
if (IS_PF(cdev))
release_firmware(cdev->firmware);
+#ifdef CONFIG_RFS_ACCEL
+ if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt)
+ qed_ptt_release(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+#endif
+
qed_iov_wq_stop(cdev, false);
return rc;
@@ -981,16 +1111,24 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
qed_ll2_dealloc_if(cdev);
if (IS_PF(cdev)) {
+#ifdef CONFIG_RFS_ACCEL
+ if (cdev->num_hwfns == 1)
+ qed_ptt_release(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+#endif
qed_free_stream_mem(cdev);
if (IS_QED_ETH_IF(cdev))
qed_sriov_disable(cdev, true);
+ }
- qed_nic_stop(cdev);
+ qed_nic_stop(cdev);
+
+ if (IS_PF(cdev))
qed_slowpath_irq_free(cdev);
- }
qed_disable_msix(cdev);
- qed_nic_reset(cdev);
+
+ qed_resc_free(cdev);
qed_iov_wq_stop(cdev, true);
@@ -1000,17 +1138,13 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
return 0;
}
-static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
- char ver_str[VER_SIZE])
+static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
int i;
memcpy(cdev->name, name, NAME_SIZE);
for_each_hwfn(cdev, i)
snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
-
- memcpy(cdev->ver_str, ver_str, VER_SIZE);
- cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}
static u32 qed_sb_init(struct qed_dev *cdev,
@@ -1020,6 +1154,7 @@ static u32 qed_sb_init(struct qed_dev *cdev,
enum qed_sb_type type)
{
struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
int hwfn_index;
u16 rel_sb_id;
u8 n_hwfns;
@@ -1041,8 +1176,18 @@ static u32 qed_sb_init(struct qed_dev *cdev,
"hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
hwfn_index, rel_sb_id, sb_id);
- rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
- sb_virt_addr, sb_phy_addr, rel_sb_id);
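+ /* A PF must program the SB through a PTT window; a VF passes NULL */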
+ if (IS_PF(p_hwfn->cdev)) {
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
+ sb_phy_addr, rel_sb_id);
+ qed_ptt_release(p_hwfn, p_ptt);
+ } else {
+ rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
+ sb_phy_addr, rel_sb_id);
+ }
return rc;
}
@@ -1083,12 +1228,18 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
if (!cdev)
return -ENODEV;
- if (IS_VF(cdev))
- return 0;
-
/* The link should be set only once per PF */
hwfn = &cdev->hwfns[0];
+ /* When a VF wants to set the link, force it to read the bulletin
+ * instead. This mimics the PF behavior, where a notification [both
+ * immediate and possibly later] is generated when properties change.
+ */
+ if (IS_VF(cdev)) {
+ qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
+ return 0;
+ }
+
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return -EBUSY;
@@ -1245,7 +1396,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
/* TODO - at the moment assume supported and advertised speed equal */
if_link->supported_caps = QED_LM_FIBRE_BIT;
- if (params.speed.autoneg)
+ if (link_caps.default_speed_autoneg)
if_link->supported_caps |= QED_LM_Autoneg_BIT;
if (params.pause.autoneg ||
(params.pause.forced_rx && params.pause.forced_tx))
@@ -1255,6 +1406,10 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->supported_caps |= QED_LM_Pause_BIT;
if_link->advertised_caps = if_link->supported_caps;
+ if (params.speed.autoneg)
+ if_link->advertised_caps |= QED_LM_Autoneg_BIT;
+ else
+ if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
@@ -1387,6 +1542,21 @@ static int qed_drain(struct qed_dev *cdev)
return 0;
}
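+/* Read an NVM image of the given type from the device into 'buf' */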
+static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
+ u8 *buf, u16 len)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+ int rc;
+
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len);
+ qed_ptt_release(hwfn, ptt);
+ return rc;
+}
+
static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
{
*rx_coal = cdev->rx_coalesce_usecs;
@@ -1394,7 +1564,7 @@ static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
}
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
- u8 qid, u16 sb_id)
+ u16 qid, u16 sb_id)
{
struct qed_hwfn *hwfn;
struct qed_ptt *ptt;
@@ -1543,7 +1713,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.probe = &qed_probe,
.remove = &qed_remove,
.set_power_state = &qed_set_power_state,
- .set_id = &qed_set_id,
+ .set_name = &qed_set_name,
.update_pf_params = &qed_update_pf_params,
.slowpath_start = &qed_slowpath_start,
.slowpath_stop = &qed_slowpath_stop,
@@ -1553,6 +1723,8 @@ const struct qed_common_ops qed_common_ops_pass = {
.sb_release = &qed_sb_release,
.simd_handler_config = &qed_simd_handler_config,
.simd_handler_clean = &qed_simd_handler_clean,
+ .dbg_grc = &qed_dbg_grc,
+ .dbg_grc_size = &qed_dbg_grc_size,
.can_link_change = &qed_can_link_change,
.set_link = &qed_set_link,
.get_link = &qed_get_current_link,
@@ -1562,6 +1734,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.dbg_all_data_size = &qed_dbg_all_data_size,
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
+ .nvm_get_image = &qed_nvm_get_image,
.get_coalesce = &qed_get_coalesce,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
@@ -1582,12 +1755,21 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
switch (type) {
case QED_MCP_LAN_STATS:
qed_get_vport_stats(cdev, &eth_stats);
- stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
- stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
+ stats->lan_stats.ucast_rx_pkts =
+ eth_stats.common.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts =
+ eth_stats.common.tx_ucast_pkts;
stats->lan_stats.fcs_err = -1;
break;
+ case QED_MCP_FCOE_STATS:
+ qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
+ break;
+ case QED_MCP_ISCSI_STATS:
+ qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
+ break;
default:
- DP_ERR(cdev, "Invalid protocol type = %d\n", type);
+ DP_VERBOSE(cdev, QED_MSG_SP,
+ "Invalid protocol type = %d\n", type);
return;
}
}
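
Note: set_id() has been replaced by set_name(); protocol drivers now pass
only the device name, while the version string and DRV_ID_DRV_TYPE_LINUX
are filled in by qed itself (see qed_probe() above). A minimal sketch of a
caller, assuming a qede-style 'edev' that holds the ops table (these
identifiers are illustrative, not part of this patch):

	/* before: ops->common->set_id(cdev, name, ver_str); */
	edev->ops->common->set_name(cdev, edev->ndev->name);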