From 42f8277f56cf4a9570b1f0fe10a4fec3f48c832a Mon Sep 17 00:00:00 2001
From: Yuval Mintz 
Date: Sun, 23 Mar 2014 18:12:23 +0200
Subject: bnx2x: Support mng. request for driver version

This adds support for a new management feature which needs the versions of the drivers (bnx2x, bnx2fc and bnx2i) loaded on each interface.

Signed-off-by: Yuval Mintz 
Signed-off-by: Ariel Elior 
Signed-off-by: David S. Miller 
---
 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h      |   7 ++
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  |   7 ++
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h  |  29 ++++++
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 121 ++++++++++++++++++++++-
 4 files changed, 163 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 722160940ab9..f33fab6abb95 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1413,6 +1413,7 @@ enum sp_rtnl_flag {
 	BNX2X_SP_RTNL_RX_MODE,
 	BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 	BNX2X_SP_RTNL_TX_STOP,
+	BNX2X_SP_RTNL_GET_DRV_VERSION,
 };
 
 struct bnx2x_prev_path_list {
@@ -1703,6 +1704,10 @@ struct bnx2x {
 	struct bnx2x_slowpath	*slowpath;
 	dma_addr_t		slowpath_mapping;
 
+	/* Mechanism protecting the drv_info_to_mcp */
+	struct mutex		drv_info_mutex;
+	bool			drv_info_mng_owner;
+
 	/* Total number of FW statistics requests */
 	u8			fw_stats_num;
 
@@ -2535,6 +2540,8 @@ enum {
 
 void bnx2x_set_local_cmng(struct bnx2x *bp);
 
+void bnx2x_update_mng_version(struct bnx2x *bp);
+
 #define MCPR_SCRATCH_BASE(bp) \
 	(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index acd494647f25..9261d5313b5b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2804,6 +2804,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	if (CNIC_ENABLED(bp))
 		bnx2x_load_cnic(bp);
 
+	if (IS_PF(bp))
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
 		/* mark driver is loaded in shmem2 */
 		u32 val;
@@ -3030,6 +3033,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	bp->state = BNX2X_STATE_CLOSED;
 	bp->cnic_loaded = false;
 
+	/* Clear driver version indication in shmem */
+	if (IS_PF(bp))
+		bnx2x_update_mng_version(bp);
+
 	/* Check if there are pending parity attentions. If there are - set
 	 * RECOVERY_IN_PROGRESS.
 	 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 46e2f18df2cb..5ba8af50c84f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -2003,6 +2003,23 @@ struct shmem_lfa {
 	#define SHMEM_LFA_DONT_CLEAR_STAT	(1<<24)
};
 
+/* Used to support NSCI get OS driver version
+ * on driver load the version value will be set
+ * on driver unload driver value of 0x0 will be set. 
+ */ +struct os_drv_ver { +#define DRV_VER_NOT_LOADED 0 + + /* personalties order is important */ +#define DRV_PERS_ETHERNET 0 +#define DRV_PERS_ISCSI 1 +#define DRV_PERS_FCOE 2 + + /* shmem2 struct is constant can't add more personalties here */ +#define MAX_DRV_PERS 3 + u32 versions[MAX_DRV_PERS]; +}; + struct ncsi_oem_fcoe_features { u32 fcoe_features1; #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF @@ -2217,6 +2234,18 @@ struct shmem2_region { u32 reserved4; /* Offset 0x150 */ u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0) + + u32 reserved5[2]; + u32 reserved6[PORT_MAX]; + + /* driver version for each personality */ + struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */ + + /* Flag to the driver that PF's drv_info_host_addr buffer was read */ + u32 mfw_drv_indication; + + /* We use indication for each PF (0..3) */ +#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_)) }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 5e74599b05c7..faef7b19a529 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -3482,10 +3482,15 @@ static void bnx2x_handle_eee_event(struct bnx2x *bp) bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); } +#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20) +#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25) + static void bnx2x_handle_drv_info_req(struct bnx2x *bp) { enum drv_info_opcode op_code; u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); + bool release = false; + int wait; /* if drv_info version supported by MFW doesn't match - send NACK */ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { @@ -3496,6 +3501,9 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp) op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> DRV_INFO_CONTROL_OP_CODE_SHIFT; + /* Must prevent other flows from accessing drv_info_to_mcp */ + mutex_lock(&bp->drv_info_mutex); + memset(&bp->slowpath->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); @@ -3512,7 +3520,7 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp) default: /* if op code isn't supported - send NACK */ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); - return; + goto out; } /* if we got drv_info attn from MFW then these fields are defined in @@ -3524,6 +3532,106 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp) U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); + + /* Since possible management wants both this and get_driver_version + * need to wait until management notifies us it finished utilizing + * the buffer. 
+	 */
+	if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
+		DP(BNX2X_MSG_MCP, "Management does not support indication\n");
+	} else if (!bp->drv_info_mng_owner) {
+		u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
+
+		for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
+			u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
+
+			/* Management is done; need to clear indication */
+			if (indication & bit) {
+				SHMEM2_WR(bp, mfw_drv_indication,
+					  indication & ~bit);
+				release = true;
+				break;
+			}
+
+			msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
+		}
+	}
+	if (!release) {
+		DP(BNX2X_MSG_MCP, "Management did not release indication\n");
+		bp->drv_info_mng_owner = true;
+	}
+
+out:
+	mutex_unlock(&bp->drv_info_mutex);
+}
+
+static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
+{
+	u8 vals[4];
+	int i = 0;
+
+	if (bnx2x_format) {
+		i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
+			   &vals[0], &vals[1], &vals[2], &vals[3]);
+		if (i > 0)
+			vals[0] -= '0';
+	} else {
+		i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
+			   &vals[0], &vals[1], &vals[2], &vals[3]);
+	}
+
+	while (i < 4)
+		vals[i++] = 0;
+
+	return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
+}
+
+void bnx2x_update_mng_version(struct bnx2x *bp)
+{
+	u32 iscsiver = DRV_VER_NOT_LOADED;
+	u32 fcoever = DRV_VER_NOT_LOADED;
+	u32 ethver = DRV_VER_NOT_LOADED;
+	int idx = BP_FW_MB_IDX(bp);
+	u8 *version;
+
+	if (!SHMEM2_HAS(bp, func_os_drv_ver))
+		return;
+
+	mutex_lock(&bp->drv_info_mutex);
+	/* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */
+	if (bp->drv_info_mng_owner)
+		goto out;
+
+	if (bp->state != BNX2X_STATE_OPEN)
+		goto out;
+
+	/* Parse ethernet driver version */
+	ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+	if (!CNIC_LOADED(bp))
+		goto out;
+
+	/* Try getting storage driver version via cnic */
+	memset(&bp->slowpath->drv_info_to_mcp, 0,
+	       sizeof(union drv_info_to_mcp));
+	bnx2x_drv_info_iscsi_stat(bp);
+	version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
+	iscsiver = bnx2x_update_mng_version_utility(version, false);
+
+	memset(&bp->slowpath->drv_info_to_mcp, 0,
+	       sizeof(union drv_info_to_mcp));
+	bnx2x_drv_info_fcoe_stat(bp);
+	version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
+	fcoever = bnx2x_update_mng_version_utility(version, false);
+
+out:
+	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
+	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
+	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
+
+	mutex_unlock(&bp->drv_info_mutex);
+
+	DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
+	   ethver, iscsiver, fcoever);
+}
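
Illustration, not part of the patch: bnx2x_update_mng_version_utility() above packs a dotted version string into the single u32 written to shmem, one byte per component, most-significant byte first; the bnx2x_format branch additionally strips the leading "1." era prefix used by DRV_MODULE_VERSION, so a module version such as "1.78.17-0" would be reported as 7.8.17.0. A minimal user-space sketch of the same packing; pack_version() and the sample string are invented for this example:

#include <stdio.h>

/* Mirror the shift/or packing of bnx2x_update_mng_version_utility()
 * for the plain "a.b.c.d" (storage driver) format.
 */
static unsigned int pack_version(const char *version)
{
	unsigned char vals[4] = { 0, 0, 0, 0 };

	/* components that fail to parse simply stay 0 */
	if (sscanf(version, "%hhu.%hhu.%hhu.%hhu",
		   &vals[0], &vals[1], &vals[2], &vals[3]) < 1)
		return 0; /* the equivalent of DRV_VER_NOT_LOADED */

	return ((unsigned int)vals[0] << 24) | (vals[1] << 16) |
	       (vals[2] << 8) | vals[3];
}

int main(void)
{
	printf("0x%08x\n", pack_version("7.8.17.0")); /* prints 0x07081100 */
	return 0;
}
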
 
 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
@@ -9807,6 +9915,10 @@ sp_rtnl_not_reset:
 			bnx2x_dcbx_resume_hw_tx(bp);
 	}
 
+	if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
+			       &bp->sp_rtnl_state))
+		bnx2x_update_mng_version(bp);
+
 	/* work which needs rtnl lock not-taken (as it takes the lock itself and
 	 * can be called from other contexts as well)
 	 */
@@ -11757,6 +11869,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
 	mutex_init(&bp->port.phy_mutex);
 	mutex_init(&bp->fw_mb_mutex);
+	mutex_init(&bp->drv_info_mutex);
+	bp->drv_info_mng_owner = false;
 	spin_lock_init(&bp->stats_lock);
 	sema_init(&bp->stats_sema, 1);
 
@@ -13794,6 +13908,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 			REG_WR(bp, scratch_offset + i,
 			       *(host_addr + i/4));
 		}
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
 		break;
 	}
 
@@ -13811,6 +13926,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 			cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
 		SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
 	}
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
 	break;
 }
 
@@ -13916,6 +14032,9 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
 
 	rcu_assign_pointer(bp->cnic_ops, ops);
 
+	/* Schedule driver to read CNIC driver versions */
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
 	return 0;
 }
--
cgit 
From 370d4a26590fcc7510ad4a8432e4982a209f1b59 Mon Sep 17 00:00:00 2001
From: Yuval Mintz 
Date: Sun, 23 Mar 2014 18:12:24 +0200
Subject: bnx2x: Create workqueue for IOV related tasks

The bnx2x sriov mechanisms were handled in the bnx2x slowpath workitem, which runs on the bnx2x workqueue; this workitem is also responsible for the bottom half of interrupt handling in the driver, and specifically it receives FW notifications of ramrod completions, allowing other flows to progress.

The original design of the sriov related flows was based on the notion that such flows must not sleep, since their context is the slowpath workitem. Otherwise, we might reach timeouts - those flows may wait for a ramrod completion that will never arrive, as the workitem will not be re-scheduled until that same flow is over.

More recently, bnx2x started supporting features in which the VF interface can be configured by the tools accessing the PF on the hypervisor. This support created possible races on the VF-PF lock (which is taken either when the PF is handling a VF message or when the PF is doing some slowpath work on behalf of the VF) which may cause timeouts on the VF side and lags on the PF side.

This patch changes the scheme - it creates a new workqueue for sriov related tasks and moves all handling currently done in the slowpath task into the new workqueue.

Signed-off-by: Yuval Mintz 
Signed-off-by: Ariel Elior 
Signed-off-by: David S. Miller 
---
 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h       |  15 +++-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c  |  29 +++---
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c |  41 +++++++--
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h |  37 +++++---
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c  | 103 +++++++++++++---------
 5 files changed, 147 insertions(+), 78 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index f33fab6abb95..8e35dbaca76e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1155,10 +1155,6 @@ struct bnx2x_port {
 	(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
 
 /* slow path */
-
-/* slow path work-queue */
-extern struct workqueue_struct *bnx2x_wq;
-
 #define BNX2X_MAX_NUM_OF_VFS	64
 #define BNX2X_VF_CID_WND	4 /* log num of queues per VF. HW config. 
*/ #define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND) @@ -1416,6 +1412,12 @@ enum sp_rtnl_flag { BNX2X_SP_RTNL_GET_DRV_VERSION, }; +enum bnx2x_iov_flag { + BNX2X_IOV_HANDLE_VF_MSG, + BNX2X_IOV_CONT_VFOP, + BNX2X_IOV_HANDLE_FLR, +}; + struct bnx2x_prev_path_list { struct list_head list; u8 bus; @@ -1614,6 +1616,8 @@ struct bnx2x { int mrrs; struct delayed_work sp_task; + struct delayed_work iov_task; + atomic_t interrupt_occurred; struct delayed_work sp_rtnl_task; @@ -1897,6 +1901,9 @@ struct bnx2x { /* operation indication for the sp_rtnl task */ unsigned long sp_rtnl_state; + /* Indication of the IOV tasks */ + unsigned long iov_task_state; + /* DCBX Negotiation results */ struct dcbx_features dcbx_local_feat; u32 dcbx_error; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index faef7b19a529..b5c7f77e8108 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -120,7 +120,8 @@ static int debug; module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, " Default debug msglevel"); -struct workqueue_struct *bnx2x_wq; +static struct workqueue_struct *bnx2x_wq; +struct workqueue_struct *bnx2x_iov_wq; struct bnx2x_mac_vals { u32 xmac_addr; @@ -1857,7 +1858,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) return; #endif /* SRIOV: reschedule any 'in_progress' operations */ - bnx2x_iov_sp_event(bp, cid, true); + bnx2x_iov_sp_event(bp, cid); smp_mb__before_atomic_inc(); atomic_inc(&bp->cq_spq_left); @@ -4160,7 +4161,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) bnx2x_handle_drv_info_req(bp); if (val & DRV_STATUS_VF_DISABLED) - bnx2x_vf_handle_flr_event(bp); + bnx2x_schedule_iov_task(bp, + BNX2X_IOV_HANDLE_FLR); if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) bnx2x_pmf_update(bp); @@ -5351,8 +5353,8 @@ static void bnx2x_eq_int(struct bnx2x *bp) /* handle eq element */ switch (opcode) { case EVENT_RING_OPCODE_VF_PF_CHANNEL: - DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n"); - bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event); + bnx2x_vf_mbx_schedule(bp, + &elem->message.data.vf_pf_event); continue; case EVENT_RING_OPCODE_STAT_QUERY: @@ -5567,13 +5569,6 @@ static void bnx2x_sp_task(struct work_struct *work) le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); } - /* must be called after the EQ processing (since eq leads to sriov - * ramrod completion flows). - * This flow may have been scheduled by the arrival of a ramrod - * completion, or by the sriov code rescheduling itself. 
-	 */
-	bnx2x_iov_sp_task(bp);
-
 	/* afex - poll to check if VIFSET_ACK should be sent to MFW */
 	if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
 			       &bp->sp_state)) {
@@ -8990,6 +8985,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
 		synchronize_irq(bp->pdev->irq);
 
 		flush_workqueue(bnx2x_wq);
+		flush_workqueue(bnx2x_iov_wq);
 
 		while (bnx2x_func_get_state(bp, &bp->func_obj) !=
 		       BNX2X_F_STATE_STARTED && tout--)
@@ -11877,6 +11873,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
 	INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
+	INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
 	if (IS_PF(bp)) {
 		rc = bnx2x_get_hwinfo(bp);
 		if (rc)
@@ -13499,11 +13496,18 @@ static int __init bnx2x_init(void)
 		pr_err("Cannot create workqueue\n");
 		return -ENOMEM;
 	}
+	bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
+	if (!bnx2x_iov_wq) {
+		pr_err("Cannot create iov workqueue\n");
+		destroy_workqueue(bnx2x_wq);
+		return -ENOMEM;
+	}
 
 	ret = pci_register_driver(&bnx2x_pci_driver);
 	if (ret) {
 		pr_err("Cannot register driver\n");
 		destroy_workqueue(bnx2x_wq);
+		destroy_workqueue(bnx2x_iov_wq);
 	}
 	return ret;
 }
@@ -13515,6 +13519,7 @@ static void __exit bnx2x_cleanup(void)
 	pci_unregister_driver(&bnx2x_pci_driver);
 	destroy_workqueue(bnx2x_wq);
+	destroy_workqueue(bnx2x_iov_wq);
 
 	/* Free globally allocated resources */
 	list_for_each_safe(pos, q, &bnx2x_prev_list) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 61e6f606d8a4..8e2b191234f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2042,6 +2042,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
 		goto failed;
 	}
 
+	/* Prepare the VFs event synchronization mechanism */
+	mutex_init(&bp->vfdb->event_mutex);
+
 	return 0;
 failed:
 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -2469,7 +2472,7 @@ get_vf:
 		return 0;
 	}
 	/* SRIOV: reschedule any 'in_progress' operations */
-	bnx2x_iov_sp_event(bp, cid, false);
+	bnx2x_iov_sp_event(bp, cid);
 	return 0;
 }
 
@@ -2506,7 +2509,7 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
 	}
 }
 
-void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
+void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid)
 {
 	struct bnx2x_virtf *vf;
 
@@ -2518,8 +2521,7 @@ void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
 	if (vf) {
 		/* set in_progress flag */
 		atomic_set(&vf->op_in_progress, 1);
-		if (queue_work)
-			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+		bnx2x_schedule_iov_task(bp, BNX2X_IOV_CONT_VFOP);
 	}
 }
 
@@ -2604,7 +2606,7 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
 }
 
-void bnx2x_iov_sp_task(struct bnx2x *bp)
+void bnx2x_iov_vfop_cont(struct bnx2x *bp)
 {
 	int i;
 
@@ -3875,3 +3877,32 @@ void bnx2x_iov_channel_down(struct bnx2x *bp)
 		bnx2x_post_vf_bulletin(bp, vf_idx);
 	}
 }
+
+void bnx2x_iov_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
+
+	if (!netif_running(bp->dev))
+		return;
+
+	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
+			       &bp->iov_task_state))
+		bnx2x_vf_handle_flr_event(bp);
+
+	if (test_and_clear_bit(BNX2X_IOV_CONT_VFOP,
+			       &bp->iov_task_state))
+		bnx2x_iov_vfop_cont(bp);
+
+	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
+			       &bp->iov_task_state))
+		bnx2x_vf_mbx(bp);
+}
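
Illustration, not part of the patch: the dispatch scheme of bnx2x_iov_task() above is a small reusable pattern - producers atomically set a flag bit and unconditionally (re)queue the work item, while the work function consumes flags with test_and_clear_bit(), so an event raised while a handler is running is either seen later in the same pass or re-queues the task; no event is lost. A stripped-down sketch under invented my_* names (the driver additionally brackets set_bit() with memory barriers, omitted here):

#include <linux/bitops.h>
#include <linux/workqueue.h>

enum my_event { MY_EVENT_A, MY_EVENT_B };

static unsigned long my_flags;
static struct workqueue_struct *my_wq;

static void handle_a(void) { /* may sleep */ }
static void handle_b(void) { /* may sleep */ }

static void my_task_fn(struct work_struct *work)
{
	/* consume each flag atomically; no locking needed */
	if (test_and_clear_bit(MY_EVENT_A, &my_flags))
		handle_a();
	if (test_and_clear_bit(MY_EVENT_B, &my_flags))
		handle_b();
}
static DECLARE_DELAYED_WORK(my_task, my_task_fn);

static void my_schedule(enum my_event flag)
{
	set_bit(flag, &my_flags);		/* atomic RMW */
	queue_delayed_work(my_wq, &my_task, 0);	/* no-op if already queued */
}
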
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
+{
+	smp_mb__before_clear_bit();
+	set_bit(flag, &bp->iov_task_state);
+	smp_mb__after_clear_bit();
+	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index b1dc751c6175..87f7c9743f71 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -30,6 +30,8 @@ enum sample_bulletin_result {
 
 #ifdef CONFIG_BNX2X_SRIOV
 
+extern struct workqueue_struct *bnx2x_iov_wq;
+
 /* The bnx2x device structure holds vfdb structure described below.
  * The VF array is indexed by the relative vfid.
  */
@@ -346,11 +348,6 @@ struct bnx2x_vf_mbx {
 	u32 vf_addr_hi;
 
 	struct vfpf_first_tlv first_tlv;	/* saved VF request header */
-
-	u8 flags;
-#define VF_MSG_INPROCESS	0x1	/* failsafe - the FW should prevent
-					 * more then one pending msg
-					 */
 };
 
 struct bnx2x_vf_sp {
@@ -427,6 +424,10 @@ struct bnx2x_vfdb {
 	/* the number of msix vectors belonging to this PF designated for VFs */
 	u16 vf_sbs_pool;
 	u16 first_vf_igu_entry;
+
+	/* sp_rtnl synchronization */
+	struct mutex			event_mutex;
+	u64				event_occur;
 };
 
 /* queue access */
@@ -476,13 +477,14 @@ void bnx2x_iov_init_dq(struct bnx2x *bp);
 void bnx2x_iov_init_dmae(struct bnx2x *bp);
 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
 				struct bnx2x_queue_sp_obj **q_obj);
-void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work);
+void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid);
 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
 void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
-void bnx2x_iov_sp_task(struct bnx2x *bp);
 /* global vf mailbox routines */
-void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event);
+void bnx2x_vf_mbx(struct bnx2x *bp);
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+			   struct vf_pf_event_data *vfpf_event);
 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
 
 /* CORE VF API */
@@ -520,7 +522,8 @@ enum {
 	else {							\
 		DP(BNX2X_MSG_IOV, "no ramrod. 
Scheduling\n"); \ atomic_set(&vf->op_in_progress, 1); \ - queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \ + bnx2x_schedule_iov_task(bp, \ + BNX2X_IOV_CONT_VFOP); \ return; \ } \ } while (0) @@ -785,18 +788,21 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); void bnx2x_iov_channel_down(struct bnx2x *bp); +void bnx2x_iov_task(struct work_struct *work); + +void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag); + #else /* CONFIG_BNX2X_SRIOV */ static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, struct bnx2x_queue_sp_obj **q_obj) {} -static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, - bool queue_work) {} +static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid) {} static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) {return 1; } -static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {} -static inline void bnx2x_vf_mbx(struct bnx2x *bp, - struct vf_pf_event_data *vfpf_event) {} +static inline void bnx2x_vf_mbx(struct bnx2x *bp) {} +static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event) {} static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; } static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {} static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } @@ -843,5 +849,8 @@ static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} +static inline void bnx2x_iov_task(struct work_struct *work) {} +void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {} + #endif /* CONFIG_BNX2X_SRIOV */ #endif /* bnx2x_sriov.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 1117ed7776b6..63c95658ba60 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -1089,9 +1089,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, storm_memset_vf_mbx_ack(bp, vf->abs_vfid); mmiowb(); - /* initiate dmae to send the response */ - mbx->flags &= ~VF_MSG_INPROCESS; - /* copy the response header including status-done field, * must be last dmae, must be after FW is acked */ @@ -2059,13 +2056,10 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, } } -/* handle new vf-pf message */ -void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) +void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event) { - struct bnx2x_virtf *vf; - struct bnx2x_vf_mbx *mbx; u8 vf_idx; - int rc; DP(BNX2X_MSG_IOV, "vf pf event received: vfid %d, address_hi %x, address lo %x", @@ -2077,50 +2071,73 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) BNX2X_NR_VIRTFN(bp)) { BNX2X_ERR("Illegal vf_id %d max allowed: %d\n", vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp)); - goto mbx_done; + return; } + vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id); - mbx = BP_VF_MBX(bp, vf_idx); - /* verify an event is not currently being processed - - * debug failsafe only - */ - if (mbx->flags & VF_MSG_INPROCESS) { - BNX2X_ERR("Previous message is still being processed, vf_id %d\n", - vfpf_event->vf_id); - goto mbx_done; - } - vf = BP_VF(bp, vf_idx); + /* Update 
VFDB with current message and schedule its handling */
+	mutex_lock(&BP_VFDB(bp)->event_mutex);
+	BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
+	BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
+	BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
+	mutex_unlock(&BP_VFDB(bp)->event_mutex);
 
-	/* save the VF message address */
-	mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
-	mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
-	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
-	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
+}
 
-	/* dmae to get the VF request */
-	rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
-				  mbx->vf_addr_hi, mbx->vf_addr_lo,
-				  sizeof(union vfpf_tlvs)/4);
-	if (rc) {
-		BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
-		goto mbx_error;
-	}
+/* handle new vf-pf messages */
+void bnx2x_vf_mbx(struct bnx2x *bp)
+{
+	struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
+	u64 events;
+	u8 vf_idx;
+	int rc;
 
-	/* process the VF message header */
-	mbx->first_tlv = mbx->msg->req.first_tlv;
+	if (!vfdb)
+		return;
 
-	/* Clean response buffer to refrain from falsely seeing chains */
-	memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+	mutex_lock(&vfdb->event_mutex);
+	events = vfdb->event_occur;
+	vfdb->event_occur = 0;
+	mutex_unlock(&vfdb->event_mutex);
 
-	/* dispatch the request (will prepare the response) */
-	bnx2x_vf_mbx_request(bp, vf, mbx);
-	goto mbx_done;
+	for_each_vf(bp, vf_idx) {
+		struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
+		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
 
-mbx_error:
-	bnx2x_vf_release(bp, vf, false); /* non blocking */
-mbx_done:
-	return;
+		/* Handle VFs which have pending events */
+		if (!(events & (1ULL << vf_idx)))
+			continue;
+
+		DP(BNX2X_MSG_IOV,
+		   "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
+		   vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
+		   mbx->first_tlv.resp_msg_offset);
+
+		/* dmae to get the VF request */
+		rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
+					  vf->abs_vfid, mbx->vf_addr_hi,
+					  mbx->vf_addr_lo,
+					  sizeof(union vfpf_tlvs)/4);
+		if (rc) {
+			BNX2X_ERR("Failed to copy request VF %d\n",
+				  vf->abs_vfid);
+			bnx2x_vf_release(bp, vf, false); /* non blocking */
+			return;
+		}
+
+		/* process the VF message header */
+		mbx->first_tlv = mbx->msg->req.first_tlv;
+
+		/* Clean response buffer to refrain from falsely
+		 * seeing chains.
+		 */
+		memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
+		/* dispatch the request (will prepare the response) */
+		bnx2x_vf_mbx_request(bp, vf, mbx);
+	}
 }
 
 /* propagate local bulletin board to vf */
--
cgit 
From 2dc33bbc4f8a5d6a05bf3c673b86c37b825450f3 Mon Sep 17 00:00:00 2001
From: Yuval Mintz 
Date: Sun, 23 Mar 2014 18:12:25 +0200
Subject: bnx2x: Remove the sriov VFOP mechanism

Since we now possess a workqueue dedicated to sriov, the paradigm that sriov-related tasks cannot sleep is no longer correct. The VFOP mechanism was the one previously supporting said paradigm - the sriov related tasks were broken into segments which did not require sleep, and the mechanism re-scheduled the next segment whenever possible.

This patch removes the VFOP mechanism altogether - the resulting code is much easier to follow; the segments are gathered into straightforward functions which sleep whenever necessary.

Signed-off-by: Yuval Mintz 
Signed-off-by: Ariel Elior 
Signed-off-by: David S. Miller 
---
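Note, not part of the commit message: the enabler for this removal is visible throughout the diff below - running on the dedicated IOV workqueue, each former multi-segment VFOP collapses into one plain function that sets RAMROD_COMP_WAIT and blocks in bnx2x_queue_state_change() until the firmware completion arrives. A condensed sketch of that idiom, paraphrased from bnx2x_vf_queue_destroy() below (not a verbatim excerpt; the function name is invented):

static int vf_queue_halt_sketch(struct bnx2x *bp, struct bnx2x_virtf *vf,
				int qid)
{
	struct bnx2x_queue_state_params q_params;

	memset(&q_params, 0, sizeof(q_params));
	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

	/* RAMROD_COMP_WAIT makes the state change a blocking call: it
	 * sleeps until the completion is processed. This is legal here
	 * only because we run in the iov workqueue rather than in the
	 * slowpath work item that consumes those completions.
	 */
	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_HALT;

	return bnx2x_queue_state_change(bp, &q_params);	/* may sleep */
}
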
 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h       |    1 -
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c  |    2 -
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 1804 +++++----------------
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h |  348 +---
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c  |  421 ++---
 5 files changed, 622 insertions(+), 1954 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 8e35dbaca76e..4d8f8aba0ea5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1414,7 +1414,6 @@ enum sp_rtnl_flag {
 
 enum bnx2x_iov_flag {
 	BNX2X_IOV_HANDLE_VF_MSG,
-	BNX2X_IOV_CONT_VFOP,
 	BNX2X_IOV_HANDLE_FLR,
 };
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b5c7f77e8108..a78edaccceee 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1857,8 +1857,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 #else
 	return;
 #endif
-	/* SRIOV: reschedule any 'in_progress' operations */
-	bnx2x_iov_sp_event(bp, cid);
 
 	smp_mb__before_atomic_inc();
 	atomic_inc(&bp->cq_spq_left);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 8e2b191234f1..df1507288b3c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -117,87 +117,7 @@ static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
 	return true;
 }
 
-/* VFOP - VF slow-path operation support */
-
-#define BNX2X_VFOP_FILTER_ADD_CNT_MAX	0x10000
-
 /* VFOP operations states */
-enum bnx2x_vfop_qctor_state {
-	BNX2X_VFOP_QCTOR_INIT,
-	BNX2X_VFOP_QCTOR_SETUP,
-	BNX2X_VFOP_QCTOR_INT_EN
-};
-
-enum bnx2x_vfop_qdtor_state {
-	BNX2X_VFOP_QDTOR_HALT,
-	BNX2X_VFOP_QDTOR_TERMINATE,
-	BNX2X_VFOP_QDTOR_CFCDEL,
-	BNX2X_VFOP_QDTOR_DONE
-};
-
-enum bnx2x_vfop_vlan_mac_state {
-	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
-	BNX2X_VFOP_VLAN_MAC_CLEAR,
-	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
-	BNX2X_VFOP_MAC_CONFIG_LIST,
-	BNX2X_VFOP_VLAN_CONFIG_LIST,
-	BNX2X_VFOP_VLAN_CONFIG_LIST_0
-};
-
-enum bnx2x_vfop_qsetup_state {
-	BNX2X_VFOP_QSETUP_CTOR,
-	BNX2X_VFOP_QSETUP_VLAN0,
-	BNX2X_VFOP_QSETUP_DONE
-};
-
-enum bnx2x_vfop_mcast_state {
-	BNX2X_VFOP_MCAST_DEL,
-	BNX2X_VFOP_MCAST_ADD,
-	BNX2X_VFOP_MCAST_CHK_DONE
-};
-enum bnx2x_vfop_qflr_state {
-	BNX2X_VFOP_QFLR_CLR_VLAN,
-	BNX2X_VFOP_QFLR_CLR_MAC,
-	BNX2X_VFOP_QFLR_TERMINATE,
-	BNX2X_VFOP_QFLR_DONE
-};
-
-enum bnx2x_vfop_flr_state {
-	BNX2X_VFOP_FLR_QUEUES,
-	BNX2X_VFOP_FLR_HW
-};
-
-enum bnx2x_vfop_close_state {
-	BNX2X_VFOP_CLOSE_QUEUES,
-	BNX2X_VFOP_CLOSE_HW
-};
-
-enum bnx2x_vfop_rxmode_state {
-	BNX2X_VFOP_RXMODE_CONFIG,
-	BNX2X_VFOP_RXMODE_DONE
-};
-
-enum bnx2x_vfop_qteardown_state {
-	BNX2X_VFOP_QTEARDOWN_RXMODE,
-	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
-	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
-	BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
-	BNX2X_VFOP_QTEARDOWN_QDTOR,
-	BNX2X_VFOP_QTEARDOWN_DONE
-};
-
-enum bnx2x_vfop_rss_state {
-	BNX2X_VFOP_RSS_CONFIG,
-	BNX2X_VFOP_RSS_DONE
-};
-
-enum bnx2x_vfop_tpa_state {
-	BNX2X_VFOP_TPA_CONFIG,
-	BNX2X_VFOP_TPA_DONE
-};
-
-#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
-
 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
 			      struct bnx2x_queue_init_params *init_params,
 			      struct bnx2x_queue_setup_params *setup_params,
@@ -241,7 +161,7 @@ void 
bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, void bnx2x_vfop_qctor_prep(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q, - struct bnx2x_vfop_qctor_params *p, + struct bnx2x_vf_queue_construct_params *p, unsigned long q_type) { struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; @@ -310,191 +230,85 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp, } } -/* VFOP queue construction */ -static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf) +static int bnx2x_vf_queue_create(struct bnx2x *bp, + struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_queue_construct_params *qctor) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor; - struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; - enum bnx2x_vfop_qctor_state state = vfop->state; - - bnx2x_vfop_reset_wq(vf); - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - switch (state) { - case BNX2X_VFOP_QCTOR_INIT: - - /* has this queue already been opened? */ - if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == - BNX2X_Q_LOGICAL_STATE_ACTIVE) { - DP(BNX2X_MSG_IOV, - "Entered qctor but queue was already up. Aborting gracefully\n"); - goto op_done; - } - - /* next state */ - vfop->state = BNX2X_VFOP_QCTOR_SETUP; - - q_params->cmd = BNX2X_Q_CMD_INIT; - vfop->rc = bnx2x_queue_state_change(bp, q_params); - - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_QCTOR_SETUP: - /* next state */ - vfop->state = BNX2X_VFOP_QCTOR_INT_EN; - - /* copy pre-prepared setup params to the queue-state params */ - vfop->op_p->qctor.qstate.params.setup = - vfop->op_p->qctor.prep_qsetup; - - q_params->cmd = BNX2X_Q_CMD_SETUP; - vfop->rc = bnx2x_queue_state_change(bp, q_params); + struct bnx2x_queue_state_params *q_params; + int rc = 0; - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); - case BNX2X_VFOP_QCTOR_INT_EN: + /* Prepare ramrod information */ + q_params = &qctor->qstate; + q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj); + set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags); - /* enable interrupts */ - bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx), - USTORM_ID, 0, IGU_INT_ENABLE, 0); - goto op_done; - default: - bnx2x_vfop_default(state); + if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == + BNX2X_Q_LOGICAL_STATE_ACTIVE) { + DP(BNX2X_MSG_IOV, "queue was already up. 
Aborting gracefully\n"); + goto out; } -op_err: - BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n", - vf->abs_vfid, args->qid, q_params->cmd, vfop->rc); -op_done: - bnx2x_vfop_end(bp, vf, vfop); -op_pending: - return; -} - -static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + /* Run Queue 'construction' ramrods */ + q_params->cmd = BNX2X_Q_CMD_INIT; + rc = bnx2x_queue_state_change(bp, q_params); + if (rc) + goto out; - vfop->args.qctor.qid = qid; - vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx); + memcpy(&q_params->params.setup, &qctor->prep_qsetup, + sizeof(struct bnx2x_queue_setup_params)); + q_params->cmd = BNX2X_Q_CMD_SETUP; + rc = bnx2x_queue_state_change(bp, q_params); + if (rc) + goto out; - bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT, - bnx2x_vfop_qctor, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor, - cmd->block); - } - return -ENOMEM; + /* enable interrupts */ + bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), + USTORM_ID, 0, IGU_INT_ENABLE, 0); +out: + return rc; } -/* VFOP queue destruction */ -static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf) +static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor; - struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; - enum bnx2x_vfop_qdtor_state state = vfop->state; - - bnx2x_vfop_reset_wq(vf); - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - switch (state) { - case BNX2X_VFOP_QDTOR_HALT: - - /* has this queue already been stopped? */ - if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == - BNX2X_Q_LOGICAL_STATE_STOPPED) { - DP(BNX2X_MSG_IOV, - "Entered qdtor but queue was already stopped. Aborting gracefully\n"); - - /* next state */ - vfop->state = BNX2X_VFOP_QDTOR_DONE; - - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - } - - /* next state */ - vfop->state = BNX2X_VFOP_QDTOR_TERMINATE; - - q_params->cmd = BNX2X_Q_CMD_HALT; - vfop->rc = bnx2x_queue_state_change(bp, q_params); - - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_QDTOR_TERMINATE: - /* next state */ - vfop->state = BNX2X_VFOP_QDTOR_CFCDEL; - - q_params->cmd = BNX2X_Q_CMD_TERMINATE; - vfop->rc = bnx2x_queue_state_change(bp, q_params); + enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT, + BNX2X_Q_CMD_TERMINATE, + BNX2X_Q_CMD_CFC_DEL}; + struct bnx2x_queue_state_params q_params; + int rc, i; - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); - case BNX2X_VFOP_QDTOR_CFCDEL: - /* next state */ - vfop->state = BNX2X_VFOP_QDTOR_DONE; + /* Prepare ramrod information */ + memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params)); + q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); - q_params->cmd = BNX2X_Q_CMD_CFC_DEL; - vfop->rc = bnx2x_queue_state_change(bp, q_params); + if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == + BNX2X_Q_LOGICAL_STATE_STOPPED) { + DP(BNX2X_MSG_IOV, "queue was already stopped. 
Aborting gracefully\n"); + goto out; + } - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); -op_err: - BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n", - vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc); -op_done: - case BNX2X_VFOP_QDTOR_DONE: - /* invalidate the context */ - if (qdtor->cxt) { - qdtor->cxt->ustorm_ag_context.cdu_usage = 0; - qdtor->cxt->xstorm_ag_context.cdu_reserved = 0; + /* Run Queue 'destruction' ramrods */ + for (i = 0; i < ARRAY_SIZE(cmds); i++) { + q_params.cmd = cmds[i]; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]); + return rc; } - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); } -op_pending: - return; -} - -static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - - if (vfop) { - struct bnx2x_queue_state_params *qstate = - &vf->op_params.qctor.qstate; - - memset(qstate, 0, sizeof(*qstate)); - qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); - - vfop->args.qdtor.qid = qid; - vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt); - - bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT, - bnx2x_vfop_qdtor, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, - cmd->block); - } else { - BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid); - return -ENOMEM; +out: + /* Clean Context */ + if (bnx2x_vfq(vf, qid, cxt)) { + bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0; + bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0; } + + return 0; } static void @@ -516,731 +330,291 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) BP_VFDB(bp)->vf_sbs_pool++; } -/* VFOP MAC/VLAN helpers */ -static inline void bnx2x_vfop_credit(struct bnx2x *bp, - struct bnx2x_vfop *vfop, - struct bnx2x_vlan_mac_obj *obj) +static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *obj, + atomic_t *counter) { - struct bnx2x_vfop_args_filters *args = &vfop->args.filters; - - /* update credit only if there is no error - * and a valid credit counter - */ - if (!vfop->rc && args->credit) { - struct list_head *pos; - int read_lock; - int cnt = 0; + struct list_head *pos; + int read_lock; + int cnt = 0; - read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); - if (read_lock) - DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); + read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); + if (read_lock) + DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); - list_for_each(pos, &obj->head) - cnt++; + list_for_each(pos, &obj->head) + cnt++; - if (!read_lock) - bnx2x_vlan_mac_h_read_unlock(bp, obj); + if (!read_lock) + bnx2x_vlan_mac_h_read_unlock(bp, obj); - atomic_set(args->credit, cnt); - } + atomic_set(counter, cnt); } -static int bnx2x_vfop_set_user_req(struct bnx2x *bp, - struct bnx2x_vfop_filter *pos, - struct bnx2x_vlan_mac_data *user_req) +static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid, bool drv_only, bool mac) { - user_req->cmd = pos->add ? 
BNX2X_VLAN_MAC_ADD : - BNX2X_VLAN_MAC_DEL; - - switch (pos->type) { - case BNX2X_VFOP_FILTER_MAC: - memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN); - break; - case BNX2X_VFOP_FILTER_VLAN: - user_req->u.vlan.vlan = pos->vid; - break; - default: - BNX2X_ERR("Invalid filter type, skipping\n"); - return 1; - } - return 0; -} - -static int bnx2x_vfop_config_list(struct bnx2x *bp, - struct bnx2x_vfop_filters *filters, - struct bnx2x_vlan_mac_ramrod_params *vlan_mac) -{ - struct bnx2x_vfop_filter *pos, *tmp; - struct list_head rollback_list, *filters_list = &filters->head; - struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req; - int rc = 0, cnt = 0; - - INIT_LIST_HEAD(&rollback_list); - - list_for_each_entry_safe(pos, tmp, filters_list, link) { - if (bnx2x_vfop_set_user_req(bp, pos, user_req)) - continue; + struct bnx2x_vlan_mac_ramrod_params ramrod; + int rc; - rc = bnx2x_config_vlan_mac(bp, vlan_mac); - if (rc >= 0) { - cnt += pos->add ? 1 : -1; - list_move(&pos->link, &rollback_list); - rc = 0; - } else if (rc == -EEXIST) { - rc = 0; - } else { - BNX2X_ERR("Failed to add a new vlan_mac command\n"); - break; - } - } + DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, + mac ? "MACs" : "VLANs"); - /* rollback if error or too many rules added */ - if (rc || cnt > filters->add_cnt) { - BNX2X_ERR("error or too many rules added. Performing rollback\n"); - list_for_each_entry_safe(pos, tmp, &rollback_list, link) { - pos->add = !pos->add; /* reverse op */ - bnx2x_vfop_set_user_req(bp, pos, user_req); - bnx2x_config_vlan_mac(bp, vlan_mac); - list_del(&pos->link); - } - cnt = 0; - if (!rc) - rc = -EINVAL; + /* Prepare ramrod params */ + memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); + if (mac) { + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + } else { + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); } - filters->add_cnt = cnt; - return rc; -} - -/* VFOP set VLAN/MAC */ -static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac; - struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj; - struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter; - - enum bnx2x_vfop_vlan_mac_state state = vfop->state; - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - bnx2x_vfop_reset_wq(vf); - - switch (state) { - case BNX2X_VFOP_VLAN_MAC_CLEAR: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - /* do delete */ - vfop->rc = obj->delete_all(bp, obj, - &vlan_mac->user_req.vlan_mac_flags, - &vlan_mac->ramrod_flags); - - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - /* do config */ - vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); - if (vfop->rc == -EEXIST) - vfop->rc = 0; + ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_VLAN_MAC_CHK_DONE: - vfop->rc = !!obj->raw.check_pending(&obj->raw); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); - - case BNX2X_VFOP_MAC_CONFIG_LIST: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - /* do list config */ - vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); - if (vfop->rc) - goto 
op_err; - - set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); - vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_VLAN_CONFIG_LIST: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - /* do list config */ - vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); - if (!vfop->rc) { - set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); - vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); - } - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); + if (drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); + else + set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); - default: - bnx2x_vfop_default(state); + /* Start deleting */ + rc = ramrod.vlan_mac_obj->delete_all(bp, + ramrod.vlan_mac_obj, + &ramrod.user_req.vlan_mac_flags, + &ramrod.ramrod_flags); + if (rc) { + BNX2X_ERR("Failed to delete all %s\n", + mac ? "MACs" : "VLANs"); + return rc; } -op_err: - BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc); -op_done: - kfree(filters); - bnx2x_vfop_credit(bp, vfop, obj); - bnx2x_vfop_end(bp, vf, vfop); -op_pending: - return; -} - -struct bnx2x_vfop_vlan_mac_flags { - bool drv_only; - bool dont_consume; - bool single_cmd; - bool add; -}; -static void -bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, - struct bnx2x_vfop_vlan_mac_flags *flags) -{ - struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req; - - memset(ramrod, 0, sizeof(*ramrod)); + /* Clear the vlan counters */ + if (!mac) + atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); - /* ramrod flags */ - if (flags->drv_only) - set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags); - if (flags->single_cmd) - set_bit(RAMROD_EXEC, &ramrod->ramrod_flags); - - /* mac_vlan flags */ - if (flags->dont_consume) - set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags); - - /* cmd */ - ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL; -} - -static inline void -bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, - struct bnx2x_vfop_vlan_mac_flags *flags) -{ - bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags); - set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags); + return 0; } -static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid, bool drv_only) +static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, + struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_mac_vlan_filter *filter, + bool drv_only) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - - if (vfop) { - struct bnx2x_vfop_args_filters filters = { - .multi_filter = NULL, /* single */ - .credit = NULL, /* consume credit */ - }; - struct bnx2x_vfop_vlan_mac_flags flags = { - .drv_only = drv_only, - .dont_consume = (filters.credit != NULL), - .single_cmd = true, - .add = false /* don't care */, - }; - struct bnx2x_vlan_mac_ramrod_params *ramrod = - &vf->op_params.vlan_mac; - - /* set ramrod params */ - bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); - - /* set object */ - ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); - - /* set extra args */ - vfop->args.filters = filters; + struct bnx2x_vlan_mac_ramrod_params ramrod; + int rc; - bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, - bnx2x_vfop_vlan_mac, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, - cmd->block); + DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", + vf->abs_vfid, filter->add ? "Adding" : "Deleting", + filter->type == BNX2X_VF_FILTER_MAC ? 
"MAC" : "VLAN"); + + /* Prepare ramrod params */ + memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); + if (filter->type == BNX2X_VF_FILTER_VLAN) { + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); + ramrod.user_req.u.vlan.vlan = filter->vid; + } else { + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); + } + ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD : + BNX2X_VLAN_MAC_DEL; + + /* Verify there are available vlan credits */ + if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && + (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= + vf_vlan_rules_cnt(vf))) { + BNX2X_ERR("No credits for vlan\n"); + return -ENOMEM; } - return -ENOMEM; -} - -int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - struct bnx2x_vfop_filters *macs, - int qid, bool drv_only) -{ - struct bnx2x_vfop *vfop; - if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) - return -EINVAL; - - vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - struct bnx2x_vfop_args_filters filters = { - .multi_filter = macs, - .credit = NULL, /* consume credit */ - }; - struct bnx2x_vfop_vlan_mac_flags flags = { - .drv_only = drv_only, - .dont_consume = (filters.credit != NULL), - .single_cmd = false, - .add = false, /* don't care since only the items in the - * filters list affect the sp operation, - * not the list itself - */ - }; - struct bnx2x_vlan_mac_ramrod_params *ramrod = - &vf->op_params.vlan_mac; - - /* set ramrod params */ - bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); - - /* set object */ - ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); - - /* set extra args */ - filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX; - vfop->args.filters = filters; - - bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST, - bnx2x_vfop_vlan_mac, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, - cmd->block); + set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); + if (drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); + else + set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); + + /* Add/Remove the filter */ + rc = bnx2x_config_vlan_mac(bp, &ramrod); + if (rc && rc != -EEXIST) { + BNX2X_ERR("Failed to %s %s\n", + filter->add ? "add" : "delete", + filter->type == BNX2X_VF_FILTER_MAC ? 
"MAC" : + "VLAN"); + return rc; } - return -ENOMEM; -} - -static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid, u16 vid, bool add) -{ - struct bnx2x_vfop *vfop; - if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) - return -EINVAL; + /* Update the vlan counters */ + if (filter->type == BNX2X_VF_FILTER_VLAN) + bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, + &bnx2x_vfq(vf, qid, vlan_count)); - vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - struct bnx2x_vfop_args_filters filters = { - .multi_filter = NULL, /* single command */ - .credit = &bnx2x_vfq(vf, qid, vlan_count), - }; - struct bnx2x_vfop_vlan_mac_flags flags = { - .drv_only = false, - .dont_consume = (filters.credit != NULL), - .single_cmd = true, - .add = add, - }; - struct bnx2x_vlan_mac_ramrod_params *ramrod = - &vf->op_params.vlan_mac; - - /* set ramrod params */ - bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); - ramrod->user_req.u.vlan.vlan = vid; - - /* set object */ - ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); - - /* set extra args */ - vfop->args.filters = filters; - - bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, - bnx2x_vfop_vlan_mac, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, - cmd->block); - } - return -ENOMEM; + return 0; } -static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid, bool drv_only) +int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mac_vlan_filters *filters, + int qid, bool drv_only) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - - if (vfop) { - struct bnx2x_vfop_args_filters filters = { - .multi_filter = NULL, /* single command */ - .credit = &bnx2x_vfq(vf, qid, vlan_count), - }; - struct bnx2x_vfop_vlan_mac_flags flags = { - .drv_only = drv_only, - .dont_consume = (filters.credit != NULL), - .single_cmd = true, - .add = false, /* don't care */ - }; - struct bnx2x_vlan_mac_ramrod_params *ramrod = - &vf->op_params.vlan_mac; - - /* set ramrod params */ - bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); + int rc = 0, i; - /* set object */ - ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); - - /* set extra args */ - vfop->args.filters = filters; - - bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, - bnx2x_vfop_vlan_mac, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, - cmd->block); - } - return -ENOMEM; -} - -int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - struct bnx2x_vfop_filters *vlans, - int qid, bool drv_only) -{ - struct bnx2x_vfop *vfop; + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) return -EINVAL; - vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - struct bnx2x_vfop_args_filters filters = { - .multi_filter = vlans, - .credit = &bnx2x_vfq(vf, qid, vlan_count), - }; - struct bnx2x_vfop_vlan_mac_flags flags = { - .drv_only = drv_only, - .dont_consume = (filters.credit != NULL), - .single_cmd = false, - .add = false, /* don't care */ - }; - struct bnx2x_vlan_mac_ramrod_params *ramrod = - &vf->op_params.vlan_mac; - - /* set ramrod params */ - bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); - - /* set object */ - ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); - - /* set extra args */ - filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) - - atomic_read(filters.credit); - - vfop->args.filters = filters; - - bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST, - 
bnx2x_vfop_vlan_mac, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, - cmd->block); + /* Prepare ramrod params */ + for (i = 0; i < filters->count; i++) { + rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, + &filters->filters[i], drv_only); + if (rc) + break; } - return -ENOMEM; -} - -/* VFOP queue setup (queue constructor + set vlan 0) */ -static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - int qid = vfop->args.qctor.qid; - enum bnx2x_vfop_qsetup_state state = vfop->state; - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vfop_qsetup, - .block = false, - }; - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - switch (state) { - case BNX2X_VFOP_QSETUP_CTOR: - /* init the queue ctor command */ - vfop->state = BNX2X_VFOP_QSETUP_VLAN0; - vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid); - if (vfop->rc) - goto op_err; - return; - - case BNX2X_VFOP_QSETUP_VLAN0: - /* skip if non-leading or FPGA/EMU*/ - if (qid) - goto op_done; - /* init the queue set-vlan command (for vlan 0) */ - vfop->state = BNX2X_VFOP_QSETUP_DONE; - vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true); - if (vfop->rc) - goto op_err; - return; -op_err: - BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc); -op_done: - case BNX2X_VFOP_QSETUP_DONE: - vf->cfg_flags |= VF_CFG_VLAN; - bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, - BNX2X_MSG_IOV); - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); + /* Rollback if needed */ + if (i != filters->count) { + BNX2X_ERR("Managed only %d/%d filters - rolling back\n", + i, filters->count + 1); + while (--i >= 0) { + filters->filters[i].add = !filters->filters[i].add; + bnx2x_vf_mac_vlan_config(bp, vf, qid, + &filters->filters[i], + drv_only); + } } -} - -int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - vfop->args.qctor.qid = qid; + /* It's our responsibility to free the filters */ + kfree(filters); - bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR, - bnx2x_vfop_qsetup, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup, - cmd->block); - } - return -ENOMEM; + return rc; } -/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */ -static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_queue_construct_params *qctor) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - int qid = vfop->args.qx.qid; - enum bnx2x_vfop_qflr_state state = vfop->state; - struct bnx2x_queue_state_params *qstate; - struct bnx2x_vfop_cmd cmd; + int rc; - bnx2x_vfop_reset_wq(vf); + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); - if (vfop->rc < 0) + rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); + if (rc) goto op_err; - DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state); - - cmd.done = bnx2x_vfop_qflr; - cmd.block = false; - - switch (state) { - case BNX2X_VFOP_QFLR_CLR_VLAN: - /* vlan-clear-all: driver-only, don't consume credit */ - vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; - - /* the vlan_mac vfop will re-schedule us */ - vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true); - if (vfop->rc) - goto op_err; - return; + /* Configure vlan0 for leading queue */ + if (!qid) { + struct bnx2x_vf_mac_vlan_filter 
filter; - case BNX2X_VFOP_QFLR_CLR_MAC: - /* mac-clear-all: driver only consume credit */ - vfop->state = BNX2X_VFOP_QFLR_TERMINATE; - /* the vlan_mac vfop will re-schedule us */ - vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true); - if (vfop->rc) + memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); + filter.type = BNX2X_VF_FILTER_VLAN; + filter.add = true; + filter.vid = 0; + rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); + if (rc) goto op_err; - return; - - case BNX2X_VFOP_QFLR_TERMINATE: - qstate = &vfop->op_p->qctor.qstate; - memset(qstate , 0, sizeof(*qstate)); - qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); - vfop->state = BNX2X_VFOP_QFLR_DONE; - - DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n", - vf->abs_vfid, qstate->q_obj->state); - - if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) { - qstate->q_obj->state = BNX2X_Q_STATE_STOPPED; - qstate->cmd = BNX2X_Q_CMD_TERMINATE; - vfop->rc = bnx2x_queue_state_change(bp, qstate); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND); - } else { - goto op_done; - } + } + /* Schedule the configuration of any pending vlan filters */ + vf->cfg_flags |= VF_CFG_VLAN; + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, + BNX2X_MSG_IOV); + return 0; op_err: - BNX2X_ERR("QFLR[%d:%d] error: rc %d\n", - vf->abs_vfid, qid, vfop->rc); -op_done: - case BNX2X_VFOP_QFLR_DONE: - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); - } -op_pending: - return; + BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); + return rc; } -static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, +static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - - if (vfop) { - vfop->args.qx.qid = qid; - if ((qid == LEADING_IDX) && - bnx2x_validate_vf_sp_objs(bp, vf, false)) - bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN, - bnx2x_vfop_qflr, cmd->done); - else - bnx2x_vfop_opset(BNX2X_VFOP_QFLR_TERMINATE, - bnx2x_vfop_qflr, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr, - cmd->block); - } - return -ENOMEM; -} - -/* VFOP multi-casts */ -static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast; - struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw; - struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list; - enum bnx2x_vfop_mcast_state state = vfop->state; - int i; + int rc; - bnx2x_vfop_reset_wq(vf); + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); - if (vfop->rc < 0) - goto op_err; + /* If needed, clean the filtering data base */ + if ((qid == LEADING_IDX) && + bnx2x_validate_vf_sp_objs(bp, vf, false)) { + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); + if (rc) + goto op_err; + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); + if (rc) + goto op_err; + } - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - switch (state) { - case BNX2X_VFOP_MCAST_DEL: - /* clear existing mcasts */ - vfop->state = (args->mc_num) ? 
BNX2X_VFOP_MCAST_ADD - : BNX2X_VFOP_MCAST_CHK_DONE; - mcast->mcast_list_len = vf->mcast_list_len; - vf->mcast_list_len = args->mc_num; - vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_MCAST_ADD: - if (raw->check_pending(raw)) - goto op_pending; - - /* update mcast list on the ramrod params */ - INIT_LIST_HEAD(&mcast->mcast_list); - for (i = 0; i < args->mc_num; i++) - list_add_tail(&(args->mc[i].link), - &mcast->mcast_list); - mcast->mcast_list_len = args->mc_num; + /* Terminate queue */ + if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) { + struct bnx2x_queue_state_params qstate; - /* add new mcasts */ - vfop->state = BNX2X_VFOP_MCAST_CHK_DONE; - vfop->rc = bnx2x_config_mcast(bp, mcast, - BNX2X_MCAST_CMD_ADD); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); - - case BNX2X_VFOP_MCAST_CHK_DONE: - vfop->rc = raw->check_pending(raw) ? 1 : 0; - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); - default: - bnx2x_vfop_default(state); + memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); + qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + qstate.q_obj->state = BNX2X_Q_STATE_STOPPED; + qstate.cmd = BNX2X_Q_CMD_TERMINATE; + set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); + rc = bnx2x_queue_state_change(bp, &qstate); + if (rc) + goto op_err; } -op_err: - BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc); -op_done: - kfree(args->mc); - bnx2x_vfop_end(bp, vf, vfop); -op_pending: - return; -} -int bnx2x_vfop_mcast_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - bnx2x_mac_addr_t *mcasts, - int mcast_num, bool drv_only) -{ - struct bnx2x_vfop *vfop = NULL; - size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem); - struct bnx2x_mcast_list_elem *mc = mc_sz ? 
kzalloc(mc_sz, GFP_KERNEL) :
-					    NULL;
-
-	if (!mc_sz || mc) {
-		vfop = bnx2x_vfop_add(bp, vf);
-		if (vfop) {
-			int i;
-			struct bnx2x_mcast_ramrod_params *ramrod =
-				&vf->op_params.mcast;
-
-			/* set ramrod params */
-			memset(ramrod, 0, sizeof(*ramrod));
-			ramrod->mcast_obj = &vf->mcast_obj;
-			if (drv_only)
-				set_bit(RAMROD_DRV_CLR_ONLY,
-					&ramrod->ramrod_flags);
-
-			/* copy mcasts pointers */
-			vfop->args.mc_list.mc_num = mcast_num;
-			vfop->args.mc_list.mc = mc;
-			for (i = 0; i < mcast_num; i++)
-				mc[i].mac = mcasts[i];
-
-			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
-					 bnx2x_vfop_mcast, cmd->done);
-			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
-						     cmd->block);
-		} else {
-			kfree(mc);
-		}
-	}
-	return -ENOMEM;
+	return 0;
+op_err:
+	BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+	return rc;
 }

-/* VFOP rx-mode */
-static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
 {
-	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
-	enum bnx2x_vfop_rxmode_state state = vfop->state;
+	struct bnx2x_mcast_list_elem *mc = NULL;
+	struct bnx2x_mcast_ramrod_params mcast;
+	int rc, i;

-	bnx2x_vfop_reset_wq(vf);
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

-	if (vfop->rc < 0)
-		goto op_err;
+	/* Prepare Multicast command */
+	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
+	mcast.mcast_obj = &vf->mcast_obj;
+	if (drv_only)
+		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
+	else
+		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
+	if (mc_num) {
+		mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
+			     GFP_KERNEL);
+		if (!mc) {
+			BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n");
+			return -ENOMEM;
+		}
+	}

-	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+	/* clear existing mcasts */
+	mcast.mcast_list_len = vf->mcast_list_len;
+	vf->mcast_list_len = mc_num;
+	rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
+	if (rc) {
+		BNX2X_ERR("Failed to remove multicasts\n");
+		return rc;
+	}

-	switch (state) {
-	case BNX2X_VFOP_RXMODE_CONFIG:
-		/* next state */
-		vfop->state = BNX2X_VFOP_RXMODE_DONE;
+	/* update mcast list on the ramrod params */
+	if (mc_num) {
+		INIT_LIST_HEAD(&mcast.mcast_list);
+		for (i = 0; i < mc_num; i++) {
+			mc[i].mac = mcasts[i];
+			list_add_tail(&mc[i].link,
+				      &mcast.mcast_list);
+		}

-		/* record the accept flags in vfdb so hypervisor can modify them
-		 * if necessary
-		 */
-		bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
-			ramrod->rx_accept_flags;
-		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
-		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
-op_done:
-	case BNX2X_VFOP_RXMODE_DONE:
-		bnx2x_vfop_end(bp, vf, vfop);
-		return;
-	default:
-		bnx2x_vfop_default(state);
+		/* add new mcasts */
+		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
+		if (rc)
+			BNX2X_ERR("Failed to add multicasts\n");
+		kfree(mc);
 	}
-op_pending:
-	return;
+
+	return rc;
 }

 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
@@ -1268,121 +642,56 @@ static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
 	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
 }

-int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
-			  struct bnx2x_virtf *vf,
-			  struct bnx2x_vfop_cmd *cmd,
-			  int qid, unsigned long accept_flags)
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct 
bnx2x_virtf *vf, + int qid, unsigned long accept_flags) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - - if (vfop) { - struct bnx2x_rx_mode_ramrod_params *ramrod = - &vf->op_params.rx_mode; + struct bnx2x_rx_mode_ramrod_params ramrod; - bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags); + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); - bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, - bnx2x_vfop_rxmode, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode, - cmd->block); - } - return -ENOMEM; + bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags); + set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); + vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags; + return bnx2x_config_rx_mode(bp, &ramrod); } -/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs, - * queue destructor) - */ -static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - int qid = vfop->args.qx.qid; - enum bnx2x_vfop_qteardown_state state = vfop->state; - struct bnx2x_vfop_cmd cmd; - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - cmd.done = bnx2x_vfop_qdown; - cmd.block = false; - - switch (state) { - case BNX2X_VFOP_QTEARDOWN_RXMODE: - /* Drop all */ - if (bnx2x_validate_vf_sp_objs(bp, vf, true)) - vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN; - else - vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR; - vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0); - if (vfop->rc) - goto op_err; - return; - - case BNX2X_VFOP_QTEARDOWN_CLR_VLAN: - /* vlan-clear-all: don't consume credit */ - vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC; - vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false); - if (vfop->rc) - goto op_err; - return; - - case BNX2X_VFOP_QTEARDOWN_CLR_MAC: - /* mac-clear-all: consume credit */ - vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST; - vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false); - if (vfop->rc) - goto op_err; - return; + int rc; - case BNX2X_VFOP_QTEARDOWN_CLR_MCAST: - vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR; - vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false); - if (vfop->rc) - goto op_err; - return; + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); - case BNX2X_VFOP_QTEARDOWN_QDTOR: - /* run the queue destruction flow */ - DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n"); - vfop->state = BNX2X_VFOP_QTEARDOWN_DONE; - DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n"); - vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid); - DP(BNX2X_MSG_IOV, "returned from cmd\n"); - if (vfop->rc) + /* Remove all classification configuration for leading queue */ + if (qid == LEADING_IDX) { + rc = bnx2x_vf_rxmode(bp, vf, qid, 0); + if (rc) goto op_err; - return; -op_err: - BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n", - vf->abs_vfid, qid, vfop->rc); - case BNX2X_VFOP_QTEARDOWN_DONE: - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); - } -} - -int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - - /* for non leading queues skip directly to qdown sate */ - if (vfop) { - vfop->args.qx.qid = qid; - bnx2x_vfop_opset(qid == LEADING_IDX ? 
- BNX2X_VFOP_QTEARDOWN_RXMODE : - BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown, - cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown, - cmd->block); + /* Remove filtering if feasible */ + if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, + false, false); + if (rc) + goto op_err; + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, + false, true); + if (rc) + goto op_err; + rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); + if (rc) + goto op_err; + } } - return -ENOMEM; + /* Destroy queue */ + rc = bnx2x_vf_queue_destroy(bp, vf, qid); + if (rc) + goto op_err; + return rc; +op_err: + BNX2X_ERR("vf[%d:%d] error: rc %d\n", + vf->abs_vfid, qid, rc); + return rc; } /* VF enable primitives @@ -1582,120 +891,63 @@ static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_tx_hw_flushed(bp, poll_cnt); } -static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) +static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; - enum bnx2x_vfop_flr_state state = vfop->state; - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vfop_flr, - .block = false, - }; - - if (vfop->rc < 0) - goto op_err; + int rc, i; - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); - switch (state) { - case BNX2X_VFOP_FLR_QUEUES: - /* the cleanup operations are valid if and only if the VF - * was first acquired. - */ - if (++(qx->qid) < vf_rxq_count(vf)) { - vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd, - qx->qid); - if (vfop->rc) - goto op_err; - return; - } - /* remove multicasts */ - vfop->state = BNX2X_VFOP_FLR_HW; - vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, - 0, true); - if (vfop->rc) - goto op_err; - return; - case BNX2X_VFOP_FLR_HW: + /* the cleanup operations are valid if and only if the VF + * was first acquired. + */ + for (i = 0; i < vf_rxq_count(vf); i++) { + rc = bnx2x_vf_queue_flr(bp, vf, i); + if (rc) + goto out; + } - /* dispatch final cleanup and wait for HW queues to flush */ - bnx2x_vf_flr_clnup_hw(bp, vf); + /* remove multicasts */ + bnx2x_vf_mcast(bp, vf, NULL, 0, true); - /* release VF resources */ - bnx2x_vf_free_resc(bp, vf); + /* dispatch final cleanup and wait for HW queues to flush */ + bnx2x_vf_flr_clnup_hw(bp, vf); - /* re-open the mailbox */ - bnx2x_vf_enable_mbx(bp, vf->abs_vfid); + /* release VF resources */ + bnx2x_vf_free_resc(bp, vf); - goto op_done; - default: - bnx2x_vfop_default(state); - } -op_err: - BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc); -op_done: - vf->flr_clnup_stage = VF_FLR_ACK; - bnx2x_vfop_end(bp, vf, vfop); - bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); -} - -static int bnx2x_vfop_flr_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - vfop_handler_t done) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - vfop->args.qx.qid = -1; /* loop */ - bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES, - bnx2x_vfop_flr, done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false); - } - return -ENOMEM; + /* re-open the mailbox */ + bnx2x_vf_enable_mbx(bp, vf->abs_vfid); + return; +out: + BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n", + vf->abs_vfid, i, rc); } -static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf) +static void bnx2x_vf_flr_clnup(struct bnx2x *bp) { - int i = prev_vf ? 
prev_vf->index + 1 : 0; struct bnx2x_virtf *vf; + int i; - /* find next VF to cleanup */ -next_vf_to_clean: - for (; - i < BNX2X_NR_VIRTFN(bp) && - (bnx2x_vf(bp, i, state) != VF_RESET || - bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN); - i++) - ; + for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { + /* VF should be RESET & in FLR cleanup states */ + if (bnx2x_vf(bp, i, state) != VF_RESET || + !bnx2x_vf(bp, i, flr_clnup_stage)) + continue; - DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i, - BNX2X_NR_VIRTFN(bp)); + DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", + i, BNX2X_NR_VIRTFN(bp)); - if (i < BNX2X_NR_VIRTFN(bp)) { vf = BP_VF(bp, i); /* lock the vf pf channel */ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); /* invoke the VF FLR SM */ - if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) { - BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n", - vf->abs_vfid); + bnx2x_vf_flr(bp, vf); - /* mark the VF to be ACKED and continue */ - vf->flr_clnup_stage = VF_FLR_ACK; - goto next_vf_to_clean; - } - return; - } - - /* we are done, update vf records */ - for_each_vf(bp, i) { - vf = BP_VF(bp, i); - - if (vf->flr_clnup_stage != VF_FLR_ACK) - continue; - - vf->flr_clnup_stage = VF_FLR_EPILOG; + /* mark the VF to be ACKED and continue */ + vf->flr_clnup_stage = false; + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); } /* Acknowledge the handled VFs. @@ -1745,7 +997,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp) if (reset) { /* set as reset and ready for cleanup */ vf->state = VF_RESET; - vf->flr_clnup_stage = VF_FLR_CLN; + vf->flr_clnup_stage = true; DP(BNX2X_MSG_IOV, "Initiating Final cleanup for VF %d\n", @@ -1754,7 +1006,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp) } /* do the FLR cleanup for all marked VFs*/ - bnx2x_vf_flr_clnup(bp, NULL); + bnx2x_vf_flr_clnup(bp); } /* IOV global initialization routines */ @@ -2021,7 +1273,6 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, bnx2x_vf(bp, i, index) = i; bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; bnx2x_vf(bp, i, state) = VF_FREE; - INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head)); mutex_init(&bnx2x_vf(bp, i, op_mutex)); bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; } @@ -2288,7 +1539,7 @@ int bnx2x_iov_chip_cleanup(struct bnx2x *bp) /* release all the VFs */ for_each_vf(bp, i) - bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */ + bnx2x_vf_release(bp, BP_VF(bp, i)); return 0; } @@ -2378,6 +1629,12 @@ void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, smp_mb__after_clear_bit(); } +static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw); +} + int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) { struct bnx2x_virtf *vf; @@ -2402,6 +1659,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) case EVENT_RING_OPCODE_CLASSIFICATION_RULES: case EVENT_RING_OPCODE_MULTICAST_RULES: case EVENT_RING_OPCODE_FILTERS_RULES: + case EVENT_RING_OPCODE_RSS_UPDATE_RULES: cid = (elem->message.data.eth_event.echo & BNX2X_SWCID_MASK); DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); @@ -2466,13 +1724,15 @@ get_vf: vf->abs_vfid, qidx); bnx2x_vf_handle_filters_eqe(bp, vf); break; + case EVENT_RING_OPCODE_RSS_UPDATE_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_rss_update_eqe(bp, vf); case EVENT_RING_OPCODE_VF_FLR: case EVENT_RING_OPCODE_MALICIOUS_VF: /* Do nothing for now */ return 0; } 
- /* SRIOV: reschedule any 'in_progress' operations */ - bnx2x_iov_sp_event(bp, cid); return 0; } @@ -2509,22 +1769,6 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, } } -void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid) -{ - struct bnx2x_virtf *vf; - - /* check if the cid is the VF range */ - if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid)) - return; - - vf = bnx2x_vf_by_cid(bp, vf_cid); - if (vf) { - /* set in_progress flag */ - atomic_set(&vf->op_in_progress, 1); - bnx2x_schedule_iov_task(bp, BNX2X_IOV_CONT_VFOP); - } -} - void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) { int i; @@ -2606,33 +1850,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; } -void bnx2x_iov_vfop_cont(struct bnx2x *bp) -{ - int i; - - if (!IS_SRIOV(bp)) - return; - /* Iterate over all VFs and invoke state transition for VFs with - * 'in-progress' slow-path operations - */ - DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP), - "searching for pending vf operations\n"); - for_each_vf(bp, i) { - struct bnx2x_virtf *vf = BP_VF(bp, i); - - if (!vf) { - BNX2X_ERR("VF was null! skipping...\n"); - continue; - } - - if (!list_empty(&vf->op_list_head) && - atomic_read(&vf->op_in_progress)) { - DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); - bnx2x_vfop_cur(bp, vf)->transition(bp, vf); - } - } -} - static inline struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) { @@ -2868,52 +2085,26 @@ static void bnx2x_set_vf_state(void *cookie) p->vf->state = p->state; } -/* VFOP close (teardown the queues, delete mcasts and close HW) */ -static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; - enum bnx2x_vfop_close_state state = vfop->state; - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vfop_close, - .block = false, - }; - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + int rc = 0, i; - switch (state) { - case BNX2X_VFOP_CLOSE_QUEUES: - - if (++(qx->qid) < vf_rxq_count(vf)) { - vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid); - if (vfop->rc) - goto op_err; - return; - } - vfop->state = BNX2X_VFOP_CLOSE_HW; - vfop->rc = 0; - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); - case BNX2X_VFOP_CLOSE_HW: + /* Close all queues */ + for (i = 0; i < vf_rxq_count(vf); i++) { + rc = bnx2x_vf_queue_teardown(bp, vf, i); + if (rc) + goto op_err; + } - /* disable the interrupts */ - DP(BNX2X_MSG_IOV, "disabling igu\n"); - bnx2x_vf_igu_disable(bp, vf); + /* disable the interrupts */ + DP(BNX2X_MSG_IOV, "disabling igu\n"); + bnx2x_vf_igu_disable(bp, vf); - /* disable the VF */ - DP(BNX2X_MSG_IOV, "clearing qtbl\n"); - bnx2x_vf_clr_qtbl(bp, vf); - - goto op_done; - default: - bnx2x_vfop_default(state); - } -op_err: - BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); -op_done: + /* disable the VF */ + DP(BNX2X_MSG_IOV, "clearing qtbl\n"); + bnx2x_vf_clr_qtbl(bp, vf); /* need to make sure there are no outstanding stats ramrods which may * cause the device to access the VF's stats buffer which it will free @@ -2928,43 +2119,20 @@ op_done: } DP(BNX2X_MSG_IOV, "set state to acquired\n"); - bnx2x_vfop_end(bp, vf, vfop); -op_pending: - /* Not supported at the moment; Exists for macros only */ - return; -} -int bnx2x_vfop_close_cmd(struct bnx2x *bp, - 
struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - vfop->args.qx.qid = -1; /* loop */ - bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES, - bnx2x_vfop_close, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close, - cmd->block); - } - return -ENOMEM; + return 0; +op_err: + BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc); + return rc; } /* VF release can be called either: 1. The VF was acquired but * not enabled 2. the vf was enabled or in the process of being * enabled */ -static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vfop_release, - .block = false, - }; - - DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); - - if (vfop->rc < 0) - goto op_err; + int rc; DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, vf->state == VF_FREE ? "Free" : @@ -2975,193 +2143,87 @@ static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) switch (vf->state) { case VF_ENABLED: - vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); - if (vfop->rc) + rc = bnx2x_vf_close(bp, vf); + if (rc) goto op_err; - return; - + /* Fallthrough to release resources */ case VF_ACQUIRED: DP(BNX2X_MSG_IOV, "about to free resources\n"); bnx2x_vf_free_resc(bp, vf); - DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); - goto op_done; + break; case VF_FREE: case VF_RESET: - /* do nothing */ - goto op_done; default: - bnx2x_vfop_default(vf->state); - } -op_err: - BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc); -op_done: - bnx2x_vfop_end(bp, vf, vfop); -} - -static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - enum bnx2x_vfop_rss_state state; - - if (!vfop) { - BNX2X_ERR("vfop was null\n"); - return; + break; } - - state = vfop->state; - bnx2x_vfop_reset_wq(vf); - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - switch (state) { - case BNX2X_VFOP_RSS_CONFIG: - /* next state */ - vfop->state = BNX2X_VFOP_RSS_DONE; - bnx2x_config_rss(bp, &vfop->op_p->rss); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); + return 0; op_err: - BNX2X_ERR("RSS error: rc %d\n", vfop->rc); -op_done: - case BNX2X_VFOP_RSS_DONE: - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); - } -op_pending: - return; -} - -int bnx2x_vfop_release_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - bnx2x_vfop_opset(-1, /* use vf->state */ - bnx2x_vfop_release, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release, - cmd->block); - } - return -ENOMEM; + BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc); + return rc; } -int bnx2x_vfop_rss_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd) +int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_config_rss_params *rss) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - - if (vfop) { - bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss, - cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss, - cmd->block); - } - return -ENOMEM; + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags); + return bnx2x_config_rss(bp, rss); } -/* VFOP tpa update, send 
update on all queues */ -static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vfpf_tpa_tlv *tlv, + struct bnx2x_queue_update_tpa_params *params) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa; - enum bnx2x_vfop_tpa_state state = vfop->state; - - bnx2x_vfop_reset_wq(vf); + aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr; + struct bnx2x_queue_state_params qstate; + int qid, rc = 0; - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n", - vf->abs_vfid, tpa_args->qid, - state); - - switch (state) { - case BNX2X_VFOP_TPA_CONFIG: - - if (tpa_args->qid < vf_rxq_count(vf)) { - struct bnx2x_queue_state_params *qstate = - &vf->op_params.qstate; + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); - qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj); + /* Set ramrod params */ + memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); + memcpy(&qstate.params.update_tpa, params, + sizeof(struct bnx2x_queue_update_tpa_params)); + qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA; + set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); - /* The only thing that changes for the ramrod params - * between calls is the sge_map - */ - qstate->params.update_tpa.sge_map = - tpa_args->sge_map[tpa_args->qid]; - - DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n", - tpa_args->qid, - U64_HI(qstate->params.update_tpa.sge_map), - U64_LO(qstate->params.update_tpa.sge_map)); - qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA; - vfop->rc = bnx2x_queue_state_change(bp, qstate); - - tpa_args->qid++; - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + for (qid = 0; qid < vf_rxq_count(vf); qid++) { + qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + qstate.params.update_tpa.sge_map = sge_addr[qid]; + DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n", + vf->abs_vfid, qid, U64_HI(sge_addr[qid]), + U64_LO(sge_addr[qid])); + rc = bnx2x_queue_state_change(bp, &qstate); + if (rc) { + BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n", + U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]), + vf->abs_vfid, qid); + return rc; } - vfop->state = BNX2X_VFOP_TPA_DONE; - vfop->rc = 0; - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); -op_err: - BNX2X_ERR("TPA update error: rc %d\n", vfop->rc); -op_done: - case BNX2X_VFOP_TPA_DONE: - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); } -op_pending: - return; -} - -int bnx2x_vfop_tpa_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - struct vfpf_tpa_tlv *tpa_tlv) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - vfop->args.qx.qid = 0; /* loop */ - memcpy(&vfop->args.tpa.sge_map, - tpa_tlv->tpa_client_info.sge_addr, - sizeof(vfop->args.tpa.sge_map)); - bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG, - bnx2x_vfop_tpa, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa, - cmd->block); - } - return -ENOMEM; + return rc; } /* VF release ~ VF close + VF release-resources * Release is the ultimate SW shutdown and is called whenever an * irrecoverable error is encountered. 
*/ -void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) +int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) { - struct bnx2x_vfop_cmd cmd = { - .done = NULL, - .block = block, - }; int rc; DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); - rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); + rc = bnx2x_vf_free(bp, vf); if (rc) WARN(rc, "VF[%d] Failed to allocate resources for release op- rc=%d\n", vf->abs_vfid, rc); + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); + return rc; } static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, @@ -3889,10 +2951,6 @@ void bnx2x_iov_task(struct work_struct *work) &bp->iov_task_state)) bnx2x_vf_handle_flr_event(bp); - if (test_and_clear_bit(BNX2X_IOV_CONT_VFOP, - &bp->iov_task_state)) - bnx2x_iov_vfop_cont(bp); - if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, &bp->iov_task_state)) bnx2x_vf_mbx(bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 87f7c9743f71..db73a247ecfb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -88,113 +88,32 @@ struct bnx2x_vf_queue { bool sp_initialized; }; -/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: - * q-init, q-setup and SB index +/* struct bnx2x_vf_queue_construct_params - prepare queue construction + * parameters: q-init, q-setup and SB index */ -struct bnx2x_vfop_qctor_params { +struct bnx2x_vf_queue_construct_params { struct bnx2x_queue_state_params qstate; struct bnx2x_queue_setup_params prep_qsetup; }; -/* VFOP parameters (one copy per VF) */ -union bnx2x_vfop_params { - struct bnx2x_vlan_mac_ramrod_params vlan_mac; - struct bnx2x_rx_mode_ramrod_params rx_mode; - struct bnx2x_mcast_ramrod_params mcast; - struct bnx2x_config_rss_params rss; - struct bnx2x_vfop_qctor_params qctor; - struct bnx2x_queue_state_params qstate; -}; - /* forward */ struct bnx2x_virtf; /* VFOP definitions */ -typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf); -struct bnx2x_vfop_cmd { - vfop_handler_t done; - bool block; -}; - -/* VFOP queue filters command additional arguments */ -struct bnx2x_vfop_filter { - struct list_head link; +struct bnx2x_vf_mac_vlan_filter { int type; -#define BNX2X_VFOP_FILTER_MAC 1 -#define BNX2X_VFOP_FILTER_VLAN 2 +#define BNX2X_VF_FILTER_MAC 1 +#define BNX2X_VF_FILTER_VLAN 2 bool add; u8 *mac; u16 vid; }; -struct bnx2x_vfop_filters { - int add_cnt; - struct list_head head; - struct bnx2x_vfop_filter filters[]; -}; - -/* transient list allocated, built and saved until its - * passed to the SP-VERBs layer. 
- */ -struct bnx2x_vfop_args_mcast { - int mc_num; - struct bnx2x_mcast_list_elem *mc; -}; - -struct bnx2x_vfop_args_qctor { - int qid; - u16 sb_idx; -}; - -struct bnx2x_vfop_args_qdtor { - int qid; - struct eth_context *cxt; -}; - -struct bnx2x_vfop_args_defvlan { - int qid; - bool enable; - u16 vid; - u8 prio; -}; - -struct bnx2x_vfop_args_qx { - int qid; - bool en_add; -}; - -struct bnx2x_vfop_args_filters { - struct bnx2x_vfop_filters *multi_filter; - atomic_t *credit; /* non NULL means 'don't consume credit' */ -}; - -struct bnx2x_vfop_args_tpa { - int qid; - dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF]; -}; - -union bnx2x_vfop_args { - struct bnx2x_vfop_args_mcast mc_list; - struct bnx2x_vfop_args_qctor qctor; - struct bnx2x_vfop_args_qdtor qdtor; - struct bnx2x_vfop_args_defvlan defvlan; - struct bnx2x_vfop_args_qx qx; - struct bnx2x_vfop_args_filters filters; - struct bnx2x_vfop_args_tpa tpa; -}; - -struct bnx2x_vfop { - struct list_head link; - int rc; /* return code */ - int state; /* next state */ - union bnx2x_vfop_args args; /* extra arguments */ - union bnx2x_vfop_params *op_p; /* ramrod params */ - - /* state machine callbacks */ - vfop_handler_t transition; - vfop_handler_t done; +struct bnx2x_vf_mac_vlan_filters { + int count; + struct bnx2x_vf_mac_vlan_filter filters[]; }; /* vf context */ @@ -214,15 +133,7 @@ struct bnx2x_virtf { #define VF_ENABLED 2 /* VF Enabled */ #define VF_RESET 3 /* VF FLR'd, pending cleanup */ - /* non 0 during flr cleanup */ - u8 flr_clnup_stage; -#define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup' - * sans the end-wait - */ -#define VF_FLR_ACK 2 /* ACK flr notification */ -#define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW - * ~ final cleanup' end wait - */ + bool flr_clnup_stage; /* true during flr cleanup */ /* dma */ dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ @@ -286,11 +197,6 @@ struct bnx2x_virtf { struct bnx2x_rss_config_obj rss_conf_obj; /* slow-path operations */ - atomic_t op_in_progress; - int op_rc; - bool op_wait_blocking; - struct list_head op_list_head; - union bnx2x_vfop_params op_params; struct mutex op_mutex; /* one vfop at a time mutex */ enum channel_tlvs op_current; }; @@ -477,7 +383,6 @@ void bnx2x_iov_init_dq(struct bnx2x *bp); void bnx2x_iov_init_dmae(struct bnx2x *bp); void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, struct bnx2x_queue_sp_obj **q_obj); -void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid); int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); void bnx2x_iov_storm_stats_update(struct bnx2x *bp); @@ -497,163 +402,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map); -/* VFOP generic helpers */ -#define bnx2x_vfop_default(state) do { \ - BNX2X_ERR("Bad state %d\n", (state)); \ - vfop->rc = -EINVAL; \ - goto op_err; \ - } while (0) - -enum { - VFOP_DONE, - VFOP_CONT, - VFOP_VERIFY_PEND, -}; - -#define bnx2x_vfop_finalize(vf, rc, next) do { \ - if ((rc) < 0) \ - goto op_err; \ - else if ((rc) > 0) \ - goto op_pending; \ - else if ((next) == VFOP_DONE) \ - goto op_done; \ - else if ((next) == VFOP_VERIFY_PEND) \ - BNX2X_ERR("expected pending\n"); \ - else { \ - DP(BNX2X_MSG_IOV, "no ramrod. 
Scheduling\n"); \ - atomic_set(&vf->op_in_progress, 1); \ - bnx2x_schedule_iov_task(bp, \ - BNX2X_IOV_CONT_VFOP); \ - return; \ - } \ - } while (0) - -#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \ - do { \ - vfop->state = first_state; \ - vfop->op_p = &vf->op_params; \ - vfop->transition = trans_hndlr; \ - vfop->done = done_hndlr; \ - } while (0) - -static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp, - struct bnx2x_virtf *vf) -{ - WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); - WARN_ON(list_empty(&vf->op_list_head)); - return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link); -} - -static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp, - struct bnx2x_virtf *vf) -{ - struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL); - - WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); - if (vfop) { - INIT_LIST_HEAD(&vfop->link); - list_add(&vfop->link, &vf->op_list_head); - } - return vfop; -} - -static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf, - struct bnx2x_vfop *vfop) -{ - /* rc < 0 - error, otherwise set to 0 */ - DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc); - if (vfop->rc >= 0) - vfop->rc = 0; - DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc); - - /* unlink the current op context and propagate error code - * must be done before invoking the 'done()' handler - */ - WARN(!mutex_is_locked(&vf->op_mutex), - "about to access vf op linked list but mutex was not locked!"); - list_del(&vfop->link); - - if (list_empty(&vf->op_list_head)) { - DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc); - vf->op_rc = vfop->rc; - DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", - vf->op_rc, vfop->rc); - } else { - struct bnx2x_vfop *cur_vfop; - - DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc); - cur_vfop = bnx2x_vfop_cur(bp, vf); - cur_vfop->rc = vfop->rc; - DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", - vf->op_rc, vfop->rc); - } - - /* invoke done handler */ - if (vfop->done) { - DP(BNX2X_MSG_IOV, "calling done handler\n"); - vfop->done(bp, vf); - } else { - /* there is no done handler for the operation to unlock - * the mutex. Must have gotten here from PF initiated VF RELEASE - */ - bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); - } - - DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n", - vf->op_rc, vfop->rc); - - /* if this is the last nested op reset the wait_blocking flag - * to release any blocking wrappers, only after 'done()' is invoked - */ - if (list_empty(&vf->op_list_head)) { - DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc); - vf->op_wait_blocking = false; - } - - kfree(vfop); -} - -static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp, - struct bnx2x_virtf *vf) -{ - /* can take a while if any port is running */ - int cnt = 5000; - - might_sleep(); - while (cnt--) { - if (vf->op_wait_blocking == false) { -#ifdef BNX2X_STOP_ON_ERROR - DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt); -#endif - return 0; - } - usleep_range(1000, 2000); - - if (bp->panic) - return -EIO; - } - - /* timeout! 
*/ -#ifdef BNX2X_STOP_ON_ERROR - bnx2x_panic(); -#endif - - return -EBUSY; -} - -static inline int bnx2x_vfop_transition(struct bnx2x *bp, - struct bnx2x_virtf *vf, - vfop_handler_t transition, - bool block) -{ - if (block) - vf->op_wait_blocking = true; - transition(bp, vf); - if (block) - return bnx2x_vfop_wait_blocking(bp, vf); - return 0; -} - /* VFOP queue construction helpers */ void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_queue_init_params *init_params, @@ -668,64 +416,41 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, void bnx2x_vfop_qctor_prep(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q, - struct bnx2x_vfop_qctor_params *p, + struct bnx2x_vf_queue_construct_params *p, unsigned long q_type); -int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - struct bnx2x_vfop_filters *macs, - int qid, bool drv_only); - -int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - struct bnx2x_vfop_filters *vlans, - int qid, bool drv_only); - -int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid); - -int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid); - -int bnx2x_vfop_mcast_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - bnx2x_mac_addr_t *mcasts, - int mcast_num, bool drv_only); - -int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid, unsigned long accept_flags); - -int bnx2x_vfop_close_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd); - -int bnx2x_vfop_release_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd); -int bnx2x_vfop_rss_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd); +int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mac_vlan_filters *filters, + int qid, bool drv_only); + +int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_queue_construct_params *qctor); + +int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid); + +int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, + bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only); + +int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid, unsigned long accept_flags); + +int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf); + +int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf); + +int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_config_rss_params *rss); -int bnx2x_vfop_tpa_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - struct vfpf_tpa_tlv *tpa_tlv); +int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vfpf_tpa_tlv *tlv, + struct bnx2x_queue_update_tpa_params *params); /* VF release ~ VF close + VF release-resources * * Release is the ultimate SW shutdown and is called whenever an * irrecoverable error is encountered. 
*/ -void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block); +int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf); int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); @@ -796,7 +521,6 @@ void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag); static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, struct bnx2x_queue_sp_obj **q_obj) {} -static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid) {} static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) {return 1; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 63c95658ba60..fe3737e56d08 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -673,6 +673,7 @@ static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) out: bnx2x_vfpf_finalize(bp, &req->first_tlv); + return rc; } @@ -1048,7 +1049,8 @@ static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp, } static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, - struct bnx2x_virtf *vf) + struct bnx2x_virtf *vf, + int vf_rc) { struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; @@ -1060,7 +1062,7 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); - resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc); + resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc); /* send response */ vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) + @@ -1108,14 +1110,15 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, return; mbx_error: - bnx2x_vf_release(bp, vf, false); /* non blocking */ + bnx2x_vf_release(bp, vf); } static void bnx2x_vf_mbx_resp(struct bnx2x *bp, - struct bnx2x_virtf *vf) + struct bnx2x_virtf *vf, + int rc) { bnx2x_vf_mbx_resp_single_tlv(bp, vf); - bnx2x_vf_mbx_resp_send_msg(bp, vf); + bnx2x_vf_mbx_resp_send_msg(bp, vf, rc); } static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, @@ -1239,8 +1242,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, sizeof(struct channel_list_end_tlv)); /* send the response */ - vf->op_rc = vfop_status; - bnx2x_vf_mbx_resp_send_msg(bp, vf); + bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status); } static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, @@ -1272,19 +1274,20 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) { struct vfpf_init_tlv *init = &mbx->msg->req.init; + int rc; /* record ghost addresses from vf message */ vf->spq_map = init->spq_addr; vf->fw_stat_map = init->stats_addr; vf->stats_stride = init->stats_stride; - vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); + rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); /* set VF multiqueue statistics collection mode */ if (init->flags & VFPF_INIT_FLG_STATS_COALESCE) vf->cfg_flags |= VF_CFG_STATS_COALESCE; /* response */ - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } /* convert MBX queue-flags to standard SP queue-flags */ @@ -1319,16 +1322,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) 
{ struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q; - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vf_mbx_resp, - .block = false, - }; + struct bnx2x_vf_queue_construct_params qctor; + int rc = 0; /* verify vf_qid */ if (setup_q->vf_qid >= vf_rxq_count(vf)) { BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n", setup_q->vf_qid, vf_rxq_count(vf)); - vf->op_rc = -EINVAL; + rc = -EINVAL; goto response; } @@ -1346,9 +1347,10 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_leading_vfq_init(bp, vf, q); /* re-init the VF operation context */ - memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); - setup_p = &vf->op_params.qctor.prep_qsetup; - init_p = &vf->op_params.qctor.qstate.params.init; + memset(&qctor, 0 , + sizeof(struct bnx2x_vf_queue_construct_params)); + setup_p = &qctor.prep_qsetup; + init_p = &qctor.qstate.params.init; /* activate immediately */ __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags); @@ -1434,44 +1436,34 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, q->index, q->sb_idx); } /* complete the preparations */ - bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type); + bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type); - vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index); - if (vf->op_rc) + rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor); + if (rc) goto response; - return; } response: - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } -enum bnx2x_vfop_filters_state { - BNX2X_VFOP_MBX_Q_FILTERS_MACS, - BNX2X_VFOP_MBX_Q_FILTERS_VLANS, - BNX2X_VFOP_MBX_Q_FILTERS_RXMODE, - BNX2X_VFOP_MBX_Q_FILTERS_MCAST, - BNX2X_VFOP_MBX_Q_FILTERS_DONE -}; - static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, struct bnx2x_virtf *vf, struct vfpf_set_q_filters_tlv *tlv, - struct bnx2x_vfop_filters **pfl, + struct bnx2x_vf_mac_vlan_filters **pfl, u32 type_flag) { int i, j; - struct bnx2x_vfop_filters *fl = NULL; + struct bnx2x_vf_mac_vlan_filters *fl = NULL; size_t fsz; - fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) + - sizeof(struct bnx2x_vfop_filters); + fsz = tlv->n_mac_vlan_filters * + sizeof(struct bnx2x_vf_mac_vlan_filter) + + sizeof(struct bnx2x_vf_mac_vlan_filters); fl = kzalloc(fsz, GFP_KERNEL); if (!fl) return -ENOMEM; - INIT_LIST_HEAD(&fl->head); - for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) { struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i]; @@ -1479,17 +1471,17 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, continue; if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) { fl->filters[j].mac = msg_filter->mac; - fl->filters[j].type = BNX2X_VFOP_FILTER_MAC; + fl->filters[j].type = BNX2X_VF_FILTER_MAC; } else { fl->filters[j].vid = msg_filter->vlan_tag; - fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN; + fl->filters[j].type = BNX2X_VF_FILTER_VLAN; } fl->filters[j].add = (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ? 
true : false;
-		list_add_tail(&fl->filters[j++].link, &fl->head);
+		fl->count++;
 	}

-	if (list_empty(&fl->head))
+	if (!fl->count)
 		kfree(fl);
 	else
 		*pfl = fl;

@@ -1529,168 +1521,97 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
 #define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
 #define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID

-static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
-	int rc;
+	int rc = 0;

 	struct vfpf_set_q_filters_tlv *msg =
 		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

-	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-	enum bnx2x_vfop_filters_state state = vfop->state;
-
-	struct bnx2x_vfop_cmd cmd = {
-		.done = bnx2x_vfop_mbx_qfilters,
-		.block = false,
-	};
+	/* check for any mac/vlan changes */
+	if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+		/* build mac list */
+		struct bnx2x_vf_mac_vlan_filters *fl = NULL;

-	DP(BNX2X_MSG_IOV, "STATE: %d\n", state);
-
-	if (vfop->rc < 0)
-		goto op_err;
-
-	switch (state) {
-	case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
-		/* next state */
-		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;
+		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+					       VFPF_MAC_FILTER);
+		if (rc)
+			goto op_err;

-		/* check for any vlan/mac changes */
-		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
-			/* build mac list */
-			struct bnx2x_vfop_filters *fl = NULL;
+		if (fl) {

-			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
-							     VFPF_MAC_FILTER);
-			if (vfop->rc)
+			/* set mac list */
+			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+							   msg->vf_qid,
+							   false);
+			if (rc)
 				goto op_err;
-
-			if (fl) {
-				/* set mac list */
-				rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
-							     msg->vf_qid,
-							     false);
-				if (rc) {
-					vfop->rc = rc;
-					goto op_err;
-				}
-				return;
-			}
 		}
-		/* fall through */
-
-	case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
-		/* next state */
-		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;

-		/* check for any vlan/mac changes */
-		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
-			/* build vlan list */
-			struct bnx2x_vfop_filters *fl = NULL;
+		/* build vlan list */
+		fl = NULL;

-			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
-							     VFPF_VLAN_FILTER);
-			if (vfop->rc)
+		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+					       VFPF_VLAN_FILTER);
+		if (rc)
+			goto op_err;
+
+		if (fl) {
+			/* set vlan list */
+			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+							   msg->vf_qid,
+							   false);
+			if (rc)
 				goto op_err;
-
-			if (fl) {
-				/* set vlan list */
-				rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
-							      msg->vf_qid,
-							      false);
-				if (rc) {
-					vfop->rc = rc;
-					goto op_err;
-				}
-				return;
-			}
 		}
-		/* fall through */
+	}

-	case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
-		/* next state */
-		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;
+	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
+		unsigned long accept = 0;
+		struct pf_vf_bulletin_content *bulletin =
+			BP_VF_BULLETIN(bp, vf->index);

-		if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
-			unsigned long accept = 0;
-			struct pf_vf_bulletin_content *bulletin =
-				BP_VF_BULLETIN(bp, vf->index);
+		/* convert VF-PF if mask to bnx2x accept flags */
+		if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
+			__set_bit(BNX2X_ACCEPT_UNICAST, &accept);

-			/* covert VF-PF if mask to bnx2x accept flags */
-			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
-				__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
+		if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
+			__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);

-			if (msg->rx_mask &
-			    VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
-				__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
+		if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
+			__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);

-			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
-				__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
+		if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
+			__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);

-			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
-				__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
+		if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
+			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);

-			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
-				__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
-
-			/* A packet arriving the vf's mac should be accepted
-			 * with any vlan, unless a vlan has already been
-			 * configured.
-			 */
-			if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
-				__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
-
-			/* set rx-mode */
-			rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
-						   msg->vf_qid, accept);
-			if (rc) {
-				vfop->rc = rc;
-				goto op_err;
-			}
-			return;
-		}
-		/* fall through */
-
-	case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
-		/* next state */
-		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;
-
-		if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
-			/* set mcasts */
-			rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
-						  msg->n_multicast, false);
-			if (rc) {
-				vfop->rc = rc;
-				goto op_err;
-			}
-			return;
-		}
-		/* fall through */
-op_done:
-	case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
-		bnx2x_vfop_end(bp, vf, vfop);
-		return;
-op_err:
-	BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
-		  vf->abs_vfid, msg->vf_qid, vfop->rc);
-	goto op_done;
+		/* A packet arriving at the vf's mac should be accepted
+		 * with any vlan, unless a vlan has already been
+		 * configured.
+		 */
+		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

-	default:
-		bnx2x_vfop_default(state);
+		/* set rx-mode */
+		rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
+		if (rc)
+			goto op_err;
 	}
-}

-static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
-				       struct bnx2x_virtf *vf,
-				       struct bnx2x_vfop_cmd *cmd)
-{
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-	if (vfop) {
-		bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
-				 bnx2x_vfop_mbx_qfilters, cmd->done);
-		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
-					     cmd->block);
+	if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
+		/* set mcasts */
+		rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
+				    msg->n_multicast, false);
+		if (rc)
+			goto op_err;
 	}
-	return -ENOMEM;
+op_err:
+	if (rc)
+		BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
+			  vf->abs_vfid, msg->vf_qid, rc);
+	return rc;
 }

 static int bnx2x_filters_validate_mac(struct bnx2x *bp,
@@ -1710,7 +1631,6 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
 		if (filters->n_mac_vlan_filters > 1) {
 			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
 				  vf->abs_vfid);
-			vf->op_rc = -EPERM;
 			rc = -EPERM;
 			goto response;
 		}
@@ -1721,7 +1641,6 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
 			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
 				  vf->abs_vfid);
-			vf->op_rc = -EPERM;
 			rc = -EPERM;
 			goto response;
 		}
@@ -1748,7 +1667,6 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
 		    VFPF_Q_FILTER_VLAN_TAG_VALID) {
 			BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. 
Aborting request\n", vf->abs_vfid);
-		vf->op_rc = -EPERM;
+		rc = -EPERM;
 		goto response;
 	}
@@ -1770,15 +1688,14 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
					struct bnx2x_vf_mbx *mbx)
 {
 	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
-	struct bnx2x_vfop_cmd cmd = {
-		.done = bnx2x_vf_mbx_resp,
-		.block = false,
-	};
+	int rc;

-	if (bnx2x_filters_validate_mac(bp, vf, filters))
+	rc = bnx2x_filters_validate_mac(bp, vf, filters);
+	if (rc)
 		goto response;

-	if (bnx2x_filters_validate_vlan(bp, vf, filters))
+	rc = bnx2x_filters_validate_vlan(bp, vf, filters);
+	if (rc)
 		goto response;

 	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
@@ -1788,125 +1705,105 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
 	/* print q_filter message */
 	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

-	vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
-	if (vf->op_rc)
-		goto response;
-	return;
-
+	rc = bnx2x_vf_mbx_qfilters(bp, vf);
 response:
-	bnx2x_vf_mbx_resp(bp, vf);
+	bnx2x_vf_mbx_resp(bp, vf, rc);
 }

 static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
 				    struct bnx2x_vf_mbx *mbx)
 {
 	int qid = mbx->msg->req.q_op.vf_qid;
-	struct bnx2x_vfop_cmd cmd = {
-		.done = bnx2x_vf_mbx_resp,
-		.block = false,
-	};
+	int rc;

 	DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
 	   vf->abs_vfid, qid);

-	vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
-	if (vf->op_rc)
-		bnx2x_vf_mbx_resp(bp, vf);
+	rc = bnx2x_vf_queue_teardown(bp, vf, qid);
+	bnx2x_vf_mbx_resp(bp, vf, rc);
 }

 static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
 				  struct bnx2x_vf_mbx *mbx)
 {
-	struct bnx2x_vfop_cmd cmd = {
-		.done = bnx2x_vf_mbx_resp,
-		.block = false,
-	};
+	int rc;

 	DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);

-	vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
-	if (vf->op_rc)
-		bnx2x_vf_mbx_resp(bp, vf);
+	rc = bnx2x_vf_close(bp, vf);
+	bnx2x_vf_mbx_resp(bp, vf, rc);
 }

 static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
 				    struct bnx2x_vf_mbx *mbx)
 {
-	struct bnx2x_vfop_cmd cmd = {
-		.done = bnx2x_vf_mbx_resp,
-		.block = false,
-	};
+	int rc;

 	DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);

-	vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
-	if (vf->op_rc)
-		bnx2x_vf_mbx_resp(bp, vf);
+	rc = bnx2x_vf_free(bp, vf);
+	bnx2x_vf_mbx_resp(bp, vf, rc);
 }

 static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
 				    struct bnx2x_vf_mbx *mbx)
 {
-	struct bnx2x_vfop_cmd cmd = {
-		.done = bnx2x_vf_mbx_resp,
-		.block = false,
-	};
-	struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
+	struct bnx2x_config_rss_params rss;
 	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+	int rc = 0;

 	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
 	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
 		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
 			  vf->index);
-		vf->op_rc = -EINVAL;
+		rc = -EINVAL;
 		goto mbx_resp;
 	}

+	memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
+
 	/* set vfop params according to rss tlv */
-	memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
+	memcpy(rss.ind_table, rss_tlv->ind_table,
 	       T_ETH_INDIRECTION_TABLE_SIZE);
-	memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
-	       sizeof(rss_tlv->rss_key));
-	vf_op_params->rss_obj = &vf->rss_conf_obj;
-	vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
+	memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
+	rss.rss_obj = &vf->rss_conf_obj;
+	rss.rss_result_mask = rss_tlv->rss_result_mask;

 	/* flags handled individually for backward/forward compatibility */
-	vf_op_params->rss_flags = 0;
-	vf_op_params->ramrod_flags = 0;
+	rss.rss_flags = 0;
+	rss.ramrod_flags = 0;

 	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
-		__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
+		__set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
 	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
-		__set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
+		__set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
 	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
-		__set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
+		__set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
 	if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
-		__set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
+		__set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
 	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
-		__set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
+		__set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
 	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
-		__set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
+		__set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
 	if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
-		__set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
+		__set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
 	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
-		__set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
+		__set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
 	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
-		__set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
+		__set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);

 	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
 	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
 	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
 	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
 		BNX2X_ERR("about to hit a FW assert. aborting...\n");
-		vf->op_rc = -EINVAL;
+		rc = -EINVAL;
 		goto mbx_resp;
 	}

-	vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
-
+	rc = bnx2x_vf_rss_update(bp, vf, &rss);
 mbx_resp:
-	if (vf->op_rc)
-		bnx2x_vf_mbx_resp(bp, vf);
+	bnx2x_vf_mbx_resp(bp, vf, rc);
 }

 static int bnx2x_validate_tpa_params(struct bnx2x *bp,
@@ -1935,47 +1832,42 @@ static int bnx2x_validate_tpa_params(struct bnx2x *bp,
 static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
 				    struct bnx2x_vf_mbx *mbx)
 {
-	struct bnx2x_vfop_cmd cmd = {
-		.done = bnx2x_vf_mbx_resp,
-		.block = false,
-	};
-	struct bnx2x_queue_update_tpa_params *vf_op_params =
-		&vf->op_params.qstate.params.update_tpa;
+	struct bnx2x_queue_update_tpa_params vf_op_params;
 	struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+	int rc = 0;

-	memset(vf_op_params, 0, sizeof(*vf_op_params));
+	memset(&vf_op_params, 0, sizeof(vf_op_params));

 	if (bnx2x_validate_tpa_params(bp, tpa_tlv))
 		goto mbx_resp;

-	vf_op_params->complete_on_both_clients =
+	vf_op_params.complete_on_both_clients =
 		tpa_tlv->tpa_client_info.complete_on_both_clients;
-	vf_op_params->dont_verify_thr =
+	vf_op_params.dont_verify_thr =
 		tpa_tlv->tpa_client_info.dont_verify_thr;
-	vf_op_params->max_agg_sz =
+	vf_op_params.max_agg_sz =
 		tpa_tlv->tpa_client_info.max_agg_size;
-	vf_op_params->max_sges_pkt =
+	vf_op_params.max_sges_pkt =
 		tpa_tlv->tpa_client_info.max_sges_for_packet;
-	vf_op_params->max_tpa_queues =
+	vf_op_params.max_tpa_queues =
 		tpa_tlv->tpa_client_info.max_tpa_queues;
-	vf_op_params->sge_buff_sz =
+	vf_op_params.sge_buff_sz =
 		tpa_tlv->tpa_client_info.sge_buff_size;
-	vf_op_params->sge_pause_thr_high =
+	vf_op_params.sge_pause_thr_high =
 		tpa_tlv->tpa_client_info.sge_pause_thr_high;
-	vf_op_params->sge_pause_thr_low =
+	vf_op_params.sge_pause_thr_low =
 		tpa_tlv->tpa_client_info.sge_pause_thr_low;
-	vf_op_params->tpa_mode =
+	vf_op_params.tpa_mode =
 		tpa_tlv->tpa_client_info.tpa_mode;
-	vf_op_params->update_ipv4 =
+	vf_op_params.update_ipv4 =
 		tpa_tlv->tpa_client_info.update_ipv4;
-	vf_op_params->update_ipv6 =
+	vf_op_params.update_ipv6 =
 		tpa_tlv->tpa_client_info.update_ipv6;

-	vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv);
+	rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);

 mbx_resp:
-	if (vf->op_rc)
-		bnx2x_vf_mbx_resp(bp, vf);
+	bnx2x_vf_mbx_resp(bp, vf, rc);
 }

 /* dispatch request */
@@ -2039,11 +1931,8 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,

 	/* can we respond to VF (do we have an address for it?) */
 	if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
-		/* mbx_resp uses the op_rc of the VF */
-		vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
-
 		/* notify the VF that we do not support this request */
-		bnx2x_vf_mbx_resp(bp, vf);
+		bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
 	} else {
 		/* can't send a response since this VF is unknown to us
 		 * just ack the FW to release the mailbox and unlock
@@ -2123,7 +2012,7 @@ void bnx2x_vf_mbx(struct bnx2x *bp)
 		if (rc) {
 			BNX2X_ERR("Failed to copy request VF %d\n",
 				  vf->abs_vfid);
-			bnx2x_vf_release(bp, vf, false); /* non blocking */
+			bnx2x_vf_release(bp, vf);
 			return;
 		}
--
cgit

From d8361051000f27d2c9467c1f18985f9ce2123415 Mon Sep 17 00:00:00 2001
From: Yuval Mintz
Date: Sun, 23 Mar 2014 18:12:26 +0200
Subject: bnx2x: Don't show port statistics for VFs

VFs currently show port statistics even though they cannot actually
access them, so all such statistics always display a value of 0. This
patch removes these statistics from the VF's view so as not to confuse
the user.

Signed-off-by: Yuval Mintz
Signed-off-by: Ariel Elior
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 38fc794c1655..b6de05e3149b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2969,8 +2969,9 @@ static void bnx2x_self_test(struct net_device *dev,
 #define IS_PORT_STAT(i) \
 	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
 #define IS_FUNC_STAT(i)	(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
-#define IS_MF_MODE_STAT(bp) \
-	(IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+#define HIDE_PORT_STAT(bp) \
+	((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \
+	 IS_VF(bp))

 /* ethtool statistics are displayed for all regular ethernet queues and the
  * fcoe L2 queue if not disabled
@@ -2992,7 +2993,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
 				BNX2X_NUM_Q_STATS;
 		} else
 			num_strings = 0;
-		if (IS_MF_MODE_STAT(bp)) {
+		if (HIDE_PORT_STAT(bp)) {
 			for (i = 0; i < BNX2X_NUM_STATS; i++)
 				if (IS_FUNC_STAT(i))
 					num_strings++;
@@ -3047,7 +3048,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 	}

 	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-		if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+		if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
 			continue;
 		strcpy(buf + (k + j)*ETH_GSTRING_LEN,
 		       bnx2x_stats_arr[i].string);
@@ -3105,7 +3106,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 		hw_stats = (u32 *)&bp->eth_stats;

 	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-		if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+		if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
 			continue;
 		if (bnx2x_stats_arr[i].size == 0) {
 			/* skip this counter */
--
cgit

From 16bd41dda9c95c813f7e24b3b7300614a156b2d8 Mon Sep 17 00:00:00 2001
From: Yuval Mintz
Date: Sun, 23 Mar 2014 18:12:27 +0200
Subject: bnx2x: Don't allow VFs to become promiscuous

Currently, if a VF's Rx mode is configured to support promiscuous mode,
the PF will comply, causing the VF to actually become promiscuous. This
enables the VF to see all unicast traffic, including traffic that might
be intended for other VMs, which we believe should not be possible.

This patch causes the hypervisor to ignore the VF's requests for changes
in its Rx mode (other than disabling it), preventing the VF from
becoming promiscuous.

Reported-by: Yoann Juet
Signed-off-by: Yuval Mintz
Signed-off-by: Ariel Elior
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 42 ++++++------------------
 1 file changed, 10 insertions(+), 32 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index fe3737e56d08..0622884596b2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -896,29 +896,16 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)

 	DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);

-	switch (mode) {
-	case BNX2X_RX_MODE_NONE: /* no Rx */
+	/* Ignore everything except MODE_NONE */
+	if (mode == BNX2X_RX_MODE_NONE) {
 		req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
-		break;
-	case BNX2X_RX_MODE_NORMAL:
+	} else {
+		/* Current PF driver will not look at the specific flags,
+		 * but they are required when working with older drivers on hv.
+		 */
 		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
-		break;
-	case BNX2X_RX_MODE_ALLMULTI:
-		req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
-		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
-		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
-		break;
-	case BNX2X_RX_MODE_PROMISC:
-		req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
-		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
-		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
-		break;
-	default:
-		BNX2X_ERR("BAD rx mode (%d)\n", mode);
-		rc = -EINVAL;
-		goto out;
 	}

 	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
@@ -939,7 +926,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
 		BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
 		rc = -EINVAL;
 	}
-out:
+
 	bnx2x_vfpf_finalize(bp, &req->first_tlv);

 	return rc;
@@ -1571,21 +1558,12 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 		struct pf_vf_bulletin_content *bulletin =
 					BP_VF_BULLETIN(bp, vf->index);

-		/* covert VF-PF if mask to bnx2x accept flags */
-		if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
+		/* Ignore VF requested mode; instead set a regular mode */
+		if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
 			__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
-
-		if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
 			__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
-
-		if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
-			__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
-
-		if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
-			__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
-
-		if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
 			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
+		}

 		/* A packet arriving the vf's mac should be accepted
 		 * with any vlan, unless a vlan has already been
--
cgit
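
Editor's illustration: the PF-side policy in this last patch can be modeled in isolation. The standalone C sketch below is not driver code; the constants, mask values, and the pf_clamp_rx_mask() helper are invented for illustration, and only the clamping rule mirrors the patch: any request other than "none" collapses to the matched-unicast/multicast/broadcast set, so promiscuous ACCEPT_ALL_* bits are never honored.

/* rx_clamp.c - hypothetical model of the rx-mode clamp described above.
 * Build and run with: cc rx_clamp.c -o rx_clamp && ./rx_clamp
 */
#include <stdio.h>

#define RX_MASK_ACCEPT_NONE              0x00
#define RX_MASK_ACCEPT_MATCHED_UNICAST   0x01
#define RX_MASK_ACCEPT_MATCHED_MULTICAST 0x02
#define RX_MASK_ACCEPT_BROADCAST         0x04
#define RX_MASK_ACCEPT_ALL_UNICAST       0x08 /* promiscuous unicast */
#define RX_MASK_ACCEPT_ALL_MULTICAST     0x10 /* promiscuous multicast */

/* PF-side policy: ignore the specific bits the VF asked for.
 * Anything other than "none" is granted exactly the regular set,
 * so ACCEPT_ALL_* (promiscuous) requests are silently dropped.
 */
static unsigned int pf_clamp_rx_mask(unsigned int requested)
{
	if (requested == RX_MASK_ACCEPT_NONE)
		return RX_MASK_ACCEPT_NONE;

	return RX_MASK_ACCEPT_MATCHED_UNICAST |
	       RX_MASK_ACCEPT_MATCHED_MULTICAST |
	       RX_MASK_ACCEPT_BROADCAST;
}

int main(void)
{
	/* A VF asking for promiscuous mode... */
	unsigned int promisc_req = RX_MASK_ACCEPT_ALL_UNICAST |
				   RX_MASK_ACCEPT_ALL_MULTICAST |
				   RX_MASK_ACCEPT_BROADCAST;

	/* ...is granted only the matched/broadcast set (0x07 here). */
	printf("requested 0x%x -> granted 0x%x\n",
	       promisc_req, pf_clamp_rx_mask(promisc_req));
	printf("requested 0x%x -> granted 0x%x\n",
	       RX_MASK_ACCEPT_NONE, pf_clamp_rx_mask(RX_MASK_ACCEPT_NONE));
	return 0;
}

Note how this matches both sides of the patch: the VF-side bnx2x_vfpf_storm_rx_mode() never requests more than the matched set, and the PF-side bnx2x_vf_mbx_qfilters() ignores the requested bits anyway, so even an older or malicious VF driver cannot obtain the ACCEPT_ALL_* behavior.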