Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c          |  93
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h          |   5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  |   5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c    |   2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c       |  21
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c     | 228
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h     |   1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c |  22
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c                |   4
9 files changed, 169 insertions, 212 deletions
| diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c62589c266b2..7463a1847ceb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3418,7 +3418,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp)   */  void bnxt_set_ring_params(struct bnxt *bp)  { -	u32 ring_size, rx_size, rx_space; +	u32 ring_size, rx_size, rx_space, max_rx_cmpl;  	u32 agg_factor = 0, agg_ring_size = 0;  	/* 8 for CRC and VLAN */ @@ -3474,7 +3474,15 @@ void bnxt_set_ring_params(struct bnxt *bp)  	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);  	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; -	ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; +	max_rx_cmpl = bp->rx_ring_size; +	/* MAX TPA needs to be added because TPA_START completions are +	 * immediately recycled, so the TPA completions are not bound by +	 * the RX ring size. +	 */ +	if (bp->flags & BNXT_FLAG_TPA) +		max_rx_cmpl += bp->max_tpa; +	/* RX and TPA completions are 32-byte, all others are 16-byte */ +	ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;  	bp->cp_ring_size = ring_size;  	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); @@ -6292,6 +6300,7 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)  static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)  { +	struct hwrm_stat_ctx_clr_stats_input req0 = {0};  	struct hwrm_stat_ctx_free_input req = {0};  	int i; @@ -6301,6 +6310,7 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)  	if (BNXT_CHIP_TYPE_NITRO_A0(bp))  		return; +	bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);  	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);  	mutex_lock(&bp->hwrm_cmd_lock); @@ -6310,7 +6320,11 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)  		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {  			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); - +			if (BNXT_FW_MAJ(bp) <= 20) { +				req0.stat_ctx_id = req.stat_ctx_id; +				_hwrm_send_message(bp, &req0, sizeof(req0), +						   HWRM_CMD_TIMEOUT); +			}  			_hwrm_send_message(bp, &req, sizeof(req),  					   HWRM_CMD_TIMEOUT); @@ -6976,7 +6990,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)  		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;  	bp->tx_push_thresh = 0; -	if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) +	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && +	    BNXT_FW_MAJ(bp) > 217)  		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;  	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); @@ -7240,8 +7255,9 @@ static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)  static int bnxt_hwrm_ver_get(struct bnxt *bp)  {  	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; +	u16 fw_maj, fw_min, fw_bld, fw_rsv;  	u32 dev_caps_cfg, hwrm_ver; -	int rc; +	int rc, len;  	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;  	mutex_lock(&bp->hwrm_cmd_lock); @@ -7273,9 +7289,22 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)  			 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,  			 resp->hwrm_intf_upd_8b); -	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", -		 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, -		 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); +	fw_maj = le16_to_cpu(resp->hwrm_fw_major); +	if (bp->hwrm_spec_code > 0x10803 && fw_maj) { +		fw_min = le16_to_cpu(resp->hwrm_fw_minor); +		fw_bld = le16_to_cpu(resp->hwrm_fw_build); +		fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); +		len = FW_VER_STR_LEN; +	} else { +		fw_maj = 
resp->hwrm_fw_maj_8b; +		fw_min = resp->hwrm_fw_min_8b; +		fw_bld = resp->hwrm_fw_bld_8b; +		fw_rsv = resp->hwrm_fw_rsvd_8b; +		len = BC_HWRM_STR_LEN; +	} +	bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); +	snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, +		 fw_rsv);  	if (strlen(resp->active_pkg_name)) {  		int fw_ver_len = strlen(bp->fw_ver_str); @@ -10037,7 +10066,7 @@ static void bnxt_timer(struct timer_list *t)  	struct bnxt *bp = from_timer(bp, t, timer);  	struct net_device *dev = bp->dev; -	if (!netif_running(dev)) +	if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))  		return;  	if (atomic_read(&bp->intr_sem) != 0) @@ -10364,15 +10393,15 @@ static void bnxt_sp_task(struct work_struct *work)  				       &bp->sp_event))  			bnxt_hwrm_phy_qcaps(bp); -		if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, -				       &bp->sp_event)) -			bnxt_init_ethtool_link_settings(bp); -  		rc = bnxt_update_link(bp, true); -		mutex_unlock(&bp->link_lock);  		if (rc)  			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",  				   rc); + +		if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, +				       &bp->sp_event)) +			bnxt_init_ethtool_link_settings(bp); +		mutex_unlock(&bp->link_lock);  	}  	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {  		int rc; @@ -11892,7 +11921,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)  	dev->ethtool_ops = &bnxt_ethtool_ops;  	pci_set_drvdata(pdev, dev); -	bnxt_vpd_read_info(bp); +	if (BNXT_PF(bp)) +		bnxt_vpd_read_info(bp);  	rc = bnxt_alloc_hwrm_resources(bp);  	if (rc) @@ -12133,19 +12163,9 @@ static int bnxt_resume(struct device *device)  		goto resume_exit;  	} -	if (bnxt_hwrm_queue_qportcfg(bp)) { -		rc = -ENODEV; +	rc = bnxt_hwrm_func_qcaps(bp); +	if (rc)  		goto resume_exit; -	} - -	if (bp->hwrm_spec_code >= 0x10803) { -		if (bnxt_alloc_ctx_mem(bp)) { -			rc = -ENODEV; -			goto resume_exit; -		} -	} -	if (BNXT_NEW_RM(bp)) -		bnxt_hwrm_func_resc_qcaps(bp, false);  	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {  		rc = -ENODEV; @@ -12161,6 +12181,8 @@ static int bnxt_resume(struct device *device)  resume_exit:  	bnxt_ulp_start(bp, rc); +	if (!rc) +		bnxt_reenable_sriov(bp);  	rtnl_unlock();  	return rc;  } @@ -12204,6 +12226,9 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,  		bnxt_close(netdev);  	pci_disable_device(pdev); +	bnxt_free_ctx_mem(bp); +	kfree(bp->ctx); +	bp->ctx = NULL;  	rtnl_unlock();  	/* Request a slot slot reset. 
*/ @@ -12237,12 +12262,16 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)  		pci_set_master(pdev);  		err = bnxt_hwrm_func_reset(bp); -		if (!err && netif_running(netdev)) -			err = bnxt_open(netdev); - -		if (!err) -			result = PCI_ERS_RESULT_RECOVERED; +		if (!err) { +			err = bnxt_hwrm_func_qcaps(bp); +			if (!err && netif_running(netdev)) +				err = bnxt_open(netdev); +		}  		bnxt_ulp_start(bp, err); +		if (!err) { +			bnxt_reenable_sriov(bp); +			result = PCI_ERS_RESULT_RECOVERED; +		}  	}  	if (result != PCI_ERS_RESULT_RECOVERED) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 9e173d74b72a..78e2fd63ac3d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1746,6 +1746,11 @@ struct bnxt {  #define PHY_VER_STR_LEN         (FW_VER_STR_LEN - BC_HWRM_STR_LEN)  	char			fw_ver_str[FW_VER_STR_LEN];  	char			hwrm_ver_supp[FW_VER_STR_LEN]; +	u64			fw_ver_code; +#define BNXT_FW_VER_CODE(maj, min, bld, rsv)			\ +	((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv)) +#define BNXT_FW_MAJ(bp)		((bp)->fw_ver_code >> 48) +  	__be16			vxlan_port;  	u8			vxlan_port_cnt;  	__le16			vxlan_fw_dst_port_id; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 6b88143af5ea..b4aa56dc4f9f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1765,8 +1765,11 @@ static int bnxt_set_pauseparam(struct net_device *dev,  	if (epause->tx_pause)  		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; -	if (netif_running(dev)) +	if (netif_running(dev)) { +		mutex_lock(&bp->link_lock);  		rc = bnxt_hwrm_set_pause(bp); +		mutex_unlock(&bp->link_lock); +	}  	return rc;  } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 3a9a51f7063a..392e32c7122a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -396,6 +396,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp)  		}  	} +	bp->pf.active_vfs = 0;  	kfree(bp->pf.vf);  	bp->pf.vf = NULL;  } @@ -835,7 +836,6 @@ void bnxt_sriov_disable(struct bnxt *bp)  	bnxt_free_vf_resources(bp); -	bp->pf.active_vfs = 0;  	/* Reclaim all resources for the PF. 
*/  	rtnl_lock();  	bnxt_restore_pf_fw_resources(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 0eef4f5e4a46..4a11c1e7cc02 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1889,7 +1889,8 @@ static void bnxt_tc_setup_indr_rel(void *cb_priv)  }  static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp, -				    struct flow_block_offload *f) +				    struct flow_block_offload *f, void *data, +				    void (*cleanup)(struct flow_block_cb *block_cb))  {  	struct bnxt_flower_indr_block_cb_priv *cb_priv;  	struct flow_block_cb *block_cb; @@ -1907,9 +1908,10 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,  		cb_priv->bp = bp;  		list_add(&cb_priv->list, &bp->tc_indr_block_list); -		block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb, -					       cb_priv, cb_priv, -					       bnxt_tc_setup_indr_rel); +		block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb, +						    cb_priv, cb_priv, +						    bnxt_tc_setup_indr_rel, f, +						    netdev, data, bp, cleanup);  		if (IS_ERR(block_cb)) {  			list_del(&cb_priv->list);  			kfree(cb_priv); @@ -1930,7 +1932,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,  		if (!block_cb)  			return -ENOENT; -		flow_block_cb_remove(block_cb, f); +		flow_indr_block_cb_remove(block_cb, f);  		list_del(&block_cb->driver_list);  		break;  	default: @@ -1945,14 +1947,17 @@ static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)  }  static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv, -				 enum tc_setup_type type, void *type_data) +				 enum tc_setup_type type, void *type_data, +				 void *data, +				 void (*cleanup)(struct flow_block_cb *block_cb))  {  	if (!bnxt_is_netdev_indr_offload(netdev))  		return -EOPNOTSUPP;  	switch (type) {  	case TC_SETUP_BLOCK: -		return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data); +		return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data, +						cleanup);  	default:  		break;  	} @@ -2074,7 +2079,7 @@ void bnxt_shutdown_tc(struct bnxt *bp)  		return;  	flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp, -				 bnxt_tc_setup_indr_block_cb); +				 bnxt_tc_setup_indr_rel);  	rhashtable_destroy(&tc_info->flow_table);  	rhashtable_destroy(&tc_info->l2_table);  	rhashtable_destroy(&tc_info->decap_l2_table); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index ff31da0ed846..e471b14fc6e9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -459,17 +459,6 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,  			genet_dma_ring_regs[r]);  } -static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv, -					   u32 f_index) -{ -	u32 offset; -	u32 reg; - -	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); -	reg = bcmgenet_hfb_reg_readl(priv, offset); -	return !!(reg & (1 << (f_index % 32))); -} -  static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)  {  	u32 offset; @@ -533,19 +522,6 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,  	bcmgenet_hfb_reg_writel(priv, reg, offset);  } -static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv) -{ -	u32 f_index; - -	/* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */ -	for 
(f_index = MAX_NUM_OF_FS_RULES; -	     f_index < priv->hw_params->hfb_filter_cnt; f_index++) -		if (!bcmgenet_hfb_is_filter_enabled(priv, f_index)) -			return f_index; - -	return -ENOMEM; -} -  static int bcmgenet_hfb_validate_mask(void *mask, size_t size)  {  	while (size) { @@ -567,14 +543,14 @@ static int bcmgenet_hfb_validate_mask(void *mask, size_t size)  #define VALIDATE_MASK(x) \  	bcmgenet_hfb_validate_mask(&(x), sizeof(x)) -static int bcmgenet_hfb_insert_data(u32 *f, int offset, -				    void *val, void *mask, size_t size) +static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index, +				    u32 offset, void *val, void *mask, +				    size_t size)  { -	int index; -	u32 tmp; +	u32 index, tmp; -	index = offset / 2; -	tmp = f[index]; +	index = f_index * priv->hw_params->hfb_filter_size + offset / 2; +	tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));  	while (size--) {  		if (offset++ & 1) { @@ -591,9 +567,10 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset,  				tmp |= 0x10000;  				break;  			} -			f[index++] = tmp; +			bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));  			if (size) -				tmp = f[index]; +				tmp = bcmgenet_hfb_readl(priv, +							 index * sizeof(u32));  		} else {  			tmp &= ~0xCFF00;  			tmp |= (*(unsigned char *)val++) << 8; @@ -609,43 +586,26 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset,  				break;  			}  			if (!size) -				f[index] = tmp; +				bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));  		}  	}  	return 0;  } -static void bcmgenet_hfb_set_filter(struct bcmgenet_priv *priv, u32 *f_data, -				    u32 f_length, u32 rx_queue, int f_index) -{ -	u32 base = f_index * priv->hw_params->hfb_filter_size; -	int i; - -	for (i = 0; i < f_length; i++) -		bcmgenet_hfb_writel(priv, f_data[i], (base + i) * sizeof(u32)); - -	bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length); -	bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue); -} - -static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, -					    struct bcmgenet_rxnfc_rule *rule) +static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, +					     struct bcmgenet_rxnfc_rule *rule)  {  	struct ethtool_rx_flow_spec *fs = &rule->fs; -	int err = 0, offset = 0, f_length = 0; -	u16 val_16, mask_16; +	u32 offset = 0, f_length = 0, f;  	u8 val_8, mask_8; +	__be16 val_16; +	u16 mask_16;  	size_t size; -	u32 *f_data; - -	f_data = kcalloc(priv->hw_params->hfb_filter_size, sizeof(u32), -			 GFP_KERNEL); -	if (!f_data) -		return -ENOMEM; +	f = fs->location;  	if (fs->flow_type & FLOW_MAC_EXT) { -		bcmgenet_hfb_insert_data(f_data, 0, +		bcmgenet_hfb_insert_data(priv, f, 0,  					 &fs->h_ext.h_dest, &fs->m_ext.h_dest,  					 sizeof(fs->h_ext.h_dest));  	} @@ -653,11 +613,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,  	if (fs->flow_type & FLOW_EXT) {  		if (fs->m_ext.vlan_etype ||  		    fs->m_ext.vlan_tci) { -			bcmgenet_hfb_insert_data(f_data, 12, +			bcmgenet_hfb_insert_data(priv, f, 12,  						 &fs->h_ext.vlan_etype,  						 &fs->m_ext.vlan_etype,  						 sizeof(fs->h_ext.vlan_etype)); -			bcmgenet_hfb_insert_data(f_data, 14, +			bcmgenet_hfb_insert_data(priv, f, 14,  						 &fs->h_ext.vlan_tci,  						 &fs->m_ext.vlan_tci,  						 sizeof(fs->h_ext.vlan_tci)); @@ -669,15 +629,15 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,  	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {  	case ETHER_FLOW:  		f_length += DIV_ROUND_UP(ETH_HLEN, 2); -		
bcmgenet_hfb_insert_data(f_data, 0, +		bcmgenet_hfb_insert_data(priv, f, 0,  					 &fs->h_u.ether_spec.h_dest,  					 &fs->m_u.ether_spec.h_dest,  					 sizeof(fs->h_u.ether_spec.h_dest)); -		bcmgenet_hfb_insert_data(f_data, ETH_ALEN, +		bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,  					 &fs->h_u.ether_spec.h_source,  					 &fs->m_u.ether_spec.h_source,  					 sizeof(fs->h_u.ether_spec.h_source)); -		bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset, +		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,  					 &fs->h_u.ether_spec.h_proto,  					 &fs->m_u.ether_spec.h_proto,  					 sizeof(fs->h_u.ether_spec.h_proto)); @@ -687,21 +647,21 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,  		/* Specify IP Ether Type */  		val_16 = htons(ETH_P_IP);  		mask_16 = 0xFFFF; -		bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset, +		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,  					 &val_16, &mask_16, sizeof(val_16)); -		bcmgenet_hfb_insert_data(f_data, 15 + offset, +		bcmgenet_hfb_insert_data(priv, f, 15 + offset,  					 &fs->h_u.usr_ip4_spec.tos,  					 &fs->m_u.usr_ip4_spec.tos,  					 sizeof(fs->h_u.usr_ip4_spec.tos)); -		bcmgenet_hfb_insert_data(f_data, 23 + offset, +		bcmgenet_hfb_insert_data(priv, f, 23 + offset,  					 &fs->h_u.usr_ip4_spec.proto,  					 &fs->m_u.usr_ip4_spec.proto,  					 sizeof(fs->h_u.usr_ip4_spec.proto)); -		bcmgenet_hfb_insert_data(f_data, 26 + offset, +		bcmgenet_hfb_insert_data(priv, f, 26 + offset,  					 &fs->h_u.usr_ip4_spec.ip4src,  					 &fs->m_u.usr_ip4_spec.ip4src,  					 sizeof(fs->h_u.usr_ip4_spec.ip4src)); -		bcmgenet_hfb_insert_data(f_data, 30 + offset, +		bcmgenet_hfb_insert_data(priv, f, 30 + offset,  					 &fs->h_u.usr_ip4_spec.ip4dst,  					 &fs->m_u.usr_ip4_spec.ip4dst,  					 sizeof(fs->h_u.usr_ip4_spec.ip4dst)); @@ -711,11 +671,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,  		/* Only supports 20 byte IPv4 header */  		val_8 = 0x45;  		mask_8 = 0xFF; -		bcmgenet_hfb_insert_data(f_data, ETH_HLEN + offset, +		bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,  					 &val_8, &mask_8,  					 sizeof(val_8));  		size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes); -		bcmgenet_hfb_insert_data(f_data, +		bcmgenet_hfb_insert_data(priv, f,  					 ETH_HLEN + 20 + offset,  					 &fs->h_u.usr_ip4_spec.l4_4_bytes,  					 &fs->m_u.usr_ip4_spec.l4_4_bytes, @@ -724,87 +684,42 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,  		break;  	} +	bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);  	if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {  		/* Ring 0 flows can be handled by the default Descriptor Ring  		 * We'll map them to ring 0, but don't enable the filter  		 */ -		bcmgenet_hfb_set_filter(priv, f_data, f_length,	0, -					fs->location); +		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);  		rule->state = BCMGENET_RXNFC_STATE_DISABLED;  	} else {  		/* Other Rx rings are direct mapped here */ -		bcmgenet_hfb_set_filter(priv, f_data, f_length, -					fs->ring_cookie, fs->location); -		bcmgenet_hfb_enable_filter(priv, fs->location); +		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, +							 fs->ring_cookie); +		bcmgenet_hfb_enable_filter(priv, f);  		rule->state = BCMGENET_RXNFC_STATE_ENABLED;  	} - -	kfree(f_data); - -	return err;  } -/* bcmgenet_hfb_add_filter - * - * Add new filter to Hardware Filter Block to match and direct Rx traffic to - * desired Rx queue. 
- * - * f_data is an array of unsigned 32-bit integers where each 32-bit integer - * provides filter data for 2 bytes (4 nibbles) of Rx frame: - * - * bits 31:20 - unused - * bit  19    - nibble 0 match enable - * bit  18    - nibble 1 match enable - * bit  17    - nibble 2 match enable - * bit  16    - nibble 3 match enable - * bits 15:12 - nibble 0 data - * bits 11:8  - nibble 1 data - * bits 7:4   - nibble 2 data - * bits 3:0   - nibble 3 data - * - * Example: - * In order to match: - * - Ethernet frame type = 0x0800 (IP) - * - IP version field = 4 - * - IP protocol field = 0x11 (UDP) - * - * The following filter is needed: - * u32 hfb_filter_ipv4_udp[] = { - *   Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000, - *   Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000, - *   Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011, - * }; +/* bcmgenet_hfb_clear   * - * To add the filter to HFB and direct the traffic to Rx queue 0, call: - * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp, - *                         ARRAY_SIZE(hfb_filter_ipv4_udp), 0); + * Clear Hardware Filter Block and disable all filtering.   */ -int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data, -			    u32 f_length, u32 rx_queue) +static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)  { -	int f_index; - -	f_index = bcmgenet_hfb_find_unused_filter(priv); -	if (f_index < 0) -		return -ENOMEM; +	u32 base, i; -	if (f_length > priv->hw_params->hfb_filter_size) -		return -EINVAL; - -	bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index); -	bcmgenet_hfb_enable_filter(priv, f_index); - -	return 0; +	base = f_index * priv->hw_params->hfb_filter_size; +	for (i = 0; i < priv->hw_params->hfb_filter_size; i++) +		bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));  } -/* bcmgenet_hfb_clear - * - * Clear Hardware Filter Block and disable all filtering. 
- */  static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)  {  	u32 i; +	if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) +		return; +  	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);  	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);  	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); @@ -816,19 +731,18 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)  		bcmgenet_hfb_reg_writel(priv, 0x0,  					HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); -	for (i = 0; i < priv->hw_params->hfb_filter_cnt * -			priv->hw_params->hfb_filter_size; i++) -		bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32)); +	for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) +		bcmgenet_hfb_clear_filter(priv, i);  }  static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)  {  	int i; +	INIT_LIST_HEAD(&priv->rxnfc_list);  	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))  		return; -	INIT_LIST_HEAD(&priv->rxnfc_list);  	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {  		INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);  		priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED; @@ -1513,18 +1427,15 @@ static int bcmgenet_insert_flow(struct net_device *dev,  	loc_rule = &priv->rxnfc_rules[cmd->fs.location];  	if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)  		bcmgenet_hfb_disable_filter(priv, cmd->fs.location); -	if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) +	if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {  		list_del(&loc_rule->list); +		bcmgenet_hfb_clear_filter(priv, cmd->fs.location); +	}  	loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;  	memcpy(&loc_rule->fs, &cmd->fs,  	       sizeof(struct ethtool_rx_flow_spec)); -	err = bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); -	if (err) { -		netdev_err(dev, "rxnfc: Could not install rule (%d)\n", -			   err); -		return err; -	} +	bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);  	list_add_tail(&loc_rule->list, &priv->rxnfc_list); @@ -1549,8 +1460,10 @@ static int bcmgenet_delete_flow(struct net_device *dev,  	if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)  		bcmgenet_hfb_disable_filter(priv, cmd->fs.location); -	if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) +	if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {  		list_del(&rule->list); +		bcmgenet_hfb_clear_filter(priv, cmd->fs.location); +	}  	rule->state = BCMGENET_RXNFC_STATE_UNUSED;  	memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); @@ -2118,11 +2031,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)  		goto out;  	} -	if (skb_padto(skb, ETH_ZLEN)) { -		ret = NETDEV_TX_OK; -		goto out; -	} -  	/* Retain how many bytes will be sent on the wire, without TSB inserted  	 * by transmit checksum offload  	 */ @@ -2169,6 +2077,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)  		len_stat = (size << DMA_BUFLENGTH_SHIFT) |  			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); +		/* Note: if we ever change from DMA_TX_APPEND_CRC below we +		 * will need to restore software padding of "runt" packets +		 */  		if (!i) {  			len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;  			if (skb->ip_summed == CHECKSUM_PARTIAL) @@ -4077,7 +3988,7 @@ static int bcmgenet_probe(struct platform_device *pdev)  	if (err)  		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));  	if (err) -		goto err; +		goto err_clk_disable;  	/* Mii wait queue */  	init_waitqueue_head(&priv->wq); @@ -4089,14 +4000,14 @@ static int bcmgenet_probe(struct platform_device *pdev)  	if (IS_ERR(priv->clk_wol)) {  		dev_dbg(&priv->pdev->dev, "failed to get 
enet-wol clock\n");  		err = PTR_ERR(priv->clk_wol); -		goto err; +		goto err_clk_disable;  	}  	priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");  	if (IS_ERR(priv->clk_eee)) {  		dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");  		err = PTR_ERR(priv->clk_eee); -		goto err; +		goto err_clk_disable;  	}  	/* If this is an internal GPHY, power it on now, before UniMAC is @@ -4207,8 +4118,9 @@ static int bcmgenet_resume(struct device *d)  {  	struct net_device *dev = dev_get_drvdata(d);  	struct bcmgenet_priv *priv = netdev_priv(dev); +	struct bcmgenet_rxnfc_rule *rule;  	unsigned long dma_ctrl; -	u32 offset, reg; +	u32 reg;  	int ret;  	if (!netif_running(dev)) @@ -4239,10 +4151,11 @@ static int bcmgenet_resume(struct device *d)  	bcmgenet_set_hw_addr(priv, dev->dev_addr); -	offset = HFB_FLT_ENABLE_V3PLUS; -	bcmgenet_hfb_reg_writel(priv, priv->hfb_en[1], offset); -	bcmgenet_hfb_reg_writel(priv, priv->hfb_en[2], offset + sizeof(u32)); -	bcmgenet_hfb_reg_writel(priv, priv->hfb_en[0], HFB_CTRL); +	/* Restore hardware filters */ +	bcmgenet_hfb_clear(priv); +	list_for_each_entry(rule, &priv->rxnfc_list, list) +		if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) +			bcmgenet_hfb_create_rxnfc_filter(priv, rule);  	if (priv->internal_phy) {  		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); @@ -4286,7 +4199,6 @@ static int bcmgenet_suspend(struct device *d)  {  	struct net_device *dev = dev_get_drvdata(d);  	struct bcmgenet_priv *priv = netdev_priv(dev); -	u32 offset;  	if (!netif_running(dev))  		return 0; @@ -4298,11 +4210,7 @@ static int bcmgenet_suspend(struct device *d)  	if (!device_may_wakeup(d))  		phy_suspend(dev->phydev); -	/* Preserve filter state and disable filtering */ -	priv->hfb_en[0] = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); -	offset = HFB_FLT_ENABLE_V3PLUS; -	priv->hfb_en[1] = bcmgenet_hfb_reg_readl(priv, offset); -	priv->hfb_en[2] = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32)); +	/* Disable filtering */  	bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);  	return 0; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index a12cb59298f4..f6ca01da141d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -696,7 +696,6 @@ struct bcmgenet_priv {  	u32 wolopts;  	u8 sopass[SOPASS_MAX];  	bool wol_active; -	u32 hfb_en[3];  	struct bcmgenet_mib_counters mib; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 4ea6a26b04f7..1c86eddb1b51 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -217,20 +217,28 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,  	priv->wol_active = 0;  	clk_disable_unprepare(priv->clk_wol); +	priv->crc_fwd_en = 0;  	/* Disable Magic Packet Detection */ -	reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); -	reg &= ~(MPD_EN | MPD_PW_EN); -	bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); +	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { +		reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); +		if (!(reg & MPD_EN)) +			return;	/* already reset so skip the rest */ +		reg &= ~(MPD_EN | MPD_PW_EN); +		bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); +	}  	/* Disable WAKE_FILTER Detection */ -	reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); -	reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); -	bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); +	if (priv->wolopts & WAKE_FILTER) { +		reg = 
bcmgenet_hfb_reg_readl(priv, HFB_CTRL); +		if (!(reg & RBUF_ACPI_EN)) +			return;	/* already reset so skip the rest */ +		reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); +		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); +	}  	/* Disable CRC Forward */  	reg = bcmgenet_umac_readl(priv, UMAC_CMD);  	reg &= ~CMD_CRC_FWD;  	bcmgenet_umac_writel(priv, reg, UMAC_CMD); -	priv->crc_fwd_en = 0;  } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 7a3b22b35238..ebff1fc0d8ce 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -18168,8 +18168,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,  	rtnl_lock(); -	/* We probably don't have netdev yet */ -	if (!netdev || !netif_running(netdev)) +	/* Could be second call or maybe we don't have netdev yet */ +	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))  		goto done;  	/* We needn't recover from permanent error */ |
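
For readers of the bnxt.h hunk above, here is a minimal standalone sketch (plain C, outside the kernel) of how the new fw_ver_code value packs the 16-bit firmware version fields reported by HWRM_VER_GET, and how a major-version check such as BNXT_FW_MAJ(bp) <= 20 decodes it. The BNXT_FW_MAJ() variant below takes the packed value directly instead of a struct bnxt pointer, and the version numbers are invented for illustration.

/* Standalone illustration, not driver code: pack four 16-bit firmware
 * version fields into one 64-bit value and recover the major number.
 */
#include <stdint.h>
#include <stdio.h>

#define BNXT_FW_VER_CODE(maj, min, bld, rsv)                        \
	((uint64_t)(maj) << 48 | (uint64_t)(min) << 32 |            \
	 (uint64_t)(bld) << 16 | (rsv))
/* In the kernel this macro takes a struct bnxt *; here it takes the code. */
#define BNXT_FW_MAJ(code)	((code) >> 48)

int main(void)
{
	/* Example values only; real values come from the VER_GET response. */
	uint64_t code = BNXT_FW_VER_CODE(218, 0, 1, 0);

	printf("fw_ver_code = 0x%016llx, major = %llu\n",
	       (unsigned long long)code,
	       (unsigned long long)BNXT_FW_MAJ(code));
	return 0;
}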
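In the same spirit, a rough standalone illustration of the revised completion-ring sizing in bnxt_set_ring_params(): RX and TPA completions are 32 bytes (two 16-byte completion entries each), so they count twice, and TPA completions are added on top of the RX ring size because TPA_START completions recycle their buffers immediately. The helper name and the example numbers below are invented; only the arithmetic mirrors the hunk above.

/* Illustrative only: mirrors the sizing arithmetic in the bnxt.c hunk,
 * not the actual driver structures.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int cp_ring_entries(unsigned int rx_ring_size,
				    unsigned int tx_ring_size,
				    unsigned int agg_ring_size,
				    unsigned int max_tpa, bool tpa_enabled)
{
	unsigned int max_rx_cmpl = rx_ring_size;

	/* TPA completions are not bounded by the RX ring size because
	 * TPA_START completions recycle their buffers immediately.
	 */
	if (tpa_enabled)
		max_rx_cmpl += max_tpa;

	/* RX and TPA completions take two 16-byte entries; TX and
	 * aggregation completions take one.
	 */
	return max_rx_cmpl * 2 + agg_ring_size + tx_ring_size;
}

int main(void)
{
	/* Made-up ring sizes, purely to exercise the formula. */
	printf("%u\n", cp_ring_entries(2048, 512, 4096, 256, true));
	return 0;
}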