Diffstat (limited to 'drivers/net/ethernet')
60 files changed, 525 insertions, 259 deletions
| diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 691e1475d55e..0fbecd093fa1 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -1193,7 +1193,7 @@ static int nic_register_interrupts(struct nicpf *nic)  		dev_err(&nic->pdev->dev,  			"Request for #%d msix vectors failed, returned %d\n",  			   nic->num_vec, ret); -		return 1; +		return ret;  	}  	/* Register mailbox interrupt handler */ diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index d1667b759522..a27227aeae88 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1224,7 +1224,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)  	if (ret < 0) {  		netdev_err(nic->netdev,  			   "Req for #%d msix vectors failed\n", nic->num_vec); -		return 1; +		return ret;  	}  	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF"); @@ -1243,7 +1243,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)  	if (!nicvf_check_pf_ready(nic)) {  		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);  		nicvf_unregister_interrupts(nic); -		return 1; +		return -EIO;  	}  	return 0; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 9690e36e9e85..910b9f722504 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -157,7 +157,7 @@ static const struct {  	{ ENETC_PM0_TFRM,   "MAC tx frames" },  	{ ENETC_PM0_TFCS,   "MAC tx fcs errors" },  	{ ENETC_PM0_TVLAN,  "MAC tx VLAN frames" }, -	{ ENETC_PM0_TERR,   "MAC tx frames" }, +	{ ENETC_PM0_TERR,   "MAC tx frame errors" },  	{ ENETC_PM0_TUCA,   "MAC tx unicast frames" },  	{ ENETC_PM0_TMCA,   "MAC tx multicast frames" },  	{ ENETC_PM0_TBCA,   "MAC tx broadcast frames" }, diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 4c977dfc44f0..d522bd5c90b4 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -517,10 +517,13 @@ static void enetc_port_si_configure(struct enetc_si *si)  static void enetc_configure_port_mac(struct enetc_hw *hw)  { +	int tc; +  	enetc_port_wr(hw, ENETC_PM0_MAXFRM,  		      ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE)); -	enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE); +	for (tc = 0; tc < 8; tc++) +		enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);  	enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |  		      ENETC_PM0_CMD_TXP	| ENETC_PM0_PROMISC); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index eef1b2764d34..67b0bf310daa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);  static LIST_HEAD(hnae3_client_list);  static LIST_HEAD(hnae3_ae_dev_list); +void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo) +{ +	const struct pci_device_id *pci_id; +	struct hnae3_ae_dev *ae_dev; + +	if (!ae_algo) +		return; + +	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { +		if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) +			continue; + +		pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); +		if (!pci_id) +			continue; +		if 
(IS_ENABLED(CONFIG_PCI_IOV)) +			pci_disable_sriov(ae_dev->pdev); +	} +} +EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare); +  /* we are keeping things simple and using single lock for all the   * list. This is a non-critical code so other updations, if happen   * in parallel, can wait. diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 8ba21d6dc220..d701451596c8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -853,6 +853,7 @@ struct hnae3_handle {  int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);  void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); +void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);  void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);  void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 2b66c59f5eaf..e54f96251fea 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -137,7 +137,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {  		.name = "uc",  		.cmd = HNAE3_DBG_CMD_MAC_UC,  		.dentry = HNS3_DBG_DENTRY_MAC, -		.buf_len = HNS3_DBG_READ_LEN, +		.buf_len = HNS3_DBG_READ_LEN_128KB,  		.init = hns3_dbg_common_file_init,  	},  	{ @@ -256,7 +256,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {  		.name = "tqp",  		.cmd = HNAE3_DBG_CMD_REG_TQP,  		.dentry = HNS3_DBG_DENTRY_REG, -		.buf_len = HNS3_DBG_READ_LEN, +		.buf_len = HNS3_DBG_READ_LEN_128KB,  		.init = hns3_dbg_common_file_init,  	},  	{ @@ -298,7 +298,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {  		.name = "fd_tcam",  		.cmd = HNAE3_DBG_CMD_FD_TCAM,  		.dentry = HNS3_DBG_DENTRY_FD, -		.buf_len = HNS3_DBG_READ_LEN, +		.buf_len = HNS3_DBG_READ_LEN_1MB,  		.init = hns3_dbg_common_file_init,  	},  	{ @@ -462,7 +462,7 @@ static const struct hns3_dbg_item rx_queue_info_items[] = {  	{ "TAIL", 2 },  	{ "HEAD", 2 },  	{ "FBDNUM", 2 }, -	{ "PKTNUM", 2 }, +	{ "PKTNUM", 5 },  	{ "COPYBREAK", 2 },  	{ "RING_EN", 2 },  	{ "RX_RING_EN", 2 }, @@ -565,7 +565,7 @@ static const struct hns3_dbg_item tx_queue_info_items[] = {  	{ "HEAD", 2 },  	{ "FBDNUM", 2 },  	{ "OFFSET", 2 }, -	{ "PKTNUM", 2 }, +	{ "PKTNUM", 5 },  	{ "RING_EN", 2 },  	{ "TX_RING_EN", 2 },  	{ "BASE_ADDR", 10 }, @@ -790,13 +790,13 @@ static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)  }  static const struct hns3_dbg_item tx_bd_info_items[] = { -	{ "BD_IDX", 5 }, -	{ "ADDRESS", 2 }, +	{ "BD_IDX", 2 }, +	{ "ADDRESS", 13 },  	{ "VLAN_TAG", 2 },  	{ "SIZE", 2 },  	{ "T_CS_VLAN_TSO", 2 },  	{ "OT_VLAN_TAG", 3 }, -	{ "TV", 2 }, +	{ "TV", 5 },  	{ "OLT_VLAN_LEN", 2 },  	{ "PAYLEN_OL4CS", 2 },  	{ "BD_FE_SC_VLD", 2 }, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 468b8f07bf47..4b886a13e079 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1847,7 +1847,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)  static int hns3_skb_linearize(struct hns3_enet_ring *ring,  			      struct sk_buff *skb, -			      u8 max_non_tso_bd_num,  			      unsigned int bd_num)  {  	/* 'bd_num == UINT_MAX' means the skb' fraglist has a @@ -1864,8 +1863,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,  	 * will not help.  	 
*/  	if (skb->len > HNS3_MAX_TSO_SIZE || -	    (!skb_is_gso(skb) && skb->len > -	     HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) { +	    (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {  		u64_stats_update_begin(&ring->syncp);  		ring->stats.hw_limitation++;  		u64_stats_update_end(&ring->syncp); @@ -1900,8 +1898,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,  			goto out;  		} -		if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num, -				       bd_num)) +		if (hns3_skb_linearize(ring, skb, bd_num))  			return -ENOMEM;  		bd_num = hns3_tx_bd_count(skb->len); @@ -3258,6 +3255,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)  {  	hns3_unmap_buffer(ring, &ring->desc_cb[i]);  	ring->desc[i].addr = 0; +	ring->desc_cb[i].refill = 0;  }  static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i, @@ -3336,6 +3334,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)  	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +  					 ring->desc_cb[i].page_offset); +	ring->desc_cb[i].refill = 1;  	return 0;  } @@ -3365,6 +3364,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,  {  	hns3_unmap_buffer(ring, &ring->desc_cb[i]);  	ring->desc_cb[i] = *res_cb; +	ring->desc_cb[i].refill = 1;  	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +  					 ring->desc_cb[i].page_offset);  	ring->desc[i].rx.bd_base_info = 0; @@ -3373,6 +3373,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,  static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)  {  	ring->desc_cb[i].reuse_flag = 0; +	ring->desc_cb[i].refill = 1;  	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +  					 ring->desc_cb[i].page_offset);  	ring->desc[i].rx.bd_base_info = 0; @@ -3479,10 +3480,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)  	int ntc = ring->next_to_clean;  	int ntu = ring->next_to_use; +	if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill)) +		return ring->desc_num; +  	return ((ntc >= ntu) ? 
0 : ring->desc_num) + ntc - ntu;  } -static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, +/* Return true if there is any allocation failure */ +static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,  				      int cleand_count)  {  	struct hns3_desc_cb *desc_cb; @@ -3507,7 +3512,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,  				hns3_rl_err(ring_to_netdev(ring),  					    "alloc rx buffer failed: %d\n",  					    ret); -				break; + +				writel(i, ring->tqp->io_base + +				       HNS3_RING_RX_RING_HEAD_REG); +				return true;  			}  			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); @@ -3520,6 +3528,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,  	}  	writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); +	return false;  }  static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) @@ -3824,6 +3833,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)  {  	ring->desc[ring->next_to_clean].rx.bd_base_info &=  		cpu_to_le32(~BIT(HNS3_RXD_VLD_B)); +	ring->desc_cb[ring->next_to_clean].refill = 0;  	ring->next_to_clean += 1;  	if (unlikely(ring->next_to_clean == ring->desc_num)) @@ -4170,6 +4180,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,  {  #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16  	int unused_count = hns3_desc_unused(ring); +	bool failure = false;  	int recv_pkts = 0;  	int err; @@ -4178,9 +4189,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,  	while (recv_pkts < budget) {  		/* Reuse or realloc buffers */  		if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { -			hns3_nic_alloc_rx_buffers(ring, unused_count); -			unused_count = hns3_desc_unused(ring) - -					ring->pending_buf; +			failure = failure || +				hns3_nic_alloc_rx_buffers(ring, unused_count); +			unused_count = 0;  		}  		/* Poll one pkt */ @@ -4199,11 +4210,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,  	}  out: -	/* Make all data has been write before submit */ -	if (unused_count > 0) -		hns3_nic_alloc_rx_buffers(ring, unused_count); - -	return recv_pkts; +	return failure ? 
budget : recv_pkts;  }  static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 6162d9f88e37..f09a61d9c626 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -186,11 +186,9 @@ enum hns3_nic_state {  #define HNS3_MAX_BD_SIZE			65535  #define HNS3_MAX_TSO_BD_NUM			63U -#define HNS3_MAX_TSO_SIZE \ -	(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM) +#define HNS3_MAX_TSO_SIZE			1048576U +#define HNS3_MAX_NON_TSO_SIZE			9728U -#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \ -	(HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))  #define HNS3_VECTOR_GL0_OFFSET			0x100  #define HNS3_VECTOR_GL1_OFFSET			0x200 @@ -332,6 +330,7 @@ struct hns3_desc_cb {  	u32 length;     /* length of the buffer */  	u16 reuse_flag; +	u16 refill;  	/* desc type, used by the ring user to mark the type of the priv data */  	u16 type; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 307c9e830510..91cb578f56b8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -137,6 +137,15 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,  				*changed = true;  			break;  		case IEEE_8021QAZ_TSA_ETS: +			/* The hardware will switch to sp mode if bandwidth is +			 * 0, so limit ets bandwidth must be greater than 0. +			 */ +			if (!ets->tc_tx_bw[i]) { +				dev_err(&hdev->pdev->dev, +					"tc%u ets bw cannot be 0\n", i); +				return -EINVAL; +			} +  			if (hdev->tm_info.tc_info[i].tc_sch_mode !=  				HCLGE_SCH_MODE_DWRR)  				*changed = true; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 32f62cd2dd99..9cda8b3562b8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -391,7 +391,7 @@ static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)  static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,  				   int *pos)  { -	struct hclge_dbg_bitmap_cmd *bitmap; +	struct hclge_dbg_bitmap_cmd req;  	struct hclge_desc desc;  	u16 qset_id, qset_num;  	int ret; @@ -408,12 +408,12 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,  		if (ret)  			return ret; -		bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; +		req.bitmap = (u8)le32_to_cpu(desc.data[1]);  		*pos += scnprintf(buf + *pos, len - *pos,  				  "%04u           %#x            %#x             %#x               %#x\n", -				  qset_id, bitmap->bit0, bitmap->bit1, -				  bitmap->bit2, bitmap->bit3); +				  qset_id, req.bit0, req.bit1, req.bit2, +				  req.bit3);  	}  	return 0; @@ -422,7 +422,7 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,  static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,  				  int *pos)  { -	struct hclge_dbg_bitmap_cmd *bitmap; +	struct hclge_dbg_bitmap_cmd req;  	struct hclge_desc desc;  	u8 pri_id, pri_num;  	int ret; @@ -439,12 +439,11 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,  		if (ret)  			return ret; -		bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; +		req.bitmap = (u8)le32_to_cpu(desc.data[1]);  		*pos += scnprintf(buf + *pos, len - *pos,  		
		  "%03u       %#x           %#x                %#x\n", -				  pri_id, bitmap->bit0, bitmap->bit1, -				  bitmap->bit2); +				  pri_id, req.bit0, req.bit1, req.bit2);  	}  	return 0; @@ -453,7 +452,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,  static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,  				 int *pos)  { -	struct hclge_dbg_bitmap_cmd *bitmap; +	struct hclge_dbg_bitmap_cmd req;  	struct hclge_desc desc;  	u8 pg_id;  	int ret; @@ -466,12 +465,11 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,  		if (ret)  			return ret; -		bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; +		req.bitmap = (u8)le32_to_cpu(desc.data[1]);  		*pos += scnprintf(buf + *pos, len - *pos,  				  "%03u      %#x           %#x               %#x\n", -				  pg_id, bitmap->bit0, bitmap->bit1, -				  bitmap->bit2); +				  pg_id, req.bit0, req.bit1, req.bit2);  	}  	return 0; @@ -511,7 +509,7 @@ static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,  static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,  				   int *pos)  { -	struct hclge_dbg_bitmap_cmd *bitmap; +	struct hclge_dbg_bitmap_cmd req;  	struct hclge_desc desc;  	u8 port_id = 0;  	int ret; @@ -521,12 +519,12 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,  	if (ret)  		return ret; -	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; +	req.bitmap = (u8)le32_to_cpu(desc.data[1]);  	*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n", -			 bitmap->bit0); +			 req.bit0);  	*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n", -			 bitmap->bit1); +			 req.bit1);  	return 0;  } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index bb9b026ae88e..93aa7f2bdc13 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -1560,8 +1560,11 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)  	/* configure TM QCN hw errors */  	hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false); -	if (en) +	desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE); +	if (en) { +		desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);  		desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN); +	}  	ret = hclge_cmd_send(&hdev->hw, &desc, 1);  	if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index 07987fb8332e..d811eeefe2c0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -50,6 +50,8 @@  #define HCLGE_PPP_MPF_ECC_ERR_INT3_EN	0x003F  #define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK	0x003F  #define HCLGE_TM_SCH_ECC_ERR_INT_EN	0x3 +#define HCLGE_TM_QCN_ERR_INT_TYPE	0x29 +#define HCLGE_TM_QCN_FIFO_INT_EN	0xFFFF00  #define HCLGE_TM_QCN_MEM_ERR_INT_EN	0xFFFFFF  #define HCLGE_NCSI_ERR_INT_EN	0x3  #define HCLGE_NCSI_ERR_INT_TYPE	0x9 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index f5b8d1fee0f1..d891390d492f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2847,33 +2847,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)  {  	if (!test_bit(HCLGE_STATE_REMOVING, 
&hdev->state) &&  	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) -		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), -				    hclge_wq, &hdev->service_task, 0); +		mod_delayed_work(hclge_wq, &hdev->service_task, 0);  }  static void hclge_reset_task_schedule(struct hclge_dev *hdev)  {  	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && +	    test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&  	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) -		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), -				    hclge_wq, &hdev->service_task, 0); +		mod_delayed_work(hclge_wq, &hdev->service_task, 0);  }  static void hclge_errhand_task_schedule(struct hclge_dev *hdev)  {  	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&  	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) -		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), -				    hclge_wq, &hdev->service_task, 0); +		mod_delayed_work(hclge_wq, &hdev->service_task, 0);  }  void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)  {  	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&  	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) -		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), -				    hclge_wq, &hdev->service_task, -				    delay_time); +		mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);  }  static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) @@ -3491,33 +3487,14 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)  	hdev->num_msi_used += 1;  } -static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify, -				      const cpumask_t *mask) -{ -	struct hclge_dev *hdev = container_of(notify, struct hclge_dev, -					      affinity_notify); - -	cpumask_copy(&hdev->affinity_mask, mask); -} - -static void hclge_irq_affinity_release(struct kref *ref) -{ -} -  static void hclge_misc_affinity_setup(struct hclge_dev *hdev)  {  	irq_set_affinity_hint(hdev->misc_vector.vector_irq,  			      &hdev->affinity_mask); - -	hdev->affinity_notify.notify = hclge_irq_affinity_notify; -	hdev->affinity_notify.release = hclge_irq_affinity_release; -	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, -				  &hdev->affinity_notify);  }  static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)  { -	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);  	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);  } @@ -13052,7 +13029,7 @@ static int hclge_init(void)  {  	pr_info("%s is initializing\n", HCLGE_NAME); -	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME); +	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);  	if (!hclge_wq) {  		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);  		return -ENOMEM; @@ -13065,6 +13042,7 @@ static int hclge_init(void)  static void hclge_exit(void)  { +	hnae3_unregister_ae_algo_prepare(&ae_algo);  	hnae3_unregister_ae_algo(&ae_algo);  	destroy_workqueue(hclge_wq);  } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index de6afbcbfbac..69cd8f87b4c8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -944,7 +944,6 @@ struct hclge_dev {  	/* affinity mask and notify for misc interrupt */  	cpumask_t affinity_mask; -	struct irq_affinity_notify affinity_notify;  	struct hclge_ptp *ptp;  	struct devlink *devlink;  }; diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index f314dbd3ce11..95074e91a846 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -752,6 +752,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)  		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;  		for (k = 0; k < hdev->tm_info.num_tc; k++)  			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT; +		for (; k < HNAE3_MAX_TC; k++) +			hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;  	}  } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 5fdac8685f95..cf00ad7bb881 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2232,6 +2232,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)  void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)  {  	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && +	    test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&  	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,  			      &hdev->state))  		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); @@ -2273,9 +2274,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)  		hdev->reset_attempts = 0;  		hdev->last_reset_time = jiffies; -		while ((hdev->reset_type = -			hclgevf_get_reset_level(hdev, &hdev->reset_pending)) -		       != HNAE3_NONE_RESET) +		hdev->reset_type = +			hclgevf_get_reset_level(hdev, &hdev->reset_pending); +		if (hdev->reset_type != HNAE3_NONE_RESET)  			hclgevf_reset(hdev);  	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,  				      &hdev->reset_state)) { @@ -3449,6 +3450,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)  	hclgevf_init_rxd_adv_layout(hdev); +	set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); +  	hdev->last_reset_time = jiffies;  	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",  		 HCLGEVF_DRIVER_NAME); @@ -3899,7 +3902,7 @@ static int hclgevf_init(void)  {  	pr_info("%s is initializing\n", HCLGEVF_NAME); -	hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); +	hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);  	if (!hclgevf_wq) {  		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);  		return -ENOMEM; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 883130a9b48f..28288d7e3303 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -146,6 +146,7 @@ enum hclgevf_states {  	HCLGEVF_STATE_REMOVING,  	HCLGEVF_STATE_NIC_REGISTERED,  	HCLGEVF_STATE_ROCE_REGISTERED, +	HCLGEVF_STATE_SERVICE_INITED,  	/* task states */  	HCLGEVF_STATE_RST_SERVICE_SCHED,  	HCLGEVF_STATE_RST_HANDLING, diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 5b2143f4b1f8..3178efd98006 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -113,7 +113,8 @@ enum e1000_boards {  	board_pch2lan,  	board_pch_lpt,  	board_pch_spt, -	board_pch_cnp +	board_pch_cnp, +	board_pch_tgp  };  struct e1000_ps_page { @@ -499,6 +500,7 @@ extern const struct e1000_info e1000_pch2_info;  extern const struct e1000_info e1000_pch_lpt_info;  extern const struct e1000_info 
e1000_pch_spt_info;  extern const struct e1000_info e1000_pch_cnp_info; +extern const struct e1000_info e1000_pch_tgp_info;  extern const struct e1000_info e1000_es2_info;  void e1000e_ptp_init(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 60c582a16821..5e4fc9b4e2ad 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -4813,7 +4813,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)  static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)  {  	struct e1000_mac_info *mac = &hw->mac; -	u32 ctrl_ext, txdctl, snoop; +	u32 ctrl_ext, txdctl, snoop, fflt_dbg;  	s32 ret_val;  	u16 i; @@ -4872,6 +4872,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)  		snoop = (u32)~(PCIE_NO_SNOOP_ALL);  	e1000e_set_pcie_no_snoop(hw, snoop); +	/* Enable workaround for packet loss issue on TGP PCH +	 * Do not gate DMA clock from the modPHY block +	 */ +	if (mac->type >= e1000_pch_tgp) { +		fflt_dbg = er32(FFLT_DBG); +		fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK; +		ew32(FFLT_DBG, fflt_dbg); +	} +  	ctrl_ext = er32(CTRL_EXT);  	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;  	ew32(CTRL_EXT, ctrl_ext); @@ -5992,3 +6001,23 @@ const struct e1000_info e1000_pch_cnp_info = {  	.phy_ops		= &ich8_phy_ops,  	.nvm_ops		= &spt_nvm_ops,  }; + +const struct e1000_info e1000_pch_tgp_info = { +	.mac			= e1000_pch_tgp, +	.flags			= FLAG_IS_ICH +				  | FLAG_HAS_WOL +				  | FLAG_HAS_HW_TIMESTAMP +				  | FLAG_HAS_CTRLEXT_ON_LOAD +				  | FLAG_HAS_AMT +				  | FLAG_HAS_FLASH +				  | FLAG_HAS_JUMBO_FRAMES +				  | FLAG_APME_IN_WUC, +	.flags2			= FLAG2_HAS_PHY_STATS +				  | FLAG2_HAS_EEE, +	.pba			= 26, +	.max_hw_frame_size	= 9022, +	.get_variants		= e1000_get_variants_ich8lan, +	.mac_ops		= &ich8_mac_ops, +	.phy_ops		= &ich8_phy_ops, +	.nvm_ops		= &spt_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index d6a092e5ee74..2504b11c3169 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -289,6 +289,9 @@  /* Proprietary Latency Tolerance Reporting PCI Capability */  #define E1000_PCI_LTR_CAP_LPT		0xA8 +/* Don't gate wake DMA clock */ +#define E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK	0x1000 +  void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);  void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,  						  bool state); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 900b3ab998bd..ebcb2a30add0 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -51,6 +51,7 @@ static const struct e1000_info *e1000_info_tbl[] = {  	[board_pch_lpt]		= &e1000_pch_lpt_info,  	[board_pch_spt]		= &e1000_pch_spt_info,  	[board_pch_cnp]		= &e1000_pch_cnp_info, +	[board_pch_tgp]		= &e1000_pch_tgp_info,  };  struct e1000_reg_info { @@ -7896,28 +7897,28 @@ static const struct pci_device_id e1000_pci_tbl[] = {  	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },  	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },  	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp }, -	{ 
PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp }, -	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp }, +	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },  	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */  }; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 2fb81e359cdf..df5ad4de1f00 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -25,6 +25,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)  	case ICE_DEV_ID_E810C_BACKPLANE:  	case ICE_DEV_ID_E810C_QSFP:  	case ICE_DEV_ID_E810C_SFP: +	case ICE_DEV_ID_E810_XXV_BACKPLANE: +	case ICE_DEV_ID_E810_XXV_QSFP:  	case ICE_DEV_ID_E810_XXV_SFP:  		hw->mac_type = 
ICE_MAC_E810;  		break; diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index 9d8194671f6a..ef4392e6e244 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -21,6 +21,10 @@  #define ICE_DEV_ID_E810C_QSFP		0x1592  /* Intel(R) Ethernet Controller E810-C for SFP */  #define ICE_DEV_ID_E810C_SFP		0x1593 +/* Intel(R) Ethernet Controller E810-XXV for backplane */ +#define ICE_DEV_ID_E810_XXV_BACKPLANE	0x1599 +/* Intel(R) Ethernet Controller E810-XXV for QSFP */ +#define ICE_DEV_ID_E810_XXV_QSFP	0x159A  /* Intel(R) Ethernet Controller E810-XXV for SFP */  #define ICE_DEV_ID_E810_XXV_SFP		0x159B  /* Intel(R) Ethernet Connection E823-C for backplane */ diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 14afce82ef63..da7288bdc9a3 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -63,7 +63,8 @@ static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)  {  	struct ice_hw *hw = &pf->hw; -	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver); +	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver, +		 hw->api_min_ver, hw->api_patch);  	return 0;  } diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 06ac9badee77..1ac96dc66d0d 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -1668,7 +1668,7 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,  	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)  		if (hw->tnl.tbl[i].valid &&  		    hw->tnl.tbl[i].type == type && -		    idx--) +		    idx-- == 0)  			return i;  	WARN_ON_ONCE(1); @@ -1828,7 +1828,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,  	u16 index;  	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? 
TNL_VXLAN : TNL_GENEVE; -	index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type); +	index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);  	status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));  	if (status) { diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 37c18c66b5c7..e375ac849aec 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -100,9 +100,9 @@ static void ice_display_lag_info(struct ice_lag *lag)   */  static void ice_lag_info_event(struct ice_lag *lag, void *ptr)  { -	struct net_device *event_netdev, *netdev_tmp;  	struct netdev_notifier_bonding_info *info;  	struct netdev_bonding_info *bonding_info; +	struct net_device *event_netdev;  	const char *lag_netdev_name;  	event_netdev = netdev_notifier_info_to_dev(ptr); @@ -123,19 +123,6 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)  		goto lag_out;  	} -	rcu_read_lock(); -	for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) { -		if (!netif_is_ice(netdev_tmp)) -			continue; - -		if (netdev_tmp && netdev_tmp != lag->netdev && -		    lag->peer_netdev != netdev_tmp) { -			dev_hold(netdev_tmp); -			lag->peer_netdev = netdev_tmp; -		} -	} -	rcu_read_unlock(); -  	if (bonding_info->slave.state)  		ice_lag_set_backup(lag);  	else @@ -319,6 +306,9 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,  	case NETDEV_BONDING_INFO:  		ice_lag_info_event(lag, ptr);  		break; +	case NETDEV_UNREGISTER: +		ice_lag_unlink(lag, ptr); +		break;  	default:  		break;  	} diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index dde9802c6c72..b718e196af2a 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2841,6 +2841,7 @@ void ice_napi_del(struct ice_vsi *vsi)   */  int ice_vsi_release(struct ice_vsi *vsi)  { +	enum ice_status err;  	struct ice_pf *pf;  	if (!vsi->back) @@ -2912,6 +2913,10 @@ int ice_vsi_release(struct ice_vsi *vsi)  	ice_fltr_remove_all(vsi);  	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); +	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); +	if (err) +		dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", +			vsi->vsi_num, err);  	ice_vsi_delete(vsi);  	ice_vsi_free_q_vectors(vsi); @@ -3092,6 +3097,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)  	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);  	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); +	ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); +	if (ret) +		dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", +			vsi->vsi_num, ret);  	ice_vsi_free_q_vectors(vsi);  	/* SR-IOV determines needed MSIX resources all at once instead of per diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 0d6c143f6653..06fa93e597fb 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4224,6 +4224,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)  	if (!pf)  		return -ENOMEM; +	/* initialize Auxiliary index to invalid value */ +	pf->aux_idx = -1; +  	/* set up for high or low DMA */  	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));  	if (err) @@ -4615,7 +4618,8 @@ static void ice_remove(struct pci_dev *pdev)  	ice_aq_cancel_waiting_tasks(pf);  	
ice_unplug_aux_dev(pf); -	ida_free(&ice_aux_ida, pf->aux_idx); +	if (pf->aux_idx >= 0) +		ida_free(&ice_aux_ida, pf->aux_idx);  	set_bit(ICE_DOWN, pf->state);  	mutex_destroy(&(&pf->hw)->fdir_fltr_lock); @@ -5016,6 +5020,8 @@ static const struct pci_device_id ice_pci_tbl[] = {  	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },  	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },  	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, +	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 }, +	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },  	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },  	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },  	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 }, diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 80380aed8882..d1ef3d48a4b0 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -1571,6 +1571,9 @@ err_kworker:   */  void ice_ptp_release(struct ice_pf *pf)  { +	if (!test_bit(ICE_FLAG_PTP, pf->flags)) +		return; +  	/* Disable timestamping for both Tx and Rx */  	ice_ptp_cfg_timestamp(pf, false); diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 9f07b6641705..2d9b10277186 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -2071,6 +2071,19 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)  }  /** + * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes + * @pi: port information structure + * @vsi_handle: software VSI handle + * + * This function clears the VSI and its RDMA children nodes from scheduler tree + * for all TCs. + */ +enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) +{ +	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA); +} + +/**   * ice_get_agg_info - get the aggregator ID   * @hw: pointer to the hardware structure   * @agg_id: aggregator ID diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index 9beef8f0ec76..fdf7a5882f07 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -89,6 +89,7 @@ enum ice_status  ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,  		  u8 owner, bool enable);  enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle); +enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);  /* Tx scheduler rate limiter functions */  enum ice_status diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 4461f8b9a864..4e0203336c6b 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -22,8 +22,8 @@  #define IGC_DEV_ID_I220_V			0x15F7  #define IGC_DEV_ID_I225_K			0x3100  #define IGC_DEV_ID_I225_K2			0x3101 +#define IGC_DEV_ID_I226_K			0x3102  #define IGC_DEV_ID_I225_LMVP			0x5502 -#define IGC_DEV_ID_I226_K			0x5504  #define IGC_DEV_ID_I225_IT			0x0D9F  #define IGC_DEV_ID_I226_LM			0x125B  #define IGC_DEV_ID_I226_V			0x125C diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 9338765da048..49d822a98ada 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -226,18 +226,85 @@ static const struct 
file_operations rvu_dbg_##name##_fops = { \  static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf); +static void get_lf_str_list(struct rvu_block block, int pcifunc, +			    char *lfs) +{ +	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max; + +	for_each_set_bit(lf, block.lf.bmap, block.lf.max) { +		if (lf >= block.lf.max) +			break; + +		if (block.fn_map[lf] != pcifunc) +			continue; + +		if (lf == prev_lf + 1) { +			prev_lf = lf; +			seq = 1; +			continue; +		} + +		if (seq) +			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf); +		else +			len += (len ? sprintf(lfs + len, ",%d", lf) : +				      sprintf(lfs + len, "%d", lf)); + +		prev_lf = lf; +		seq = 0; +	} + +	if (seq) +		len += sprintf(lfs + len, "-%d", prev_lf); + +	lfs[len] = '\0'; +} + +static int get_max_column_width(struct rvu *rvu) +{ +	int index, pf, vf, lf_str_size = 12, buf_size = 256; +	struct rvu_block block; +	u16 pcifunc; +	char *buf; + +	buf = kzalloc(buf_size, GFP_KERNEL); +	if (!buf) +		return -ENOMEM; + +	for (pf = 0; pf < rvu->hw->total_pfs; pf++) { +		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { +			pcifunc = pf << 10 | vf; +			if (!pcifunc) +				continue; + +			for (index = 0; index < BLK_COUNT; index++) { +				block = rvu->hw->block[index]; +				if (!strlen(block.name)) +					continue; + +				get_lf_str_list(block, pcifunc, buf); +				if (lf_str_size <= strlen(buf)) +					lf_str_size = strlen(buf) + 1; +			} +		} +	} + +	kfree(buf); +	return lf_str_size; +} +  /* Dumps current provisioning status of all RVU block LFs */  static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,  					  char __user *buffer,  					  size_t count, loff_t *ppos)  { -	int index, off = 0, flag = 0, go_back = 0, len = 0; +	int index, off = 0, flag = 0, len = 0, i = 0;  	struct rvu *rvu = filp->private_data; -	int lf, pf, vf, pcifunc; +	int bytes_not_copied = 0;  	struct rvu_block block; -	int bytes_not_copied; -	int lf_str_size = 12; +	int pf, vf, pcifunc;  	int buf_size = 2048; +	int lf_str_size;  	char *lfs;  	char *buf; @@ -249,6 +316,9 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,  	if (!buf)  		return -ENOSPC; +	/* Get the maximum width of a column */ +	lf_str_size = get_max_column_width(rvu); +  	lfs = kzalloc(lf_str_size, GFP_KERNEL);  	if (!lfs) {  		kfree(buf); @@ -262,65 +332,69 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,  					 "%-*s", lf_str_size,  					 rvu->hw->block[index].name);  		} +  	off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); +	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off); +	if (bytes_not_copied) +		goto out; + +	i++; +	*ppos += off;  	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {  		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { +			off = 0; +			flag = 0;  			pcifunc = pf << 10 | vf;  			if (!pcifunc)  				continue;  			if (vf) {  				sprintf(lfs, "PF%d:VF%d", pf, vf - 1); -				go_back = scnprintf(&buf[off], -						    buf_size - 1 - off, -						    "%-*s", lf_str_size, lfs); +				off = scnprintf(&buf[off], +						buf_size - 1 - off, +						"%-*s", lf_str_size, lfs);  			} else {  				sprintf(lfs, "PF%d", pf); -				go_back = scnprintf(&buf[off], -						    buf_size - 1 - off, -						    "%-*s", lf_str_size, lfs); +				off = scnprintf(&buf[off], +						buf_size - 1 - off, +						"%-*s", lf_str_size, lfs);  			} -			off += go_back; -			for (index = 0; index < BLKTYPE_MAX; index++) { +			for (index = 0; index < BLK_COUNT; index++) {  				block = rvu->hw->block[index];  				if (!strlen(block.name))  					continue;  				len = 
0;  				lfs[len] = '\0'; -				for (lf = 0; lf < block.lf.max; lf++) { -					if (block.fn_map[lf] != pcifunc) -						continue; +				get_lf_str_list(block, pcifunc, lfs); +				if (strlen(lfs))  					flag = 1; -					len += sprintf(&lfs[len], "%d,", lf); -				} -				if (flag) -					len--; -				lfs[len] = '\0';  				off += scnprintf(&buf[off], buf_size - 1 - off,  						 "%-*s", lf_str_size, lfs); -				if (!strlen(lfs)) -					go_back += lf_str_size;  			} -			if (!flag) -				off -= go_back; -			else -				flag = 0; -			off--; -			off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n"); +			if (flag) { +				off +=	scnprintf(&buf[off], +						  buf_size - 1 - off, "\n"); +				bytes_not_copied = copy_to_user(buffer + +								(i * off), +								buf, off); +				if (bytes_not_copied) +					goto out; + +				i++; +				*ppos += off; +			}  		}  	} -	bytes_not_copied = copy_to_user(buffer, buf, off); +out:  	kfree(lfs);  	kfree(buf); -  	if (bytes_not_copied)  		return -EFAULT; -	*ppos = off; -	return off; +	return *ppos;  }  RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL); @@ -504,7 +578,7 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,  	if (cmd_buf)  		ret = -EINVAL; -	if (!strncmp(subtoken, "help", 4) || ret < 0) { +	if (ret < 0 || !strncmp(subtoken, "help", 4)) {  		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);  		goto qsize_write_done;  	} @@ -1719,6 +1793,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)  	u16 pcifunc;  	char *str; +	/* Ingress policers do not exist on all platforms */ +	if (!nix_hw->ipolicer) +		return 0; +  	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {  		if (layer == BAND_PROF_INVAL_LAYER)  			continue; @@ -1768,6 +1846,10 @@ static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)  	int layer;  	char *str; +	/* Ingress policers do not exist on all platforms */ +	if (!nix_hw->ipolicer) +		return 0; +  	seq_puts(m, "\nBandwidth profile resource free count\n");  	seq_puts(m, "=====================================\n");  	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 9ef4e942e31e..6970540dc470 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -2507,6 +2507,9 @@ static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)  		return;  	nix_hw = get_nix_hw(rvu->hw, blkaddr); +	if (!nix_hw) +		return; +  	vlan = &nix_hw->txvlan;  	mutex_lock(&vlan->rsrc_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 41684a6c44e9..a88a1a48229f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -199,6 +199,9 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);  int mlx5e_create_flow_steering(struct mlx5e_priv *priv);  void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); +int mlx5e_fs_init(struct mlx5e_priv *priv); +void mlx5e_fs_cleanup(struct mlx5e_priv *priv); +  int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int  trap_id, int tir_num);  void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);  int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int  trap_id, int tir_num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index b4e986818794..4a13ef561587 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -10,6 +10,8 @@  #include "en_tc.h"  #include "rep/tc.h"  #include "rep/neigh.h" +#include "lag.h" +#include "lag_mp.h"  struct mlx5e_tc_tun_route_attr {  	struct net_device *out_dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 33de8f0092a6..fb5397324aa4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -141,8 +141,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,  	 * Pkt: MAC  IP     ESP  IP    L4  	 *  	 * Transport Mode: -	 * SWP:      OutL3       InL4 -	 *           InL3 +	 * SWP:      OutL3       OutL4  	 * Pkt: MAC  IP     ESP  L4  	 *  	 * Tunnel(VXLAN TCP/UDP) over Transport Mode @@ -171,31 +170,35 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,  		return;  	if (!xo->inner_ipproto) { -		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2; -		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2; -		if (skb->protocol == htons(ETH_P_IPV6)) -			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6; -		if (xo->proto == IPPROTO_UDP) +		switch (xo->proto) { +		case IPPROTO_UDP: +			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP; +			fallthrough; +		case IPPROTO_TCP: +			/* IP | ESP | TCP */ +			eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2; +			break; +		default: +			break; +		} +	} else { +		/* Tunnel(VXLAN TCP/UDP) over Transport Mode */ +		switch (xo->inner_ipproto) { +		case IPPROTO_UDP:  			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP; -		return; -	} - -	/* Tunnel(VXLAN TCP/UDP) over Transport Mode */ -	switch (xo->inner_ipproto) { -	case IPPROTO_UDP: -		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP; -		fallthrough; -	case IPPROTO_TCP: -		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2; -		eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2; -		if (skb->protocol == htons(ETH_P_IPV6)) -			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6; -		break; -	default: -		break; +			fallthrough; +		case IPPROTO_TCP: +			eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2; +			eseg->swp_inner_l4_offset = +				(skb->csum_start + skb->head - skb->data) / 2; +			if (skb->protocol == htons(ETH_P_IPV6)) +				eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6; +			break; +		default: +			break; +		}  	} -	return;  }  void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index c06b4b938ae7..d226cc5ab1d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -1186,10 +1186,6 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)  	struct mlx5e_flow_table *ft;  	int err; -	priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL); -	if (!priv->fs.vlan) -		return -ENOMEM; -  	ft = &priv->fs.vlan->ft;  	ft->num_groups = 0; @@ -1198,10 +1194,8 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)  	ft_attr.prio = MLX5E_NIC_PRIO;  	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); -	if (IS_ERR(ft->t)) { -		err = PTR_ERR(ft->t); -		goto err_free_t; -	} +	if (IS_ERR(ft->t)) +		return PTR_ERR(ft->t);  	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);  	if (!ft->g) { @@ -1221,9 
+1215,6 @@ err_free_g:  	kfree(ft->g);  err_destroy_vlan_table:  	mlx5_destroy_flow_table(ft->t); -err_free_t: -	kvfree(priv->fs.vlan); -	priv->fs.vlan = NULL;  	return err;  } @@ -1232,7 +1223,6 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)  {  	mlx5e_del_vlan_rules(priv);  	mlx5e_destroy_flow_table(&priv->fs.vlan->ft); -	kvfree(priv->fs.vlan);  }  static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv) @@ -1351,3 +1341,17 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)  	mlx5e_arfs_destroy_tables(priv);  	mlx5e_ethtool_cleanup_steering(priv);  } + +int mlx5e_fs_init(struct mlx5e_priv *priv) +{ +	priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL); +	if (!priv->fs.vlan) +		return -ENOMEM; +	return 0; +} + +void mlx5e_fs_cleanup(struct mlx5e_priv *priv) +{ +	kvfree(priv->fs.vlan); +	priv->fs.vlan = NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 09c8b71b186c..41ef6eb70a58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4578,6 +4578,12 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,  	mlx5e_timestamp_init(priv); +	err = mlx5e_fs_init(priv); +	if (err) { +		mlx5_core_err(mdev, "FS initialization failed, %d\n", err); +		return err; +	} +  	err = mlx5e_ipsec_init(priv);  	if (err)  		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err); @@ -4595,6 +4601,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)  	mlx5e_health_destroy_reporters(priv);  	mlx5e_tls_cleanup(priv);  	mlx5e_ipsec_cleanup(priv); +	mlx5e_fs_cleanup(priv);  }  static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ba8164792016..129ff7e0d65c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -67,6 +67,8 @@  #include "lib/fs_chains.h"  #include "diag/en_tc_tracepoint.h"  #include <asm/div64.h> +#include "lag.h" +#include "lag_mp.h"  #define nic_chains(priv) ((priv)->fs.tc.chains)  #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index c63d78eda606..188994d091c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -213,19 +213,18 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)  	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);  } -/* If packet is not IP's CHECKSUM_PARTIAL (e.g. 
icmd packet), - * need to set L3 checksum flag for IPsec - */  static void  ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,  			    struct mlx5_wqe_eth_seg *eseg)  { +	struct xfrm_offload *xo = xfrm_offload(skb); +  	eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; -	if (skb->encapsulation) { -		eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM; +	if (xo->inner_ipproto) { +		eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM; +	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { +		eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;  		sq->stats->csum_partial_inner++; -	} else { -		sq->stats->csum_partial++;  	}  } @@ -234,6 +233,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,  			    struct mlx5e_accel_tx_state *accel,  			    struct mlx5_wqe_eth_seg *eseg)  { +	if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) { +		ipsec_txwqe_build_eseg_csum(sq, skb, eseg); +		return; +	} +  	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {  		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;  		if (skb->encapsulation) { @@ -249,8 +253,6 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,  		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;  		sq->stats->csum_partial++;  #endif -	} else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) { -		ipsec_txwqe_build_eseg_csum(sq, skb, eseg);  	} else  		sq->stats->csum_none++;  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 985e305179d1..c6cc67cb4f6a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -473,10 +473,9 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta  err_min_rate:  	list_del(&group->list); -	err = mlx5_destroy_scheduling_element_cmd(esw->dev, -						  SCHEDULING_HIERARCHY_E_SWITCH, -						  group->tsar_ix); -	if (err) +	if (mlx5_destroy_scheduling_element_cmd(esw->dev, +						SCHEDULING_HIERARCHY_E_SWITCH, +						group->tsar_ix))  		NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");  err_sched_elem:  	kfree(group); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index ca5690b0a7ab..d2105c1635c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -442,6 +442,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)  	if (!mlx5_lag_is_ready(ldev)) {  		do_bond = false;  	} else { +		/* VF LAG is in multipath mode, ignore bond change requests */ +		if (mlx5_lag_is_multipath(dev0)) +			return; +  		tracker = ldev->tracker;  		do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c index f239b352a58a..21fdaf708f1f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c @@ -9,20 +9,23 @@  #include "eswitch.h"  #include "lib/mlx5.h" +static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev) +{ +	return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH); +} +  static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)  {  	if (!mlx5_lag_is_ready(ldev))  		return false; +	if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev)) +		return false; +  	return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,  					 ldev->pf[MLX5_LAG_P2].dev);  } -static bool __mlx5_lag_is_multipath(struct 
mlx5_lag *ldev) -{ -	return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH); -} -  bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)  {  	struct mlx5_lag *ldev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h index 729c839397a8..dea199e79bed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h @@ -24,12 +24,14 @@ struct lag_mp {  void mlx5_lag_mp_reset(struct mlx5_lag *ldev);  int mlx5_lag_mp_init(struct mlx5_lag *ldev);  void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev); +bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);  #else /* CONFIG_MLX5_ESWITCH */  static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};  static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }  static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {} +bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; }  #endif /* CONFIG_MLX5_ESWITCH */  #endif /* __MLX5_LAG_MP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 13b0259f7ea6..fcace73eae40 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -353,13 +353,10 @@ static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,  	struct sk_buff *skb;  	int err; -	elem_info->u.rdq.skb = NULL;  	skb = netdev_alloc_skb_ip_align(NULL, buf_len);  	if (!skb)  		return -ENOMEM; -	/* Assume that wqe was previously zeroed. */ -  	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,  				     buf_len, DMA_FROM_DEVICE);  	if (err) @@ -597,21 +594,26 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,  	struct pci_dev *pdev = mlxsw_pci->pdev;  	struct mlxsw_pci_queue_elem_info *elem_info;  	struct mlxsw_rx_info rx_info = {}; -	char *wqe; +	char wqe[MLXSW_PCI_WQE_SIZE];  	struct sk_buff *skb;  	u16 byte_count;  	int err;  	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); -	skb = elem_info->u.sdq.skb; -	if (!skb) -		return; -	wqe = elem_info->elem; -	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); +	skb = elem_info->u.rdq.skb; +	memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);  	if (q->consumer_counter++ != consumer_counter_limit)  		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); +	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); +	if (err) { +		dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n"); +		goto out; +	} + +	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); +  	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {  		rx_info.is_lag = true;  		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe); @@ -647,10 +649,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,  	skb_put(skb, byte_count);  	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); -	memset(wqe, 0, q->elem_size); -	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); -	if (err) -		dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n"); +out:  	/* Everything is set up, ring doorbell to pass elem to HW */  	q->producer_counter++;  	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 9e8561cdc32a..4d5a5d6595b3 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1743,6 +1743,16 @@ static int lan743x_tx_ring_init(struct 
lan743x_tx *tx)  		ret = -EINVAL;  		goto cleanup;  	} +	if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev, +				      DMA_BIT_MASK(64))) { +		if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev, +					      DMA_BIT_MASK(32))) { +			dev_warn(&tx->adapter->pdev->dev, +				 "lan743x_: No suitable DMA available\n"); +			ret = -ENOMEM; +			goto cleanup; +		} +	}  	ring_allocation_size = ALIGN(tx->ring_size *  				     sizeof(struct lan743x_tx_descriptor),  				     PAGE_SIZE); @@ -1934,7 +1944,8 @@ static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)  				  index);  } -static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index) +static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index, +					gfp_t gfp)  {  	struct net_device *netdev = rx->adapter->netdev;  	struct device *dev = &rx->adapter->pdev->dev; @@ -1948,7 +1959,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)  	descriptor = &rx->ring_cpu_ptr[index];  	buffer_info = &rx->buffer_info[index]; -	skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA); +	skb = __netdev_alloc_skb(netdev, buffer_length, gfp);  	if (!skb)  		return -ENOMEM;  	dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE); @@ -2110,7 +2121,8 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)  	/* save existing skb, allocate new skb and map to dma */  	skb = buffer_info->skb; -	if (lan743x_rx_init_ring_element(rx, rx->last_head)) { +	if (lan743x_rx_init_ring_element(rx, rx->last_head, +					 GFP_ATOMIC | GFP_DMA)) {  		/* failed to allocate next skb.  		 * Memory is very low.  		 * Drop this packet and reuse buffer. @@ -2276,6 +2288,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)  		ret = -EINVAL;  		goto cleanup;  	} +	if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev, +				      DMA_BIT_MASK(64))) { +		if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev, +					      DMA_BIT_MASK(32))) { +			dev_warn(&rx->adapter->pdev->dev, +				 "lan743x_: No suitable DMA available\n"); +			ret = -ENOMEM; +			goto cleanup; +		} +	}  	ring_allocation_size = ALIGN(rx->ring_size *  				     sizeof(struct lan743x_rx_descriptor),  				     PAGE_SIZE); @@ -2315,13 +2337,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)  	rx->last_head = 0;  	for (index = 0; index < rx->ring_size; index++) { -		ret = lan743x_rx_init_ring_element(rx, index); +		ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);  		if (ret)  			goto cleanup;  	}  	return 0;  cleanup: +	netif_warn(rx->adapter, ifup, rx->adapter->netdev, +		   "Error allocating memory for LAN743x\n"); +  	lan743x_rx_ring_cleanup(rx);  	return ret;  } @@ -3019,6 +3044,8 @@ static int lan743x_pm_resume(struct device *dev)  	if (ret) {  		netif_err(adapter, probe, adapter->netdev,  			  "lan743x_hardware_init returned %d\n", ret); +		lan743x_pci_cleanup(adapter); +		return ret;  	}  	/* open netdev when netdev is at running state while resume. 
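Note: the lan743x_tx_ring_init() and lan743x_rx_ring_init() hunks above add the same 64-bit-then-32-bit DMA mask fallback to both ring setup paths. A minimal sketch of that pattern, assuming only the standard dma_set_mask_and_coherent() and DMA_BIT_MASK() kernel APIs; the helper name below is illustrative and not part of the patch:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Try 64-bit DMA addressing first, then fall back to 32-bit before
 * failing the ring setup, mirroring the open-coded checks above.
 */
static int example_set_dma_mask(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return 0;

	dev_warn(dev, "No suitable DMA configuration available\n");
	return -ENOMEM;
}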
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index cbece6e9bff2..5030dfca3879 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -758,6 +758,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)  			err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),  					    "port %u: missing serdes\n",  					    portno); +			of_node_put(portnp);  			goto cleanup_config;  		}  		config->portno = portno; diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 291ae6817c26..d51f799e4e86 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -969,6 +969,7 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,  		target = ocelot_regmap_init(ocelot, res);  		if (IS_ERR(target)) {  			err = PTR_ERR(target); +			of_node_put(portnp);  			goto out_teardown;  		} diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 11c83a99b014..f469950c7265 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -182,15 +182,21 @@ static int  nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)  {  	struct nfp_net *nn = netdev_priv(netdev); -	unsigned int max_mtu; +	struct nfp_bpf_vnic *bv; +	struct bpf_prog *prog;  	if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)  		return 0; -	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; -	if (new_mtu > max_mtu) { -		nn_info(nn, "BPF offload active, MTU over %u not supported\n", -			max_mtu); +	if (nn->xdp_hw.prog) { +		prog = nn->xdp_hw.prog; +	} else { +		bv = nn->app_priv; +		prog = bv->tc_prog; +	} + +	if (nfp_bpf_offload_check_mtu(nn, prog, new_mtu)) { +		nn_info(nn, "BPF offload active, potential packet access beyond hardware packet boundary");  		return -EBUSY;  	}  	return 0; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index d0e17eebddd9..16841bb750b7 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -560,6 +560,8 @@ bool nfp_is_subprog_start(struct nfp_insn_meta *meta);  void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);  int nfp_bpf_jit(struct nfp_prog *prog);  bool nfp_bpf_supported_opcode(u8 code); +bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog, +			       unsigned int mtu);  int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,  		    int prev_insn_idx); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 53851853562c..9d97cd281f18 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -481,19 +481,28 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,  	return 0;  } +bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog, +			       unsigned int mtu) +{ +	unsigned int fw_mtu, pkt_off; + +	fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; +	pkt_off = min(prog->aux->max_pkt_offset, mtu); + +	return fw_mtu < pkt_off; +} +  static int  nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,  		 struct netlink_ext_ack *extack)  {  	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; -	unsigned int fw_mtu, pkt_off, 
max_stack, max_prog_len; +	unsigned int max_stack, max_prog_len;  	dma_addr_t dma_addr;  	void *img;  	int err; -	fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; -	pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu); -	if (fw_mtu < pkt_off) { +	if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) {  		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");  		return -EOPNOTSUPP;  	} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index 2643ea5948f4..154399c5453f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -196,7 +196,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,  	}  	reg->dst_lmextn = swreg_lmextn(dst); -	reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg); +	reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);  	return 0;  } @@ -277,7 +277,7 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,  	}  	reg->dst_lmextn = swreg_lmextn(dst); -	reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg); +	reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);  	return 0;  } diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index d29fe562b3de..c910fa2f40a4 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1015,9 +1015,6 @@ static int lpc_eth_close(struct net_device *ndev)  	napi_disable(&pldat->napi);  	netif_stop_queue(ndev); -	if (ndev->phydev) -		phy_stop(ndev->phydev); -  	spin_lock_irqsave(&pldat->lock, flags);  	__lpc_eth_reset(pldat);  	netif_carrier_off(ndev); @@ -1025,6 +1022,8 @@ static int lpc_eth_close(struct net_device *ndev)  	writel(0, LPC_ENET_MAC2(pldat->net_base));  	spin_unlock_irqrestore(&pldat->lock, flags); +	if (ndev->phydev) +		phy_stop(ndev->phydev);  	clk_disable_unprepare(pldat->clk);  	return 0; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 46a6ff9a782d..2918947dd57c 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -157,6 +157,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {  	{ PCI_VDEVICE(REALTEK,	0x8129) },  	{ PCI_VDEVICE(REALTEK,	0x8136), RTL_CFG_NO_GBIT },  	{ PCI_VDEVICE(REALTEK,	0x8161) }, +	{ PCI_VDEVICE(REALTEK,	0x8162) },  	{ PCI_VDEVICE(REALTEK,	0x8167) },  	{ PCI_VDEVICE(REALTEK,	0x8168) },  	{ PCI_VDEVICE(NCUBE,	0x8168) }, diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c index 4bd3ef8f3384..c4fe3c48ac46 100644 --- a/drivers/net/ethernet/sfc/mcdi_port_common.c +++ b/drivers/net/ethernet/sfc/mcdi_port_common.c @@ -132,16 +132,27 @@ void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)  	case MC_CMD_MEDIA_SFP_PLUS:  	case MC_CMD_MEDIA_QSFP_PLUS:  		SET_BIT(FIBRE); -		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) +		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) {  			SET_BIT(1000baseT_Full); -		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) -			SET_BIT(10000baseT_Full); -		if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) +			SET_BIT(1000baseX_Full); +		} +		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) { +			SET_BIT(10000baseCR_Full); +			SET_BIT(10000baseLR_Full); +			SET_BIT(10000baseSR_Full); +		} +		if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {  			SET_BIT(40000baseCR4_Full); -		if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) +			
SET_BIT(40000baseSR4_Full); +		} +		if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) {  			SET_BIT(100000baseCR4_Full); -		if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) +			SET_BIT(100000baseSR4_Full); +		} +		if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) {  			SET_BIT(25000baseCR_Full); +			SET_BIT(25000baseSR_Full); +		}  		if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))  			SET_BIT(50000baseCR2_Full);  		break; @@ -192,15 +203,19 @@ u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)  		result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);  	if (TEST_BIT(1000baseT_Half))  		result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); -	if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full)) +	if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) || +			TEST_BIT(1000baseX_Full))  		result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); -	if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full)) +	if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) || +			TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) || +			TEST_BIT(10000baseSR_Full))  		result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); -	if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full)) +	if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) || +			TEST_BIT(40000baseSR4_Full))  		result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN); -	if (TEST_BIT(100000baseCR4_Full)) +	if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full))  		result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN); -	if (TEST_BIT(25000baseCR_Full)) +	if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full))  		result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);  	if (TEST_BIT(50000baseCR2_Full))  		result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN); diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index a39c5143b386..797e51802ccb 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -648,7 +648,7 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)  	} else if (rc == -EINVAL) {  		fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;  	} else if (rc == -EPERM) { -		netif_info(efx, probe, efx->net_dev, "no PTP support\n"); +		pci_info(efx->pci_dev, "no PTP support\n");  		return rc;  	} else {  		efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), @@ -824,7 +824,7 @@ static int efx_ptp_disable(struct efx_nic *efx)  	 * should only have been called during probe.  	 
*/  	if (rc == -ENOSYS || rc == -EPERM) -		netif_info(efx, probe, efx->net_dev, "no PTP support\n"); +		pci_info(efx->pci_dev, "no PTP support\n");  	else if (rc)  		efx_mcdi_display_error(efx, MC_CMD_PTP,  				       MC_CMD_PTP_IN_DISABLE_LEN, diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 83dcfcae3d4b..441e7f3e5375 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -1057,7 +1057,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx)  		return;  	if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) { -		netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n"); +		pci_info(efx->pci_dev, "no SR-IOV VFs probed\n");  		return;  	}  	if (count > 0 && count > max_vfs) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index eb3b7bf771d7..3d67d1fa3690 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -736,7 +736,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)  			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;  			ptp_v2 = PTP_TCR_TSVER2ENA;  			snap_type_sel = PTP_TCR_SNAPTYPSEL_1; -			if (priv->synopsys_id != DWMAC_CORE_5_10) +			if (priv->synopsys_id < DWMAC_CORE_4_10)  				ts_event_en = PTP_TCR_TSEVNTENA;  			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;  			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
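Note: the nfp/bpf hunks earlier in this diff move the firmware MTU boundary test into nfp_bpf_offload_check_mtu() and relax it: instead of refusing any MTU larger than the firmware's inlined-packet limit, the driver now refuses offload only when the program may actually access bytes beyond that limit. A minimal standalone sketch of the check, computing the boundary exactly as the hunks above do (the function name and standalone form are illustrative, not part of the patch):

/* fw_mtu_reg is the NFP_NET_CFG_BPF_INL_MTU register value; the firmware
 * boundary is reg * 64 - 32, as in the hunk above. A program is rejected
 * only if the furthest packet offset it may touch, clamped to the
 * configured MTU, falls beyond that boundary.
 */
static bool example_offload_check_mtu(unsigned int fw_mtu_reg,
				      unsigned int max_pkt_offset,
				      unsigned int mtu)
{
	unsigned int fw_mtu = fw_mtu_reg * 64 - 32;
	unsigned int pkt_off = max_pkt_offset < mtu ? max_pkt_offset : mtu;

	return fw_mtu < pkt_off;	/* true: offload must be refused */
}

For example, with fw_mtu_reg = 32 the boundary is 2016 bytes; a program whose max_pkt_offset is 128 is now accepted even on a 9000-byte MTU, whereas the previous check in nfp_bpf_check_mtu() would have rejected that MTU change outright.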