Diffstat (limited to 'drivers/net/ethernet')
176 files changed, 2664 insertions, 1555 deletions
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 3d6f0a466a9e..f9f886289b97 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -328,9 +328,6 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, * compare it to the stored version, just create the meta */ if (io_sq->disable_meta_caching) { - if (unlikely(!ena_tx_ctx->meta_valid)) - return -EINVAL; - *have_meta = true; return ena_com_create_meta(io_sq, ena_meta); } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index b5bca4814830..c44c44e26ddf 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -74,6 +74,8 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info); static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, int first_index, int count); +static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter, + int first_index, int count); /* Increase a stat by cnt while holding syncp seqlock on 32bit machines */ static void ena_increase_stat(u64 *statp, u64 cnt, @@ -457,23 +459,22 @@ static void ena_init_all_xdp_queues(struct ena_adapter *adapter) static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter) { + u32 xdp_first_ring = adapter->xdp_first_ring; + u32 xdp_num_queues = adapter->xdp_num_queues; int rc = 0; - rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring, - adapter->xdp_num_queues); + rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues); if (rc) goto setup_err; - rc = ena_create_io_tx_queues_in_range(adapter, - adapter->xdp_first_ring, - adapter->xdp_num_queues); + rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues); if (rc) goto create_err; return 0; create_err: - ena_free_all_io_tx_resources(adapter); + ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues); setup_err: return rc; } @@ -1492,11 +1493,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, if (unlikely(!skb)) return NULL; - /* sync this buffer for CPU use */ - dma_sync_single_for_cpu(rx_ring->dev, - dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, - len, - DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, buf_addr + buf_offset, len); dma_sync_single_for_device(rx_ring->dev, dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, @@ -1515,17 +1511,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom); - pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr); - /* If XDP isn't loaded try to reuse part of the RX buffer */ reuse_rx_buf_page = !is_xdp_loaded && ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset); - dma_sync_single_for_cpu(rx_ring->dev, - pre_reuse_paddr + pkt_offset, - len, - DMA_FROM_DEVICE); - if (!reuse_rx_buf_page) ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC); @@ -1671,20 +1660,23 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring, } } -static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) +static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs) { struct ena_rx_buffer *rx_info; int ret; + /* XDP multi-buffer packets not supported */ + if (unlikely(num_descs > 1)) { + netdev_err_once(rx_ring->adapter->netdev, + "xdp: dropped 
unsupported multi-buffer packets\n"); + ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp); + return ENA_XDP_DROP; + } + rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; xdp_prepare_buff(xdp, page_address(rx_info->page), rx_info->buf_offset, rx_ring->ena_bufs[0].len, false); - /* If for some reason we received a bigger packet than - * we expect, then we simply drop it - */ - if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) - return ENA_XDP_DROP; ret = ena_xdp_execute(rx_ring, xdp); @@ -1719,6 +1711,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, int xdp_flags = 0; int total_len = 0; int xdp_verdict; + u8 pkt_offset; int rc = 0; int i; @@ -1745,15 +1738,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, /* First descriptor might have an offset set by the device */ rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; - rx_info->buf_offset += ena_rx_ctx.pkt_offset; + pkt_offset = ena_rx_ctx.pkt_offset; + rx_info->buf_offset += pkt_offset; netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n", rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, ena_rx_ctx.l4_proto, ena_rx_ctx.hash); + dma_sync_single_for_cpu(rx_ring->dev, + dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, + rx_ring->ena_bufs[0].len, + DMA_FROM_DEVICE); + if (ena_xdp_present_ring(rx_ring)) - xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp); + xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs); /* allocate skb and fill it */ if (xdp_verdict == ENA_XDP_PASS) @@ -1777,7 +1776,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, if (xdp_verdict & ENA_XDP_FORWARDED) { ena_unmap_rx_buff_attrs(rx_ring, &rx_ring->rx_buffer_info[req_id], - 0); + DMA_ATTR_SKIP_CPU_SYNC); rx_ring->rx_buffer_info[req_id].page = NULL; } } diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c index 045fe133f6ee..5beadabc2136 100644 --- a/drivers/net/ethernet/amd/pds_core/adminq.c +++ b/drivers/net/ethernet/amd/pds_core/adminq.c @@ -146,7 +146,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data) } queue_work(pdsc->wq, &qcq->work); - pds_core_intr_mask(&pdsc->intr_ctrl[irq], PDS_CORE_INTR_MASK_CLEAR); + pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR); return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h index f3a7deda9972..e35d3e7006bf 100644 --- a/drivers/net/ethernet/amd/pds_core/core.h +++ b/drivers/net/ethernet/amd/pds_core/core.h @@ -15,7 +15,7 @@ #define PDSC_DRV_DESCRIPTION "AMD/Pensando Core Driver" #define PDSC_WATCHDOG_SECS 5 -#define PDSC_QUEUE_NAME_MAX_SZ 32 +#define PDSC_QUEUE_NAME_MAX_SZ 16 #define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */ #define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */ #define PDSC_TEARDOWN_RECOVERY false diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c index 7c1b965d61a9..31940b857e0e 100644 --- a/drivers/net/ethernet/amd/pds_core/dev.c +++ b/drivers/net/ethernet/amd/pds_core/dev.c @@ -261,10 +261,14 @@ static int pdsc_identify(struct pdsc *pdsc) struct pds_core_drv_identity drv = {}; size_t sz; int err; + int n; drv.drv_type = cpu_to_le32(PDS_DRIVER_LINUX); - snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str), - "%s %s", PDS_CORE_DRV_NAME, 
utsname()->release); + /* Catching the return quiets a Wformat-truncation complaint */ + n = snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str), + "%s %s", PDS_CORE_DRV_NAME, utsname()->release); + if (n > sizeof(drv.driver_ver_str)) + dev_dbg(pdsc->dev, "release name truncated, don't care\n"); /* Next let's get some info about the device * We use the devcmd_lock at this level in order to diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c index 57f88c8b37de..e9948ea5bbcd 100644 --- a/drivers/net/ethernet/amd/pds_core/devlink.c +++ b/drivers/net/ethernet/amd/pds_core/devlink.c @@ -104,7 +104,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req, struct pds_core_fw_list_info fw_list; struct pdsc *pdsc = devlink_priv(dl); union pds_core_dev_comp comp; - char buf[16]; + char buf[32]; int listlen; int err; int i; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 614c0278419b..6b73648b3779 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work) static void xgbe_service_timer(struct timer_list *t) { struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer); + struct xgbe_channel *channel; + unsigned int i; queue_work(pdata->dev_workqueue, &pdata->service_work); mod_timer(&pdata->service_timer, jiffies + HZ); + + if (!pdata->tx_usecs) + return; + + for (i = 0; i < pdata->channel_count; i++) { + channel = pdata->channel[i]; + if (!channel->tx_ring || channel->tx_timer_active) + break; + channel->tx_timer_active = 1; + mod_timer(&channel->tx_timer, + jiffies + usecs_to_jiffies(pdata->tx_usecs)); + } } static void xgbe_init_timers(struct xgbe_prv_data *pdata) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 6e83ff59172a..32fab5e77246 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev, cmd->base.phy_address = pdata->phy.address; - cmd->base.autoneg = pdata->phy.autoneg; - cmd->base.speed = pdata->phy.speed; - cmd->base.duplex = pdata->phy.duplex; + if (netif_carrier_ok(netdev)) { + cmd->base.speed = pdata->phy.speed; + cmd->base.duplex = pdata->phy.duplex; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.autoneg = pdata->phy.autoneg; cmd->base.port = PORT_NONE; XGBE_LM_COPY(cmd, supported, lks, supported); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 32d2c6fac652..4a2dc705b528 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1193,7 +1193,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata) if (pdata->phy.duplex != DUPLEX_FULL) return -EINVAL; - xgbe_set_mode(pdata, mode); + /* Force the mode change for SFI in Fixed PHY config. + * Fixed PHY configs needs PLL to be enabled while doing mode set. + * When the SFP module isn't connected during boot, driver assumes + * AN is ON and attempts autonegotiation. However, if the connected + * SFP comes up in Fixed PHY config, the link will not come up as + * PLL isn't enabled while the initial mode set command is issued. + * So, force the mode change for SFI in Fixed PHY configuration to + * fix link issues. 
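The comment in this xgbe hunk turns on the difference between xgbe_set_mode() and xgbe_change_mode(). A minimal sketch of that distinction, assuming the usual split where the set path short-circuits when the requested mode matches the cached one while the change path programs the hardware unconditionally; everything below other than those two ideas (struct phy_state, program_hw, the mode values) is invented for illustration:

#include <stdio.h>

struct phy_state {
	int cur_mode;			/* driver's cached idea of the mode */
};

static void program_hw(int mode)	/* stand-in for the real mode setup */
{
	printf("programming mode %d\n", mode);
}

static void change_mode(struct phy_state *p, int mode)
{
	program_hw(mode);		/* always touches the hardware */
	p->cur_mode = mode;
}

static void set_mode(struct phy_state *p, int mode)
{
	if (p->cur_mode == mode)
		return;			/* cached state says: nothing to do */
	change_mode(p, mode);
}

int main(void)
{
	struct phy_state p = { .cur_mode = 3 };

	set_mode(&p, 3);	/* no-op: cache matches, hardware untouched */
	change_mode(&p, 3);	/* forced: reprograms despite the cache match */
	return 0;
}

Forcing the change path is exactly what the fix above does for SFI, where the cached mode can match even though the hardware (PLL disabled) never actually applied it.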
+ */ + if (mode == XGBE_MODE_SFI) + xgbe_change_mode(pdata, mode); + else + xgbe_set_mode(pdata, mode); return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index 80b44043e6c5..28c9b6f1a54f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -553,17 +553,17 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp) /* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp * @adapter: pointer to adapter struct - * @skb: particular skb to send timestamp with + * @shhwtstamps: particular skb_shared_hwtstamps to save timestamp * * if the timestamp is valid, we convert it into the timecounter ns * value, then store that result into the hwtstamps structure which * is passed up the network stack */ -static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb, +static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtstamps *shhwtstamps, u64 timestamp) { timestamp -= atomic_read(&aq_ptp->offset_ingress); - aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp); + aq_ptp_convert_to_hwtstamp(aq_ptp, shhwtstamps, timestamp); } void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp, @@ -639,7 +639,7 @@ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring) &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring; } -u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p, +u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p, unsigned int len) { struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp; @@ -648,7 +648,7 @@ u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p, p, len, ×tamp); if (ret > 0) - aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp); + aq_ptp_rx_hwtstamp(aq_ptp, shhwtstamps, timestamp); return ret; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h index 28ccb7ca2df9..210b723f2207 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h @@ -67,7 +67,7 @@ int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp, /* Return either ring is belong to PTP or not*/ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring); -u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p, +u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p, unsigned int len); struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp); @@ -143,7 +143,7 @@ static inline bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring) } static inline u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, - struct sk_buff *skb, u8 *p, + struct skb_shared_hwtstamps *shhwtstamps, u8 *p, unsigned int len) { return 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 4de22eed099a..e1885c1eb100 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -647,7 +647,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, } if (is_ptp_ring) buff->len -= - aq_ptp_extract_ts(self->aq_nic, skb, + aq_ptp_extract_ts(self->aq_nic, skb_hwtstamps(skb), aq_buf_vaddr(&buff->rxdata), buff->len); @@ -742,6 +742,8 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring, struct aq_ring_buff_s *buff = 
&rx_ring->buff_ring[rx_ring->sw_head]; bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring); struct aq_ring_buff_s *buff_ = NULL; + u16 ptp_hwtstamp_len = 0; + struct skb_shared_hwtstamps shhwtstamps; struct sk_buff *skb = NULL; unsigned int next_ = 0U; struct xdp_buff xdp; @@ -810,11 +812,12 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring, hard_start = page_address(buff->rxdata.page) + buff->rxdata.pg_off - rx_ring->page_offset; - if (is_ptp_ring) - buff->len -= - aq_ptp_extract_ts(rx_ring->aq_nic, skb, - aq_buf_vaddr(&buff->rxdata), - buff->len); + if (is_ptp_ring) { + ptp_hwtstamp_len = aq_ptp_extract_ts(rx_ring->aq_nic, &shhwtstamps, + aq_buf_vaddr(&buff->rxdata), + buff->len); + buff->len -= ptp_hwtstamp_len; + } xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset, @@ -834,6 +837,9 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring, if (IS_ERR(skb) || !skb) continue; + if (ptp_hwtstamp_len > 0) + *skb_hwtstamps(skb) = shhwtstamps; + if (buff->is_vlan) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), buff->vlan_rx_tag); @@ -932,11 +938,14 @@ void aq_ring_free(struct aq_ring_s *self) return; kfree(self->buff_ring); + self->buff_ring = NULL; - if (self->dx_ring) + if (self->dx_ring) { dma_free_coherent(aq_nic_get_dev(self->aq_nic), self->size * self->dx_size, self->dx_ring, self->dx_ring_pa); + self->dx_ring = NULL; + } } unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 5935be190b9e..5f2a6fcba967 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -866,10 +866,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter) netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n", offset, adapter->ring_size); err = -1; - goto failed; + goto free_buffer; } return 0; +free_buffer: + kfree(tx_ring->tx_buffer); + tx_ring->tx_buffer = NULL; failed: if (adapter->ring_vir_addr != NULL) { dma_free_coherent(&pdev->dev, adapter->ring_size, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index d0359b569afe..e1f1e646cf48 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1748,16 +1748,32 @@ static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, struct sk_buff *skb) { + skb_mark_for_recycle(skb); + if (skb->dev != bp->dev) { /* this packet belongs to a vf-rep */ bnxt_vf_rep_rx(bp, skb); return; } skb_record_rx_queue(skb, bnapi->index); - skb_mark_for_recycle(skb); napi_gro_receive(&bnapi->napi, skb); } +static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags, + struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts) +{ + u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); + + if (BNXT_PTP_RX_TS_VALID(flags)) + goto ts_valid; + if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags)) + return false; + +ts_valid: + *cmpl_ts = ts; + return true; +} + /* returns the following: * 1 - 1 packet successfully received * 0 - successful TPA_START, packet not completed yet @@ -1783,6 +1799,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct sk_buff *skb; struct xdp_buff xdp; u32 flags, misc; + u32 cmpl_ts; void *data; int rc = 0; @@ -2005,10 +2022,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct 
bnxt_cp_ring_info *cpr, } } - if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) == - RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) { + if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) { if (bp->flags & BNXT_FLAG_CHIP_P5) { - u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); u64 ns, ts; if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { @@ -10731,10 +10746,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bnxt_free_mem(bp, irq_re_init); } -int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { - int rc = 0; - if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { /* If we get here, it means firmware reset is in progress * while we are trying to close. We can safely proceed with @@ -10749,15 +10762,18 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) #ifdef CONFIG_BNXT_SRIOV if (bp->sriov_cfg) { + int rc; + rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, !bp->sriov_cfg, BNXT_SRIOV_CFG_WAIT_TMO); - if (rc) - netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); + if (!rc) + netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n"); + else if (rc < 0) + netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n"); } #endif __bnxt_close_nic(bp, irq_re_init, link_re_init); - return rc; } static int bnxt_close(struct net_device *dev) @@ -12077,6 +12093,8 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_cfg_ntp_filters(bp); if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) bnxt_hwrm_exec_fwd_req(bp); + if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) + netdev_info(bp->dev, "Receive PF driver unload event!\n"); if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { bnxt_hwrm_port_qstats(bp, 0); bnxt_hwrm_port_qstats_ext(bp, 0); @@ -13077,8 +13095,6 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp) } } } - if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) - netdev_info(bp->dev, "Receive PF driver unload event!\n"); } #else @@ -13940,6 +13956,8 @@ static int bnxt_resume(struct device *device) if (rc) goto resume_exit; + bnxt_clear_reservations(bp, true); + if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { rc = -ENODEV; goto resume_exit; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index e702dbc3e6b1..a7d7b09ea162 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -161,7 +161,7 @@ struct rx_cmp { #define RX_CMP_FLAGS_ERROR (1 << 6) #define RX_CMP_FLAGS_PLACEMENT (7 << 7) #define RX_CMP_FLAGS_RSS_VALID (1 << 10) - #define RX_CMP_FLAGS_UNUSED (1 << 11) + #define RX_CMP_FLAGS_PKT_METADATA_PRESENT (1 << 11) #define RX_CMP_FLAGS_ITYPES_SHIFT 12 #define RX_CMP_FLAGS_ITYPES_MASK 0xf000 #define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12) @@ -188,6 +188,12 @@ struct rx_cmp { __le32 rx_cmp_rss_hash; }; +#define BNXT_PTP_RX_TS_VALID(flags) \ + (((flags) & RX_CMP_FLAGS_ITYPES_MASK) == RX_CMP_FLAGS_ITYPE_PTP_W_TS) + +#define BNXT_ALL_RX_TS_VALID(flags) \ + !((flags) & RX_CMP_FLAGS_PKT_METADATA_PRESENT) + #define RX_CMP_HASH_VALID(rxcmp) \ ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID)) @@ -2375,7 +2381,7 @@ int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); void 
bnxt_reenable_sriov(struct bnxt *bp); -int bnxt_close_nic(struct bnxt *, bool, bool); +void bnxt_close_nic(struct bnxt *, bool, bool); void bnxt_get_ring_err_stats(struct bnxt *bp, struct bnxt_total_ring_err_stats *stats); int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index f302dac56599..89809f1b129c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -449,15 +449,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, return -ENODEV; } bnxt_ulp_stop(bp); - if (netif_running(bp->dev)) { - rc = bnxt_close_nic(bp, true, true); - if (rc) { - NL_SET_ERR_MSG_MOD(extack, "Failed to close"); - dev_close(bp->dev); - rtnl_unlock(); - break; - } - } + if (netif_running(bp->dev)) + bnxt_close_nic(bp, true, true); bnxt_vf_reps_free(bp); rc = bnxt_hwrm_func_drv_unrgtr(bp); if (rc) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index f3f384773ac0..5f67a7f94e7d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -165,9 +165,8 @@ static int bnxt_set_coalesce(struct net_device *dev, reset_coalesce: if (test_bit(BNXT_STATE_OPEN, &bp->state)) { if (update_stats) { - rc = bnxt_close_nic(bp, true, false); - if (!rc) - rc = bnxt_open_nic(bp, true, false); + bnxt_close_nic(bp, true, false); + rc = bnxt_open_nic(bp, true, false); } else { rc = bnxt_hwrm_set_coal(bp); } @@ -972,12 +971,7 @@ static int bnxt_set_channels(struct net_device *dev, * before PF unload */ } - rc = bnxt_close_nic(bp, true, false); - if (rc) { - netdev_err(bp->dev, "Set channel failure rc :%x\n", - rc); - return rc; - } + bnxt_close_nic(bp, true, false); } if (sh) { @@ -4042,12 +4036,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, bnxt_run_fw_tests(bp, test_mask, &test_results); } else { bnxt_ulp_stop(bp); - rc = bnxt_close_nic(bp, true, false); - if (rc) { - etest->flags |= ETH_TEST_FL_FAILED; - bnxt_ulp_start(bp, rc); - return; - } + bnxt_close_nic(bp, true, false); bnxt_run_fw_tests(bp, test_mask, &test_results); buf[BNXT_MACLPBK_TEST_IDX] = 1; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index f3886710e778..6e3da3362bd6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -521,9 +521,8 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) if (netif_running(bp->dev)) { if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) { - rc = bnxt_close_nic(bp, false, false); - if (!rc) - rc = bnxt_open_nic(bp, false, false); + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); } else { bnxt_ptp_cfg_tstamp_filters(bp); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 38d89d80b4a9..273c9ba48f09 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -2075,6 +2075,7 @@ destroy_flow_table: rhashtable_destroy(&tc_info->flow_table); free_tc_info: kfree(tc_info); + bp->tc_info = NULL; return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 96f5ca778c67..8cb9a99154aa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -59,7 +59,6 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, for (i = 0; i < num_frags ; i++) { skb_frag_t *frag = &sinfo->frags[i]; struct bnxt_sw_tx_bd *frag_tx_buf; - struct pci_dev *pdev = bp->pdev; dma_addr_t frag_mapping; int frag_len; @@ -73,16 +72,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; frag_len = skb_frag_size(frag); - frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0, - frag_len, DMA_TO_DEVICE); - - if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping))) - return NULL; - - dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping); - flags = frag_len << TX_BD_LEN_SHIFT; txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) + + skb_frag_off(frag); txbd->tx_bd_haddr = cpu_to_le64(frag_mapping); len = frag_len; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 9282403d1bf6..2d7ae71287b1 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2132,8 +2132,10 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) /* Note: if we ever change from DMA_TX_APPEND_CRC below we * will need to restore software padding of "runt" packets */ + len_stat |= DMA_TX_APPEND_CRC; + if (!i) { - len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; + len_stat |= DMA_SOP; if (skb->ip_summed == CHECKSUM_PARTIAL) len_stat |= DMA_TX_DO_CSUM; } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 1dee27349367..f52830dfb26a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6474,6 +6474,14 @@ static void tg3_dump_state(struct tg3 *tp) int i; u32 *regs; + /* If it is a PCI error, all registers will be 0xffff, + * we don't dump them out, just report the error and return + */ + if (tp->pdev->error_state != pci_channel_io_normal) { + netdev_err(tp->dev, "PCI channel ERROR!\n"); + return; + } + regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); if (!regs) return; @@ -6889,7 +6897,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) desc_idx, *post_ptr); drop_it_no_recycle: /* Other statistics kept track of by card. */ - tp->rx_dropped++; + tnapi->rx_dropped++; goto next_pkt; } @@ -7918,8 +7926,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6)); - if (IS_ERR(segs) || !segs) + if (IS_ERR(segs) || !segs) { + tnapi->tx_dropped++; goto tg3_tso_bug_end; + } skb_list_walk_safe(segs, seg, next) { skb_mark_not_on_list(seg); @@ -8190,7 +8200,7 @@ dma_error: drop: dev_kfree_skb_any(skb); drop_nofree: - tp->tx_dropped++; + tnapi->tx_dropped++; return NETDEV_TX_OK; } @@ -9405,7 +9415,7 @@ static void __tg3_set_rx_mode(struct net_device *); /* tp->lock is held. 
*/ static int tg3_halt(struct tg3 *tp, int kind, bool silent) { - int err; + int err, i; tg3_stop_fw(tp); @@ -9426,6 +9436,13 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent) /* And make sure the next sample is new data */ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + + for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->rx_dropped = 0; + tnapi->tx_dropped = 0; + } } return err; @@ -11250,7 +11267,8 @@ static void tg3_reset_task(struct work_struct *work) rtnl_lock(); tg3_full_lock(tp, 0); - if (tp->pcierr_recovery || !netif_running(tp->dev)) { + if (tp->pcierr_recovery || !netif_running(tp->dev) || + tp->pdev->error_state != pci_channel_io_normal) { tg3_flag_clear(tp, RESET_TASK_PENDING); tg3_full_unlock(tp); rtnl_unlock(); @@ -11975,6 +11993,9 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) { struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; struct tg3_hw_stats *hw_stats = tp->hw_stats; + unsigned long rx_dropped; + unsigned long tx_dropped; + int i; stats->rx_packets = old_stats->rx_packets + get_stat64(&hw_stats->rx_ucast_packets) + @@ -12021,8 +12042,26 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) stats->rx_missed_errors = old_stats->rx_missed_errors + get_stat64(&hw_stats->rx_discards); - stats->rx_dropped = tp->rx_dropped; - stats->tx_dropped = tp->tx_dropped; + /* Aggregate per-queue counters. The per-queue counters are updated + * by a single writer, race-free. The result computed by this loop + * might not be 100% accurate (counters can be updated in the middle of + * the loop) but the next tg3_get_nstats() will recompute the current + * value so it is acceptable. + * + * Note that these counters wrap around at 4G on 32bit machines. 
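The block comment above describes the aggregation done just below it: each tnapi counter has exactly one writer (that queue's handler), so increments need no locking, and the reader loop tolerates concurrent updates because the next call simply recomputes the totals. The same single-writer/summing-reader pattern outside the kernel, as a compilable sketch with invented names:

#include <stdio.h>

/* One counter pair per queue; each pair is updated only by that
 * queue's handler, so the increments are race-free without locks.
 */
struct queue_stats {
	unsigned long rx_dropped;
	unsigned long tx_dropped;
};

/* Reader side: counters may move while we loop, so the result is a
 * best-effort snapshot, recomputed on every call.
 */
static void sum_dropped(const struct queue_stats *qs, int nqueues,
			unsigned long *rx, unsigned long *tx)
{
	int i;

	*rx = 0;
	*tx = 0;
	for (i = 0; i < nqueues; i++) {
		*rx += qs[i].rx_dropped;
		*tx += qs[i].tx_dropped;
	}
}

int main(void)
{
	struct queue_stats qs[4] = { { 1, 0 }, { 2, 3 }, { 0, 0 }, { 5, 1 } };
	unsigned long rx, tx;

	sum_dropped(qs, 4, &rx, &tx);
	printf("rx_dropped=%lu tx_dropped=%lu\n", rx, tx);
	return 0;
}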
+ */ + rx_dropped = (unsigned long)(old_stats->rx_dropped); + tx_dropped = (unsigned long)(old_stats->tx_dropped); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + rx_dropped += tnapi->rx_dropped; + tx_dropped += tnapi->tx_dropped; + } + + stats->rx_dropped = rx_dropped; + stats->tx_dropped = tx_dropped; } static int tg3_get_regs_len(struct net_device *dev) diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index ae5c01bd1110..5016475e5005 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -3018,6 +3018,7 @@ struct tg3_napi { u16 *rx_rcb_prod_idx; struct tg3_rx_prodring_set prodring; struct tg3_rx_buffer_desc *rx_rcb; + unsigned long rx_dropped; u32 tx_prod ____cacheline_aligned; u32 tx_cons; @@ -3026,6 +3027,7 @@ struct tg3_napi { u32 prodmbox; struct tg3_tx_buffer_desc *tx_ring; struct tg3_tx_ring_info *tx_buffers; + unsigned long tx_dropped; dma_addr_t status_mapping; dma_addr_t rx_rcb_mapping; @@ -3220,8 +3222,6 @@ struct tg3 { /* begin "everything else" cacheline(s) section */ - unsigned long rx_dropped; - unsigned long tx_dropped; struct rtnl_link_stats64 net_stats_prev; struct tg3_ethtool_stats estats_prev; diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 5423fe26b4ef..78287cfcbf63 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -432,8 +432,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = { .val = CONFIG0_MAXLEN_1536, }, { - .max_l3_len = 1542, - .val = CONFIG0_MAXLEN_1542, + .max_l3_len = 1548, + .val = CONFIG0_MAXLEN_1548, }, { .max_l3_len = 9212, @@ -1145,6 +1145,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, dma_addr_t mapping; unsigned short mtu; void *buffer; + int ret; mtu = ETH_HLEN; mtu += netdev->mtu; @@ -1159,9 +1160,30 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, word3 |= mtu; } - if (skb->ip_summed != CHECKSUM_NONE) { + if (skb->len >= ETH_FRAME_LEN) { + /* Hardware offloaded checksumming isn't working on frames + * bigger than 1514 bytes. A hypothesis about this is that the + * checksum buffer is only 1518 bytes, so when the frames get + * bigger they get truncated, or the last few bytes get + * overwritten by the FCS. + * + * Just use software checksumming and bypass on bigger frames. + */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ret = skb_checksum_help(skb); + if (ret) + return ret; + } + word1 |= TSS_BYPASS_BIT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { int tcp = 0; + /* We do not switch off the checksumming on non TCP/UDP + * frames: as is shown from tests, the checksumming engine + * is smart enough to see that a frame is not actually TCP + * or UDP and then just pass it through without any changes + * to the frame. 
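Per the comment above, oversized frames bypass the gemini engine and the checksum is computed in software first (skb_checksum_help() for CHECKSUM_PARTIAL skbs). What such a software fallback boils down to arithmetically is the RFC 1071 16-bit ones'-complement Internet checksum; a self-contained sketch, not the kernel's implementation:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* RFC 1071: sum the data as 16-bit big-endian words, fold the carries
 * back into the low 16 bits, and return the ones'-complement.
 */
static uint16_t inet_csum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)data[0] << 8 | data[1];
		data += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte, zero-padded */
		sum += (uint32_t)data[0] << 8;
	while (sum >> 16)		/* fold carries into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	const uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x1c };

	printf("csum=0x%04x\n", inet_csum(pkt, sizeof(pkt)));
	return 0;
}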
+ */ if (skb->protocol == htons(ETH_P_IP)) { word1 |= TSS_IP_CHKSUM_BIT; tcp = ip_hdr(skb)->protocol == IPPROTO_TCP; @@ -1978,15 +2000,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu) return 0; } -static netdev_features_t gmac_fix_features(struct net_device *netdev, - netdev_features_t features) -{ - if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK) - features &= ~GMAC_OFFLOAD_FEATURES; - - return features; -} - static int gmac_set_features(struct net_device *netdev, netdev_features_t features) { @@ -2212,7 +2225,6 @@ static const struct net_device_ops gmac_351x_ops = { .ndo_set_mac_address = gmac_set_mac_address, .ndo_get_stats64 = gmac_get_stats64, .ndo_change_mtu = gmac_change_mtu, - .ndo_fix_features = gmac_fix_features, .ndo_set_features = gmac_set_features, }; @@ -2464,11 +2476,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) netdev->hw_features = GMAC_OFFLOAD_FEATURES; netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO; - /* We can handle jumbo frames up to 10236 bytes so, let's accept - * payloads of 10236 bytes minus VLAN and ethernet header + /* We can receive jumbo frames up to 10236 bytes but only + * transmit 2047 bytes so, let's accept payloads of 2047 + * bytes minus VLAN and ethernet header */ netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = 10236 - VLAN_ETH_HLEN; + netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN; port->freeq_refill = 0; netif_napi_add(netdev, &port->napi, gmac_napi_poll); diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h index 9fdf77d5eb37..24bb989981f2 100644 --- a/drivers/net/ethernet/cortina/gemini.h +++ b/drivers/net/ethernet/cortina/gemini.h @@ -502,7 +502,7 @@ union gmac_txdesc_3 { #define SOF_BIT 0x80000000 #define EOF_BIT 0x40000000 #define EOFIE_BIT BIT(29) -#define MTU_SIZE_BIT_MASK 0x1fff +#define MTU_SIZE_BIT_MASK 0x7ff /* Max MTU 2047 bytes */ /* GMAC Tx Descriptor */ struct gmac_txdesc { @@ -787,7 +787,7 @@ union gmac_config0 { #define CONFIG0_MAXLEN_1536 0 #define CONFIG0_MAXLEN_1518 1 #define CONFIG0_MAXLEN_1522 2 -#define CONFIG0_MAXLEN_1542 3 +#define CONFIG0_MAXLEN_1548 3 #define CONFIG0_MAXLEN_9k 4 /* 9212 */ #define CONFIG0_MAXLEN_10k 5 /* 10236 */ #define CONFIG0_MAXLEN_1518__6 6 diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 15bab41cee48..888509cf1f21 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -516,8 +516,6 @@ struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv, memcpy(skb->data, fd_vaddr + fd_offset, fd_length); - dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd)); - return skb; } @@ -589,6 +587,7 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, struct rtnl_link_stats64 *percpu_stats; struct dpaa2_eth_drv_stats *percpu_extras; struct device *dev = priv->net_dev->dev.parent; + bool recycle_rx_buf = false; void *buf_data; u32 xdp_act; @@ -618,6 +617,8 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, dma_unmap_page(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr); + } else { + recycle_rx_buf = true; } } else if (fd_format == dpaa2_fd_sg) { WARN_ON(priv->xdp_prog); @@ -637,6 +638,9 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, goto err_build_skb; dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb); + + if (recycle_rx_buf) + dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd)); return; 
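The dpaa2 change above is an ordering fix: dpaa2_eth_recycle_buf() used to run inside the copy helper, before later processing that still reads the original buffer, so it is now deferred via the new recycle_rx_buf flag until after dpaa2_eth_receive_skb(). The shape of that deferred-release pattern, reduced to a sketch in which every name is invented:

#include <stdio.h>

struct buf {
	const char *data;	/* stand-in for the DMA-mapped frame */
};

static void deliver(const char *payload)
{
	printf("deliver: %s\n", payload);
}

static void recycle(struct buf *b)
{
	(void)b;
	printf("recycle buffer\n");
}

static void rx_one(struct buf *b)
{
	int recycle_later = 1;	/* e.g. payload was copied to a fresh skb */

	/* Post-processing may still read the original buffer, so it
	 * must finish before the buffer goes back to the pool --
	 * recycling first is the bug class being fixed here.
	 */
	deliver(b->data);

	if (recycle_later)
		recycle(b);	/* only now is reuse of the memory safe */
}

int main(void)
{
	struct buf b = { "frame payload" };

	rx_one(&b);
	return 0;
}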
err_build_skb: @@ -1073,14 +1077,12 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv, dma_addr_t addr; buffer_start = skb->data - dpaa2_eth_needed_headroom(skb); - - /* If there's enough room to align the FD address, do it. - * It will help hardware optimize accesses. - */ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, DPAA2_ETH_TX_BUF_ALIGN); if (aligned_start >= skb->head) buffer_start = aligned_start; + else + return -ENOMEM; /* Store a backpointer to the skb at the beginning of the buffer * (in the private data area) such that we can release it @@ -4967,6 +4969,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) if (err) goto err_dl_port_add; + net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN; + err = register_netdev(net_dev); if (err < 0) { dev_err(dev, "register_netdev() failed\n"); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index bfb6c96c3b2f..834cba8c3a41 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -740,7 +740,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options) static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb) { - unsigned int headroom = DPAA2_ETH_SWA_SIZE; + unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN; /* If we don't have an skb (e.g. XDP buffer), we only need space for * the software annotation area diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c index 4798fb7fe35d..b6a534a3e0b1 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -139,7 +139,8 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block, err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, filter_block->acl_id, acl_entry_cfg); - dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), + dma_unmap_single(dev, acl_entry_cfg->key_iova, + DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE); if (err) { dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err); @@ -181,8 +182,8 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block, err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, block->acl_id, acl_entry_cfg); - dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), - DMA_TO_DEVICE); + dma_unmap_single(dev, acl_entry_cfg->key_iova, + DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE); if (err) { dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err); kfree(cmd_buff); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 97d3151076d5..e01a246124ac 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -1998,9 +1998,6 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev, return notifier_from_errno(err); } -static struct notifier_block dpaa2_switch_port_switchdev_nb; -static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb; - static int dpaa2_switch_port_bridge_join(struct net_device *netdev, struct net_device *upper_dev, struct netlink_ext_ack *extack) @@ -2043,9 +2040,7 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev, goto err_egress_flood; err = switchdev_bridge_port_offload(netdev, netdev, NULL, - 
&dpaa2_switch_port_switchdev_nb, - &dpaa2_switch_port_switchdev_blocking_nb, - false, extack); + NULL, NULL, false, extack); if (err) goto err_switchdev_offload; @@ -2079,9 +2074,7 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev) { - switchdev_bridge_port_unoffload(netdev, NULL, - &dpaa2_switch_port_switchdev_nb, - &dpaa2_switch_port_switchdev_blocking_nb); + switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL); } static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index c3b7694a7485..e08c7b572497 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3731,31 +3731,26 @@ static int fec_set_features(struct net_device *netdev, return 0; } -static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb) -{ - struct vlan_ethhdr *vhdr; - unsigned short vlan_TCI = 0; - - if (skb->protocol == htons(ETH_P_ALL)) { - vhdr = (struct vlan_ethhdr *)(skb->data); - vlan_TCI = ntohs(vhdr->h_vlan_TCI); - } - - return vlan_TCI; -} - static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, struct net_device *sb_dev) { struct fec_enet_private *fep = netdev_priv(ndev); - u16 vlan_tag; + u16 vlan_tag = 0; if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) return netdev_pick_tx(ndev, skb, NULL); - vlan_tag = fec_enet_get_raw_vlan_tci(skb); - if (!vlan_tag) + /* VLAN is present in the payload.*/ + if (eth_type_vlan(skb->protocol)) { + struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb); + + vlan_tag = ntohs(vhdr->h_vlan_TCI); + /* VLAN is present in the skb but not yet pushed in the payload.*/ + } else if (skb_vlan_tag_present(skb)) { + vlan_tag = skb->vlan_tci; + } else { return vlan_tag; + } return fec_enet_vlan_pri_to_queue[vlan_tag >> 13]; } diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 276f996f95dc..2d42e733837b 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -254,10 +254,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget) if (block->tx) { if (block->tx->q_num < priv->tx_cfg.num_queues) reschedule |= gve_tx_poll(block, budget); - else + else if (budget) reschedule |= gve_xdp_poll(block, budget); } + if (!budget) + return 0; + if (block->rx) { work_done = gve_rx_poll(block, budget); reschedule |= work_done == budget; @@ -298,6 +301,9 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget) if (block->tx) reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true); + if (!budget) + return 0; + if (block->rx) { work_done = gve_rx_poll_dqo(block, budget); reschedule |= work_done == budget; diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index e84a066aa1a4..73655347902d 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -1007,10 +1007,6 @@ int gve_rx_poll(struct gve_notify_block *block, int budget) feat = block->napi.dev->features; - /* If budget is 0, do all the work */ - if (budget == 0) - budget = INT_MAX; - if (budget > 0) work_done = gve_clean_rx_done(rx, budget, feat); diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index 6957a865cff3..9f6ffc4a54f0 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ 
b/drivers/net/ethernet/google/gve/gve_tx.c @@ -925,10 +925,6 @@ bool gve_xdp_poll(struct gve_notify_block *block, int budget) bool repoll; u32 to_do; - /* If budget is 0, do all the work */ - if (budget == 0) - budget = INT_MAX; - /* Find out how much work there is to be done */ nic_done = gve_tx_load_event_counter(priv, tx); to_do = min_t(u32, (nic_done - tx->done), budget); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 928d934cb21a..f75668c47935 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -66,6 +66,27 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb) } } +static u32 hns_mac_link_anti_shake(struct mac_driver *mac_ctrl_drv) +{ +#define HNS_MAC_LINK_WAIT_TIME 5 +#define HNS_MAC_LINK_WAIT_CNT 40 + + u32 link_status = 0; + int i; + + if (!mac_ctrl_drv->get_link_status) + return link_status; + + for (i = 0; i < HNS_MAC_LINK_WAIT_CNT; i++) { + msleep(HNS_MAC_LINK_WAIT_TIME); + mac_ctrl_drv->get_link_status(mac_ctrl_drv, &link_status); + if (!link_status) + break; + } + + return link_status; +} + void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) { struct mac_driver *mac_ctrl_drv; @@ -83,6 +104,14 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) &sfp_prsnt); if (!ret) *link_status = *link_status && sfp_prsnt; + + /* for FIBER port, it may have a fake link up. + * when the link status changes from down to up, we need to do + * anti-shake. the anti-shake time is base on tests. + * only FIBER port need to do this. + */ + if (*link_status && !mac_cb->link) + *link_status = hns_mac_link_anti_shake(mac_ctrl_drv); } mac_cb->link = *link_status; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 0900abf5c508..8a713eed4465 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -142,7 +142,8 @@ MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match); static void fill_desc(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, - int buf_num, enum hns_desc_type type, int mtu) + int buf_num, enum hns_desc_type type, int mtu, + bool is_gso) { struct hnae_desc *desc = &ring->desc[ring->next_to_use]; struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; @@ -275,6 +276,15 @@ static int hns_nic_maybe_stop_tso( return 0; } +static int hns_nic_maybe_stop_tx_v2(struct sk_buff **out_skb, int *bnum, + struct hnae_ring *ring) +{ + if (skb_is_gso(*out_skb)) + return hns_nic_maybe_stop_tso(out_skb, bnum, ring); + else + return hns_nic_maybe_stop_tx(out_skb, bnum, ring); +} + static void fill_tso_desc(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, int buf_num, enum hns_desc_type type, int mtu) @@ -300,6 +310,19 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv, mtu); } +static void fill_desc_v2(struct hnae_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu, + bool is_gso) +{ + if (is_gso) + fill_tso_desc(ring, priv, size, dma, frag_end, buf_num, type, + mtu); + else + fill_v2_desc(ring, priv, size, dma, frag_end, buf_num, type, + mtu); +} + netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, struct sk_buff *skb, struct hns_nic_ring_data *ring_data) @@ -313,6 +336,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, int seg_num; 
dma_addr_t dma; int size, next_to_use; + bool is_gso; int i; switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { @@ -339,8 +363,9 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, ring->stats.sw_err_cnt++; goto out_err_tx_ok; } + is_gso = skb_is_gso(skb); priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, - buf_num, DESC_TYPE_SKB, ndev->mtu); + buf_num, DESC_TYPE_SKB, ndev->mtu, is_gso); /* fill the fragments */ for (i = 1; i < seg_num; i++) { @@ -354,7 +379,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, } priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, seg_num - 1 == i ? 1 : 0, buf_num, - DESC_TYPE_PAGE, ndev->mtu); + DESC_TYPE_PAGE, ndev->mtu, is_gso); } /*complete translate all packets*/ @@ -1776,15 +1801,6 @@ static int hns_nic_set_features(struct net_device *netdev, netdev_info(netdev, "enet v1 do not support tso!\n"); break; default: - if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { - priv->ops.fill_desc = fill_tso_desc; - priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; - /* The chip only support 7*4096 */ - netif_set_tso_max_size(netdev, 7 * 4096); - } else { - priv->ops.fill_desc = fill_v2_desc; - priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; - } break; } netdev->features = features; @@ -2159,16 +2175,9 @@ static void hns_nic_set_priv_ops(struct net_device *netdev) priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; } else { priv->ops.get_rxd_bnum = get_v2rx_desc_bnum; - if ((netdev->features & NETIF_F_TSO) || - (netdev->features & NETIF_F_TSO6)) { - priv->ops.fill_desc = fill_tso_desc; - priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; - /* This chip only support 7*4096 */ - netif_set_tso_max_size(netdev, 7 * 4096); - } else { - priv->ops.fill_desc = fill_v2_desc; - priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; - } + priv->ops.fill_desc = fill_desc_v2; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx_v2; + netif_set_tso_max_size(netdev, 7 * 4096); /* enable tso when init * control tso on/off through TSE bit in bd */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index ffa9d6573f54..3f3ee032f631 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -44,7 +44,8 @@ struct hns_nic_ring_data { struct hns_nic_ops { void (*fill_desc)(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, - int buf_num, enum hns_desc_type type, int mtu); + int buf_num, enum hns_desc_type type, int mtu, + bool is_gso); int (*maybe_stop_tx)(struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring); void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 0b138635bafa..c083d1d10767 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -503,11 +503,14 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector, } sprintf(result[j++], "%d", i); - sprintf(result[j++], "%s", dim_state_str[dim->state]); + sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ? + dim_state_str[dim->state] : "unknown"); sprintf(result[j++], "%u", dim->profile_ix); - sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]); + sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ? 
+ dim_cqe_mode_str[dim->mode] : "unknown"); sprintf(result[j++], "%s", - dim_tune_stat_str[dim->tune_state]); + dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ? + dim_tune_stat_str[dim->tune_state] : "unknown"); sprintf(result[j++], "%u", dim->steps_left); sprintf(result[j++], "%u", dim->steps_right); sprintf(result[j++], "%u", dim->tired); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 06117502001f..b618797a7e8d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -5139,7 +5139,7 @@ static int hns3_init_mac_addr(struct net_device *netdev) struct hns3_nic_priv *priv = netdev_priv(netdev); char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hnae3_handle *h = priv->ae_handle; - u8 mac_addr_temp[ETH_ALEN]; + u8 mac_addr_temp[ETH_ALEN] = {0}; int ret = 0; if (h->ae_algo->ops->get_mac_addr) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 66e5807903a0..5ea9e59569ef 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -61,6 +61,7 @@ static void hclge_sync_fd_table(struct hclge_dev *hdev); static void hclge_update_fec_stats(struct hclge_dev *hdev); static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, int wait_cnt); +static int hclge_update_port_info(struct hclge_dev *hdev); static struct hnae3_ae_algo ae_algo; @@ -3041,6 +3042,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev) if (state != hdev->hw.mac.link) { hdev->hw.mac.link = state; + if (state == HCLGE_LINK_STATUS_UP) + hclge_update_port_info(hdev); + client->ops->link_status_change(handle, state); hclge_config_mac_tnl_int(hdev, state); if (rclient && rclient->ops->link_status_change) @@ -10025,8 +10029,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_dev *hdev = vport->back; - mutex_lock(&hdev->vport_lock); - list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { if (vlan->vlan_id == vlan_id) { if (is_write_tbl && vlan->hd_tbl_status) @@ -10041,8 +10043,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, break; } } - - mutex_unlock(&hdev->vport_lock); } void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) @@ -10451,11 +10451,16 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, * handle mailbox. Just record the vlan id, and remove it after * reset finished. */ + mutex_lock(&hdev->vport_lock); if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { set_bit(vlan_id, vport->vlan_del_fail_bmap); + mutex_unlock(&hdev->vport_lock); return -EBUSY; + } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) { + clear_bit(vlan_id, vport->vlan_del_fail_bmap); } + mutex_unlock(&hdev->vport_lock); /* when port base vlan enabled, we use port base vlan as the vlan * filter entry. 
In this case, we don't update vlan filter table @@ -10470,17 +10475,22 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, } if (!ret) { - if (!is_kill) + if (!is_kill) { hclge_add_vport_vlan_table(vport, vlan_id, writen_to_tbl); - else if (is_kill && vlan_id != 0) + } else if (is_kill && vlan_id != 0) { + mutex_lock(&hdev->vport_lock); hclge_rm_vport_vlan_table(vport, vlan_id, false); + mutex_unlock(&hdev->vport_lock); + } } else if (is_kill) { /* when remove hw vlan filter failed, record the vlan id, * and try to remove it from hw later, to be consistence * with stack */ + mutex_lock(&hdev->vport_lock); set_bit(vlan_id, vport->vlan_del_fail_bmap); + mutex_unlock(&hdev->vport_lock); } hclge_set_vport_vlan_fltr_change(vport); @@ -10520,6 +10530,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev) int i, ret, sync_cnt = 0; u16 vlan_id; + mutex_lock(&hdev->vport_lock); /* start from vport 1 for PF is always alive */ for (i = 0; i < hdev->num_alloc_vport; i++) { struct hclge_vport *vport = &hdev->vport[i]; @@ -10530,21 +10541,26 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev) ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), vport->vport_id, vlan_id, true); - if (ret && ret != -EINVAL) + if (ret && ret != -EINVAL) { + mutex_unlock(&hdev->vport_lock); return; + } clear_bit(vlan_id, vport->vlan_del_fail_bmap); hclge_rm_vport_vlan_table(vport, vlan_id, false); hclge_set_vport_vlan_fltr_change(vport); sync_cnt++; - if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) + if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) { + mutex_unlock(&hdev->vport_lock); return; + } vlan_id = find_first_bit(vport->vlan_del_fail_bmap, VLAN_N_VID); } } + mutex_unlock(&hdev->vport_lock); hclge_sync_vlan_fltr_state(hdev); } @@ -11651,6 +11667,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_msi_irq_uninit; if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { + clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); if (hnae3_dev_phy_imp_supported(hdev)) ret = hclge_update_tp_port_info(hdev); else diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index a4d68fb216fb..0aa9beefd1c7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1206,6 +1206,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { set_bit(vlan_id, hdev->vlan_del_fail_bmap); return -EBUSY; + } else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) { + clear_bit(vlan_id, hdev->vlan_del_fail_bmap); } hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, @@ -1233,20 +1235,25 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) int ret, sync_cnt = 0; u16 vlan_id; + if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID)) + return; + + rtnl_lock(); vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); while (vlan_id != VLAN_N_VID) { ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), vlan_id, true); if (ret) - return; + break; clear_bit(vlan_id, hdev->vlan_del_fail_bmap); sync_cnt++; if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) - return; + break; vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); } + rtnl_unlock(); } static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) @@ -1974,8 +1981,18 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, return 
HCLGEVF_VECTOR0_EVENT_OTHER; } +static void hclgevf_reset_timer(struct timer_list *t) +{ + struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer); + + hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST); + hclgevf_reset_task_schedule(hdev); +} + static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) { +#define HCLGEVF_RESET_DELAY 5 + enum hclgevf_evt_cause event_cause; struct hclgevf_dev *hdev = data; u32 clearval; @@ -1987,7 +2004,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) switch (event_cause) { case HCLGEVF_VECTOR0_EVENT_RST: - hclgevf_reset_task_schedule(hdev); + mod_timer(&hdev->reset_timer, + jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY)); break; case HCLGEVF_VECTOR0_EVENT_MBX: hclgevf_mbx_handler(hdev); @@ -2930,6 +2948,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) HCLGEVF_DRIVER_NAME); hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); + timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 81c16b8c8da2..a73f2bf3a56a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -219,6 +219,7 @@ struct hclgevf_dev { enum hnae3_reset_type reset_level; unsigned long reset_pending; enum hnae3_reset_type reset_type; + struct timer_list reset_timer; #define HCLGEVF_RESET_REQUESTED 0 #define HCLGEVF_RESET_PENDING 1 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index bbf7b14079de..85c2a634c8f9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -63,6 +63,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, i++; } + /* ensure additional_info will be seen after received_resp */ + smp_rmb(); + if (i >= HCLGEVF_MAX_TRY_TIMES) { dev_err(&hdev->pdev->dev, "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n", @@ -178,6 +181,10 @@ static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev, resp->resp_status = hclgevf_resp_to_errno(resp_status); memcpy(resp->additional_info, req->msg.resp_data, HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8)); + + /* ensure additional_info will be seen before setting received_resp */ + smp_wmb(); + if (match_id) { /* If match_id is not zero, it means PF support match_id. 
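The smp_wmb()/smp_rmb() pair added in this mailbox path is the classic message-passing idiom: the writer publishes the payload (additional_info) before setting the flag (received_resp), and the reader observes the flag before reading the payload, with a barrier on each side so neither CPU reorders across the hand-off. The same guarantee expressed with C11 release/acquire atomics, as a hedged sketch in which the buffer size and all names are placeholders:

#include <stdatomic.h>
#include <stddef.h>
#include <string.h>

#define RESP_DATA_SIZE 64

static char additional_info[RESP_DATA_SIZE];	/* payload */
static atomic_bool received_resp;		/* publication flag */

/* Writer: fill the payload, then publish. The release store plays the
 * role of smp_wmb() followed by the flag write.
 */
static void publish_resp(const char *data, size_t len)
{
	memcpy(additional_info, data, len);
	atomic_store_explicit(&received_resp, true, memory_order_release);
}

/* Reader: observe the flag, then read the payload. The acquire load
 * plays the role of the flag read followed by smp_rmb().
 */
static int consume_resp(char *out, size_t len)
{
	if (!atomic_load_explicit(&received_resp, memory_order_acquire))
		return 0;		/* nothing published yet */
	memcpy(out, additional_info, len);
	return 1;
}

int main(void)
{
	char out[RESP_DATA_SIZE];

	publish_resp("resp", 5);
	return consume_resp(out, sizeof(out)) ? 0 : 1;
}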
* if the match_id is right, VF get the right response, or diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f7a332e51524..d5519af34657 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -107,12 +107,18 @@ static struct workqueue_struct *i40e_wq; static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f, struct net_device *netdev, int delta) { + struct netdev_hw_addr_list *ha_list; struct netdev_hw_addr *ha; if (!f || !netdev) return; - netdev_for_each_mc_addr(ha, netdev) { + if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr)) + ha_list = &netdev->uc; + else + ha_list = &netdev->mc; + + netdev_hw_addr_list_for_each(ha, ha_list) { if (ether_addr_equal(ha->addr, f->macaddr)) { ha->refcount += delta; if (ha->refcount <= 0) @@ -16224,7 +16230,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT; if (val < MAX_FRAME_SIZE_DEFAULT) dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n", - i, val); + pf->hw.port, val); /* Add a filter to drop all Flow control frames from any VSI from being * transmitted. By doing so we stop a malicious VF from sending out @@ -16512,6 +16518,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev) return; i40e_reset_and_rebuild(pf, false, false); +#ifdef CONFIG_PCI_IOV + i40e_restore_all_vfs_msi_state(pdev); +#endif /* CONFIG_PCI_IOV */ } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index f408fcf23ce8..f6671ac79735 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -207,7 +207,7 @@ #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 #define I40E_GLGEN_MSCA_OPCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_GLGEN_MSCA_STCODE_SHIFT 28 -#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_STCODE_SHIFT) +#define I40E_GLGEN_MSCA_STCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_STCODE_SHIFT) #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index aff6dc6afbe2..f95bc2a4a838 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -37,11 +37,11 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); #define I40E_QTX_CTL_VM_QUEUE 0x1 #define I40E_QTX_CTL_PF_QUEUE 0x2 -#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK +#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(1) #define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1) #define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(2) -#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK +#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(0) #define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_GLGEN_MSCA_OPCODE_MASK(0) #define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1) #define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(3) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 08d7edccfb8d..de5ec4e6bedf 100644 --- 
a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -154,6 +154,32 @@ void i40e_vc_notify_reset(struct i40e_pf *pf) (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); } +#ifdef CONFIG_PCI_IOV +void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev) +{ + u16 vf_id; + u16 pos; + + /* Continue only if this is a PF */ + if (!pdev->is_physfn) + return; + + if (!pci_num_vf(pdev)) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + struct pci_dev *vf_dev = NULL; + + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) { + if (vf_dev->is_virtfn && vf_dev->physfn == pdev) + pci_restore_msi_state(vf_dev); + } + } +} +#endif /* CONFIG_PCI_IOV */ + /** * i40e_vc_notify_vf_reset * @vf: pointer to the VF structure @@ -3521,16 +3547,16 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf, bool found = false; int bkt; - if (!tc_filter->action) { + if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) { dev_info(&pf->pdev->dev, - "VF %d: Currently ADq doesn't support Drop Action\n", - vf->vf_id); + "VF %d: ADQ doesn't support this action (%d)\n", + vf->vf_id, tc_filter->action); goto err; } /* action_meta is TC number here to which the filter is applied */ if (!tc_filter->action_meta || - tc_filter->action_meta > I40E_MAX_VF_VSI) { + tc_filter->action_meta > vf->num_tc) { dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", vf->vf_id, tc_filter->action_meta); goto err; @@ -3844,7 +3870,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; int aq_ret = 0; - int i, ret; + int i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; @@ -3868,8 +3894,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) } cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); - if (!cfilter) - return -ENOMEM; + if (!cfilter) { + aq_ret = -ENOMEM; + goto err_out; + } /* parse destination mac address */ for (i = 0; i < ETH_ALEN; i++) @@ -3917,13 +3945,13 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) /* Adding cloud filter programmed as TC filter */ if (tcf.dst_port) - ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); + aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); else - ret = i40e_add_del_cloud_filter(vsi, cfilter, true); - if (ret) { + aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true); + if (aq_ret) { dev_err(&pf->pdev->dev, "VF %d: Failed to add cloud filter, err %pe aq_err %s\n", - vf->vf_id, ERR_PTR(ret), + vf->vf_id, ERR_PTR(aq_ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto err_free; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 2ee0f8a23248..5fd607c0de0a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -137,6 +137,9 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); +#ifdef CONFIG_PCI_IOV +void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev); +#endif /* CONFIG_PCI_IOV */ int i40e_get_vf_stats(struct net_device *netdev, int vf_id, struct ifla_vf_stats *vf_stats); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index e7ab89dc883a..63b45c61cc4a 100644 
--- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -292,6 +292,7 @@ struct iavf_adapter { #define IAVF_FLAG_QUEUES_DISABLED BIT(17) #define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18) #define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20) +#define IAVF_FLAG_FDIR_ENABLED BIT(21) /* duplicates for common code */ #define IAVF_FLAG_DCB_ENABLED 0 /* flags for admin queue service task */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 6f236d1a6444..dc499fe7734e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -827,18 +827,10 @@ static int __iavf_set_coalesce(struct net_device *netdev, struct iavf_adapter *adapter = netdev_priv(netdev); int i; - if (ec->rx_coalesce_usecs == 0) { - if (ec->use_adaptive_rx_coalesce) - netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); - } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) || - (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) { + if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) { netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); return -EINVAL; - } else if (ec->tx_coalesce_usecs == 0) { - if (ec->use_adaptive_tx_coalesce) - netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); - } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) || - (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) { + } else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) { netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); return -EINVAL; } @@ -1069,7 +1061,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter, struct iavf_fdir_fltr *rule = NULL; int ret = 0; - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) return -EOPNOTSUPP; spin_lock_bh(&adapter->fdir_fltr_lock); @@ -1211,7 +1203,7 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd, unsigned int cnt = 0; int val = 0; - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) return -EOPNOTSUPP; cmd->data = IAVF_MAX_FDIR_FILTERS; @@ -1403,7 +1395,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx int count = 50; int err; - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) return -EOPNOTSUPP; if (fsp->flow_type & FLOW_MAC_EXT) @@ -1444,12 +1436,16 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx spin_lock_bh(&adapter->fdir_fltr_lock); iavf_fdir_list_add_fltr(adapter, fltr); adapter->fdir_active_fltr++; - fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; + if (adapter->link_up) { + fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; + } else { + fltr->state = IAVF_FDIR_FLTR_INACTIVE; + } spin_unlock_bh(&adapter->fdir_fltr_lock); - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); - + if (adapter->link_up) + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); ret: if (err && fltr) kfree(fltr); @@ -1471,7 +1467,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx struct iavf_fdir_fltr *fltr = NULL; int err = 0; - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) return -EOPNOTSUPP; spin_lock_bh(&adapter->fdir_fltr_lock); @@ -1480,6 +1476,11 @@ static int iavf_del_fdir_ethtool(struct 
iavf_adapter *adapter, struct ethtool_rx if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) { fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST; adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) { + list_del(&fltr->list); + kfree(fltr); + adapter->fdir_active_fltr--; + fltr = NULL; } else { err = -EBUSY; } @@ -1788,7 +1789,7 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, ret = 0; break; case ETHTOOL_GRXCLSRLCNT: - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) break; spin_lock_bh(&adapter->fdir_fltr_lock); cmd->rule_cnt = adapter->fdir_active_fltr; diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h index 9eb9f73f6adf..d31bd923ba8c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h @@ -6,12 +6,25 @@ struct iavf_adapter; -/* State of Flow Director filter */ +/* State of Flow Director filter + * + * *_REQUEST states are used to mark filter to be sent to PF driver to perform + * an action (either add or delete filter). *_PENDING states are an indication + * that request was sent to PF and the driver is waiting for response. + * + * Both DELETE and DISABLE states are being used to delete a filter in PF. + * The difference is that after a successful response filter in DEL_PENDING + * state is being deleted from VF driver as well and filter in DIS_PENDING state + * is being changed to INACTIVE state. + */ enum iavf_fdir_fltr_state_t { IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */ IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */ IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */ IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */ + IAVF_FDIR_FLTR_DIS_REQUEST, /* Filter scheduled to be disabled */ + IAVF_FDIR_FLTR_DIS_PENDING, /* Filter pending disable by the PF */ + IAVF_FDIR_FLTR_INACTIVE, /* Filter inactive on link down */ IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */ }; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index c862ebcd2e39..e8d5b889addc 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -277,27 +277,6 @@ void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem) } /** - * iavf_lock_timeout - try to lock mutex but give up after timeout - * @lock: mutex that should be locked - * @msecs: timeout in msecs - * - * Returns 0 on success, negative on failure - **/ -static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) -{ - unsigned int wait, delay = 10; - - for (wait = 0; wait < msecs; wait += delay) { - if (mutex_trylock(lock)) - return 0; - - msleep(delay); - } - - return -1; -} - -/** * iavf_schedule_reset - Set the flags and schedule a reset event * @adapter: board private structure * @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED @@ -1353,18 +1332,20 @@ static void iavf_clear_cloud_filters(struct iavf_adapter *adapter) **/ static void iavf_clear_fdir_filters(struct iavf_adapter *adapter) { - struct iavf_fdir_fltr *fdir, *fdirtmp; + struct iavf_fdir_fltr *fdir; /* remove all Flow Director filters */ spin_lock_bh(&adapter->fdir_fltr_lock); - list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, - list) { + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { - list_del(&fdir->list); - kfree(fdir); - 
adapter->fdir_active_fltr--; - } else { - fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + /* Cancel a request, keep filter as inactive */ + fdir->state = IAVF_FDIR_FLTR_INACTIVE; + } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING || + fdir->state == IAVF_FDIR_FLTR_ACTIVE) { + /* Disable filters which are active or have a pending + * request to PF to be added + */ + fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST; } } spin_unlock_bh(&adapter->fdir_fltr_lock); @@ -4113,6 +4094,33 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, } /** + * iavf_restore_fdir_filters + * @adapter: board private structure + * + * Restore existing FDIR filters when VF netdev comes back up. + **/ +static void iavf_restore_fdir_filters(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *f; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(f, &adapter->fdir_list_head, list) { + if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) { + /* Cancel a request, keep filter as active */ + f->state = IAVF_FDIR_FLTR_ACTIVE; + } else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING || + f->state == IAVF_FDIR_FLTR_INACTIVE) { + /* Add filters which are inactive or have a pending + * request to PF to be deleted + */ + f->state = IAVF_FDIR_FLTR_ADD_REQUEST; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); +} + +/** * iavf_open - Called when a network interface is made active * @netdev: network interface device structure * @@ -4179,8 +4187,9 @@ static int iavf_open(struct net_device *netdev) spin_unlock_bh(&adapter->mac_vlan_list_lock); - /* Restore VLAN filters that were removed with IFF_DOWN */ + /* Restore filters that were removed with IFF_DOWN */ iavf_restore_filters(adapter); + iavf_restore_fdir_filters(adapter); iavf_configure(adapter); @@ -4311,6 +4320,49 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) return ret; } +/** + * iavf_disable_fdir - disable Flow Director and clear existing filters + * @adapter: board private structure + **/ +static void iavf_disable_fdir(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir, *fdirtmp; + bool del_filters = false; + + adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED; + + /* remove all Flow Director filters */ + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST || + fdir->state == IAVF_FDIR_FLTR_INACTIVE) { + /* Delete filters not registered in PF */ + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING || + fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST || + fdir->state == IAVF_FDIR_FLTR_ACTIVE) { + /* Filters registered in PF, schedule their deletion */ + fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + del_filters = true; + } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { + /* Request to delete filter already sent to PF, change + * state to DEL_PENDING to delete filter after PF's + * response, not set as INACTIVE + */ + fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (del_filters) { + adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + } +} + #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_HW_VLAN_STAG_RX | \ @@ -4336,6 +4388,13 @@ static int iavf_set_features(struct net_device *netdev, ((netdev->features & 
NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS))) iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); + if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) { + if (features & NETIF_F_NTUPLE) + adapter->flags |= IAVF_FLAG_FDIR_ENABLED; + else + iavf_disable_fdir(adapter); + } + return 0; } @@ -4685,6 +4744,9 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev, features = iavf_fix_netdev_vlan_features(adapter, features); + if (!FDIR_FLTR_SUPPORT(adapter)) + features &= ~NETIF_F_NTUPLE; + return iavf_fix_strip_features(adapter, features); } @@ -4802,6 +4864,12 @@ int iavf_process_config(struct iavf_adapter *adapter) if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (FDIR_FLTR_SUPPORT(adapter)) { + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->features |= NETIF_F_NTUPLE; + adapter->flags |= IAVF_FLAG_FDIR_ENABLED; + } + netdev->priv_flags |= IFF_UNICAST_FLT; /* Do not turn on offloads when they are requested to be turned off. @@ -4826,34 +4894,6 @@ int iavf_process_config(struct iavf_adapter *adapter) } /** - * iavf_shutdown - Shutdown the device in preparation for a reboot - * @pdev: pci device structure - **/ -static void iavf_shutdown(struct pci_dev *pdev) -{ - struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); - struct net_device *netdev = adapter->netdev; - - netif_device_detach(netdev); - - if (netif_running(netdev)) - iavf_close(netdev); - - if (iavf_lock_timeout(&adapter->crit_lock, 5000)) - dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__); - /* Prevent the watchdog from running. */ - iavf_change_state(adapter, __IAVF_REMOVE); - adapter->aq_required = 0; - mutex_unlock(&adapter->crit_lock); - -#ifdef CONFIG_PM - pci_save_state(pdev); - -#endif - pci_disable_device(pdev); -} - -/** * iavf_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in iavf_pci_tbl @@ -5063,16 +5103,21 @@ static int __maybe_unused iavf_resume(struct device *dev_d) **/ static void iavf_remove(struct pci_dev *pdev) { - struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); struct iavf_fdir_fltr *fdir, *fdirtmp; struct iavf_vlan_filter *vlf, *vlftmp; struct iavf_cloud_filter *cf, *cftmp; struct iavf_adv_rss *rss, *rsstmp; struct iavf_mac_filter *f, *ftmp; + struct iavf_adapter *adapter; struct net_device *netdev; struct iavf_hw *hw; - netdev = adapter->netdev; + /* Don't proceed with remove if netdev is already freed */ + netdev = pci_get_drvdata(pdev); + if (!netdev) + return; + + adapter = iavf_pdev_to_adapter(pdev); hw = &adapter->hw; if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) @@ -5184,11 +5229,25 @@ static void iavf_remove(struct pci_dev *pdev) destroy_workqueue(adapter->wq); + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); pci_disable_device(pdev); } +/** + * iavf_shutdown - Shutdown the device in preparation for a reboot + * @pdev: pci device structure + **/ +static void iavf_shutdown(struct pci_dev *pdev) +{ + iavf_remove(pdev); + + if (system_state == SYSTEM_POWER_OFF) + pci_set_power_state(pdev, PCI_D3hot); +} + static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume); static struct pci_driver iavf_driver = { diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index 7e6ee32d19b6..10ba36602c0c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -15,7 +15,6 @@ */ #define IAVF_ITR_DYNAMIC 
0x8000 /* use top bit as a flag */ #define IAVF_ITR_MASK 0x1FFE /* mask for ITR register value */ -#define IAVF_MIN_ITR 2 /* reg uses 2 usec resolution */ #define IAVF_ITR_100K 10 /* all values below must be even */ #define IAVF_ITR_50K 20 #define IAVF_ITR_20K 50 diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 64c4443dbef9..2d9366be0ec5 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1735,8 +1735,8 @@ void iavf_add_fdir_filter(struct iavf_adapter *adapter) **/ void iavf_del_fdir_filter(struct iavf_adapter *adapter) { + struct virtchnl_fdir_del f = {}; struct iavf_fdir_fltr *fdir; - struct virtchnl_fdir_del f; bool process_fltr = false; int len; @@ -1753,11 +1753,16 @@ void iavf_del_fdir_filter(struct iavf_adapter *adapter) list_for_each_entry(fdir, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) { process_fltr = true; - memset(&f, 0, len); f.vsi_id = fdir->vc_add_msg.vsi_id; f.flow_id = fdir->flow_id; fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; break; + } else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) { + process_fltr = true; + f.vsi_id = fdir->vc_add_msg.vsi_id; + f.flow_id = fdir->flow_id; + fdir->state = IAVF_FDIR_FLTR_DIS_PENDING; + break; } } spin_unlock_bh(&adapter->fdir_fltr_lock); @@ -1902,6 +1907,48 @@ static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, } /** + * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset + * @adapter: private adapter structure + * + * Called after a reset to re-add all FDIR filters and delete some of them + * if they were pending to be deleted. + */ +static void iavf_activate_fdir_filters(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *f, *ftmp; + bool add_filters = false; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) { + if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST || + f->state == IAVF_FDIR_FLTR_ADD_PENDING || + f->state == IAVF_FDIR_FLTR_ACTIVE) { + /* All filters and requests have been removed in PF, + * restore them + */ + f->state = IAVF_FDIR_FLTR_ADD_REQUEST; + add_filters = true; + } else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST || + f->state == IAVF_FDIR_FLTR_DIS_PENDING) { + /* Link down state, leave filters as inactive */ + f->state = IAVF_FDIR_FLTR_INACTIVE; + } else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST || + f->state == IAVF_FDIR_FLTR_DEL_PENDING) { + /* Delete filters that were pending to be deleted, the + * list on PF is already cleared after a reset + */ + list_del(&f->list); + kfree(f); + adapter->fdir_active_fltr--; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (add_filters) + adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; +} + +/** * iavf_virtchnl_completion * @adapter: adapter structure * @v_opcode: opcode sent by PF @@ -2078,7 +2125,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry(fdir, &adapter->fdir_list_head, list) { - if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { + if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING || + fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { fdir->state = IAVF_FDIR_FLTR_ACTIVE; dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", iavf_stat_str(&adapter->hw, @@ -2214,6 +2262,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, spin_unlock_bh(&adapter->mac_vlan_list_lock); + 
iavf_activate_fdir_filters(adapter); + iavf_parse_vf_resource_msg(adapter); /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the @@ -2390,7 +2440,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { - if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) { + if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS || + del_fltr->status == + VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) { dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", fdir->loc); list_del(&fdir->list); @@ -2402,6 +2454,17 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, del_fltr->status); iavf_print_fdir_fltr(adapter, fdir); } + } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { + if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS || + del_fltr->status == + VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) { + fdir->state = IAVF_FDIR_FLTR_INACTIVE; + } else { + fdir->state = IAVF_FDIR_FLTR_ACTIVE; + dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n", + del_fltr->status); + iavf_print_fdir_fltr(adapter, fdir); + } } } spin_unlock_bh(&adapter->fdir_fltr_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index d7fdb7ba7268..fbd5d92182d3 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1359,8 +1359,9 @@ struct ice_aqc_get_link_status_data { u8 lp_flowcontrol; #define ICE_AQ_LINK_LP_PAUSE_ADV BIT(0) #define ICE_AQ_LINK_LP_ASM_DIR_ADV BIT(1) + u8 reserved5[5]; #define ICE_AQC_LS_DATA_SIZE_V2 \ - offsetofend(struct ice_aqc_get_link_status_data, lp_flowcontrol) + offsetofend(struct ice_aqc_get_link_status_data, reserved5) } __packed; /* Set event mask command (direct 0x0613) */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 9a6c25f98632..edac34c796ce 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -5332,7 +5332,6 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, u8 *eec_mode) { struct ice_aqc_get_cgu_dpll_status *cmd; - const s64 nsec_per_psec = 1000LL; struct ice_aq_desc desc; int status; @@ -5348,8 +5347,7 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, *phase_offset = le32_to_cpu(cmd->phase_offset_h); *phase_offset <<= 32; *phase_offset += le32_to_cpu(cmd->phase_offset_l); - *phase_offset = div64_s64(sign_extend64(*phase_offset, 47), - nsec_per_psec); + *phase_offset = sign_extend64(*phase_offset, 47); *eec_mode = cmd->eec_mode; } diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index cfb1580f5850..8b7504a9df31 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -1479,14 +1479,14 @@ ice_post_dwnld_pkg_actions(struct ice_hw *hw) } /** - * ice_download_pkg + * ice_download_pkg_with_sig_seg * @hw: pointer to the hardware structure * @pkg_hdr: pointer to package header * * Handles the download of a complete package. 
*/ static enum ice_ddp_state -ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) { enum ice_aq_err aq_err = hw->adminq.sq_last_status; enum ice_ddp_state state = ICE_DDP_PKG_ERR; @@ -1519,6 +1519,103 @@ ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) state = ice_post_dwnld_pkg_actions(hw); ice_release_global_cfg_lock(hw); + + return state; +} + +/** + * ice_dwnld_cfg_bufs + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains global config lock and downloads the package configuration buffers + * to the firmware. + */ +static enum ice_ddp_state +ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_ddp_state state; + struct ice_buf_hdr *bh; + int status; + + if (!bufs || !count) + return ICE_DDP_PKG_ERR; + + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. + */ + bh = (struct ice_buf_hdr *)bufs; + if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) + return ICE_DDP_PKG_SUCCESS; + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == -EALREADY) + return ICE_DDP_PKG_ALREADY_LOADED; + return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); + } + + state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true); + if (!state) + state = ice_post_dwnld_pkg_actions(hw); + + ice_release_global_cfg_lock(hw); + + return state; +} + +/** + * ice_download_pkg_without_sig_seg + * @hw: pointer to the hardware structure + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package without signature segment. + */ +static enum ice_ddp_state +ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_buf_table *ice_buf_tbl; + + ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", + ice_seg->hdr.seg_format_ver.major, + ice_seg->hdr.seg_format_ver.minor, + ice_seg->hdr.seg_format_ver.update, + ice_seg->hdr.seg_format_ver.draft); + + ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", + le32_to_cpu(ice_seg->hdr.seg_type), + le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); + + ice_buf_tbl = ice_find_buf_table(ice_seg); + + ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", + le32_to_cpu(ice_buf_tbl->buf_count)); + + return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + le32_to_cpu(ice_buf_tbl->buf_count)); +} + +/** + * ice_download_pkg + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package. 
+ */ +static enum ice_ddp_state +ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + struct ice_seg *ice_seg) +{ + enum ice_ddp_state state; + + if (hw->pkg_has_signing_seg) + state = ice_download_pkg_with_sig_seg(hw, pkg_hdr); + else + state = ice_download_pkg_without_sig_seg(hw, ice_seg); + ice_post_pkg_dwnld_vlan_mode_cfg(hw); return state; @@ -2083,7 +2180,7 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) /* initialize package hints and then download package */ ice_init_pkg_hints(hw, seg); - state = ice_download_pkg(hw, pkg); + state = ice_download_pkg(hw, pkg, seg); if (state == ICE_DDP_PKG_ALREADY_LOADED) { ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index 835c419ccc74..86b180cb32a0 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -815,12 +815,6 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv, struct ice_pf *pf = d->pf; int ret; - if (prio > ICE_DPLL_PRIO_MAX) { - NL_SET_ERR_MSG_FMT(extack, "prio out of supported range 0-%d", - ICE_DPLL_PRIO_MAX); - return -EINVAL; - } - mutex_lock(&pf->dplls.lock); ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack); mutex_unlock(&pf->dplls.lock); @@ -1756,6 +1750,7 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu, } d->pf = pf; if (cgu) { + ice_dpll_update_state(pf, d, true); ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d); if (ret) { dpll_device_put(d->dpll); @@ -1796,8 +1791,6 @@ static int ice_dpll_init_worker(struct ice_pf *pf) struct ice_dplls *d = &pf->dplls; struct kthread_worker *kworker; - ice_dpll_update_state(pf, &d->eec, true); - ice_dpll_update_state(pf, &d->pps, true); kthread_init_delayed_work(&d->work, ice_dpll_periodic_work); kworker = kthread_create_worker(0, "ice-dplls-%s", dev_name(ice_pf_to_dev(pf))); @@ -1830,6 +1823,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, int num_pins, i, ret = -EINVAL; struct ice_hw *hw = &pf->hw; struct ice_dpll_pin *pins; + unsigned long caps; u8 freq_supp_num; bool input; @@ -1849,6 +1843,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, } for (i = 0; i < num_pins; i++) { + caps = 0; pins[i].idx = i; pins[i].prop.board_label = ice_cgu_get_pin_name(hw, i, input); pins[i].prop.type = ice_cgu_get_pin_type(hw, i, input); @@ -1861,8 +1856,8 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, &dp->input_prio[i]); if (ret) return ret; - pins[i].prop.capabilities |= - DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE; + caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE | + DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE); pins[i].prop.phase_range.min = pf->dplls.input_phase_adj_max; pins[i].prop.phase_range.max = @@ -1872,9 +1867,11 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, pf->dplls.output_phase_adj_max; pins[i].prop.phase_range.max = -pf->dplls.output_phase_adj_max; + ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps); + if (ret) + return ret; } - pins[i].prop.capabilities |= - DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE; + pins[i].prop.capabilities = caps; ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL); if (ret) return ret; diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h index bb32b6d88373..93172e93995b 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.h +++ b/drivers/net/ethernet/intel/ice/ice_dpll.h @@ -6,7 +6,6 @@ #include "ice.h" -#define 
ICE_DPLL_PRIO_MAX 0xF #define ICE_DPLL_RCLK_NUM_MAX 4 /** ice_dpll_pin - store info about pins diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index a34083567e6f..bde9bc74f928 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1850,14 +1850,14 @@ ice_phy_type_to_ethtool(struct net_device *netdev, linkmode_zero(ks->link_modes.supported); linkmode_zero(ks->link_modes.advertising); - for (i = 0; i < BITS_PER_TYPE(u64); i++) { + for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) { if (phy_types_low & BIT_ULL(i)) ice_linkmode_set_bit(&phy_type_low_lkup[i], ks, req_speeds, advert_phy_type_lo, i); } - for (i = 0; i < BITS_PER_TYPE(u64); i++) { + for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) { if (phy_types_high & BIT_ULL(i)) ice_linkmode_set_bit(&phy_type_high_lkup[i], ks, req_speeds, advert_phy_type_hi, diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index cd065ec48c87..b47cd43ae871 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -570,6 +570,50 @@ resume_traffic: } /** + * ice_lag_build_netdev_list - populate the lag struct's netdev list + * @lag: local lag struct + * @ndlist: pointer to netdev list to populate + */ +static void ice_lag_build_netdev_list(struct ice_lag *lag, + struct ice_lag_netdev_list *ndlist) +{ + struct ice_lag_netdev_list *nl; + struct net_device *tmp_nd; + + INIT_LIST_HEAD(&ndlist->node); + rcu_read_lock(); + for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { + nl = kzalloc(sizeof(*nl), GFP_ATOMIC); + if (!nl) + break; + + nl->netdev = tmp_nd; + list_add(&nl->node, &ndlist->node); + } + rcu_read_unlock(); + lag->netdev_head = &ndlist->node; +} + +/** + * ice_lag_destroy_netdev_list - free lag struct's netdev list + * @lag: pointer to local lag struct + * @ndlist: pointer to lag struct netdev list + */ +static void ice_lag_destroy_netdev_list(struct ice_lag *lag, + struct ice_lag_netdev_list *ndlist) +{ + struct ice_lag_netdev_list *entry, *n; + + rcu_read_lock(); + list_for_each_entry_safe(entry, n, &ndlist->node, node) { + list_del(&entry->node); + kfree(entry); + } + rcu_read_unlock(); + lag->netdev_head = NULL; +} + +/** * ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF * @lag: primary interface LAG struct * @oldport: lport of previous interface @@ -597,7 +641,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport, void ice_lag_move_new_vf_nodes(struct ice_vf *vf) { struct ice_lag_netdev_list ndlist; - struct list_head *tmp, *n; u8 pri_port, act_port; struct ice_lag *lag; struct ice_vsi *vsi; @@ -621,38 +664,15 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf) pri_port = pf->hw.port_info->lport; act_port = lag->active_port; - if (lag->upper_netdev) { - struct ice_lag_netdev_list *nl; - struct net_device *tmp_nd; - - INIT_LIST_HEAD(&ndlist.node); - rcu_read_lock(); - for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { - nl = kzalloc(sizeof(*nl), GFP_ATOMIC); - if (!nl) - break; - - nl->netdev = tmp_nd; - list_add(&nl->node, &ndlist.node); - } - rcu_read_unlock(); - } - - lag->netdev_head = &ndlist.node; + if (lag->upper_netdev) + ice_lag_build_netdev_list(lag, &ndlist); if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) && lag->bonded && lag->primary && pri_port != act_port && !list_empty(lag->netdev_head)) ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx); - 
list_for_each_safe(tmp, n, &ndlist.node) { - struct ice_lag_netdev_list *entry; - - entry = list_entry(tmp, struct ice_lag_netdev_list, node); - list_del(&entry->node); - kfree(entry); - } - lag->netdev_head = NULL; + ice_lag_destroy_netdev_list(lag, &ndlist); new_vf_unlock: mutex_unlock(&pf->lag_mutex); @@ -679,6 +699,29 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport) ice_lag_move_single_vf_nodes(lag, oldport, newport, i); } +/** + * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context + * @lag: local lag struct + * @src_prt: lport value for source port + * @dst_prt: lport value for destination port + * + * This function is used to move nodes during an out-of-netdev-event situation, + * primarily when the driver needs to reconfigure or recreate resources. + * + * Must be called while holding the lag_mutex to avoid lag events from + * processing while out-of-sync moves are happening. Also, paired moves, + * such as used in a reset flow, should both be called under the same mutex + * lock to avoid changes between start of reset and end of reset. + */ +void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt) +{ + struct ice_lag_netdev_list ndlist; + + ice_lag_build_netdev_list(lag, &ndlist); + ice_lag_move_vf_nodes(lag, src_prt, dst_prt); + ice_lag_destroy_netdev_list(lag, &ndlist); +} + #define ICE_LAG_SRIOV_CP_RECIPE 10 #define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16 @@ -1938,6 +1981,8 @@ int ice_init_lag(struct ice_pf *pf) int n, err; ice_lag_init_feature_support_flag(pf); + if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) + return 0; pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL); if (!pf->lag) @@ -2051,7 +2096,6 @@ void ice_lag_rebuild(struct ice_pf *pf) { struct ice_lag_netdev_list ndlist; struct ice_lag *lag, *prim_lag; - struct list_head *tmp, *n; u8 act_port, loc_port; if (!pf->lag || !pf->lag->bonded) @@ -2063,21 +2107,7 @@ void ice_lag_rebuild(struct ice_pf *pf) if (lag->primary) { prim_lag = lag; } else { - struct ice_lag_netdev_list *nl; - struct net_device *tmp_nd; - - INIT_LIST_HEAD(&ndlist.node); - rcu_read_lock(); - for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { - nl = kzalloc(sizeof(*nl), GFP_ATOMIC); - if (!nl) - break; - - nl->netdev = tmp_nd; - list_add(&nl->node, &ndlist.node); - } - rcu_read_unlock(); - lag->netdev_head = &ndlist.node; + ice_lag_build_netdev_list(lag, &ndlist); prim_lag = ice_lag_find_primary(lag); } @@ -2107,13 +2137,7 @@ void ice_lag_rebuild(struct ice_pf *pf) ice_clear_rdma_cap(pf); lag_rebuild_out: - list_for_each_safe(tmp, n, &ndlist.node) { - struct ice_lag_netdev_list *entry; - - entry = list_entry(tmp, struct ice_lag_netdev_list, node); - list_del(&entry->node); - kfree(entry); - } + ice_lag_destroy_netdev_list(lag, &ndlist); mutex_unlock(&pf->lag_mutex); } diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index 9557e8605a07..ede833dfa658 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -65,4 +65,5 @@ int ice_init_lag(struct ice_pf *pf); void ice_deinit_lag(struct ice_pf *pf); void ice_lag_rebuild(struct ice_pf *pf); bool ice_lag_is_switchdev_running(struct ice_pf *pf); +void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt); #endif /* _ICE_LAG_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 4b1e56396293..1bad6e17f9be 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ 
b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2371,6 +2371,9 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) } else { max_txqs[i] = vsi->alloc_txq; } + + if (vsi->type == ICE_VSI_PF) + max_txqs[i] += vsi->num_xdp_txq; } dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); @@ -2620,10 +2623,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi) if (vsi->type == ICE_VSI_VF && vsi->agg_node && vsi->agg_node->valid) vsi->agg_node->num_vsis--; - if (vsi->agg_node) { - vsi->agg_node->valid = false; - vsi->agg_node->agg_id = 0; - } } /** diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 6607fa6fe556..adfdea1e2805 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2146,7 +2146,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Ensure we have media as we cannot configure a medialess port */ if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) - return -EPERM; + return -ENOMEDIUM; ice_print_topo_conflict(vsi); @@ -7401,15 +7401,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - /* configure PTP timestamping after VSI rebuild */ - if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) { - if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF) - ice_ptp_cfg_timestamp(pf, false); - else if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL) - /* for E82x PHC owner always need to have interrupts */ - ice_ptp_cfg_timestamp(pf, true); - } - err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); if (err) { dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); @@ -7461,6 +7452,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_plug_aux_dev(pf); if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) ice_lag_rebuild(pf); + + /* Restore timestamp mode settings after VSI rebuild */ + ice_ptp_restore_timestamp_mode(pf); return; err_vsi_rebuild: @@ -9193,8 +9187,14 @@ int ice_stop(struct net_device *netdev) int link_err = ice_force_phys_link_state(vsi, false); if (link_err) { - netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", - vsi->vsi_num, link_err); + if (link_err == -ENOMEDIUM) + netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n", + vsi->vsi_num); + else + netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", + vsi->vsi_num, link_err); + + ice_vsi_close(vsi); return -EIO; } } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 1eddcbe89b0c..71f405f8a6fe 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -256,48 +256,42 @@ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin, } /** - * ice_ptp_configure_tx_tstamp - Enable or disable Tx timestamp interrupt - * @pf: The PF pointer to search in - * @on: bool value for whether timestamp interrupt is enabled or disabled + * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device + * @pf: Board private structure + * + * Program the device to respond appropriately to the Tx timestamp interrupt + * cause. */ -static void ice_ptp_configure_tx_tstamp(struct ice_pf *pf, bool on) +static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf) { + struct ice_hw *hw = &pf->hw; + bool enable; u32 val; + switch (pf->ptp.tx_interrupt_mode) { + case ICE_PTP_TX_INTERRUPT_ALL: + /* React to interrupts across all quads. 
*/ + wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f); + enable = true; + break; + case ICE_PTP_TX_INTERRUPT_NONE: + /* Do not react to interrupts on any quad. */ + wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0); + enable = false; + break; + case ICE_PTP_TX_INTERRUPT_SELF: + default: + enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON; + break; + } + /* Configure the Tx timestamp interrupt */ - val = rd32(&pf->hw, PFINT_OICR_ENA); - if (on) + val = rd32(hw, PFINT_OICR_ENA); + if (enable) val |= PFINT_OICR_TSYN_TX_M; else val &= ~PFINT_OICR_TSYN_TX_M; - wr32(&pf->hw, PFINT_OICR_ENA, val); -} - -/** - * ice_set_tx_tstamp - Enable or disable Tx timestamping - * @pf: The PF pointer to search in - * @on: bool value for whether timestamps are enabled or disabled - */ -static void ice_set_tx_tstamp(struct ice_pf *pf, bool on) -{ - struct ice_vsi *vsi; - u16 i; - - vsi = ice_get_main_vsi(pf); - if (!vsi) - return; - - /* Set the timestamp enable flag for all the Tx rings */ - ice_for_each_txq(vsi, i) { - if (!vsi->tx_rings[i]) - continue; - vsi->tx_rings[i]->ptp_tx = on; - } - - if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF) - ice_ptp_configure_tx_tstamp(pf, on); - - pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + wr32(hw, PFINT_OICR_ENA, val); } /** @@ -311,7 +305,7 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) u16 i; vsi = ice_get_main_vsi(pf); - if (!vsi) + if (!vsi || !vsi->rx_rings) return; /* Set the timestamp flag for all the Rx rings */ @@ -320,23 +314,50 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) continue; vsi->rx_rings[i]->ptp_rx = on; } +} + +/** + * ice_ptp_disable_timestamp_mode - Disable current timestamp mode + * @pf: Board private structure + * + * Called during preparation for reset to temporarily disable timestamping on + * the device. Called during remove to disable timestamping while cleaning up + * driver resources. + */ +static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + u32 val; + + val = rd32(hw, PFINT_OICR_ENA); + val &= ~PFINT_OICR_TSYN_TX_M; + wr32(hw, PFINT_OICR_ENA, val); - pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL : - HWTSTAMP_FILTER_NONE; + ice_set_rx_tstamp(pf, false); } /** - * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit + * ice_ptp_restore_timestamp_mode - Restore timestamp configuration * @pf: Board private structure - * @ena: bool value to enable or disable time stamp * - * This function will configure timestamping during PTP initialization - * and deinitialization + * Called at the end of rebuild to restore timestamp configuration after + * a device reset. */ -void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) +void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) { - ice_set_tx_tstamp(pf, ena); - ice_set_rx_tstamp(pf, ena); + struct ice_hw *hw = &pf->hw; + bool enable_rx; + + ice_ptp_cfg_tx_interrupt(pf); + + enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL; + ice_set_rx_tstamp(pf, enable_rx); + + /* Trigger an immediate software interrupt to ensure that timestamps + * which occurred during reset are handled now. 
+ */ + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); + ice_flush(hw); } /** @@ -2037,10 +2058,10 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) { switch (config->tx_type) { case HWTSTAMP_TX_OFF: - ice_set_tx_tstamp(pf, false); + pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF; break; case HWTSTAMP_TX_ON: - ice_set_tx_tstamp(pf, true); + pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON; break; default: return -ERANGE; @@ -2048,7 +2069,7 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: - ice_set_rx_tstamp(pf, false); + pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: @@ -2064,12 +2085,15 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: - ice_set_rx_tstamp(pf, true); + pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } + /* Immediately update the device timestamping mode */ + ice_ptp_restore_timestamp_mode(pf); + return 0; } @@ -2737,7 +2761,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf) clear_bit(ICE_FLAG_PTP, pf->flags); /* Disable timestamping for both Tx and Rx */ - ice_ptp_cfg_timestamp(pf, false); + ice_ptp_disable_timestamp_mode(pf); kthread_cancel_delayed_work_sync(&ptp->work); @@ -2803,15 +2827,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf) /* Release the global hardware lock */ ice_ptp_unlock(hw); - if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL) { - /* The clock owner for this device type handles the timestamp - * interrupt for all ports. - */ - ice_ptp_configure_tx_tstamp(pf, true); - - /* React on all quads interrupts for E82x */ - wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f); - + if (!ice_is_e810(hw)) { /* Enable quad interrupts */ err = ice_ptp_tx_ena_intr(pf, true, itr); if (err) @@ -2881,13 +2897,6 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) case ICE_PHY_E810: return ice_ptp_init_tx_e810(pf, &ptp_port->tx); case ICE_PHY_E822: - /* Non-owner PFs don't react to any interrupts on E82x, - * neither on own quad nor on others - */ - if (!ice_ptp_pf_handles_tx_interrupt(pf)) { - ice_ptp_configure_tx_tstamp(pf, false); - wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0); - } kthread_init_delayed_work(&ptp_port->ov_work, ice_ptp_wait_for_offsets); @@ -3032,6 +3041,9 @@ void ice_ptp_init(struct ice_pf *pf) /* Start the PHY timestamping block */ ice_ptp_reset_phy_timestamping(pf); + /* Configure initial Tx interrupt settings */ + ice_ptp_cfg_tx_interrupt(pf); + set_bit(ICE_FLAG_PTP, pf->flags); err = ice_ptp_init_work(pf, ptp); if (err) @@ -3067,7 +3079,7 @@ void ice_ptp_release(struct ice_pf *pf) return; /* Disable timestamping for both Tx and Rx */ - ice_ptp_cfg_timestamp(pf, false); + ice_ptp_disable_timestamp_mode(pf); ice_ptp_remove_auxbus_device(pf); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 8f6f94392756..06a330867fc9 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -292,7 +292,7 @@ int ice_ptp_clock_index(struct ice_pf *pf); struct ice_pf; int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr); int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr); -void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool 
ena); +void ice_ptp_restore_timestamp_mode(struct ice_pf *pf); void ice_ptp_extts_event(struct ice_pf *pf); s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); @@ -317,8 +317,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) return -EOPNOTSUPP; } -static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { } - +static inline void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) { } static inline void ice_ptp_extts_event(struct ice_pf *pf) { } static inline s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 6d573908de7a..a00b55e14aac 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -3961,3 +3961,57 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num) return ret; } + +/** + * ice_cgu_get_output_pin_state_caps - get output pin state capabilities + * @hw: pointer to the hw struct + * @pin_id: id of a pin + * @caps: capabilities to modify + * + * Return: + * * 0 - success, state capabilities were modified + * * negative - failure, capabilities were not modified + */ +int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, + unsigned long *caps) +{ + bool can_change = true; + + switch (hw->device_id) { + case ICE_DEV_ID_E810C_SFP: + if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3) + can_change = false; + break; + case ICE_DEV_ID_E810C_QSFP: + if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3 || pin_id == ZL_OUT4) + can_change = false; + break; + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_SGMII: + if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 && + pin_id == ZL_OUT2) + can_change = false; + else if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 && + pin_id == SI_OUT1) + can_change = false; + break; + default: + return -EINVAL; + } + if (can_change) + *caps |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE; + else + *caps &= ~DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE; + + return 0; +} diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 36aeeef99ec0..cf76701566c7 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -282,6 +282,8 @@ int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx, int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num); void ice_ptp_init_phy_model(struct ice_hw *hw); +int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, + unsigned long *caps); #define PFTSYN_SEM_BYTES 4 diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 2a5e6616cc0a..e1494f24f661 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -374,16 +374,11 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) */ int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) { - struct ice_pf *pf; - if (!vf || !q_vector) return -EINVAL; - pf = vf->pf; - /* always add one to account for the OICR being the first MSIX */ - return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id + - 
q_vector->v_idx + 1; + return vf->first_vector_idx + q_vector->v_idx + 1; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 52d0a126eb61..9e97ea863068 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -2306,9 +2306,6 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb, if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) return; - if (!tx_ring->ptp_tx) - return; - /* Tx timestamps cannot be sampled when doing TSO */ if (first->tx_flags & ICE_TX_FLAGS_TSO) return; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 166413fc33f4..daf7b9dbb143 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -380,7 +380,6 @@ struct ice_tx_ring { #define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2) u8 flags; u8 dcb_tc; /* Traffic class of ring */ - u8 ptp_tx; } ____cacheline_internodealigned_in_smp; static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring) diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index aca1f2ea5034..b7ae09952156 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -829,12 +829,16 @@ static void ice_notify_vf_reset(struct ice_vf *vf) int ice_reset_vf(struct ice_vf *vf, u32 flags) { struct ice_pf *pf = vf->pf; + struct ice_lag *lag; struct ice_vsi *vsi; + u8 act_prt, pri_prt; struct device *dev; int err = 0; bool rsd; dev = ice_pf_to_dev(pf); + act_prt = ICE_LAG_INVALID_PORT; + pri_prt = pf->hw.port_info->lport; if (flags & ICE_VF_RESET_NOTIFY) ice_notify_vf_reset(vf); @@ -845,6 +849,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) return 0; } + lag = pf->lag; + mutex_lock(&pf->lag_mutex); + if (lag && lag->bonded && lag->primary) { + act_prt = lag->active_port; + if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT && + lag->upper_netdev) + ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt); + else + act_prt = ICE_LAG_INVALID_PORT; + } + if (flags & ICE_VF_RESET_LOCK) mutex_lock(&vf->cfg_lock); else @@ -937,6 +952,11 @@ out_unlock: if (flags & ICE_VF_RESET_LOCK) mutex_unlock(&vf->cfg_lock); + if (lag && lag->bonded && lag->primary && + act_prt != ICE_LAG_INVALID_PORT) + ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); + mutex_unlock(&pf->lag_mutex); + return err; } diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c index d7b10dc67f03..80dc4bcdd3a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c @@ -32,7 +32,6 @@ static void ice_port_vlan_on(struct ice_vsi *vsi) /* setup outer VLAN ops */ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan; vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan; - vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan; /* setup inner VLAN ops */ vlan_ops = &vsi->inner_vlan_ops; @@ -47,8 +46,13 @@ static void ice_port_vlan_on(struct ice_vsi *vsi) vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan; vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan; - vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan; } + + /* all Rx traffic should be in the domain of the assigned port VLAN, + * so prevent disabling Rx VLAN filtering + */ + vlan_ops->dis_rx_filtering = noop_vlan; + vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering; } 
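Note on the ops-table technique in these ice_vf_vsi_vlan_ops.c hunks: while a port VLAN is assigned, the hunk above pins dis_rx_filtering to noop_vlan, so requests to disable Rx VLAN filtering succeed without taking effect; the following hunk restores ice_vsi_dis_rx_vlan_filtering once the port VLAN is removed. The sketch below is a minimal, self-contained illustration of that pattern, using hypothetical names (demo_vlan_ops, hw_dis_rx_filtering, noop_dis_rx_filtering) rather than the driver's real symbols:

#include <stdio.h>

struct demo_vlan_ops {
	int (*dis_rx_filtering)(void *vsi);
};

/* Real handler: would program hardware to turn Rx VLAN filtering off. */
static int hw_dis_rx_filtering(void *vsi)
{
	(void)vsi;
	printf("rx vlan filtering disabled in hw\n");
	return 0;
}

/* No-op stub: reports success but changes nothing, masking the
 * operation while a port VLAN owns the Rx domain.
 */
static int noop_dis_rx_filtering(void *vsi)
{
	(void)vsi;
	return 0;
}

static void demo_port_vlan_on(struct demo_vlan_ops *ops)
{
	/* All Rx traffic belongs to the port VLAN, so silently refuse
	 * to disable filtering by swapping in the no-op.
	 */
	ops->dis_rx_filtering = noop_dis_rx_filtering;
}

static void demo_port_vlan_off(struct demo_vlan_ops *ops)
{
	/* Port VLAN removed; restore the real handler. */
	ops->dis_rx_filtering = hw_dis_rx_filtering;
}

int main(void)
{
	struct demo_vlan_ops ops = { .dis_rx_filtering = hw_dis_rx_filtering };

	demo_port_vlan_on(&ops);
	ops.dis_rx_filtering(NULL);	/* no output: masked by the no-op */

	demo_port_vlan_off(&ops);
	ops.dis_rx_filtering(NULL);	/* prints: real handler restored */

	return 0;
}

Returning 0 from the stub, as noop_vlan appears to do here, keeps callers that toggle filtering unconditionally from treating the masked operation as an error, and swapping the pointer back in the off path avoids per-call state checks.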
@@ -77,6 +81,8 @@ static void ice_port_vlan_off(struct ice_vsi *vsi) vlan_ops->del_vlan = ice_vsi_del_vlan; } + vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering; + if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags)) vlan_ops->ena_rx_filtering = noop_vlan; else @@ -141,7 +147,6 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) &vsi->outer_vlan_ops : &vsi->inner_vlan_ops; vlan_ops->add_vlan = ice_vsi_add_vlan; - vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering; vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering; vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index cdf17b1e2f25..1c7b4ded948b 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -1523,7 +1523,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) u16 num_q_vectors_mapped, vsi_id, vector_id; struct virtchnl_irq_map_info *irqmap_info; struct virtchnl_vector_map *map; - struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int i; @@ -1535,7 +1534,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) * there is actually at least a single VF queue vector mapped */ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || - pf->vfs.num_msix_per < num_q_vectors_mapped || + vf->num_msix < num_q_vectors_mapped || !num_q_vectors_mapped) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -1557,7 +1556,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) /* vector_id is always 0-based for each VF, and can never be * larger than or equal to the max allowed interrupts per VF */ - if (!(vector_id < pf->vfs.num_msix_per) || + if (!(vector_id < vf->num_msix) || !ice_vc_isvalid_vsi_id(vf, vsi_id) || (!vector_id && (map->rxq_map || map->txq_map))) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1603,9 +1602,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; struct ice_pf *pf = vf->pf; + struct ice_lag *lag; struct ice_vsi *vsi; + u8 act_prt, pri_prt; int i = -1, q_idx; + lag = pf->lag; + mutex_lock(&pf->lag_mutex); + act_prt = ICE_LAG_INVALID_PORT; + pri_prt = pf->hw.port_info->lport; + if (lag && lag->bonded && lag->primary) { + act_prt = lag->active_port; + if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT && + lag->upper_netdev) + ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt); + else + act_prt = ICE_LAG_INVALID_PORT; + } + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) goto error_param; @@ -1729,6 +1743,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) } } + if (lag && lag->bonded && lag->primary && + act_prt != ICE_LAG_INVALID_PORT) + ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); + mutex_unlock(&pf->lag_mutex); + /* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, VIRTCHNL_STATUS_SUCCESS, NULL, 0); @@ -1743,6 +1762,11 @@ error_param: vf->vf_id, i); } + if (lag && lag->bonded && lag->primary && + act_prt != ICE_LAG_INVALID_PORT) + ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); + mutex_unlock(&pf->lag_mutex); + ice_lag_move_new_vf_nodes(vf); /* send the response to the VF */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index 81288a17da2a..20c4b3a64710 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ 
b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -1044,7 +1044,6 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) } idpf_rx_sync_for_cpu(rx_buf, fields.size); - skb = rx_q->skb; if (skb) idpf_rx_add_frag(rx_buf, skb, fields.size); else diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 1f728a9004d9..9e942e5baf39 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -396,7 +396,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) if (!rxq) return; - if (!bufq && idpf_is_queue_model_split(q_model) && rxq->skb) { + if (rxq->skb) { dev_kfree_skb_any(rxq->skb); rxq->skb = NULL; } diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h index 07e72c72d156..8dc837889723 100644 --- a/drivers/net/ethernet/intel/idpf/virtchnl2.h +++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h @@ -1104,9 +1104,9 @@ struct virtchnl2_rss_key { __le32 vport_id; __le16 key_len; u8 pad; - __DECLARE_FLEX_ARRAY(u8, key_flex); -}; -VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rss_key); + u8 key_flex[]; +} __packed; +VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key); /** * struct virtchnl2_queue_chunk - chunk of contiguous queues diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index f48f82d5e274..85cc16396506 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -568,6 +568,7 @@ struct igc_nfc_filter { u16 etype; __be16 vlan_etype; u16 vlan_tci; + u16 vlan_tci_mask; u8 src_addr[ETH_ALEN]; u8 dst_addr[ETH_ALEN]; u8 user_data[8]; diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 785eaa8e0ba8..859b2636f3d9 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -958,6 +958,7 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev, } #define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +#define VLAN_TCI_FULL_MASK ((__force __be16)~0) static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, struct ethtool_rxnfc *cmd) { @@ -980,10 +981,16 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; } + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_etype = rule->filter.vlan_etype; + fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK; + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { fsp->flow_type |= FLOW_EXT; fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci); - fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + fsp->m_ext.vlan_tci = htons(rule->filter.vlan_tci_mask); } if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { @@ -1218,6 +1225,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci); + rule->filter.vlan_tci_mask = ntohs(fsp->m_ext.vlan_tci); rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; } @@ -1255,11 +1263,19 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data)); } - /* When multiple filter options or user data or vlan etype is set, use a - * flex filter. + /* The i225/i226 supports several different filters. 
Flex filters provide a + way to match up to the first 128 bytes of a packet. Use them for: + a) specific user data + b) the VLAN EtherType + c) a full TCI match + d) multiple combined filter criteria + + * Otherwise, use the simple MAC, VLAN PRIO or EtherType filters. */ if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) || (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) || + ((rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) && + rule->filter.vlan_tci_mask == ntohs(VLAN_TCI_FULL_MASK)) || (rule->filter.match_flags & (rule->filter.match_flags - 1))) rule->flex = true; else @@ -1329,6 +1345,26 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, return -EINVAL; } + /* There are two ways to match the VLAN TCI: + * 1. Match on the PCP field and use the vlan prio filter for it + * 2. Match on the complete TCI field and use a flex filter for it + */ + if ((fsp->flow_type & FLOW_EXT) && + fsp->m_ext.vlan_tci && + fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK) && + fsp->m_ext.vlan_tci != VLAN_TCI_FULL_MASK) { + netdev_dbg(netdev, "VLAN mask not supported\n"); + return -EOPNOTSUPP; + } + + /* VLAN EtherType can only be matched by full mask. */ + if ((fsp->flow_type & FLOW_EXT) && + fsp->m_ext.vlan_etype && + fsp->m_ext.vlan_etype != ETHER_TYPE_FULL_MASK) { + netdev_dbg(netdev, "VLAN EtherType mask not supported\n"); + return -EOPNOTSUPP; + } + if (fsp->location >= IGC_MAX_RXNFC_RULES) { netdev_dbg(netdev, "Invalid location\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index a9c08321aca9..22cefb1eeedf 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -227,7 +227,7 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) wr32(IGC_TQAVCC(i), tqavcc); wr32(IGC_TQAVHC(i), - 0x80000000 + ring->hicredit * 0x7735); + 0x80000000 + ring->hicredit * 0x7736); } else { /* Disable any CBS for the queue */ txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 90817136808d..29aac327574d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4790,14 +4790,17 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) { + struct mvneta_port *pp = netdev_priv(netdev); int i; for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) memcpy(data + i * ETH_GSTRING_LEN, mvneta_statistics[i].name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics); - page_pool_ethtool_stats_get_strings(data); + if (!pp->bm_priv) { + data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics); + page_pool_ethtool_stats_get_strings(data); + } } } @@ -4915,8 +4918,10 @@ static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data) struct page_pool_stats stats = {}; int i; - for (i = 0; i < rxq_number; i++) - page_pool_get_stats(pp->rxqs[i].page_pool, &stats); + for (i = 0; i < rxq_number; i++) { + if (pp->rxqs[i].page_pool) + page_pool_get_stats(pp->rxqs[i].page_pool, &stats); + } page_pool_ethtool_stats_get(data, &stats); } @@ -4932,14 +4937,21 @@ static void mvneta_ethtool_get_stats(struct net_device *dev, for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) *data++ = pp->ethtool_stats[i]; - mvneta_ethtool_pp_stats(pp, data); + if (!pp->bm_priv) + mvneta_ethtool_pp_stats(pp, data); } static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) {
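The VLAN TCI checks added to igc_ethtool_add_nfc_rule() above accept exactly two mask shapes: the PCP-only mask, served by the simple VLAN priority filter, and the full 16-bit mask, routed to a flex filter. A standalone restatement of that policy follows; classify_tci_mask() is a hypothetical helper, VLAN_PRIO_MASK (0xe000) is the real constant from <linux/if_vlan.h>, and 0xffff plays the role of the patch's VLAN_TCI_FULL_MASK:

#include <linux/types.h>
#include <linux/if_vlan.h>	/* VLAN_PRIO_MASK */

enum tci_match {
	TCI_MATCH_NONE,	/* empty mask: TCI is not matched at all */
	TCI_MATCH_PRIO,	/* PCP bits only: use the VLAN prio filter */
	TCI_MATCH_FLEX,	/* full TCI: needs a flex filter */
	TCI_MATCH_BAD,	/* any other partial mask: -EOPNOTSUPP */
};

static enum tci_match classify_tci_mask(u16 mask)
{
	if (!mask)
		return TCI_MATCH_NONE;
	if (mask == VLAN_PRIO_MASK)	/* 0xe000, the 3 PCP bits */
		return TCI_MATCH_PRIO;
	if (mask == 0xffff)		/* full TCI match */
		return TCI_MATCH_FLEX;
	return TCI_MATCH_BAD;
}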
- if (sset == ETH_SS_STATS) - return ARRAY_SIZE(mvneta_statistics) + - page_pool_ethtool_stats_get_count(); + if (sset == ETH_SS_STATS) { + int count = ARRAY_SIZE(mvneta_statistics); + struct mvneta_port *pp = netdev_priv(dev); + + if (!pp->bm_priv) + count += page_pool_ethtool_stats_get_count(); + + return count; + } return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 552970c7dec0..a9bdf3283a85 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1193,6 +1193,13 @@ int octep_device_setup(struct octep_device *oct) if (ret) return ret; + INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task); + INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task); + INIT_DELAYED_WORK(&oct->intr_poll_task, octep_intr_poll_task); + oct->poll_non_ioq_intr = true; + queue_delayed_work(octep_wq, &oct->intr_poll_task, + msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS)); + atomic_set(&oct->hb_miss_cnt, 0); INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task); @@ -1258,7 +1265,8 @@ static bool get_fw_ready_status(struct pci_dev *pdev) pci_read_config_byte(pdev, (pos + 8), &status); dev_info(&pdev->dev, "Firmware ready status = %u\n", status); - return status; +#define FW_STATUS_READY 1ULL + return status == FW_STATUS_READY; } return false; } @@ -1325,21 +1333,18 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_octep_config; } - octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID, - &octep_dev->conf->fw_info); + err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID, + &octep_dev->conf->fw_info); + if (err) { + dev_err(&pdev->dev, "Failed to get firmware info\n"); + goto register_dev_err; + } dev_info(&octep_dev->pdev->dev, "Heartbeat interval %u msecs Heartbeat miss count %u\n", octep_dev->conf->fw_info.hb_interval, octep_dev->conf->fw_info.hb_miss_count); queue_delayed_work(octep_wq, &octep_dev->hb_task, msecs_to_jiffies(octep_dev->conf->fw_info.hb_interval)); - INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task); - INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task); - INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task); - octep_dev->poll_non_ioq_intr = true; - queue_delayed_work(octep_wq, &octep_dev->intr_poll_task, - msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS)); - netdev->netdev_ops = &octep_netdev_ops; octep_set_ethtool_ops(netdev); netif_carrier_off(netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 6845556581c3..5df42634ceb8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -1945,7 +1945,7 @@ struct mcs_hw_info { u8 tcam_entries; /* RX/TX Tcam entries per mcs block */ u8 secy_entries; /* RX/TX SECY entries per mcs block */ u8 sc_entries; /* RX/TX SC CAM entries per mcs block */ - u8 sa_entries; /* PN table entries = SA entries */ + u16 sa_entries; /* PN table entries = SA entries */ u64 rsvd[16]; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c index c43f19dfbd74..c1775bd01c2b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c @@ -117,7 +117,7 @@ void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id reg = 
MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id); stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg); - reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id); + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(id); stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id); @@ -215,7 +215,7 @@ void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id); stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg); - reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id); + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(id); stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg); if (mcs->hw->mcs_blks > 1) { @@ -1219,6 +1219,17 @@ struct mcs *mcs_get_pdata(int mcs_id) return NULL; } +bool is_mcs_bypass(int mcs_id) +{ + struct mcs *mcs_dev; + + list_for_each_entry(mcs_dev, &mcs_list, mcs_list) { + if (mcs_dev->mcs_id == mcs_id) + return mcs_dev->bypass; + } + return true; +} + void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req) { u64 val = 0; @@ -1436,7 +1447,7 @@ static int mcs_x2p_calibration(struct mcs *mcs) return err; } -static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass) +static void mcs_set_external_bypass(struct mcs *mcs, bool bypass) { u64 val; @@ -1447,6 +1458,7 @@ static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass) else val &= ~BIT_ULL(6); mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val); + mcs->bypass = bypass; } static void mcs_global_cfg(struct mcs *mcs) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h index 0f89dcb76465..f927cc61dfd2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h @@ -149,6 +149,7 @@ struct mcs { u16 num_vec; void *rvu; u16 *tx_sa_active; + bool bypass; }; struct mcs_ops { @@ -206,6 +207,7 @@ void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req * int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc); int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req); int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req); +bool is_mcs_bypass(int mcs_id); /* CN10K-B APIs */ void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h index f3ab01fc363c..f4c6de89002c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h @@ -810,14 +810,37 @@ offset = 0x9d8ull; \ offset; }) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(a) ({ \ + u64 offset; \ + \ + offset = 0xee80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xe818ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) ({ \ + u64 offset; \ + \ + offset = 0xa680ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xd018ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) ({ \ + u64 offset; \ + \ + offset = 0xf680ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xe018ull; \ + offset += (a) * 0x8ull; \ + offset; }) + #define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull) #define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull) -#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a) (0xa680ull + (a) * 0x8ull) #define 
MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218 + (a) * 0x8ull) -#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) (0xd018ull + (a) * 0x8ull) -#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a) (0xee80ull + (a) * 0x8ull) #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull) -#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull) #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull) #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull) #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index ab3e39eef2eb..8c0732c9a7ee 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -528,7 +528,7 @@ struct npc_lt_def { u8 ltype_mask; u8 ltype_match; u8 lid; -}; +} __packed; struct npc_lt_def_ipsec { u8 ltype_mask; @@ -536,7 +536,7 @@ struct npc_lt_def_ipsec { u8 lid; u8 spi_offset; u8 spi_nz; -}; +} __packed; struct npc_lt_def_apad { u8 ltype_mask; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index af21e2030cff..4728ba34b0e3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -373,6 +373,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable) cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + /* Disable forward pause to driver */ + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + /* Enable channel mask for all LMACS */ if (is_dev_rpm2(rpm)) rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff); @@ -616,12 +621,10 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p if (rx_pause) { cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE | - RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE | - RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD); + RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE); } else { cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE | - RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE | - RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD); + RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE); } if (tx_pause) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 22c395c7d040..731bb82b577c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2631,6 +2631,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) rvu_npc_free_mcam_entries(rvu, pcifunc, -1); rvu_mac_reset(rvu, pcifunc); + if (rvu->mcs_blk_cnt) + rvu_mcs_flr_handler(rvu, pcifunc); + mutex_unlock(&rvu->flr_lock); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index c4d999ef5ab4..8802961b8889 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -345,6 +345,7 @@ struct nix_hw { struct nix_txvlan txvlan; struct nix_ipolicer *ipolicer; u64 *tx_credits; + u8 cc_mcs_cnt; }; /* RVU block's capabilities or functionality, @@ -904,6 +905,7 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu); void 
*rvu_first_cgx_pdata(struct rvu *rvu); int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id); int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable); +int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable); int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause, u16 pfc_en); int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 15a319684ed3..38acdc7a73bb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -465,6 +465,23 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start); } +int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable) +{ + int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; + u8 cgx_id, lmac_id; + void *cgxd; + + if (!is_cgx_config_permitted(rvu, pcifunc)) + return LMAC_AF_ERR_PERM_DENIED; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + mac_ops = get_mac_ops(cgxd); + + return mac_ops->mac_tx_enable(cgxd, lmac_id, enable); +} + int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable) { struct mac_ops *mac_ops; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index c70932625d0d..21b5d71c1e37 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -538,7 +538,7 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl) rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq"); if (!rvu_dl->devlink_wq) - goto err; + return -ENOMEM; INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work); INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work); @@ -546,9 +546,6 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl) INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work); return 0; -err: - rvu_nix_health_reporters_destroy(rvu_dl); - return -ENOMEM; } static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl) @@ -1087,7 +1084,7 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl) rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq"); if (!rvu_dl->devlink_wq) - goto err; + return -ENOMEM; INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work); INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work); @@ -1095,9 +1092,6 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl) INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work); return 0; -err: - rvu_npa_health_reporters_destroy(rvu_dl); - return -ENOMEM; } static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 23c2f2ed2fb8..58744313f0eb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -12,6 +12,7 @@ #include "rvu_reg.h" #include "rvu.h" #include "npc.h" +#include "mcs.h" #include "cgx.h" #include "lmac_common.h" #include "rvu_npc_hash.h" @@ -4142,90 +4143,18 @@ static void nix_find_link_frs(struct rvu *rvu, req->minlen = minlen; } -static int -nix_config_link_credits(struct rvu *rvu, int blkaddr, int link, - u16 pcifunc, u64 tx_credits) -{ - struct rvu_hwinfo *hw = rvu->hw; - int pf = 
rvu_get_pf(pcifunc); - u8 cgx_id = 0, lmac_id = 0; - unsigned long poll_tmo; - bool restore_tx_en = 0; - struct nix_hw *nix_hw; - u64 cfg, sw_xoff = 0; - u32 schq = 0; - u32 credits; - int rc; - - nix_hw = get_nix_hw(rvu->hw, blkaddr); - if (!nix_hw) - return NIX_AF_ERR_INVALID_NIXBLK; - - if (tx_credits == nix_hw->tx_credits[link]) - return 0; - - /* Enable cgx tx if disabled for credits to be back */ - if (is_pf_cgxmapped(rvu, pf)) { - rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); - restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), - lmac_id, true); - } - - mutex_lock(&rvu->rsrc_lock); - /* Disable new traffic to link */ - if (hw->cap.nix_shaping) { - schq = nix_get_tx_link(rvu, pcifunc); - sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)); - rvu_write64(rvu, blkaddr, - NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0)); - } - - rc = NIX_AF_ERR_LINK_CREDITS; - poll_tmo = jiffies + usecs_to_jiffies(200000); - /* Wait for credits to return */ - do { - if (time_after(jiffies, poll_tmo)) - goto exit; - usleep_range(100, 200); - - cfg = rvu_read64(rvu, blkaddr, - NIX_AF_TX_LINKX_NORM_CREDIT(link)); - credits = (cfg >> 12) & 0xFFFFFULL; - } while (credits != nix_hw->tx_credits[link]); - - cfg &= ~(0xFFFFFULL << 12); - cfg |= (tx_credits << 12); - rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); - rc = 0; - - nix_hw->tx_credits[link] = tx_credits; - -exit: - /* Enable traffic back */ - if (hw->cap.nix_shaping && !sw_xoff) - rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0); - - /* Restore state of cgx tx */ - if (restore_tx_en) - rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); - - mutex_unlock(&rvu->rsrc_lock); - return rc; -} - int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; int pf = rvu_get_pf(pcifunc); - int blkaddr, schq, link = -1; - struct nix_txsch *txsch; - u64 cfg, lmac_fifo_len; + int blkaddr, link = -1; struct nix_hw *nix_hw; struct rvu_pfvf *pfvf; u8 cgx = 0, lmac = 0; u16 max_mtu; + u64 cfg; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -4246,25 +4175,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) return NIX_AF_ERR_FRS_INVALID; - /* Check if requester wants to update SMQ's */ - if (!req->update_smq) - goto rx_frscfg; - - /* Update min/maxlen in each of the SMQ attached to this PF/VF */ - txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; - mutex_lock(&rvu->rsrc_lock); - for (schq = 0; schq < txsch->schq.max; schq++) { - if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) - continue; - cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); - cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); - if (req->update_minlen) - cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); - rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); - } - mutex_unlock(&rvu->rsrc_lock); - -rx_frscfg: /* Check if config is for SDP link */ if (req->sdp_link) { if (!hw->sdp_links) @@ -4287,7 +4197,6 @@ rx_frscfg: if (link < 0) return NIX_AF_ERR_RX_LINK_INVALID; - linkcfg: nix_find_link_frs(rvu, req, pcifunc); @@ -4297,19 +4206,7 @@ linkcfg: cfg = (cfg & ~0xFFFFULL) | req->minlen; rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); - if (req->sdp_link || pf == 0) - return 0; - - /* Update transmit credits for CGX links */ - lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac); - if 
(!lmac_fifo_len) { - dev_err(rvu->dev, - "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", - __func__, cgx, lmac); - return 0; - } - return nix_config_link_credits(rvu, blkaddr, link, pcifunc, - (lmac_fifo_len - req->maxlen) / 16); + return 0; } int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, @@ -4389,6 +4286,12 @@ static void nix_link_config(struct rvu *rvu, int blkaddr, SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); } + /* Get MCS external bypass status for CN10K-B */ + if (mcs_get_blkcnt() == 1) { + /* Adjust for 2 credits when external bypass is disabled */ + nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2; + } + /* Set credits for Tx links assuming max packet length allowed. * This will be reconfigured based on MTU set for PF/VF. */ @@ -4412,6 +4315,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr, tx_credits = (lmac_fifo_len - lmac_max_frs) / 16; /* Enable credits and set credit pkt count to max allowed */ cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); + cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt); link = iter + slink; nix_hw->tx_credits[link] = tx_credits; @@ -4833,7 +4737,13 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, pfvf = rvu_get_pfvf(rvu, pcifunc); clear_bit(NIXLF_INITIALIZED, &pfvf->flags); - return rvu_cgx_start_stop_io(rvu, pcifunc, false); + err = rvu_cgx_start_stop_io(rvu, pcifunc, false); + if (err) + return err; + + rvu_cgx_tx_enable(rvu, pcifunc, true); + + return 0; } #define RX_SA_BASE GENMASK_ULL(52, 7) @@ -5505,6 +5415,8 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, ipolicer = &nix_hw->ipolicer[layer]; for (idx = 0; idx < req->prof_count[layer]; idx++) { + if (idx == MAX_BANDPROF_PER_PFFUNC) + break; prof_idx = req->prof_idx[layer][idx]; if (prof_idx >= ipolicer->band_prof.max || ipolicer->pfvf_map[prof_idx] != pcifunc) @@ -5518,8 +5430,6 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, ipolicer->pfvf_map[prof_idx] = 0x00; ipolicer->match_id[prof_idx] = 0; rvu_free_rsrc(&ipolicer->band_prof, prof_idx); - if (idx == MAX_BANDPROF_PER_PFFUNC) - break; } } mutex_unlock(&rvu->rsrc_lock); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 16cfc802e348..0bcf3e559280 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -389,7 +389,13 @@ static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam, int bank, nixlf, index; /* get ucast entry rule entry index */ - nix_get_nixlf(rvu, pf_func, &nixlf, NULL); + if (nix_get_nixlf(rvu, pf_func, &nixlf, NULL)) { + dev_err(rvu->dev, "%s: nixlf not attached to pcifunc:0x%x\n", + __func__, pf_func); + /* Action 0 is drop */ + return 0; + } + index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf, NIXLF_UCAST_ENTRY); bank = npc_get_bank(mcam, index); @@ -665,6 +671,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, int blkaddr, ucast_idx, index; struct nix_rx_action action = { 0 }; u64 relaxed_mask; + u8 flow_key_alg; if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc)) return; @@ -695,6 +702,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, action.op = NIX_RX_ACTIONOP_UCAST; } + flow_key_alg = action.flow_key_alg; + /* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list && is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { @@ -734,7 +743,7 @@ void rvu_npc_install_promisc_entry(struct 
rvu *rvu, u16 pcifunc, req.vf = pcifunc; req.index = action.index; req.match_id = action.match_id; - req.flow_key_alg = action.flow_key_alg; + req.flow_key_alg = flow_key_alg; rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } @@ -848,6 +857,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u8 mac_addr[ETH_ALEN] = { 0 }; struct nix_rx_action action = { 0 }; struct rvu_pfvf *pfvf; + u8 flow_key_alg; u16 vf_func; /* Only CGX PF/VF can add allmulticast entry */ @@ -882,6 +892,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, *(u64 *)&action = npc_get_mcam_action(rvu, mcam, blkaddr, ucast_idx); + flow_key_alg = action.flow_key_alg; if (action.op != NIX_RX_ACTIONOP_RSS) { *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_UCAST; @@ -918,7 +929,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, req.vf = pcifunc | vf_func; req.index = action.index; req.match_id = action.match_id; - req.flow_key_alg = action.flow_key_alg; + req.flow_key_alg = flow_key_alg; rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } @@ -984,11 +995,38 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam, mutex_unlock(&mcam->lock); } +static void npc_update_rx_action_with_alg_idx(struct rvu *rvu, struct nix_rx_action action, + struct rvu_pfvf *pfvf, int mcam_index, int blkaddr, + int alg_idx) + +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_hwinfo *hw = rvu->hw; + int bank, op_rss; + + if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_index)) + return; + + op_rss = (!hw->cap.nix_rx_multicast || !pfvf->use_mce_list); + + bank = npc_get_bank(mcam, mcam_index); + mcam_index &= (mcam->banksize - 1); + + /* If Rx action is MCAST update only RSS algorithm index */ + if (!op_rss) { + *(u64 *)&action = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank)); + + action.flow_key_alg = alg_idx; + } + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank), *(u64 *)&action); +} + void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, int group, int alg_idx, int mcam_index) { struct npc_mcam *mcam = &rvu->hw->mcam; - struct rvu_hwinfo *hw = rvu->hw; struct nix_rx_action action; int blkaddr, index, bank; struct rvu_pfvf *pfvf; @@ -1044,15 +1082,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, /* If PF's promiscuous entry is enabled, * Set RSS action for that entry as well */ - if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) && - is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { - bank = npc_get_bank(mcam, index); - index &= (mcam->banksize - 1); + npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr, + alg_idx); - rvu_write64(rvu, blkaddr, - NPC_AF_MCAMEX_BANKX_ACTION(index, bank), - *(u64 *)&action); - } + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_ALLMULTI_ENTRY); + /* If PF's allmulti entry is enabled, + * Set RSS action for that entry as well + */ + npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr, + alg_idx); } void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c index b3150f053291..d46ac29adb96 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c @@ -31,8 +31,8 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = { {NIX_TXSCH_LVL_TL4, 3, 
0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18}, {0x1200, 0x12E0} } }, {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608}, - {0x1610, 0x1618}, {0x1700, 0x17B0} } }, - {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } }, + {0x1610, 0x1618}, {0x1700, 0x17C8} } }, + {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17C8} } }, {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } }, }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index b42e631e52d0..18c1c9f361cc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -437,6 +437,7 @@ #define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0) #define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16) +#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32) /* SSO */ #define SSO_AF_CONST (0x1000) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index a4a258da8dd5..c1c99d7054f8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -450,6 +450,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile, aq->prof.pebs_mantissa = 0; aq->prof_mask.pebs_mantissa = 0xFF; + aq->prof.hl_en = 0; + aq->prof_mask.hl_en = 1; + /* Fill AQ info */ aq->qidx = profile; aq->ctype = NIX_AQ_CTYPE_BANDPROF; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index e7c69b57147e..06910307085e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -1070,6 +1070,8 @@ int otx2_init_tc(struct otx2_nic *nic); void otx2_shutdown_tc(struct otx2_nic *nic); int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); +void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic); + /* CGX/RPM DMAC filters support */ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf); int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c index bfddbff7bcdf..28fb643d2917 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c @@ -399,9 +399,10 @@ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct otx2_nic *pfvf = netdev_priv(dev); + u8 old_pfc_en; int err; - /* Save PFC configuration to interface */ + old_pfc_en = pfvf->pfc_en; pfvf->pfc_en = pfc->pfc_en; if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX) @@ -411,13 +412,17 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) * supported by the tx queue configuration */ err = otx2_check_pfc_config(pfvf); - if (err) + if (err) { + pfvf->pfc_en = old_pfc_en; return err; + } process_pfc: err = otx2_config_priority_flow_ctrl(pfvf); - if (err) + if (err) { + pfvf->pfc_en = old_pfc_en; return err; + } /* Request Per channel Bpids */ if (pfc->pfc_en) @@ -425,6 +430,12 @@ process_pfc: err = otx2_pfc_txschq_update(pfvf); if (err) { + if (pfc->pfc_en) + otx2_nix_config_bp(pfvf, false); + + otx2_pfc_txschq_stop(pfvf); + pfvf->pfc_en = old_pfc_en; + otx2_config_priority_flow_ctrl(pfvf); dev_err(pfvf->dev, "%s failed 
to update TX schedulers\n", __func__); return err; } } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 9efcec549834..53f6258a973c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -334,9 +334,12 @@ static void otx2_get_pauseparam(struct net_device *netdev, if (is_otx2_lbkvf(pfvf->pdev)) return; + mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); - if (!req) + if (!req) { + mutex_unlock(&pfvf->mbox.lock); return; + } if (!otx2_sync_mbox_msg(&pfvf->mbox)) { rsp = (struct cgx_pause_frm_cfg *) @@ -344,6 +347,7 @@ static void otx2_get_pauseparam(struct net_device *netdev, pause->rx_pause = rsp->rx_pause; pause->tx_pause = rsp->tx_pause; } + mutex_unlock(&pfvf->mbox.lock); } static int otx2_set_pauseparam(struct net_device *netdev, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 4762dbea64a1..97a71e9b8563 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -1088,6 +1088,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) struct ethhdr *eth_hdr; bool new = false; int err = 0; + u64 vf_num; u32 ring; if (!flow_cfg->max_flows) { @@ -1100,7 +1101,21 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) return -ENOMEM; - if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) + /* The number of queues on a VF can be greater or less than + * the number on its PF, so there is no need to check the queue + * count when the PF is installing a rule for one of its VFs. + * Below is the expected vf_num value based on the ethtool + * command. + * + * e.g. + * 1. ethtool -U <netdev> ... action -1 ==> vf_num:255 + * 2. ethtool -U <netdev> ... action <queue_num> ==> vf_num:0 + * 3. ethtool -U <netdev> ... 
vf <vf_idx> queue <queue_num> ==> + * vf_num:vf_idx+1 + */ + vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + if (!is_otx2_vf(pfvf->pcifunc) && !vf_num && + ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) return -EINVAL; if (fsp->location >= otx2_get_maxflows(flow_cfg)) @@ -1182,6 +1197,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) flow_cfg->nr_flows++; } + if (flow->is_vf) + netdev_info(pfvf->netdev, + "Make sure that VF's queue number is within its queue limit\n"); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 91b99fd70361..a57455aebff6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -566,7 +566,9 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr); otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr, TYPE_PFVF); - vfs -= 64; + if (intr) + trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); + vfs = 64; } intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0)); @@ -574,7 +576,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF); - trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); + if (intr) + trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); return IRQ_HANDLED; } @@ -1647,6 +1650,21 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) mutex_unlock(&mbox->lock); } +static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf) +{ + int vf; + + /* The AF driver will determine whether to allow the VF netdev or not */ + if (is_otx2_vf(pfvf->pcifunc)) + return true; + + /* check if there are any trusted VFs associated with the PF netdev */ + for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++) + if (pfvf->vf_configs[vf].trusted) + return true; + return false; +} + static void otx2_do_set_rx_mode(struct otx2_nic *pf) { struct net_device *netdev = pf->netdev; @@ -1679,12 +1697,21 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf) if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) req->mode |= NIX_RX_MODE_ALLMULTI; - req->mode |= NIX_RX_MODE_USE_MCE; + if (otx2_promisc_use_mce_list(pf)) + req->mode |= NIX_RX_MODE_USE_MCE; otx2_sync_mbox_msg(&pf->mbox); mutex_unlock(&pf->mbox.lock); } +static void otx2_set_irq_coalesce(struct otx2_nic *pfvf) +{ + int cint; + + for (cint = 0; cint < pfvf->hw.cint_cnt; cint++) + otx2_config_irq_coalescing(pfvf, cint); +} + static void otx2_dim_work(struct work_struct *w) { struct dim_cq_moder cur_moder; @@ -1700,6 +1727,7 @@ static void otx2_dim_work(struct work_struct *w) CQ_TIMER_THRESH_MAX : cur_moder.usec; pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ? 
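The vf_num logic in otx2_add_flow() above relies on the uapi ethtool layout of fsp->ring_cookie: the low 32 bits hold the queue index, bits 39:32 hold a VF number biased by one (zero means the rule targets the PF itself), and an all-ones cookie (RX_CLS_FLOW_DISC) means drop. A small sketch of decoding a cookie with the stock helpers from <linux/ethtool.h>; decode_ring_cookie() itself is hypothetical:

#include <linux/ethtool.h>	/* ethtool_get_flow_spec_ring{,_vf}() */
#include <linux/printk.h>

static void decode_ring_cookie(u64 ring_cookie)
{
	u64 ring = ethtool_get_flow_spec_ring(ring_cookie);
	u64 vf_num = ethtool_get_flow_spec_ring_vf(ring_cookie);

	if (ring_cookie == RX_CLS_FLOW_DISC) {
		pr_info("drop rule\n");
		return;
	}

	if (vf_num)	/* stored biased by one */
		pr_info("VF %llu, queue %llu\n", vf_num - 1, ring);
	else		/* vf_num == 0: the PF's own queues */
		pr_info("PF queue %llu\n", ring);
}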
NAPI_POLL_WEIGHT : cur_moder.pkts; + otx2_set_irq_coalesce(pfvf); dim->state = DIM_START_MEASURE; } @@ -1870,6 +1898,8 @@ int otx2_open(struct net_device *netdev) if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) otx2_dmacflt_reinstall_flows(pf); + otx2_tc_apply_ingress_police_rules(pf); + err = otx2_rxtx_enable(pf, true); /* If a mbox communication error happens at this point then interface * will end up in a state such that it is in down state but hardware @@ -1934,6 +1964,8 @@ int otx2_stop(struct net_device *netdev) /* Clear RSS enable flag */ rss = &pf->hw.rss_info; rss->enable = false; + if (!netif_is_rxfh_configured(netdev)) + kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]); /* Cleanup Queue IRQ */ vec = pci_irq_vector(pf->pdev, @@ -2675,11 +2707,14 @@ static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf, pf->vf_configs[vf].trusted = enable; rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF); - if (rc) + if (rc) { pf->vf_configs[vf].trusted = !enable; - else + } else { netdev_info(pf->netdev, "VF %d is %strusted\n", vf, enable ? "" : "not "); + otx2_set_rx_mode(netdev); + } + return rc; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 8a5e3987a482..db1e0e0e812d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -47,6 +47,9 @@ struct otx2_tc_flow { bool is_act_police; u32 prio; struct npc_install_flow_req req; + u64 rate; + u32 burst; + bool is_pps; }; static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst, @@ -284,21 +287,10 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, return err; } -static int otx2_tc_act_set_police(struct otx2_nic *nic, - struct otx2_tc_flow *node, - struct flow_cls_offload *f, - u64 rate, u32 burst, u32 mark, - struct npc_install_flow_req *req, bool pps) +static int otx2_tc_act_set_hw_police(struct otx2_nic *nic, + struct otx2_tc_flow *node) { - struct netlink_ext_ack *extack = f->common.extack; - struct otx2_hw *hw = &nic->hw; - int rq_idx, rc; - - rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); - if (rq_idx >= hw->rx_queues) { - NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); - return -EINVAL; - } + int rc; mutex_lock(&nic->mbox.lock); @@ -308,23 +300,17 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic, return rc; } - rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps); + rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, + node->burst, node->rate, node->is_pps); if (rc) goto free_leaf; - rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true); + rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true); if (rc) goto free_leaf; mutex_unlock(&nic->mbox.lock); - req->match_id = mark & 0xFFFFULL; - req->index = rq_idx; - req->op = NIX_RX_ACTIONOP_UCAST; - set_bit(rq_idx, &nic->rq_bmap); - node->is_act_police = true; - node->rq = rq_idx; - return 0; free_leaf: @@ -336,6 +322,39 @@ free_leaf: return rc; } +static int otx2_tc_act_set_police(struct otx2_nic *nic, + struct otx2_tc_flow *node, + struct flow_cls_offload *f, + u64 rate, u32 burst, u32 mark, + struct npc_install_flow_req *req, bool pps) +{ + struct netlink_ext_ack *extack = f->common.extack; + struct otx2_hw *hw = &nic->hw; + int rq_idx, rc; + + rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); + if (rq_idx >= hw->rx_queues) { + NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); + return -EINVAL; + } + 
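The otx2_tc changes in this region split ingress policing into software bookkeeping (rate, burst, rq and is_pps are now stored in the flow node) and hardware programming (otx2_tc_act_set_hw_police()), so that otx2_open() can replay the stored rules once receive queues exist again. A simplified sketch of that store-then-replay split, with stand-in types instead of the driver's real ones:

#include <linux/list.h>
#include <linux/types.h>

/* stand-in for the new fields added to struct otx2_tc_flow */
struct police_rule {
	u64 rate;
	u32 burst;
	u32 rq;		/* receive queue the leaf profile is mapped to */
	bool is_pps;	/* packets per second instead of bits per second */
	struct list_head list;
};

/* hardware step: in the driver this is otx2_tc_act_set_hw_police(),
 * i.e. leaf profile alloc + rate config + RQ-to-profile mapping
 */
static int hw_police_program(struct police_rule *rule)
{
	/* mailbox calls elided in this sketch */
	return 0;
}

/* replay step, called from open() once RQs are allocated again */
static void replay_police_rules(struct list_head *rules)
{
	struct police_rule *rule;

	list_for_each_entry(rule, rules, list)
		hw_police_program(rule);
}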
+ req->match_id = mark & 0xFFFFULL; + req->index = rq_idx; + req->op = NIX_RX_ACTIONOP_UCAST; + + node->is_act_police = true; + node->rq = rq_idx; + node->burst = burst; + node->rate = rate; + node->is_pps = pps; + + rc = otx2_tc_act_set_hw_police(nic, node); + if (!rc) + set_bit(rq_idx, &nic->rq_bmap); + + return rc; +} + static int otx2_tc_parse_actions(struct otx2_nic *nic, struct flow_action *flow_action, struct npc_install_flow_req *req, @@ -1044,6 +1063,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, } if (flow_node->is_act_police) { + __clear_bit(flow_node->rq, &nic->rq_bmap); + + if (nic->flags & OTX2_FLAG_INTF_DOWN) + goto free_mcam_flow; + mutex_lock(&nic->mbox.lock); err = cn10k_map_unmap_rq_policer(nic, flow_node->rq, @@ -1059,11 +1083,10 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, "Unable to free leaf bandwidth profile(%d)\n", flow_node->leaf_profile); - __clear_bit(flow_node->rq, &nic->rq_bmap); - mutex_unlock(&nic->mbox.lock); } +free_mcam_flow: otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL); otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false); kfree_rcu(flow_node, rcu); @@ -1083,6 +1106,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) return -ENOMEM; + if (nic->flags & OTX2_FLAG_INTF_DOWN) { + NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); + return -EINVAL; + } + if (flow_cfg->nr_flows == flow_cfg->max_flows) { NL_SET_ERR_MSG_MOD(extack, "Free MCAM entry not available to add the flow"); @@ -1442,3 +1470,45 @@ void otx2_shutdown_tc(struct otx2_nic *nic) otx2_destroy_tc_flow_list(nic); } EXPORT_SYMBOL(otx2_shutdown_tc); + +static void otx2_tc_config_ingress_rule(struct otx2_nic *nic, + struct otx2_tc_flow *node) +{ + struct npc_install_flow_req *req; + + if (otx2_tc_act_set_hw_police(nic, node)) + return; + + mutex_lock(&nic->mbox.lock); + + req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); + if (!req) + goto err; + + memcpy(req, &node->req, sizeof(struct npc_install_flow_req)); + + if (otx2_sync_mbox_msg(&nic->mbox)) + netdev_err(nic->netdev, + "Failed to install MCAM flow entry for ingress rule"); +err: + mutex_unlock(&nic->mbox.lock); +} + +void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic) +{ + struct otx2_flow_config *flow_cfg = nic->flow_cfg; + struct otx2_tc_flow *node; + + /* If any ingress policer rules exist for the interface then + * apply those rules. Ingress policer rules depend on bandwidth + * profiles linked to the receive queues. Since no receive queues + * exist when interface is down, ingress policer rules are stored + * and configured in hardware after all receive queues are allocated + * in otx2_open. 
+ */ + list_for_each_entry(node, &flow_cfg->flow_list_tc, list) { + if (node->is_act_police) + otx2_tc_config_ingress_rule(nic, node); + } +} +EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 6ee15f3c25ed..4d519ea833b2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -512,11 +512,18 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p { struct dim_sample dim_sample; u64 rx_frames, rx_bytes; + u64 tx_frames, tx_bytes; rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) + OTX2_GET_RX_STATS(RX_UCAST); rx_bytes = OTX2_GET_RX_STATS(RX_OCTS); - dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample); + tx_bytes = OTX2_GET_TX_STATS(TX_OCTS); + tx_frames = OTX2_GET_TX_STATS(TX_UCAST); + + dim_update_sample(pfvf->napi_events, + rx_frames + tx_frames, + rx_bytes + tx_bytes, + &dim_sample); net_dim(&cq_poll->dim, dim_sample); } @@ -558,16 +565,9 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) if (pfvf->flags & OTX2_FLAG_INTF_DOWN) return workdone; - /* Check for adaptive interrupt coalesce */ - if (workdone != 0 && - ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) == - OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) { - /* Adjust irq coalese using net_dim */ + /* Adjust irq coalescing using net_dim */ + if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) otx2_adjust_adaptive_coalese(pfvf, cq_poll); - /* Update irq coalescing */ - for (i = 0; i < pfvf->hw.cint_cnt; i++) - otx2_config_irq_coalescing(pfvf, i); - } if (unlikely(!filled_cnt)) { struct refill_work *work; diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c index 3bd51a3d6650..ae44ad5f8ce8 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c @@ -291,6 +291,9 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) for (i = 0; i < q->n_desc; i++) { struct mtk_wed_wo_queue_entry *entry = &q->entry[i]; + if (!entry->buf) + continue; + dma_unmap_single(wo->hw->dev, entry->addr, entry->len, DMA_TO_DEVICE); skb_free_frag(entry->buf); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index f8f0a712c943..a7b1f9686c09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -156,15 +156,18 @@ static u8 alloc_token(struct mlx5_cmd *cmd) return token; } -static int cmd_alloc_index(struct mlx5_cmd *cmd) +static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent) { unsigned long flags; int ret; spin_lock_irqsave(&cmd->alloc_lock, flags); ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds); - if (ret < cmd->vars.max_reg_cmds) + if (ret < cmd->vars.max_reg_cmds) { clear_bit(ret, &cmd->vars.bitmask); + ent->idx = ret; + cmd->ent_arr[ent->idx] = ent; + } spin_unlock_irqrestore(&cmd->alloc_lock, flags); return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM; @@ -979,7 +982,7 @@ static void cmd_work_handler(struct work_struct *work) sem = ent->page_queue ? 
&cmd->vars.pages_sem : &cmd->vars.sem; down(sem); if (!ent->page_queue) { - alloc_ret = cmd_alloc_index(cmd); + alloc_ret = cmd_alloc_index(cmd, ent); if (alloc_ret < 0) { mlx5_core_err_rl(dev, "failed to allocate command entry\n"); if (ent->callback) { @@ -994,15 +997,14 @@ static void cmd_work_handler(struct work_struct *work) up(sem); return; } - ent->idx = alloc_ret; } else { ent->idx = cmd->vars.max_reg_cmds; spin_lock_irqsave(&cmd->alloc_lock, flags); clear_bit(ent->idx, &cmd->vars.bitmask); + cmd->ent_arr[ent->idx] = ent; spin_unlock_irqrestore(&cmd->alloc_lock, flags); } - cmd->ent_arr[ent->idx] = ent; lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 76d27d2ee40c..080e7eab52c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -718,7 +718,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) while (block_timestamp > tracer->last_timestamp) { /* Check block override if it's not the first block */ - if (!tracer->last_timestamp) { + if (tracer->last_timestamp) { u64 *ts_event; /* To avoid block override be the HW in case of buffer * wraparound, the time stamp of the previous block diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index b2a5da9739d2..729a11b5fb25 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -826,6 +826,7 @@ enum { MLX5E_STATE_DESTROYING, MLX5E_STATE_XDP_TX_ENABLED, MLX5E_STATE_XDP_ACTIVE, + MLX5E_STATE_CHANNELS_ACTIVE, }; struct mlx5e_modify_sq_param { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c index be83ad9db82a..e1283531e0b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c @@ -154,6 +154,7 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type ty in = kvzalloc(inlen, GFP_KERNEL); if (!in || !ft->g) { kfree(ft->g); + ft->g = NULL; kvfree(in); return -ENOMEM; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index bb11e644d24f..af3928eddafd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -177,6 +177,8 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq, static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq, struct mlx5_cqe64 *cqe, + u8 *md_buff, + u8 *md_buff_sz, int budget) { struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list; @@ -211,19 +213,24 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq, mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp); out: napi_consume_skb(skb, budget); - mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id); + md_buff[*md_buff_sz++] = metadata_id; if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) && !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work); } -static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget) +static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget) { struct mlx5e_ptpsq *ptpsq = container_of(cq, 
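The cmd.c hunks above make command-slot allocation and publication atomic: ent->idx and the cmd->ent_arr[] slot are now written while alloc_lock is held (the dedicated page-queue slot likewise publishes under the lock), and the later unlocked ent_arr store is deleted. Without this, a completion arriving immediately after the doorbell could look up an ent_arr slot that does not yet point at its entry. A condensed sketch of the publish-under-lock pattern with simplified types:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

struct cmd_table {
	spinlock_t alloc_lock;
	unsigned long bitmask;		/* one bit per free slot */
	void *ent_arr[BITS_PER_LONG];	/* slot -> entry, read by the IRQ path */
	int max_cmds;
};

static int cmd_alloc_and_publish(struct cmd_table *cmd, void *ent)
{
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	idx = find_first_bit(&cmd->bitmask, cmd->max_cmds);
	if (idx < cmd->max_cmds) {
		clear_bit(idx, &cmd->bitmask);
		/* publish before unlock: a completion firing right after
		 * the doorbell must find a valid entry here
		 */
		cmd->ent_arr[idx] = ent;
	}
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return idx < cmd->max_cmds ? idx : -ENOMEM;
}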
struct mlx5e_ptpsq, ts_cq); - struct mlx5_cqwq *cqwq = &cq->wq; + int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET); + u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET]; + u8 metadata_buff_sz = 0; + struct mlx5_cqwq *cqwq; struct mlx5_cqe64 *cqe; int work_done = 0; + cqwq = &cq->wq; + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state))) return false; @@ -234,7 +241,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget) do { mlx5_cqwq_pop(cqwq); - mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget); + mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, + metadata_buff, &metadata_buff_sz, napi_budget); } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq))); mlx5_cqwq_update_db_record(cqwq); @@ -242,6 +250,10 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget) /* ensure cq space is freed before enabling more cqes */ wmb(); + while (metadata_buff_sz > 0) + mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, + metadata_buff[--metadata_buff_sz]); + mlx5e_txqsq_wake(&ptpsq->txqsq); return work_done == budget; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index fea8c0a5fe89..4358798d6ce1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -492,11 +492,11 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter, void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq) { - char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {}; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; struct mlx5e_icosq *icosq = rq->icosq; struct mlx5e_priv *priv = rq->priv; struct mlx5e_err_ctx err_ctx = {}; + char icosq_str[32] = {}; err_ctx.ctx = rq; err_ctx.recover = mlx5e_rx_reporter_timeout_recover; @@ -505,7 +505,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq) if (icosq) snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn); snprintf(err_str, sizeof(err_str), "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x", rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn); mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c index f63402c48028..1b418095b79a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c @@ -197,7 +197,7 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state, } esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP; esw_attr->out_count++; - /* attr->dests[].rep is resolved when we handle encap */ + /* attr->dests[].vport is resolved when we handle encap */ return 0; } @@ -270,7 +270,8 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state, out_priv = netdev_priv(out_dev); rpriv = out_priv->ppriv; - esw_attr->dests[esw_attr->out_count].rep = rpriv->rep; + esw_attr->dests[esw_attr->out_count].vport_valid = true; + esw_attr->dests[esw_attr->out_count].vport = rpriv->rep->vport; esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev; esw_attr->out_count++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c index 4e923a2874ae..86bf007fd05b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c @@ -83,6 +83,9 @@
mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act, struct mlx5_flow_spec *spec; int err; + if (IS_ERR(post_act)) + return PTR_ERR(post_act); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; @@ -111,6 +114,9 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *po struct mlx5e_post_act_handle *handle; int err; + if (IS_ERR(post_act)) + return ERR_CAST(post_act); + handle = kzalloc(sizeof(*handle), GFP_KERNEL); if (!handle) return ERR_PTR(-ENOMEM); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 00a04fdd756f..8dfb57f712b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -302,6 +302,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, e->encap_size = ipv4_encap_size; e->encap_header = encap_header; + encap_header = NULL; if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); @@ -313,8 +314,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, memset(&reformat_params, 0, sizeof(reformat_params)); reformat_params.type = e->reformat_type; - reformat_params.size = ipv4_encap_size; - reformat_params.data = encap_header; + reformat_params.size = e->encap_size; + reformat_params.data = e->encap_header; e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(e->pkt_reformat)) { @@ -407,6 +408,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv, e->encap_size = ipv4_encap_size; kfree(e->encap_header); e->encap_header = encap_header; + encap_header = NULL; if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); @@ -418,8 +420,8 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv, memset(&reformat_params, 0, sizeof(reformat_params)); reformat_params.type = e->reformat_type; - reformat_params.size = ipv4_encap_size; - reformat_params.data = encap_header; + reformat_params.size = e->encap_size; + reformat_params.data = e->encap_header; e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(e->pkt_reformat)) { @@ -570,6 +572,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, e->encap_size = ipv6_encap_size; e->encap_header = encap_header; + encap_header = NULL; if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); @@ -581,8 +584,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, memset(&reformat_params, 0, sizeof(reformat_params)); reformat_params.type = e->reformat_type; - reformat_params.size = ipv6_encap_size; - reformat_params.data = encap_header; + reformat_params.size = e->encap_size; + reformat_params.data = e->encap_header; e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(e->pkt_reformat)) { @@ -674,6 +677,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, e->encap_size = ipv6_encap_size; kfree(e->encap_header); e->encap_header = encap_header; + encap_header = NULL; if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); @@ -685,8 +689,8 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, memset(&reformat_params, 0, sizeof(reformat_params)); reformat_params.type = e->reformat_type; - reformat_params.size = ipv6_encap_size; - reformat_params.data = encap_header; + reformat_params.size = e->encap_size; + reformat_params.data = e->encap_header; e->pkt_reformat = 
mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(e->pkt_reformat)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index b10e40e1a9c1..f1d1e1542e81 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -1064,7 +1064,8 @@ int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv, out_priv = netdev_priv(encap_dev); rpriv = out_priv->ppriv; - esw_attr->dests[out_index].rep = rpriv->rep; + esw_attr->dests[out_index].vport_valid = true; + esw_attr->dests[out_index].vport = rpriv->rep->vport; esw_attr->dests[out_index].mdev = out_priv->mdev; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 7decc81ed33a..13c7ed1bb37e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -493,6 +493,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, dma_addr_t dma_addr = xdptxd->dma_addr; u32 dma_len = xdptxd->len; u16 ds_cnt, inline_hdr_sz; + unsigned int frags_size; u8 num_wqebbs = 1; int num_frags = 0; bool inline_ok; @@ -503,8 +504,9 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE || dma_len >= MLX5E_XDP_MIN_INLINE; + frags_size = xdptxd->has_frags ? xdptxdf->sinfo->xdp_frags_size : 0; - if (unlikely(!inline_ok || sq->hw_mtu < dma_len)) { + if (unlikely(!inline_ok || sq->hw_mtu < dma_len + frags_size)) { stats->err++; return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 655496598c68..161c5190c236 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -121,7 +121,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO) esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom)); - sa_entry->esn_state.esn = esn; + if (sa_entry->esn_state.esn_msb) + sa_entry->esn_state.esn = esn; + else + /* According to RFC4303, section "3.3.3. Sequence Number Generation", + * the first packet sent using a given SA will contain a sequence + * number of 1. 
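/* [Editor's aside -- not part of the patch; the RFC 4303 comment continues
 * below.] A minimal stand-alone sketch of the rule that comment describes,
 * assuming the same semantics as the hunk: while no high-order ESN bits have
 * been observed (esn_msb == 0), the low 32-bit sequence number is floored at
 * 1, because the first packet on a fresh SA carries sequence number 1; once
 * esn_msb is non-zero, a low value of 0 is a legitimate wraparound. The
 * helper name esn_low_init is invented for illustration only.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t esn_low_init(uint32_t esn_msb, uint32_t esn)
{
	if (esn_msb)
		return esn;		/* post-wrap: keep the reported value */
	return esn > 1 ? esn : 1;	/* fresh SA: mirrors max_t(u32, esn, 1) */
}

int main(void)
{
	assert(esn_low_init(0, 0) == 1);	/* new SA starts at 1 */
	assert(esn_low_init(0, 7) == 7);	/* already advanced: keep */
	assert(esn_low_init(1, 0) == 0);	/* MSB set: zero is valid */
	return 0;
}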
+ */ + sa_entry->esn_state.esn = max_t(u32, esn, 1); sa_entry->esn_state.esn_msb = esn_msb; if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) { @@ -335,6 +342,27 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, attrs->replay_esn.esn = sa_entry->esn_state.esn; attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb; attrs->replay_esn.overlap = sa_entry->esn_state.overlap; + switch (x->replay_esn->replay_window) { + case 32: + attrs->replay_esn.replay_window = + MLX5_IPSEC_ASO_REPLAY_WIN_32BIT; + break; + case 64: + attrs->replay_esn.replay_window = + MLX5_IPSEC_ASO_REPLAY_WIN_64BIT; + break; + case 128: + attrs->replay_esn.replay_window = + MLX5_IPSEC_ASO_REPLAY_WIN_128BIT; + break; + case 256: + attrs->replay_esn.replay_window = + MLX5_IPSEC_ASO_REPLAY_WIN_256BIT; + break; + default: + WARN_ON(true); + return; + } } attrs->dir = x->xso.dir; @@ -907,9 +935,11 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv) return; mlx5e_accel_ipsec_fs_cleanup(ipsec); - if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL) + if (ipsec->netevent_nb.notifier_call) { unregister_netevent_notifier(&ipsec->netevent_nb); - if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD) + ipsec->netevent_nb.notifier_call = NULL; + } + if (ipsec->aso) mlx5e_ipsec_aso_cleanup(ipsec); destroy_workqueue(ipsec->wq); kfree(ipsec); @@ -1018,6 +1048,12 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev, } } + if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET && + !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) { + NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported"); + return -EINVAL; + } + return 0; } @@ -1113,14 +1149,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = { .xdo_dev_state_free = mlx5e_xfrm_free_state, .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok, .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state, -}; - -static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = { - .xdo_dev_state_add = mlx5e_xfrm_add_state, - .xdo_dev_state_delete = mlx5e_xfrm_del_state, - .xdo_dev_state_free = mlx5e_xfrm_free_state, - .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok, - .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state, .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft, .xdo_dev_policy_add = mlx5e_xfrm_add_policy, @@ -1138,11 +1166,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n"); - if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD) - netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops; - else - netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops; - + netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops; netdev->features |= NETIF_F_HW_ESP; netdev->hw_enc_features |= NETIF_F_HW_ESP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 8f4a37bceaf4..adaea3493193 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -189,11 +189,19 @@ struct mlx5e_ipsec_ft { u32 refcnt; }; +struct mlx5e_ipsec_drop { + struct mlx5_flow_handle *rule; + struct mlx5_fc *fc; +}; + struct mlx5e_ipsec_rule { struct mlx5_flow_handle *rule; struct mlx5_modify_hdr *modify_hdr; struct mlx5_pkt_reformat *pkt_reformat; struct mlx5_fc *fc; + struct mlx5e_ipsec_drop replay; + struct mlx5e_ipsec_drop auth; + struct mlx5e_ipsec_drop trailer; }; struct mlx5e_ipsec_miss { @@ -201,19 
+209,6 @@ struct mlx5e_ipsec_miss { struct mlx5_flow_handle *rule; }; -struct mlx5e_ipsec_rx { - struct mlx5e_ipsec_ft ft; - struct mlx5e_ipsec_miss pol; - struct mlx5e_ipsec_miss sa; - struct mlx5e_ipsec_rule status; - struct mlx5e_ipsec_miss status_drop; - struct mlx5_fc *status_drop_cnt; - struct mlx5e_ipsec_fc *fc; - struct mlx5_fs_chains *chains; - u8 allow_tunnel_mode : 1; - struct xarray ipsec_obj_id_map; -}; - struct mlx5e_ipsec_tx_create_attr { int prio; int pol_level; @@ -248,6 +243,7 @@ struct mlx5e_ipsec { struct mlx5_ipsec_fs *roce; u8 is_uplink_rep: 1; struct mlx5e_ipsec_mpv_work mpv_work; + struct xarray ipsec_obj_id_map; }; struct mlx5e_ipsec_esn_state { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index f41c976dc33f..41a2543a52cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -32,6 +32,22 @@ struct mlx5e_ipsec_tx { u8 allow_tunnel_mode : 1; }; +struct mlx5e_ipsec_status_checks { + struct mlx5_flow_group *drop_all_group; + struct mlx5e_ipsec_drop all; +}; + +struct mlx5e_ipsec_rx { + struct mlx5e_ipsec_ft ft; + struct mlx5e_ipsec_miss pol; + struct mlx5e_ipsec_miss sa; + struct mlx5e_ipsec_rule status; + struct mlx5e_ipsec_status_checks status_drops; + struct mlx5e_ipsec_fc *fc; + struct mlx5_fs_chains *chains; + u8 allow_tunnel_mode : 1; +}; + /* IPsec RX flow steering */ static enum mlx5_traffic_types family2tt(u32 family) { @@ -128,14 +144,37 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns, return mlx5_create_auto_grouped_flow_table(ns, &ft_attr); } -static int ipsec_status_rule(struct mlx5_core_dev *mdev, - struct mlx5e_ipsec_rx *rx, - struct mlx5_flow_destination *dest) +static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx) +{ + mlx5_del_flow_rules(rx->status_drops.all.rule); + mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc); + mlx5_destroy_flow_group(rx->status_drops.drop_all_group); +} + +static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx) { - u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + mlx5_del_flow_rules(rx->status.rule); + + if (rx != ipsec->rx_esw) + return; + +#ifdef CONFIG_MLX5_ESWITCH + mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0); +#endif +} + +static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5e_ipsec_rx *rx) +{ + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + struct mlx5_flow_table *ft = rx->ft.status; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5_flow_destination dest = {}; struct mlx5_flow_act flow_act = {}; - struct mlx5_modify_hdr *modify_hdr; - struct mlx5_flow_handle *fte; + struct mlx5_flow_handle *rule; + struct mlx5_fc *flow_counter; struct mlx5_flow_spec *spec; int err; @@ -143,48 +182,273 @@ static int ipsec_status_rule(struct mlx5_core_dev *mdev, if (!spec) return -ENOMEM; - /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */ - MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY); - MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME); - MLX5_SET(copy_action_in, action, src_offset, 0); - MLX5_SET(copy_action_in, action, length, 7); - MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); - MLX5_SET(copy_action_in, action, dst_offset, 24); + flow_counter = 
mlx5_fc_create(mdev, true); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule counter, err=%d\n", err); + goto err_cnt; + } + sa_entry->ipsec_rule.auth.fc = flow_counter; - modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL, - 1, action); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + flow_act.flags = FLOW_ACT_NO_APPEND; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(flow_counter); + if (rx == ipsec->rx_esw) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; - if (IS_ERR(modify_hdr)) { - err = PTR_ERR(modify_hdr); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.metadata_reg_c_2, + sa_entry->ipsec_obj_id | BIT(31)); + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); mlx5_core_err(mdev, - "fail to alloc ipsec copy modify_header_id err=%d\n", err); - goto out_spec; + "Failed to add ipsec rx status drop rule, err=%d\n", err); + goto err_rule; } + sa_entry->ipsec_rule.auth.rule = rule; - /* create fte */ - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + flow_counter = mlx5_fc_create(mdev, true); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule counter, err=%d\n", err); + goto err_cnt_2; + } + sa_entry->ipsec_rule.trailer.fc = flow_counter; + + dest.counter_id = mlx5_fc_id(flow_counter); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2); + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule, err=%d\n", err); + goto err_rule_2; + } + sa_entry->ipsec_rule.trailer.rule = rule; + + kvfree(spec); + return 0; + +err_rule_2: + mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc); +err_cnt_2: + mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule); +err_rule: + mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc); +err_cnt: + kvfree(spec); + return err; +} + +static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx) +{ + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + struct mlx5_flow_table *ft = rx->ft.status; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_fc *flow_counter; + struct mlx5_flow_spec *spec; + int err; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + flow_counter = mlx5_fc_create(mdev, true); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule counter, err=%d\n", err); + goto err_cnt; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + flow_act.flags = FLOW_ACT_NO_APPEND; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(flow_counter); + if (rx == ipsec->rx_esw) + spec->flow_context.flow_source = 
MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2, + sa_entry->ipsec_obj_id | BIT(31)); + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule, err=%d\n", err); + goto err_rule; + } + + sa_entry->ipsec_rule.replay.rule = rule; + sa_entry->ipsec_rule.replay.fc = flow_counter; + + kvfree(spec); + return 0; + +err_rule: + mlx5_fc_destroy(mdev, flow_counter); +err_cnt: + kvfree(spec); + return err; +} + +static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_table *ft = rx->ft.status; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_fc *flow_counter; + struct mlx5_flow_spec *spec; + struct mlx5_flow_group *g; + u32 *flow_group_in; + int err = 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!flow_group_in || !spec) { + err = -ENOMEM; + goto err_out; + } + + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1); + g = mlx5_create_flow_group(ft, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop flow group, err=%d\n", err); + goto err_out; + } + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule counter, err=%d\n", err); + goto err_cnt; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(flow_counter); + if (rx == ipsec->rx_esw) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule, err=%d\n", err); + goto err_rule; + } + + rx->status_drops.drop_all_group = g; + rx->status_drops.all.rule = rule; + rx->status_drops.all.fc = flow_counter; + + kvfree(flow_group_in); + kvfree(spec); + return 0; + +err_rule: + mlx5_fc_destroy(mdev, flow_counter); +err_cnt: + mlx5_destroy_flow_group(g); +err_out: + kvfree(flow_group_in); + kvfree(spec); + return err; +} + +static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_2.ipsec_syndrome); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_4); + MLX5_SET(fte_match_param, 
spec->match_value, + misc_parameters_2.ipsec_syndrome, 0); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.metadata_reg_c_4, 0); + if (rx == ipsec->rx_esw) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + flow_act.flags = FLOW_ACT_NO_APPEND; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; - flow_act.modify_hdr = modify_hdr; - fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2); - if (IS_ERR(fte)) { - err = PTR_ERR(fte); - mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err); - goto out; + rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_warn(ipsec->mdev, + "Failed to add ipsec rx status pass rule, err=%d\n", err); + goto err_rule; } + rx->status.rule = rule; kvfree(spec); - rx->status.rule = fte; - rx->status.modify_hdr = modify_hdr; return 0; -out: - mlx5_modify_header_dealloc(mdev, modify_hdr); -out_spec: +err_rule: kvfree(spec); return err; } +static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx) +{ + ipsec_rx_status_pass_destroy(ipsec, rx); + ipsec_rx_status_drop_destroy(ipsec, rx); +} + +static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, + struct mlx5_flow_destination *dest) +{ + int err; + + err = ipsec_rx_status_drop_all_create(ipsec, rx); + if (err) + return err; + + err = ipsec_rx_status_pass_create(ipsec, rx, dest); + if (err) + goto err_pass_create; + + return 0; + +err_pass_create: + ipsec_rx_status_drop_destroy(ipsec, rx); + return err; +} + static int ipsec_miss_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *ft, struct mlx5e_ipsec_miss *miss, @@ -333,12 +597,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, mlx5_destroy_flow_table(rx->ft.sa); if (rx->allow_tunnel_mode) mlx5_eswitch_unblock_encap(mdev); - if (rx == ipsec->rx_esw) { - mlx5_esw_ipsec_rx_status_destroy(ipsec, rx); - } else { - mlx5_del_flow_rules(rx->status.rule); - mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr); - } + mlx5_ipsec_rx_status_destroy(ipsec, rx); mlx5_destroy_flow_table(rx->ft.status); mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev); @@ -419,7 +678,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, if (err) return err; - ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0); + ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0); if (IS_ERR(ft)) { err = PTR_ERR(ft); goto err_fs_ft_status; @@ -428,10 +687,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; dest[1].counter_id = mlx5_fc_id(rx->fc->cnt); - if (rx == ipsec->rx_esw) - err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest); - else - err = ipsec_status_rule(mdev, rx, dest); + err = mlx5_ipsec_rx_status_create(ipsec, rx, dest); if (err) goto err_add; @@ -956,13 +1212,22 @@ static void setup_fte_esp(struct mlx5_flow_spec *spec) MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP); } -static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi) +static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap) { /* SPI number */ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 
misc_parameters.outer_esp_spi); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi); + if (encap) { + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters.inner_esp_spi); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters.inner_esp_spi, spi); + } else { + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters.outer_esp_spi); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters.outer_esp_spi, spi); + } } static void setup_fte_no_frags(struct mlx5_flow_spec *spec) @@ -1052,29 +1317,48 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 struct mlx5_flow_act *flow_act) { enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir); - u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; struct mlx5_core_dev *mdev = ipsec->mdev; struct mlx5_modify_hdr *modify_hdr; + u8 num_of_actions = 1; - MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET); switch (dir) { case XFRM_DEV_OFFLOAD_IN: - MLX5_SET(set_action_in, action, field, + MLX5_SET(set_action_in, action[0], field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); + + num_of_actions++; + MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2); + MLX5_SET(set_action_in, action[1], data, val); + MLX5_SET(set_action_in, action[1], offset, 0); + MLX5_SET(set_action_in, action[1], length, 32); + + if (type == XFRM_DEV_OFFLOAD_CRYPTO) { + num_of_actions++; + MLX5_SET(set_action_in, action[2], action_type, + MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action[2], field, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_4); + MLX5_SET(set_action_in, action[2], data, 0); + MLX5_SET(set_action_in, action[2], offset, 0); + MLX5_SET(set_action_in, action[2], length, 32); + } break; case XFRM_DEV_OFFLOAD_OUT: - MLX5_SET(set_action_in, action, field, + MLX5_SET(set_action_in, action[0], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_4); break; default: return -EINVAL; } - MLX5_SET(set_action_in, action, data, val); - MLX5_SET(set_action_in, action, offset, 0); - MLX5_SET(set_action_in, action, length, 32); + MLX5_SET(set_action_in, action[0], data, val); + MLX5_SET(set_action_in, action[0], offset, 0); + MLX5_SET(set_action_in, action[0], length, 32); - modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action); + modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action); if (IS_ERR(modify_hdr)) { mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n", PTR_ERR(modify_hdr)); @@ -1321,8 +1605,9 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) else setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6); - setup_fte_spi(spec, attrs->spi); - setup_fte_esp(spec); + setup_fte_spi(spec, attrs->spi, attrs->encap); + if (!attrs->encap) + setup_fte_esp(spec); setup_fte_no_frags(spec); setup_fte_upper_proto_match(spec, &attrs->upspec); @@ -1372,6 +1657,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err); goto err_add_flow; } + if (attrs->type == XFRM_DEV_OFFLOAD_PACKET) + err = rx_add_rule_drop_replay(sa_entry, rx); + if (err) + goto err_add_replay; + + err = rx_add_rule_drop_auth_trailer(sa_entry, rx); + if (err) + goto err_drop_reason; + kvfree(spec); 
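/* [Editor's aside -- not part of the patch.] The auth/trailer drop-rule
 * additions above, and the err_rule_2/err_cnt_2/err_rule/err_cnt labels just
 * below, follow the kernel's goto-unwind idiom: each acquired resource
 * (counter, rule, counter, rule) gets a label, and a failure jumps to the
 * label that releases everything acquired so far, in reverse order. A
 * stand-alone sketch of the same control flow, with invented resource names
 * and a trivial acquire/release pair in place of the mlx5 calls:
 */
#include <stdbool.h>
#include <stdio.h>

static bool acquire(const char *name) { printf("acquire %s\n", name); return true; }
static void release(const char *name) { printf("release %s\n", name); }

static int setup_drop_rules(void)
{
	if (!acquire("auth counter"))
		goto err_cnt;
	if (!acquire("auth rule"))
		goto err_rule;
	if (!acquire("trailer counter"))
		goto err_cnt_2;
	if (!acquire("trailer rule"))
		goto err_rule_2;
	return 0;

err_rule_2:
	release("trailer counter");	/* fallthrough frees earlier resources */
err_cnt_2:
	release("auth rule");
err_rule:
	release("auth counter");
err_cnt:
	return -1;
}

int main(void)
{
	return setup_drop_rules() ? 1 : 0;
}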
sa_entry->ipsec_rule.rule = rule; @@ -1380,6 +1674,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat; return 0; +err_drop_reason: + if (sa_entry->ipsec_rule.replay.rule) { + mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule); + mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc); + } +err_add_replay: + mlx5_del_flow_rules(rule); err_add_flow: mlx5_fc_destroy(mdev, counter); err_add_cnt: @@ -1428,7 +1729,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) switch (attrs->type) { case XFRM_DEV_OFFLOAD_CRYPTO: - setup_fte_spi(spec, attrs->spi); + setup_fte_spi(spec, attrs->spi, false); setup_fte_esp(spec); setup_fte_reg_a(spec); break; @@ -1809,8 +2110,11 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev) struct mlx5_eswitch *esw = mdev->priv.eswitch; int err = 0; - if (esw) - down_write(&esw->mode_lock); + if (esw) { + err = mlx5_esw_lock(esw); + if (err) + return err; + } if (mdev->num_block_ipsec) { err = -EBUSY; @@ -1821,7 +2125,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev) unlock: if (esw) - up_write(&esw->mode_lock); + mlx5_esw_unlock(esw); return err; } @@ -1838,7 +2142,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev) static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev) { - mdev->num_block_tc++; + mdev->num_block_tc--; } int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) @@ -1887,6 +2191,17 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry) if (ipsec_rule->modify_hdr) mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr); + + mlx5_del_flow_rules(ipsec_rule->trailer.rule); + mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc); + + mlx5_del_flow_rules(ipsec_rule->auth.rule); + mlx5_fc_destroy(mdev, ipsec_rule->auth.fc); + + if (ipsec_rule->replay.rule) { + mlx5_del_flow_rules(ipsec_rule->replay.rule); + mlx5_fc_destroy(mdev, ipsec_rule->replay.fc); + } mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry); rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type); } @@ -1957,7 +2272,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec) kfree(ipsec->rx_ipv6); if (ipsec->is_uplink_rep) { - xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map); + xa_destroy(&ipsec->ipsec_obj_id_map); mutex_destroy(&ipsec->tx_esw->ft.mutex); WARN_ON(ipsec->tx_esw->ft.refcnt); @@ -2020,7 +2335,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec, mutex_init(&ipsec->tx_esw->ft.mutex); mutex_init(&ipsec->rx_esw->ft.mutex); ipsec->tx_esw->ns = ns_esw; - xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1); + xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1); } else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) { ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom); } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index a91f772dc981..6e00afe4671b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -6,6 +6,8 @@ #include "ipsec.h" #include "lib/crypto.h" #include "lib/ipsec_fs_roce.h" +#include "fs_core.h" +#include "eswitch.h" enum { MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET, @@ -38,7 +40,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp)) caps |= MLX5_IPSEC_CAP_CRYPTO; - 
if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) { + if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) && + (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS || + (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS && + is_mdev_legacy_mode(mdev)))) { if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) && MLX5_CAP_FLOWTABLE_NIC_RX(mdev, @@ -95,7 +100,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn, if (attrs->dir == XFRM_DEV_OFFLOAD_IN) { MLX5_SET(ipsec_aso, aso_ctx, window_sz, - attrs->replay_esn.replay_window / 64); + attrs->replay_esn.replay_window); MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_REPLAY_PROTECTION); } @@ -559,6 +564,7 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec) dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx), DMA_BIDIRECTIONAL); kfree(aso); + ipsec->aso = NULL; } static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 215261a69255..c7c1b667b105 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -43,12 +43,17 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, struct ethtool_drvinfo *drvinfo) { struct mlx5_core_dev *mdev = priv->mdev; + int count; strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d.%04d (%.16s)", - fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev), - mdev->board_id); + count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%04d (%.16s)", fw_rev_maj(mdev), + fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id); + if (count >= sizeof(drvinfo->fw_version)) + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%04d", fw_rev_maj(mdev), + fw_rev_min(mdev), fw_rev_sub(mdev)); + strscpy(drvinfo->bus_info, dev_name(mdev->device), sizeof(drvinfo->bus_info)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ea58c6917433..0c87ddb8a7a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2731,6 +2731,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs) { int i; + ASSERT_RTNL(); if (chs->ptp) { mlx5e_ptp_close(chs->ptp); chs->ptp = NULL; @@ -3012,17 +3013,29 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) if (mlx5e_is_vport_rep(priv)) mlx5e_rep_activate_channels(priv); + set_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state); + mlx5e_wait_channels_min_rx_wqes(&priv->channels); if (priv->rx_res) mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels); } +static void mlx5e_cancel_tx_timeout_work(struct mlx5e_priv *priv) +{ + WARN_ON_ONCE(test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state)); + if (current_work() != &priv->tx_timeout_work) + cancel_work_sync(&priv->tx_timeout_work); +} + void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) { if (priv->rx_res) mlx5e_rx_res_channels_deactivate(priv->rx_res); + clear_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state); + mlx5e_cancel_tx_timeout_work(priv); + if (mlx5e_is_vport_rep(priv)) mlx5e_rep_deactivate_channels(priv); @@ -4801,8 +4814,17 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) struct net_device *netdev = priv->netdev; int i; - rtnl_lock(); - mutex_lock(&priv->state_lock); + /* Take rtnl_lock to ensure no change in 
netdev->real_num_tx_queues + * through this flow. However, channel closing flows have to wait for + * this work to finish while holding rtnl lock too. So either get the + * lock or find that channels are being closed for other reason and + * this work is not relevant anymore. + */ + while (!rtnl_trylock()) { + if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state)) + return; + msleep(20); + } if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) goto unlock; @@ -4821,7 +4843,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) } unlock: - mutex_unlock(&priv->state_lock); rtnl_unlock(); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 693e55b010d9..e92d4f83592e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -71,13 +71,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; + int count; strscpy(drvinfo->driver, mlx5e_rep_driver_name, sizeof(drvinfo->driver)); - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d.%04d (%.16s)", - fw_rev_maj(mdev), fw_rev_min(mdev), - fw_rev_sub(mdev), mdev->board_id); + count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%04d (%.16s)", fw_rev_maj(mdev), + fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id); + if (count >= sizeof(drvinfo->fw_version)) + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%04d", fw_rev_maj(mdev), + fw_rev_min(mdev), fw_rev_sub(mdev)); } static const struct counter_desc sw_rep_stats_desc[] = { @@ -1493,7 +1497,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport); - if (dl_port) { + if (!IS_ERR(dl_port)) { SET_NETDEV_DEVLINK_PORT(netdev, dl_port); mlx5e_rep_vnic_reporter_create(priv, dl_port); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9a5a5c2c7da9..96af9e2ab1d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -444,6 +444,9 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv, struct mlx5e_flow_meter_handle *meter; enum mlx5e_post_meter_type type; + if (IS_ERR(post_act)) + return PTR_ERR(post_act); + meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params); if (IS_ERR(meter)) { mlx5_core_err(priv->mdev, "Failed to get flow meter\n"); @@ -3147,7 +3150,7 @@ static struct mlx5_fields fields[] = { OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0, dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]), OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit), - OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp), + OFFLOAD(IP_DSCP, 16, 0x0fc0, ip6, 0, ip_dscp), OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport), OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport), @@ -3158,21 +3161,31 @@ static struct mlx5_fields fields[] = { OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport), }; -static unsigned long mask_to_le(unsigned long mask, int size) +static u32 mask_field_get(void *mask, struct mlx5_fields *f) { - __be32 mask_be32; - __be16 mask_be16; - - if (size == 32) { - mask_be32 = (__force __be32)(mask); - mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32)); - } else if (size == 16) { - mask_be32 = (__force __be32)(mask); - mask_be16 = *(__be16 
*)&mask_be32; - mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16)); + switch (f->field_bsize) { + case 32: + return be32_to_cpu(*(__be32 *)mask) & f->field_mask; + case 16: + return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask; + default: + return *(u8 *)mask & (u8)f->field_mask; } +} - return mask; +static void mask_field_clear(void *mask, struct mlx5_fields *f) +{ + switch (f->field_bsize) { + case 32: + *(__be32 *)mask &= ~cpu_to_be32(f->field_mask); + break; + case 16: + *(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask); + break; + default: + *(u8 *)mask &= ~(u8)f->field_mask; + break; + } } static int offload_pedit_fields(struct mlx5e_priv *priv, @@ -3184,11 +3197,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; struct pedit_headers_action *hdrs = parse_attr->hdrs; void *headers_c, *headers_v, *action, *vals_p; - u32 *s_masks_p, *a_masks_p, s_mask, a_mask; struct mlx5e_tc_mod_hdr_acts *mod_acts; - unsigned long mask, field_mask; + void *s_masks_p, *a_masks_p; int i, first, last, next_z; struct mlx5_fields *f; + unsigned long mask; + u32 s_mask, a_mask; u8 cmd; mod_acts = &parse_attr->mod_hdr_acts; @@ -3204,15 +3218,11 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, bool skip; f = &fields[i]; - /* avoid seeing bits set from previous iterations */ - s_mask = 0; - a_mask = 0; - s_masks_p = (void *)set_masks + f->offset; a_masks_p = (void *)add_masks + f->offset; - s_mask = *s_masks_p & f->field_mask; - a_mask = *a_masks_p & f->field_mask; + s_mask = mask_field_get(s_masks_p, f); + a_mask = mask_field_get(a_masks_p, f); if (!s_mask && !a_mask) /* nothing to offload here */ continue; @@ -3239,22 +3249,20 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, match_mask, f->field_bsize)) skip = true; /* clear to denote we consumed this field */ - *s_masks_p &= ~f->field_mask; + mask_field_clear(s_masks_p, f); } else { cmd = MLX5_ACTION_TYPE_ADD; mask = a_mask; vals_p = (void *)add_vals + f->offset; /* add 0 is no change */ - if ((*(u32 *)vals_p & f->field_mask) == 0) + if (!mask_field_get(vals_p, f)) skip = true; /* clear to denote we consumed this field */ - *a_masks_p &= ~f->field_mask; + mask_field_clear(a_masks_p, f); } if (skip) continue; - mask = mask_to_le(mask, f->field_bsize); - first = find_first_bit(&mask, f->field_bsize); next_z = find_next_zero_bit(&mask, f->field_bsize, first); last = find_last_bit(&mask, f->field_bsize); @@ -3281,10 +3289,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, MLX5_SET(set_action_in, action, field, f->field); if (cmd == MLX5_ACTION_TYPE_SET) { + unsigned long field_mask = f->field_mask; int start; - field_mask = mask_to_le(f->field_mask, f->field_bsize); - /* if field is bit sized it can start not from first bit */ start = find_first_bit(&field_mask, f->field_bsize); @@ -3735,6 +3742,20 @@ out_free: } static int +set_branch_dest_ft(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr) +{ + struct mlx5e_post_act *post_act = get_post_action(priv); + + if (IS_ERR(post_act)) + return PTR_ERR(post_act); + + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act); + + return 0; +} + +static int alloc_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5e_tc_act_branch_ctrl *cond, struct mlx5_flow_attr **cond_attr, @@ -3757,8 +3778,9 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow, break; case FLOW_ACTION_ACCEPT: case FLOW_ACTION_PIPE: - attr->action |= 
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv)); + err = set_branch_dest_ft(flow->priv, attr); + if (err) + goto out_err; break; case FLOW_ACTION_JUMP: if (*jump_count) { @@ -3767,8 +3789,9 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow, goto out_err; } *jump_count = cond->extval; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv)); + err = set_branch_dest_ft(flow->priv, attr); + if (err) + goto out_err; break; default: err = -EOPNOTSUPP; @@ -5715,8 +5738,10 @@ int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_a esw = priv->mdev->priv.eswitch; attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping); - if (IS_ERR(attr->act_id_restore_rule)) + if (IS_ERR(attr->act_id_restore_rule)) { + err = PTR_ERR(attr->act_id_restore_rule); goto err_rule; + } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index d41435c22ce5..f0b506e562df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -399,9 +399,9 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata); mlx5e_skb_cb_hwtstamp_init(skb); - mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index); mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb, metadata_index); + mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index); if (!netif_tx_queue_stopped(sq->txq) && mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) { netif_tx_stop_queue(sq->txq); @@ -494,10 +494,10 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, err_drop: stats->dropped++; - dev_kfree_skb_any(skb); if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist, be32_to_cpu(eseg->flow_table_metadata)); + dev_kfree_skb_any(skb); mlx5e_tx_flush(sq); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ea0405e0a43f..40a6cb052a2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -885,11 +885,14 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx) { struct mlx5_eq_table *table = dev->priv.eq_table; struct mlx5_irq *irq; + int cpu; irq = xa_load(&table->comp_irqs, vecidx); if (!irq) return; + cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq)); + cpumask_clear_cpu(cpu, &table->used_cpus); xa_erase(&table->comp_irqs, vecidx); mlx5_irq_affinity_irq_release(dev, irq); } @@ -897,16 +900,26 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx) static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx) { struct mlx5_eq_table *table = dev->priv.eq_table; + struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev); + struct irq_affinity_desc af_desc = {}; struct mlx5_irq *irq; - irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx); - if (IS_ERR(irq)) { - /* In case SF irq pool does not exist, fallback to the PF irqs*/ - if (PTR_ERR(irq) == -ENOENT) - return comp_irq_request_pci(dev, vecidx); + /* In case SF irq pool does not exist, fallback to the PF irqs*/ + if (!mlx5_irq_pool_is_sf_pool(pool)) + return comp_irq_request_pci(dev, vecidx); + af_desc.is_managed = 1; + cpumask_copy(&af_desc.mask, cpu_online_mask); + cpumask_andnot(&af_desc.mask, 
&af_desc.mask, &table->used_cpus); + irq = mlx5_irq_affinity_request(pool, &af_desc); + if (IS_ERR(irq)) return PTR_ERR(irq); - } + + cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq)); + mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n", + pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)), + cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)), + mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ); return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c index 095f31f380fa..190f10aba170 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c @@ -21,158 +21,6 @@ enum { MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL, }; -static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx) -{ - mlx5_del_flow_rules(rx->status_drop.rule); - mlx5_destroy_flow_group(rx->status_drop.group); - mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt); -} - -static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx) -{ - mlx5_del_flow_rules(rx->status.rule); - mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0); -} - -static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5_flow_table *ft = rx->ft.status; - struct mlx5_core_dev *mdev = ipsec->mdev; - struct mlx5_flow_destination dest = {}; - struct mlx5_flow_act flow_act = {}; - struct mlx5_flow_handle *rule; - struct mlx5_fc *flow_counter; - struct mlx5_flow_spec *spec; - struct mlx5_flow_group *g; - u32 *flow_group_in; - int err = 0; - - flow_group_in = kvzalloc(inlen, GFP_KERNEL); - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!flow_group_in || !spec) { - err = -ENOMEM; - goto err_out; - } - - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1); - g = mlx5_create_flow_group(ft, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - mlx5_core_err(mdev, - "Failed to add ipsec rx status drop flow group, err=%d\n", err); - goto err_out; - } - - flow_counter = mlx5_fc_create(mdev, false); - if (IS_ERR(flow_counter)) { - err = PTR_ERR(flow_counter); - mlx5_core_err(mdev, - "Failed to add ipsec rx status drop rule counter, err=%d\n", err); - goto err_cnt; - } - - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; - dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(flow_counter); - spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; - rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - mlx5_core_err(mdev, - "Failed to add ipsec rx status drop rule, err=%d\n", err); - goto err_rule; - } - - rx->status_drop.group = g; - rx->status_drop.rule = rule; - rx->status_drop_cnt = flow_counter; - - kvfree(flow_group_in); - kvfree(spec); - return 0; - -err_rule: - mlx5_fc_destroy(mdev, flow_counter); -err_cnt: - mlx5_destroy_flow_group(g); -err_out: - kvfree(flow_group_in); - kvfree(spec); - return err; -} - -static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx, - struct mlx5_flow_destination *dest) -{ - struct mlx5_flow_act flow_act 
= {}; - struct mlx5_flow_handle *rule; - struct mlx5_flow_spec *spec; - int err; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return -ENOMEM; - - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, - misc_parameters_2.ipsec_syndrome); - MLX5_SET(fte_match_param, spec->match_value, - misc_parameters_2.ipsec_syndrome, 0); - spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; - flow_act.flags = FLOW_ACT_NO_APPEND; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - mlx5_core_warn(ipsec->mdev, - "Failed to add ipsec rx status pass rule, err=%d\n", err); - goto err_rule; - } - - rx->status.rule = rule; - kvfree(spec); - return 0; - -err_rule: - kvfree(spec); - return err; -} - -void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx) -{ - esw_ipsec_rx_status_pass_destroy(ipsec, rx); - esw_ipsec_rx_status_drop_destroy(ipsec, rx); -} - -int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx, - struct mlx5_flow_destination *dest) -{ - int err; - - err = esw_ipsec_rx_status_drop_create(ipsec, rx); - if (err) - return err; - - err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest); - if (err) - goto err_pass_create; - - return 0; - -err_pass_create: - esw_ipsec_rx_status_drop_destroy(ipsec, rx); - return err; -} - void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx_create_attr *attr) { @@ -202,7 +50,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry, u32 mapped_id; int err; - err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id, + err = xa_alloc_bh(&ipsec->ipsec_obj_id_map, &mapped_id, xa_mk_value(sa_entry->ipsec_obj_id), XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0); if (err) @@ -233,7 +81,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry, return 0; err_header_alloc: - xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id); + xa_erase_bh(&ipsec->ipsec_obj_id_map, mapped_id); return err; } @@ -242,7 +90,7 @@ void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) struct mlx5e_ipsec *ipsec = sa_entry->ipsec; if (sa_entry->rx_mapped_id) - xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, + xa_erase_bh(&ipsec->ipsec_obj_id_map, sa_entry->rx_mapped_id); } @@ -252,7 +100,7 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id, struct mlx5e_ipsec *ipsec = priv->ipsec; void *val; - val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id); + val = xa_load(&ipsec->ipsec_obj_id_map, id); if (!val) return -ENOENT; @@ -304,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) xa_for_each(&esw->offloads.vport_reps, i, rep) { rpriv = rep->rep_data[REP_ETH].priv; - if (!rpriv || !rpriv->netdev) + if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems)) continue; rhashtable_walk_enter(&rpriv->tc_ht, &iter); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h index 0c90f7a8b0d3..ac9c65b89166 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h @@ -8,11 +8,6 @@ struct mlx5e_ipsec; struct mlx5e_ipsec_sa_entry; #ifdef CONFIG_MLX5_ESWITCH -void 
mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx); -int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx, - struct mlx5_flow_destination *dest); void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx_create_attr *attr); int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec, @@ -26,16 +21,6 @@ void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx_create_attr *attr); void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev); #else -static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx) {} - -static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx, - struct mlx5_flow_destination *dest) -{ - return -EINVAL; -} - static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx_create_attr *attr) {} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 8d0b915a3121..3047d7015c52 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1463,7 +1463,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) { int err; - lockdep_assert_held(&esw->mode_lock); + devl_assert_locked(priv_to_devlink(esw->dev)); if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { esw_warn(esw->dev, "FDB is not supported, aborting ...\n"); @@ -1531,7 +1531,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) if (toggle_lag) mlx5_lag_disable_change(esw->dev); - down_write(&esw->mode_lock); if (!mlx5_esw_is_fdb_created(esw)) { ret = mlx5_eswitch_enable_locked(esw, num_vfs); } else { @@ -1554,8 +1553,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) } } - up_write(&esw->mode_lock); - if (toggle_lag) mlx5_lag_enable_change(esw->dev); @@ -1569,12 +1566,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) return; devl_assert_locked(priv_to_devlink(esw->dev)); - down_write(&esw->mode_lock); /* If driver is unloaded, this function is called twice by remove_one() * and mlx5_unload(). Prevent the second call. */ if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf) - goto unlock; + return; esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n", esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", @@ -1603,9 +1599,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) esw->esw_funcs.num_vfs = 0; else esw->esw_funcs.num_ec_vfs = 0; - -unlock: - up_write(&esw->mode_lock); } /* Free resources for corresponding eswitch mode. 
It is called by devlink @@ -1647,10 +1640,8 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw) devl_assert_locked(priv_to_devlink(esw->dev)); mlx5_lag_disable_change(esw->dev); - down_write(&esw->mode_lock); mlx5_eswitch_disable_locked(esw); esw->mode = MLX5_ESWITCH_LEGACY; - up_write(&esw->mode_lock); mlx5_lag_enable_change(esw->dev); } @@ -2254,8 +2245,13 @@ bool mlx5_esw_hold(struct mlx5_core_dev *mdev) if (!mlx5_esw_allowed(esw)) return true; - if (down_read_trylock(&esw->mode_lock) != 0) + if (down_read_trylock(&esw->mode_lock) != 0) { + if (esw->eswitch_operation_in_progress) { + up_read(&esw->mode_lock); + return false; + } return true; + } return false; } @@ -2312,7 +2308,8 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw) if (down_write_trylock(&esw->mode_lock) == 0) return -EINVAL; - if (atomic64_read(&esw->user_count) > 0) { + if (esw->eswitch_operation_in_progress || + atomic64_read(&esw->user_count) > 0) { up_write(&esw->mode_lock); return -EBUSY; } @@ -2320,6 +2317,18 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw) return esw->mode; } +int mlx5_esw_lock(struct mlx5_eswitch *esw) +{ + down_write(&esw->mode_lock); + + if (esw->eswitch_operation_in_progress) { + up_write(&esw->mode_lock); + return -EBUSY; + } + + return 0; +} + /** * mlx5_esw_unlock() - Release write lock on esw mode lock * @esw: eswitch device. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 37ab66e7b403..b4eb17141edf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -383,6 +383,7 @@ struct mlx5_eswitch { struct xarray paired; struct mlx5_devcom_comp_dev *devcom; u16 enabled_ipsec_vf_count; + bool eswitch_operation_in_progress; }; void esw_offloads_disable(struct mlx5_eswitch *esw); @@ -525,7 +526,8 @@ struct mlx5_esw_flow_attr { u8 total_vlan; struct { u32 flags; - struct mlx5_eswitch_rep *rep; + bool vport_valid; + u16 vport; struct mlx5_pkt_reformat *pkt_reformat; struct mlx5_core_dev *mdev; struct mlx5_termtbl_handle *termtbl; @@ -827,6 +829,7 @@ void mlx5_esw_release(struct mlx5_core_dev *dev); void mlx5_esw_get(struct mlx5_core_dev *dev); void mlx5_esw_put(struct mlx5_core_dev *dev); int mlx5_esw_try_lock(struct mlx5_eswitch *esw); +int mlx5_esw_lock(struct mlx5_eswitch *esw); void mlx5_esw_unlock(struct mlx5_eswitch *esw); void esw_vport_change_handle_locked(struct mlx5_vport *vport); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index b296ac52a439..b0455134c98e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -287,10 +287,9 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_ for (i = from; i < to; i++) if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) mlx5_chains_put_table(chains, 0, 1, 0); - else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, + else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport, esw_attr->dests[i].mdev)) - mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport, - false); + mlx5_esw_indir_table_put(esw, esw_attr->dests[i].vport, false); } static bool @@ -358,8 +357,8 @@ esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr) * this criteria. 
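/*
 * Illustrative sketch, not part of this patch: the hunks above drop
 * mode_lock around long-running devlink operations and instead fence
 * concurrent users with the new eswitch_operation_in_progress flag.
 * A hypothetical caller of the new mlx5_esw_lock() helper would follow
 * this contract:
 */
static int example_esw_op(struct mlx5_eswitch *esw)
{
	int err;

	err = mlx5_esw_lock(esw);
	if (err)
		return err;	/* -EBUSY: a mode change is in flight */

	/* ... work that relies on a stable eswitch mode ... */

	mlx5_esw_unlock(esw);
	return 0;
}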
*/ for (i = esw_attr->split_count; i < esw_attr->out_count; i++) { - if (esw_attr->dests[i].rep && - mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, + if (esw_attr->dests[i].vport_valid && + mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport, esw_attr->dests[i].mdev)) { result = true; } else { @@ -388,7 +387,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest, dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, - esw_attr->dests[j].rep->vport, false); + esw_attr->dests[j].vport, false); if (IS_ERR(dest[*i].ft)) { err = PTR_ERR(dest[*i].ft); goto err_indir_tbl_get; @@ -432,11 +431,11 @@ static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw, int attr_idx) { if (esw->offloads.ft_ipsec_tx_pol && - esw_attr->dests[attr_idx].rep && - esw_attr->dests[attr_idx].rep->vport == MLX5_VPORT_UPLINK && + esw_attr->dests[attr_idx].vport_valid && + esw_attr->dests[attr_idx].vport == MLX5_VPORT_UPLINK && /* To be aligned with software, encryption is needed only for tunnel device */ (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) && - esw_attr->dests[attr_idx].rep != esw_attr->in_rep && + esw_attr->dests[attr_idx].vport != esw_attr->in_rep->vport && esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev)) return true; @@ -469,7 +468,7 @@ esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_ac int attr_idx, int dest_idx, bool pkt_reformat) { dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport; + dest[dest_idx].vport.num = esw_attr->dests[attr_idx].vport; if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { dest[dest_idx].vport.vhca_id = MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id); @@ -984,7 +983,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - if (rep->vport == MLX5_VPORT_UPLINK && on_esw->offloads.ft_ipsec_tx_pol) { + if (rep->vport == MLX5_VPORT_UPLINK && + on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) { dest.ft = on_esw->offloads.ft_ipsec_tx_pol; flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; @@ -1176,9 +1176,9 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, struct mlx5_flow_handle *flow; struct mlx5_flow_spec *spec; struct mlx5_vport *vport; + int err, pfindex; unsigned long i; void *misc; - int err; if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev)) return 0; @@ -1254,7 +1254,15 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, flows[vport->index] = flow; } } - esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows; + + pfindex = mlx5_get_dev_index(peer_dev); + if (pfindex >= MLX5_MAX_PORTS) { + esw_warn(esw->dev, "Peer dev index(%d) is over the max num defined(%d)\n", + pfindex, MLX5_MAX_PORTS); + err = -EINVAL; + goto add_ec_vf_flow_err; + } + esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows; kvfree(spec); return 0; @@ -3652,14 +3660,18 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink) { + struct mlx5_core_dev *dev = devlink_priv(devlink); struct net *devl_net, *netdev_net; - struct mlx5_eswitch *esw; - - esw = mlx5_devlink_eswitch_nocheck_get(devlink); - netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev); - 
devl_net = devlink_net(devlink); + bool ret = false; - return net_eq(devl_net, netdev_net); + mutex_lock(&dev->mlx5e_res.uplink_netdev_lock); + if (dev->mlx5e_res.uplink_netdev) { + netdev_net = dev_net(dev->mlx5e_res.uplink_netdev); + devl_net = devlink_net(devlink); + ret = net_eq(devl_net, netdev_net); + } + mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock); + return ret; } int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) @@ -3732,13 +3744,16 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, goto unlock; } + esw->eswitch_operation_in_progress = true; + up_write(&esw->mode_lock); + mlx5_eswitch_disable_locked(esw); if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) { if (mlx5_devlink_trap_get_num_active(esw->dev)) { NL_SET_ERR_MSG_MOD(extack, "Can't change mode while devlink traps are active"); err = -EOPNOTSUPP; - goto unlock; + goto skip; } err = esw_offloads_start(esw, extack); } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) { @@ -3748,6 +3763,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, err = -EINVAL; } +skip: + down_write(&esw->mode_lock); + esw->eswitch_operation_in_progress = false; unlock: mlx5_esw_unlock(esw); enable_lag: @@ -3758,16 +3776,12 @@ enable_lag: int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) { struct mlx5_eswitch *esw; - int err; esw = mlx5_devlink_eswitch_get(devlink); if (IS_ERR(esw)) return PTR_ERR(esw); - down_read(&esw->mode_lock); - err = esw_mode_to_devlink(esw->mode, mode); - up_read(&esw->mode_lock); - return err; + return esw_mode_to_devlink(esw->mode, mode); } static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode, @@ -3861,11 +3875,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, if (err) goto out; + esw->eswitch_operation_in_progress = true; + up_write(&esw->mode_lock); + err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack); - if (err) - goto out; + if (!err) + esw->offloads.inline_mode = mlx5_mode; - esw->offloads.inline_mode = mlx5_mode; + down_write(&esw->mode_lock); + esw->eswitch_operation_in_progress = false; up_write(&esw->mode_lock); return 0; @@ -3877,16 +3895,12 @@ out: int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) { struct mlx5_eswitch *esw; - int err; esw = mlx5_devlink_eswitch_get(devlink); if (IS_ERR(esw)) return PTR_ERR(esw); - down_read(&esw->mode_lock); - err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); - up_read(&esw->mode_lock); - return err; + return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); } bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev) @@ -3968,6 +3982,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, goto unlock; } + esw->eswitch_operation_in_progress = true; + up_write(&esw->mode_lock); + esw_destroy_offloads_fdb_tables(esw); esw->offloads.encap = encap; @@ -3981,6 +3998,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, (void)esw_create_offloads_fdb_tables(esw); } + down_write(&esw->mode_lock); + esw->eswitch_operation_in_progress = false; + unlock: up_write(&esw->mode_lock); return err; @@ -3995,9 +4015,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, if (IS_ERR(esw)) return PTR_ERR(esw); - down_read(&esw->mode_lock); *encap = esw->offloads.encap; - up_read(&esw->mode_lock); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c index edd910258314..40bdc677f051 
100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c @@ -233,8 +233,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw, /* hairpin */ for (i = esw_attr->split_count; i < esw_attr->out_count; i++) - if (!esw_attr->dest_int_port && esw_attr->dests[i].rep && - esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK) + if (!esw_attr->dest_int_port && esw_attr->dests[i].vport_valid && + esw_attr->dests[i].vport == MLX5_VPORT_UPLINK) return true; return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index b568988e92e3..c4e19d627da2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -325,6 +325,29 @@ static void mlx5_fw_live_patch_event(struct work_struct *work) mlx5_core_err(dev, "Failed to reload FW tracer\n"); } +#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE) +static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev) +{ + struct pci_dev *bridge = dev->pdev->bus->self; + u16 reg16; + int err; + + if (!bridge) + return -EOPNOTSUPP; + + err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16); + if (err) + return err; + + if ((reg16 & PCI_EXP_SLTCTL_HPIE) && (reg16 & PCI_EXP_SLTCTL_DLLSCE)) { + mlx5_core_warn(dev, "FW reset is not supported as HotPlug is enabled\n"); + return -EOPNOTSUPP; + } + + return 0; +} +#endif + static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id) { struct pci_bus *bridge_bus = dev->pdev->bus; @@ -357,6 +380,12 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev) return false; } +#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE) + err = mlx5_check_hotplug_interrupt(dev); + if (err) + return false; +#endif + err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id); if (err) return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c index 047d5fed5f89..612e666ec263 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c @@ -168,45 +168,3 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i if (pool->irqs_per_cpu) cpu_put(pool, cpu); } - -/** - * mlx5_irq_affinity_irq_request_auto - request one IRQ for mlx5 device. - * @dev: mlx5 device that is requesting the IRQ. - * @used_cpus: cpumask of bounded cpus by the device - * @vecidx: vector index to request an IRQ for. - * - * Each IRQ is bounded to at most 1 CPU. - * This function is requesting an IRQ according to the default assignment. - * The default assignment policy is: - * - request the least loaded IRQ which is not bound to any - * CPU of the previous IRQs requested. - * - * On success, this function updates used_cpus mask and returns an irq pointer. - * In case of an error, an appropriate error pointer is returned.
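/*
 * Sketch of the reasoning behind mlx5_check_hotplug_interrupt() above
 * (this helper is illustrative, not from the driver): when both Slot
 * Control bits are set on the upstream bridge, the link drop caused by a
 * firmware reset would be handled as a surprise hot-unplug, so the driver
 * declines the reset.
 */
static bool hotplug_would_intercept_reset(u16 sltctl)
{
	return (sltctl & PCI_EXP_SLTCTL_HPIE) &&	/* hot-plug interrupt enable */
	       (sltctl & PCI_EXP_SLTCTL_DLLSCE);	/* link state change notify */
}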
- */ -struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev, - struct cpumask *used_cpus, u16 vecidx) -{ - struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev); - struct irq_affinity_desc af_desc = {}; - struct mlx5_irq *irq; - - if (!mlx5_irq_pool_is_sf_pool(pool)) - return ERR_PTR(-ENOENT); - - af_desc.is_managed = 1; - cpumask_copy(&af_desc.mask, cpu_online_mask); - cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus); - irq = mlx5_irq_affinity_request(pool, &af_desc); - - if (IS_ERR(irq)) - return irq; - - cpumask_or(used_cpus, used_cpus, mlx5_irq_get_affinity_mask(irq)); - mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n", - pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)), - cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)), - mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ); - - return irq; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index aa29f09e8356..0c83ef174275 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -384,7 +384,12 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta) { - return mlx5_ptp_adjtime(ptp, delta); + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); + struct mlx5_core_dev *mdev; + + mdev = container_of(clock, struct mlx5_core_dev, clock); + + return mlx5_ptp_adjtime_real_time(mdev, delta); } static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 653648216730..4dcf995cb1a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -28,7 +28,7 @@ struct mlx5_irq { struct atomic_notifier_head nh; cpumask_var_t mask; - char name[MLX5_MAX_IRQ_NAME]; + char name[MLX5_MAX_IRQ_FORMATTED_NAME]; struct mlx5_irq_pool *pool; int refcount; struct msi_map map; @@ -292,8 +292,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, else irq_sf_set_name(pool, name, i); ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh); - snprintf(irq->name, MLX5_MAX_IRQ_NAME, - "%s@pci:%s", name, pci_name(dev->pdev)); + snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME, + MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev)); err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name, &irq->nh); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h index d3a77a0ab848..c4d377f8df30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h @@ -7,6 +7,9 @@ #include <linux/mlx5/driver.h> #define MLX5_MAX_IRQ_NAME (32) +#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s") +#define MLX5_MAX_IRQ_FORMATTED_NAME \ + (MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR)) /* max irq_index is 2047, so four chars */ #define MLX5_MAX_IRQ_IDX_CHARS (4) #define MLX5_EQ_REFS_PER_IRQ (2) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 6ea88a581804..e3ec559369fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -57,7 +57,8 @@ static const char *dr_action_id_to_str(enum 
mlx5dr_action_type action_id) static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev) { - return (MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) || + return (MLX5_CAP_GEN(dev, steering_format_version) < MLX5_STEERING_FORMAT_CONNECTX_6DX || + MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) || MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 4e8527a724f5..6fa06ba2d346 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -52,7 +52,6 @@ struct dr_qp_init_attr { u32 cqn; u32 pdn; u32 max_send_wr; - u32 max_send_sge; struct mlx5_uars_page *uar; u8 isolate_vl_tc:1; }; @@ -247,37 +246,6 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne) return err == CQ_POLL_ERR ? err : npolled; } -static int dr_qp_get_args_update_send_wqe_size(struct dr_qp_init_attr *attr) -{ - return roundup_pow_of_two(sizeof(struct mlx5_wqe_ctrl_seg) + - sizeof(struct mlx5_wqe_flow_update_ctrl_seg) + - sizeof(struct mlx5_wqe_header_modify_argument_update_seg)); -} - -/* We calculate for specific RC QP with the required functionality */ -static int dr_qp_calc_rc_send_wqe(struct dr_qp_init_attr *attr) -{ - int update_arg_size; - int inl_size = 0; - int tot_size; - int size; - - update_arg_size = dr_qp_get_args_update_send_wqe_size(attr); - - size = sizeof(struct mlx5_wqe_ctrl_seg) + - sizeof(struct mlx5_wqe_raddr_seg); - inl_size = size + ALIGN(sizeof(struct mlx5_wqe_inline_seg) + - DR_STE_SIZE, 16); - - size += attr->max_send_sge * sizeof(struct mlx5_wqe_data_seg); - - size = max(size, update_arg_size); - - tot_size = max(size, inl_size); - - return ALIGN(tot_size, MLX5_SEND_WQE_BB); -} - static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, struct dr_qp_init_attr *attr) { @@ -285,7 +253,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {}; struct mlx5_wq_param wqp; struct mlx5dr_qp *dr_qp; - int wqe_size; int inlen; void *qpc; void *in; @@ -365,15 +332,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, if (err) goto err_in; dr_qp->uar = attr->uar; - wqe_size = dr_qp_calc_rc_send_wqe(attr); - dr_qp->max_inline_data = min(wqe_size - - (sizeof(struct mlx5_wqe_ctrl_seg) + - sizeof(struct mlx5_wqe_raddr_seg) + - sizeof(struct mlx5_wqe_inline_seg)), - (2 * MLX5_SEND_WQE_BB - - (sizeof(struct mlx5_wqe_ctrl_seg) + - sizeof(struct mlx5_wqe_raddr_seg) + - sizeof(struct mlx5_wqe_inline_seg)))); return dr_qp; @@ -437,48 +395,8 @@ dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl, MLX5_SEND_WQE_DS; } -static int dr_set_data_inl_seg(struct mlx5dr_qp *dr_qp, - struct dr_data_seg *data_seg, void *wqe) -{ - int inline_header_size = sizeof(struct mlx5_wqe_ctrl_seg) + - sizeof(struct mlx5_wqe_raddr_seg) + - sizeof(struct mlx5_wqe_inline_seg); - struct mlx5_wqe_inline_seg *seg; - int left_space; - int inl = 0; - void *addr; - int len; - int idx; - - seg = wqe; - wqe += sizeof(*seg); - addr = (void *)(unsigned long)(data_seg->addr); - len = data_seg->length; - inl += len; - left_space = MLX5_SEND_WQE_BB - inline_header_size; - - if (likely(len > left_space)) { - memcpy(wqe, addr, left_space); - len -= left_space; - addr += left_space; - idx = (dr_qp->sq.pc + 1) & (dr_qp->sq.wqe_cnt - 1); - wqe = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx); - } 
- - memcpy(wqe, addr, len); - - if (likely(inl)) { - seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); - return DIV_ROUND_UP(inl + sizeof(seg->byte_count), - MLX5_SEND_WQE_DS); - } else { - return 0; - } -} - static void -dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp, - struct mlx5_wqe_ctrl_seg *wq_ctrl, +dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl, u64 remote_addr, u32 rkey, struct dr_data_seg *data_seg, @@ -494,17 +412,15 @@ dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp, wq_raddr->reserved = 0; wq_dseg = (void *)(wq_raddr + 1); - /* WQE ctrl segment + WQE remote addr segment */ - *size = (sizeof(*wq_ctrl) + sizeof(*wq_raddr)) / MLX5_SEND_WQE_DS; - if (data_seg->send_flags & IB_SEND_INLINE) { - *size += dr_set_data_inl_seg(dr_qp, data_seg, wq_dseg); - } else { - wq_dseg->byte_count = cpu_to_be32(data_seg->length); - wq_dseg->lkey = cpu_to_be32(data_seg->lkey); - wq_dseg->addr = cpu_to_be64(data_seg->addr); - *size += sizeof(*wq_dseg) / MLX5_SEND_WQE_DS; /* WQE data segment */ - } + wq_dseg->byte_count = cpu_to_be32(data_seg->length); + wq_dseg->lkey = cpu_to_be32(data_seg->lkey); + wq_dseg->addr = cpu_to_be64(data_seg->addr); + + *size = (sizeof(*wq_ctrl) + /* WQE ctrl segment */ + sizeof(*wq_dseg) + /* WQE data segment */ + sizeof(*wq_raddr)) / /* WQE remote addr segment */ + MLX5_SEND_WQE_DS; } static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl, @@ -535,7 +451,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr, switch (opcode) { case MLX5_OPCODE_RDMA_READ: case MLX5_OPCODE_RDMA_WRITE: - dr_rdma_handle_icm_write_segments(dr_qp, wq_ctrl, remote_addr, + dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr, rkey, data_seg, &size); break; case MLX5_OPCODE_FLOW_TBL_ACCESS: @@ -656,7 +572,7 @@ static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring, if (send_ring->pending_wqe % send_ring->signal_th == 0) send_info->write.send_flags |= IB_SEND_SIGNALED; else - send_info->write.send_flags &= ~IB_SEND_SIGNALED; + send_info->write.send_flags = 0; } static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn, @@ -680,13 +596,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn, } send_ring->pending_wqe++; - if (!send_info->write.lkey) - send_info->write.send_flags |= IB_SEND_INLINE; if (send_ring->pending_wqe % send_ring->signal_th == 0) send_info->write.send_flags |= IB_SEND_SIGNALED; - else - send_info->write.send_flags &= ~IB_SEND_SIGNALED; send_ring->pending_wqe++; send_info->read.length = send_info->write.length; @@ -696,9 +608,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn, send_info->read.lkey = send_ring->sync_mr->mkey; if (send_ring->pending_wqe % send_ring->signal_th == 0) - send_info->read.send_flags |= IB_SEND_SIGNALED; + send_info->read.send_flags = IB_SEND_SIGNALED; else - send_info->read.send_flags &= ~IB_SEND_SIGNALED; + send_info->read.send_flags = 0; } static void dr_fill_data_segs(struct mlx5dr_domain *dmn, @@ -1345,7 +1257,6 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn) dmn->send_ring->cq->qp = dmn->send_ring->qp; dmn->info.max_send_wr = QUEUE_SIZE; - init_attr.max_send_sge = 1; dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data, DR_STE_SIZE); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 5a31fb47ffa5..21753f327868 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -277,7 +277,7 @@ 
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, req_list_size = max_list_size; } - out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) + + out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) + req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout); out = kvzalloc(out_sz, GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c index 0d5a41a2ae01..227d01cace3f 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c @@ -267,6 +267,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts) priv->stats.rx_truncate_errors++; } + /* Read receive consumer index before replenish so that this routine + * returns accurate return value even if packet is received into + * just-replenished buffer prior to exiting this routine. + */ + rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI); + rx_ci_rem = rx_ci % priv->rx_q_entries; + /* Let hardware know we've replenished one buffer */ rx_pi++; @@ -279,8 +286,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts) rx_pi_rem = rx_pi % priv->rx_q_entries; if (rx_pi_rem == 0) priv->valid_polarity ^= 1; - rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI); - rx_ci_rem = rx_ci % priv->rx_q_entries; if (skb) netif_receive_skb(skb); diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h index fecd43754cea..e5ec0a363aff 100644 --- a/drivers/net/ethernet/micrel/ks8851.h +++ b/drivers/net/ethernet/micrel/ks8851.h @@ -350,6 +350,8 @@ union ks8851_tx_hdr { * @rxd: Space for receiving SPI data, in DMA-able space. * @txd: Space for transmitting SPI data, in DMA-able space. * @msg_enable: The message flags controlling driver output (see ethtool). + * @tx_space: Free space in the hardware TX buffer (cached copy of KS_TXMIR). + * @queued_len: Space required in hardware TX buffer for queued packets in txq. * @fid: Incrementing frame id tag. * @rc_ier: Cached copy of KS_IER. * @rc_ccr: Cached copy of KS_CCR. 
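/*
 * Illustrative sketch (ks8851_tx_fits() is not in the driver) of the
 * flow-control invariant the ks8851 changes below establish: a frame may
 * only be queued while the TXFIFO space consumed by already-queued frames
 * plus this one fits within the last tx_space snapshot; otherwise the
 * queue is stopped until the TXI interrupt re-reads KS_TXMIR, updates
 * tx_space under statelock and wakes the queue.
 */
static bool ks8851_tx_fits(const struct ks8851_net *ks, unsigned int needed)
{
	return ks->queued_len + needed <= ks->tx_space;
}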
@@ -399,6 +401,7 @@ struct ks8851_net { struct work_struct rxctrl_work; struct sk_buff_head txq; + unsigned int queued_len; struct eeprom_93cx6 eeprom; struct regulator *vdd_reg; diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index cfbc900d4aeb..0bf13b38b8f5 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -362,16 +362,18 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) handled |= IRQ_RXPSI; if (status & IRQ_TXI) { - handled |= IRQ_TXI; + unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR); - /* no lock here, tx queue should have been stopped */ + netif_dbg(ks, intr, ks->netdev, + "%s: txspace %d\n", __func__, tx_space); - /* update our idea of how much tx space is available to the - * system */ - ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); + spin_lock(&ks->statelock); + ks->tx_space = tx_space; + if (netif_queue_stopped(ks->netdev)) + netif_wake_queue(ks->netdev); + spin_unlock(&ks->statelock); - netif_dbg(ks, intr, ks->netdev, - "%s: txspace %d\n", __func__, ks->tx_space); + handled |= IRQ_TXI; } if (status & IRQ_RXI) @@ -414,9 +416,6 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) if (status & IRQ_LCI) mii_check_link(&ks->mii); - if (status & IRQ_TXI) - netif_wake_queue(ks->netdev); - return IRQ_HANDLED; } @@ -500,6 +499,7 @@ static int ks8851_net_open(struct net_device *dev) ks8851_wrreg16(ks, KS_ISR, ks->rc_ier); ks8851_wrreg16(ks, KS_IER, ks->rc_ier); + ks->queued_len = 0; netif_start_queue(ks->netdev); netif_dbg(ks, ifup, ks->netdev, "network device up\n"); diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c index 70bc7253454f..88e26c120b48 100644 --- a/drivers/net/ethernet/micrel/ks8851_spi.c +++ b/drivers/net/ethernet/micrel/ks8851_spi.c @@ -287,6 +287,18 @@ static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp, } /** + * calc_txlen - calculate size of message to send packet + * @len: Length of data + * + * Returns the size of the TXFIFO message needed to send + * this packet. + */ +static unsigned int calc_txlen(unsigned int len) +{ + return ALIGN(len + 4, 4); +} + +/** * ks8851_rx_skb_spi - receive skbuff * @ks: The device state * @skb: The skbuff @@ -305,7 +317,9 @@ static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb) */ static void ks8851_tx_work(struct work_struct *work) { + unsigned int dequeued_len = 0; struct ks8851_net_spi *kss; + unsigned short tx_space; struct ks8851_net *ks; unsigned long flags; struct sk_buff *txb; @@ -322,6 +336,8 @@ static void ks8851_tx_work(struct work_struct *work) last = skb_queue_empty(&ks->txq); if (txb) { + dequeued_len += calc_txlen(txb->len); + ks8851_wrreg16_spi(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); ks8851_wrfifo_spi(ks, txb, last); @@ -332,6 +348,13 @@ static void ks8851_tx_work(struct work_struct *work) } } + tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR); + + spin_lock(&ks->statelock); + ks->queued_len -= dequeued_len; + ks->tx_space = tx_space; + spin_unlock(&ks->statelock); + ks8851_unlock_spi(ks, &flags); } @@ -347,18 +370,6 @@ static void ks8851_flush_tx_work_spi(struct ks8851_net *ks) } /** - * calc_txlen - calculate size of message to send packet - * @len: Length of data - * - * Returns the size of the TXFIFO message needed to send - * this packet. 
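/*
 * Worked example for calc_txlen() as added above: the +4 is read here as
 * the per-frame TXFIFO header overhead (an interpretation; the kerneldoc
 * only says "message"), and ALIGN() pads to a dword boundary:
 *
 *   calc_txlen(60) == ALIGN(64, 4) == 64
 *   calc_txlen(61) == ALIGN(65, 4) == 68
 *
 * so ks8851_tx_work() accounts dequeued_len in these rounded units.
 */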
- */ -static unsigned int calc_txlen(unsigned int len) -{ - return ALIGN(len + 4, 4); -} - -/** * ks8851_start_xmit_spi - transmit packet using SPI * @skb: The buffer to transmit * @dev: The device used to transmit the packet. @@ -386,16 +397,17 @@ static netdev_tx_t ks8851_start_xmit_spi(struct sk_buff *skb, spin_lock(&ks->statelock); - if (needed > ks->tx_space) { + if (ks->queued_len + needed > ks->tx_space) { netif_stop_queue(dev); ret = NETDEV_TX_BUSY; } else { - ks->tx_space -= needed; + ks->queued_len += needed; skb_queue_tail(&ks->txq, skb); } spin_unlock(&ks->statelock); - schedule_work(&kss->tx_work); + if (ret == NETDEV_TX_OK) + schedule_work(&kss->tx_work); return ret; } diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig index 090e6b983243..01eb7445ead9 100644 --- a/drivers/net/ethernet/microsoft/Kconfig +++ b/drivers/net/ethernet/microsoft/Kconfig @@ -20,6 +20,7 @@ config MICROSOFT_MANA depends on PCI_MSI && X86_64 depends on PCI_HYPERV select AUXILIARY_BUS + select PAGE_POOL help This driver supports Microsoft Azure Network Adapter (MANA). So far, the driver is only supported on X86_64. diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c index 5c55197c7327..c018783757fb 100644 --- a/drivers/net/ethernet/mscc/ocelot_stats.c +++ b/drivers/net/ethernet/mscc/ocelot_stats.c @@ -582,10 +582,10 @@ static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *pri rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64]; rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127]; rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255]; - rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_128_255]; - rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_256_511]; - rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_512_1023]; - rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1024_1526]; + rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511]; + rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023]; + rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526]; + rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX]; } static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port, @@ -610,10 +610,10 @@ static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port, rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_PMAC_64]; rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_PMAC_65_127]; rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_PMAC_128_255]; - rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_128_255]; - rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_256_511]; - rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_512_1023]; - rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1024_1526]; + rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_256_511]; + rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_512_1023]; + rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_1024_1526]; + rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1527_MAX]; } void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port, diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 060a77f2265d..e522845c7c21 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -160,6 +160,18 @@ struct nfp_tun_mac_addr_offload { u8 addr[ETH_ALEN]; }; +/** + * struct nfp_neigh_update_work - update neighbour information to nfp + * @work: Work queue for writing neigh to the nfp + * @n: neighbour entry + * @app: Back pointer to app + */ +struct nfp_neigh_update_work 
{ + struct work_struct work; + struct neighbour *n; + struct nfp_app *app; +}; + enum nfp_flower_mac_offload_cmd { NFP_TUNNEL_MAC_OFFLOAD_ADD = 0, NFP_TUNNEL_MAC_OFFLOAD_DEL = 1, @@ -607,38 +619,30 @@ err: nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n"); } -static int -nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, - void *ptr) +static void +nfp_tun_release_neigh_update_work(struct nfp_neigh_update_work *update_work) { - struct nfp_flower_priv *app_priv; - struct netevent_redirect *redir; - struct neighbour *n; + neigh_release(update_work->n); + kfree(update_work); +} + +static void nfp_tun_neigh_update(struct work_struct *work) +{ + struct nfp_neigh_update_work *update_work; struct nfp_app *app; + struct neighbour *n; bool neigh_invalid; int err; - switch (event) { - case NETEVENT_REDIRECT: - redir = (struct netevent_redirect *)ptr; - n = redir->neigh; - break; - case NETEVENT_NEIGH_UPDATE: - n = (struct neighbour *)ptr; - break; - default: - return NOTIFY_DONE; - } - - neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead; - - app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); - app = app_priv->app; + update_work = container_of(work, struct nfp_neigh_update_work, work); + app = update_work->app; + n = update_work->n; if (!nfp_flower_get_port_id_from_netdev(app, n->dev)) - return NOTIFY_DONE; + goto out; #if IS_ENABLED(CONFIG_INET) + neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead; if (n->tbl->family == AF_INET6) { #if IS_ENABLED(CONFIG_IPV6) struct flowi6 flow6 = {}; @@ -655,13 +659,11 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL, &flow6, NULL); if (IS_ERR(dst)) - return NOTIFY_DONE; + goto out; dst_release(dst); } nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false); -#else - return NOTIFY_DONE; #endif /* CONFIG_IPV6 */ } else { struct flowi4 flow4 = {}; @@ -678,17 +680,71 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, rt = ip_route_output_key(dev_net(n->dev), &flow4); err = PTR_ERR_OR_ZERO(rt); if (err) - return NOTIFY_DONE; + goto out; ip_rt_put(rt); } nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false); } -#else - return NOTIFY_DONE; #endif /* CONFIG_INET */ +out: + nfp_tun_release_neigh_update_work(update_work); +} - return NOTIFY_OK; +static struct nfp_neigh_update_work * +nfp_tun_alloc_neigh_update_work(struct nfp_app *app, struct neighbour *n) +{ + struct nfp_neigh_update_work *update_work; + + update_work = kzalloc(sizeof(*update_work), GFP_ATOMIC); + if (!update_work) + return NULL; + + INIT_WORK(&update_work->work, nfp_tun_neigh_update); + neigh_hold(n); + update_work->n = n; + update_work->app = app; + + return update_work; +} + +static int +nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct nfp_neigh_update_work *update_work; + struct nfp_flower_priv *app_priv; + struct netevent_redirect *redir; + struct neighbour *n; + struct nfp_app *app; + + switch (event) { + case NETEVENT_REDIRECT: + redir = (struct netevent_redirect *)ptr; + n = redir->neigh; + break; + case NETEVENT_NEIGH_UPDATE: + n = (struct neighbour *)ptr; + break; + default: + return NOTIFY_DONE; + } +#if IS_ENABLED(CONFIG_IPV6) + if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl) +#else + if (n->tbl != &arp_tbl) +#endif + return NOTIFY_DONE; + + app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); + app = app_priv->app; + update_work = 
nfp_tun_alloc_neigh_update_work(app, n); + if (!update_work) + return NOTIFY_DONE; + + queue_work(system_highpri_wq, &update_work->work); + + return NOTIFY_DONE; } void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb) @@ -706,6 +762,7 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb) netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL); if (!netdev) goto fail_rcu_unlock; + dev_hold(netdev); flow.daddr = payload->ipv4_addr; flow.flowi4_proto = IPPROTO_UDP; @@ -725,13 +782,16 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb) ip_rt_put(rt); if (!n) goto fail_rcu_unlock; + rcu_read_unlock(); + nfp_tun_write_neigh(n->dev, app, &flow, n, false, true); neigh_release(n); - rcu_read_unlock(); + dev_put(netdev); return; fail_rcu_unlock: rcu_read_unlock(); + dev_put(netdev); nfp_flower_cmsg_warn(app, "Requested route not found.\n"); } @@ -749,6 +809,7 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb) netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL); if (!netdev) goto fail_rcu_unlock; + dev_hold(netdev); flow.daddr = payload->ipv6_addr; flow.flowi6_proto = IPPROTO_UDP; @@ -766,14 +827,16 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb) dst_release(dst); if (!n) goto fail_rcu_unlock; + rcu_read_unlock(); nfp_tun_write_neigh(n->dev, app, &flow, n, true, true); neigh_release(n); - rcu_read_unlock(); + dev_put(netdev); return; fail_rcu_unlock: rcu_read_unlock(); + dev_put(netdev); nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n"); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index 1dbc3cb50b1d..9b5463040075 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -223,7 +223,7 @@ struct ionic_desc_info { void *cb_arg; }; -#define IONIC_QUEUE_NAME_MAX_SZ 32 +#define IONIC_QUEUE_NAME_MAX_SZ 16 struct ionic_queue { struct device *dev; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index edc14730ce88..bad919343180 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -49,24 +49,24 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif); static void ionic_dim_work(struct work_struct *work) { struct dim *dim = container_of(work, struct dim, work); + struct ionic_intr_info *intr; struct dim_cq_moder cur_moder; struct ionic_qcq *qcq; + struct ionic_lif *lif; u32 new_coal; cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); qcq = container_of(dim, struct ionic_qcq, dim); - new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec); + lif = qcq->q.lif; + new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec); new_coal = new_coal ? 
new_coal : 1; - if (qcq->intr.dim_coal_hw != new_coal) { - unsigned int qi = qcq->cq.bound_q->index; - struct ionic_lif *lif = qcq->q.lif; - - qcq->intr.dim_coal_hw = new_coal; + intr = &qcq->intr; + if (intr->dim_coal_hw != new_coal) { + intr->dim_coal_hw = new_coal; ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, - lif->rxqcqs[qi]->intr.index, - qcq->intr.dim_coal_hw); + intr->index, intr->dim_coal_hw); } dim->state = DIM_START_MEASURE; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 65e20693c549..33f4f58ee51c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -933,6 +933,7 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn) p_dma->virt_addr = NULL; } kfree(p_mngr->ilt_shadow); + p_mngr->ilt_shadow = NULL; } static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 0d57ffcedf0c..fc78bc959ded 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2591,6 +2591,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { netdev_err(qdev->ndev, "lBufQ failed\n"); + kfree(qdev->lrg_buf); return -ENOMEM; } qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; @@ -2615,6 +2616,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) qdev->lrg_buf_q_alloc_size, qdev->lrg_buf_q_alloc_virt_addr, qdev->lrg_buf_q_alloc_phy_addr); + kfree(qdev->lrg_buf); return -ENOMEM; } diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index 6f2fa2a42770..1822f2ad8f0d 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -30,6 +30,8 @@ #define QCASPI_MAX_REGS 0x20 +#define QCASPI_RX_MAX_FRAMES 4 + static const u16 qcaspi_spi_regs[] = { SPI_REG_BFR_SIZE, SPI_REG_WRBUF_SPC_AVA, @@ -252,9 +254,9 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, { struct qcaspi *qca = netdev_priv(dev); - ring->rx_max_pending = 4; + ring->rx_max_pending = QCASPI_RX_MAX_FRAMES; ring->tx_max_pending = TX_RING_MAX_LEN; - ring->rx_pending = 4; + ring->rx_pending = QCASPI_RX_MAX_FRAMES; ring->tx_pending = qca->txr.count; } @@ -263,22 +265,21 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { - const struct net_device_ops *ops = dev->netdev_ops; struct qcaspi *qca = netdev_priv(dev); - if ((ring->rx_pending) || + if (ring->rx_pending != QCASPI_RX_MAX_FRAMES || (ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - if (netif_running(dev)) - ops->ndo_stop(dev); + if (qca->spi_thread) + kthread_park(qca->spi_thread); qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN); qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN); - if (netif_running(dev)) - ops->ndo_open(dev); + if (qca->spi_thread) + kthread_unpark(qca->spi_thread); return 0; } diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index bec723028e96..5f3c11fb3fa2 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -580,6 +580,18 @@ qcaspi_spi_thread(void *data) netdev_info(qca->net_dev, "SPI thread created\n"); while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); + if 
(kthread_should_park()) { + netif_tx_disable(qca->net_dev); + netif_carrier_off(qca->net_dev); + qcaspi_flush_tx_ring(qca); + kthread_parkme(); + if (qca->sync == QCASPI_SYNC_READY) { + netif_carrier_on(qca->net_dev); + netif_wake_queue(qca->net_dev); + } + continue; + } + if ((qca->intr_req == qca->intr_svc) && !qca->txr.skb[qca->txr.head]) schedule(); @@ -608,11 +620,17 @@ qcaspi_spi_thread(void *data) if (intr_cause & SPI_INT_CPU_ON) { qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON); + /* Frame decoding in progress */ + if (qca->frm_handle.state != qca->frm_handle.init) + qca->net_dev->stats.rx_dropped++; + + qcafrm_fsm_init_spi(&qca->frm_handle); + qca->stats.device_reset++; + /* not synced. */ if (qca->sync != QCASPI_SYNC_READY) continue; - qca->stats.device_reset++; netif_wake_queue(qca->net_dev); netif_carrier_on(qca->net_dev); } diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 0c76c162b8a9..81fd31f6fac4 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -196,6 +196,7 @@ enum rtl_registers { /* No threshold before first PCI xfer */ #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) #define RX_EARLY_OFF (1 << 11) +#define RX_PAUSE_SLOT_ON (1 << 11) /* 8125b and later */ #define RXCFG_DMA_SHIFT 8 /* Unlimited maximum PCI burst. */ #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) @@ -579,6 +580,7 @@ struct rtl8169_tc_offsets { enum rtl_flag { RTL_FLAG_TASK_ENABLED = 0, RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, RTL_FLAG_TASK_TX_TIMEOUT, RTL_FLAG_MAX }; @@ -624,6 +626,7 @@ struct rtl8169_private { unsigned supports_gmii:1; unsigned aspm_manageable:1; + unsigned dash_enabled:1; dma_addr_t counters_phys_addr; struct rtl8169_counters *counters; struct rtl8169_tc_offsets tc_offset; @@ -1208,7 +1211,7 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp) { r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); - rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10); + rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30); } static void rtl8168_driver_start(struct rtl8169_private *tp) @@ -1253,14 +1256,26 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp) return r8168ep_ocp_read(tp, 0x128) & BIT(0); } -static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp) +static bool rtl_dash_is_enabled(struct rtl8169_private *tp) +{ + switch (tp->dash_type) { + case RTL_DASH_DP: + return r8168dp_check_dash(tp); + case RTL_DASH_EP: + return r8168ep_check_dash(tp); + default: + return false; + } +} + +static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp) { switch (tp->mac_version) { case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: - return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE; + return RTL_DASH_DP; case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53: - return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE; + return RTL_DASH_EP; default: return RTL_DASH_NONE; } @@ -1453,7 +1468,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) device_set_wakeup_enable(tp_to_dev(tp), wolopts); - if (tp->dash_type == RTL_DASH_NONE) { + if (!tp->dash_enabled) { rtl_set_d3_pll_down(tp, !wolopts); tp->dev->wol_enabled = wolopts ? 1 : 0; } @@ -2292,9 +2307,13 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_53: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_61: RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); break; + case RTL_GIGA_MAC_VER_63: + RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST | + RX_PAUSE_SLOT_ON); + break; default: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); break; @@ -2512,7 +2531,7 @@ static void rtl_wol_enable_rx(struct rtl8169_private *tp) static void rtl_prepare_power_down(struct rtl8169_private *tp) { - if (tp->dash_type != RTL_DASH_NONE) + if (tp->dash_enabled) return; if (tp->mac_version == RTL_GIGA_MAC_VER_32 || @@ -2586,9 +2605,7 @@ static void rtl_set_rx_mode(struct net_device *dev) rx_mode &= ~AcceptMulticast; } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || dev->flags & IFF_ALLMULTI || - tp->mac_version == RTL_GIGA_MAC_VER_35 || - tp->mac_version == RTL_GIGA_MAC_VER_46 || - tp->mac_version == RTL_GIGA_MAC_VER_48) { + tp->mac_version == RTL_GIGA_MAC_VER_35) { /* accept all multicasts */ } else if (netdev_mc_empty(dev)) { rx_mode &= ~AcceptMulticast; @@ -4571,6 +4588,8 @@ static void rtl_task(struct work_struct *work) reset: rtl_reset_work(tp); netif_wake_queue(tp->dev); + } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) { + rtl_reset_work(tp); } out_unlock: rtnl_unlock(); @@ -4604,7 +4623,7 @@ static void r8169_phylink_handler(struct net_device *ndev) } else { /* In few cases rx is broken after link-down otherwise */ if (rtl_is_8125(tp)) - rtl_reset_work(tp); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE); pm_runtime_idle(d); } @@ -4648,10 +4667,16 @@ static void rtl8169_down(struct rtl8169_private *tp) rtl8169_cleanup(tp); rtl_disable_exit_l1(tp); rtl_prepare_power_down(tp); + + if (tp->dash_type != RTL_DASH_NONE) + rtl8168_driver_stop(tp); } static void rtl8169_up(struct rtl8169_private *tp) { + if (tp->dash_type != RTL_DASH_NONE) + rtl8168_driver_start(tp); + pci_set_master(tp->pci_dev); phy_init_hw(tp->phydev); phy_resume(tp->phydev); @@ -4674,7 +4699,7 @@ static int rtl8169_close(struct net_device *dev) rtl8169_down(tp); rtl8169_rx_clear(tp); - cancel_work_sync(&tp->wk.work); + cancel_work(&tp->wk.work); free_irq(tp->irq, tp); @@ -4869,7 +4894,7 @@ static int rtl8169_runtime_idle(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); - if (tp->dash_type != RTL_DASH_NONE) + if (tp->dash_enabled) return -EBUSY; if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev)) @@ -4895,8 +4920,7 @@ static void rtl_shutdown(struct pci_dev *pdev) /* Restore original MAC address */ rtl_rar_set(tp, tp->dev->perm_addr); - if (system_state == SYSTEM_POWER_OFF && - tp->dash_type == RTL_DASH_NONE) { + if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) { pci_wake_from_d3(pdev, tp->saved_wolopts); pci_set_power_state(pdev, PCI_D3hot); } @@ -4909,6 +4933,8 @@ static void rtl_remove_one(struct pci_dev *pdev) if (pci_dev_run_wake(pdev)) pm_runtime_get_noresume(&pdev->dev); + cancel_work_sync(&tp->wk.work); + unregister_netdev(tp->dev); if (tp->dash_type != RTL_DASH_NONE) @@ -5254,7 +5280,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1); tp->aspm_manageable = !rc; - tp->dash_type = rtl_check_dash(tp); + tp->dash_type = rtl_get_dash_type(tp); + tp->dash_enabled = rtl_dash_is_enabled(tp); tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK; @@ -5325,7 +5352,7 @@ 
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* configure chip for default features */ rtl8169_set_features(dev, dev->features); - if (tp->dash_type == RTL_DASH_NONE) { + if (!tp->dash_enabled) { rtl_set_d3_pll_down(tp, true); } else { rtl_set_d3_pll_down(tp, false); @@ -5365,7 +5392,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) "ok" : "ko"); if (tp->dash_type != RTL_DASH_NONE) { - netdev_info(dev, "DASH enabled\n"); + netdev_info(dev, "DASH %s\n", + tp->dash_enabled ? "enabled" : "disabled"); rtl8168_driver_start(tp); } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index c70cff80cc99..8649b3e90edb 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -66,16 +66,27 @@ int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value) return -ETIMEDOUT; } -static int ravb_config(struct net_device *ndev) +static int ravb_set_opmode(struct net_device *ndev, u32 opmode) { + u32 csr_ops = 1U << (opmode & CCC_OPC); + u32 ccc_mask = CCC_OPC; int error; - /* Set config mode */ - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); - /* Check if the operating mode is changed to the config mode */ - error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG); - if (error) - netdev_err(ndev, "failed to switch device to config mode\n"); + /* If gPTP active in config mode is supported it needs to be configured + * along with CSEL and operating mode in the same access. This is a + * hardware limitation. + */ + if (opmode & CCC_GAC) + ccc_mask |= CCC_GAC | CCC_CSEL; + + /* Set operating mode */ + ravb_modify(ndev, CCC, ccc_mask, opmode); + /* Check if the operating mode is changed to the requested one */ + error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops); + if (error) { + netdev_err(ndev, "failed to switch device to requested mode (%u)\n", + opmode & CCC_OPC); + } return error; } @@ -515,6 +526,15 @@ static void ravb_emac_init_gbeth(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { + ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35); + ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0); + } else { + ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35); + ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, + CXR31_SEL_LINK0); + } + /* Receive frame limit set register */ ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR); @@ -537,14 +557,6 @@ static void ravb_emac_init_gbeth(struct net_device *ndev) /* E-MAC interrupt enable register */ ravb_write(ndev, ECSIPR_ICDIP, ECSIPR); - - if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { - ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0); - ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35); - } else { - ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, - CXR31_SEL_LINK0); - } } static void ravb_emac_init_rcar(struct net_device *ndev) @@ -672,7 +684,7 @@ static int ravb_dmac_init(struct net_device *ndev) int error; /* Set CONFIG mode */ - error = ravb_config(ndev); + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); if (error) return error; @@ -681,9 +693,7 @@ static int ravb_dmac_init(struct net_device *ndev) return error; /* Setting the control will start the AVB-DMAC process. 
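/*
 * Note on ravb_set_opmode() above, with bit encodings assumed from the
 * driver's CCC_OPC_* and CSR_OPS_* definitions: CSR.OPS reports the
 * current mode as a one-hot bit, so the status bit to poll for follows
 * directly from the requested opcode:
 *
 *   csr_ops = 1U << (opmode & CCC_OPC);
 *   CCC_OPC_RESET (0)     -> CSR_OPS bit 0
 *   CCC_OPC_CONFIG (1)    -> CSR_OPS bit 1
 *   CCC_OPC_OPERATION (2) -> CSR_OPS bit 2
 */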
*/ - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION); - - return 0; + return ravb_set_opmode(ndev, CCC_OPC_OPERATION); } static void ravb_get_tx_tstamp(struct net_device *ndev) @@ -1045,7 +1055,7 @@ static int ravb_stop_dma(struct net_device *ndev) return error; /* Stop AVB-DMAC process */ - return ravb_config(ndev); + return ravb_set_opmode(ndev, CCC_OPC_CONFIG); } /* E-MAC interrupt handler */ @@ -1811,19 +1821,20 @@ static int ravb_open(struct net_device *ndev) if (info->gptp) ravb_ptp_init(ndev, priv->pdev); - netif_tx_start_all_queues(ndev); - /* PHY control start */ error = ravb_phy_start(ndev); if (error) goto out_ptp_stop; + netif_tx_start_all_queues(ndev); + return 0; out_ptp_stop: /* Stop PTP Clock driver */ if (info->gptp) ravb_ptp_stop(ndev); + ravb_stop_dma(ndev); out_free_irq_mgmta: if (!info->multi_irqs) goto out_free_irq; @@ -1874,6 +1885,12 @@ static void ravb_tx_timeout_work(struct work_struct *work) struct net_device *ndev = priv->ndev; int error; + if (!rtnl_trylock()) { + usleep_range(1000, 2000); + schedule_work(&priv->work); + return; + } + netif_tx_stop_all_queues(ndev); /* Stop PTP Clock driver */ @@ -1907,7 +1924,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) */ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", __func__, error); - return; + goto out_unlock; } ravb_emac_init(ndev); @@ -1917,6 +1934,9 @@ out: ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); + +out_unlock: + rtnl_unlock(); } /* Packet transmit function for Ethernet AVB */ @@ -2549,21 +2569,25 @@ static int ravb_set_gti(struct net_device *ndev) return 0; } -static void ravb_set_config_mode(struct net_device *ndev) +static int ravb_set_config_mode(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; + int error; if (info->gptp) { - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); + if (error) + return error; /* Set CSEL value */ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); } else if (info->ccc_gac) { - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG | - CCC_GAC | CCC_CSEL_HPB); + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB); } else { - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); } + + return error; } /* Set tx and rx clock internal delay modes */ @@ -2645,9 +2669,14 @@ static int ravb_probe(struct platform_device *pdev) ndev->features = info->net_features; ndev->hw_features = info->net_hw_features; - reset_control_deassert(rstc); + error = reset_control_deassert(rstc); + if (error) + goto out_free_netdev; + pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); + error = pm_runtime_resume_and_get(&pdev->dev); + if (error < 0) + goto out_rpm_disable; if (info->multi_irqs) { if (info->err_mgmt_irqs) @@ -2778,7 +2807,9 @@ static int ravb_probe(struct platform_device *pdev) ndev->ethtool_ops = &ravb_ethtool_ops; /* Set AVB config mode */ - ravb_set_config_mode(ndev); + error = ravb_set_config_mode(ndev); + if (error) + goto out_disable_gptp_clk; if (info->gptp || info->ccc_gac) { /* Set GTI value */ @@ -2872,11 +2903,12 @@ out_disable_gptp_clk: out_disable_refclk: clk_disable_unprepare(priv->refclk); out_release: - free_netdev(ndev); - pm_runtime_put(&pdev->dev); +out_rpm_disable: pm_runtime_disable(&pdev->dev); reset_control_assert(rstc); +out_free_netdev: + free_netdev(ndev); return error; } @@ -2886,22 +2918,25 @@ static void ravb_remove(struct 
platform_device *pdev) struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; - /* Stop PTP Clock driver */ - if (info->ccc_gac) - ravb_ptp_stop(ndev); - - clk_disable_unprepare(priv->gptp_clk); - clk_disable_unprepare(priv->refclk); - - /* Set reset mode */ - ravb_write(ndev, CCC_OPC_RESET, CCC); unregister_netdev(ndev); if (info->nc_queues) netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); + + /* Stop PTP Clock driver */ + if (info->ccc_gac) + ravb_ptp_stop(ndev); + dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); + + ravb_set_opmode(ndev, CCC_OPC_RESET); + + clk_disable_unprepare(priv->gptp_clk); + clk_disable_unprepare(priv->refclk); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); reset_control_assert(priv->rstc); @@ -2979,8 +3014,11 @@ static int __maybe_unused ravb_resume(struct device *dev) int ret = 0; /* If WoL is enabled set reset mode to rearm the WoL logic */ - if (priv->wol_enabled) - ravb_write(ndev, CCC_OPC_RESET, CCC); + if (priv->wol_enabled) { + ret = ravb_set_opmode(ndev, CCC_OPC_RESET); + if (ret) + return ret; + } /* All register have been reset to default values. * Restore all registers which where setup at probe time and @@ -2988,7 +3026,9 @@ static int __maybe_unused ravb_resume(struct device *dev) */ /* Set AVB config mode */ - ravb_set_config_mode(ndev); + ret = ravb_set_config_mode(ndev); + if (ret) + return ret; if (info->gptp || info->ccc_gac) { /* Set GTI value */ diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 43a7795d6591..e77c6ff93d81 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -1504,8 +1504,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd { struct rswitch_device *rdev = netdev_priv(ndev); struct rswitch_gwca_queue *gq = rdev->tx_queue; + netdev_tx_t ret = NETDEV_TX_OK; struct rswitch_ext_desc *desc; - int ret = NETDEV_TX_OK; dma_addr_t dma_addr; if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) { @@ -1517,10 +1517,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd return ret; dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, dma_addr)) { - dev_kfree_skb_any(skb); - return ret; - } + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto err_kfree; gq->skbs[gq->cur] = skb; desc = &gq->tx_ring[gq->cur]; @@ -1533,10 +1531,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd struct rswitch_gwca_ts_info *ts_info; ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC); - if (!ts_info) { - dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE); - return -ENOMEM; - } + if (!ts_info) + goto err_unmap; skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; rdev->ts_tag++; @@ -1559,6 +1555,14 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32)); return ret; + +err_unmap: + dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE); + +err_kfree: + dev_kfree_skb_any(skb); + + return ret; } static struct net_device_stats *rswitch_get_stats(struct net_device *ndev) diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index d2f35ee15eff..fac227d372db 100644 --- 
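The rswitch_start_xmit() hunk above converts ad-hoc error returns into a goto ladder and, in passing, fixes the old ts_info failure path, which returned the raw errno -ENOMEM from a start_xmit handler and leaked the DMA mapping and skb. A compact stand-alone model of the ladder shape, with malloc/free mocking the mapping and the ts_info allocation.

#include <stdio.h>
#include <stdlib.h>

#define TX_OK 0   /* stands in for NETDEV_TX_OK */

static int xmit(int fail_map, int fail_ts)
{
        void *dma, *ts;

        dma = fail_map ? NULL : malloc(16);       /* mock dma_map_single() */
        if (!dma)
                goto err_kfree;

        ts = fail_ts ? NULL : malloc(16);         /* mock ts_info kzalloc() */
        if (!ts)
                goto err_unmap;

        free(ts);                                 /* happy path */
        free(dma);
        return TX_OK;

err_unmap:
        free(dma);                                /* mock dma_unmap_single() */
err_kfree:
        /* mock dev_kfree_skb_any(): the skb is always consumed */
        return TX_OK;                             /* never a raw errno */
}

int main(void)
{
        printf("%d %d %d\n", xmit(0, 0), xmit(1, 0), xmit(0, 1));
        return 0;
}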
a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -823,8 +823,10 @@ int efx_probe_filters(struct efx_nic *efx) } if (!success) { - efx_for_each_channel(channel, efx) + efx_for_each_channel(channel, efx) { kfree(channel->rps_flow_id); + channel->rps_flow_id = NULL; + } efx->type->filter_table_remove(efx); rc = -ENOMEM; goto out_unlock; } diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index a2b9e289aa36..85dcda51df05 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -280,7 +280,7 @@ config DWMAC_INTEL config DWMAC_LOONGSON tristate "Loongson PCI DWMAC support" default MACH_LOONGSON64 - depends on STMMAC_ETH && PCI + depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI depends on COMMON_CLK help This selects the LOONGSON PCI bus support for the stmmac driver, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index 2cd6fce5c993..9e40c28d453a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -59,26 +59,19 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id return -ENODEV; } - if (!of_device_is_compatible(np, "loongson, pci-gmac")) { - pr_info("dwmac_loongson_pci: Incompatible OF node\n"); - return -ENODEV; - } - plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); if (!plat) return -ENOMEM; + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, + sizeof(*plat->mdio_bus_data), + GFP_KERNEL); + if (!plat->mdio_bus_data) + return -ENOMEM; + plat->mdio_node = of_get_child_by_name(np, "mdio"); if (plat->mdio_node) { dev_info(&pdev->dev, "Found MDIO subnode\n"); - - plat->mdio_bus_data = devm_kzalloc(&pdev->dev, - sizeof(*plat->mdio_bus_data), - GFP_KERNEL); - if (!plat->mdio_bus_data) { - ret = -ENOMEM; - goto err_put_node; - } plat->mdio_bus_data->needs_reset = true; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index d3bf42d0fceb..31631e3f89d0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -34,6 +34,7 @@ #define RGMII_CONFIG_LOOPBACK_EN BIT(2) #define RGMII_CONFIG_PROG_SWAP BIT(1) #define RGMII_CONFIG_DDR_MODE BIT(0) +#define RGMII_CONFIG_SGMII_CLK_DVDR GENMASK(18, 10) /* SDCC_HC_REG_DLL_CONFIG fields */ #define SDCC_DLL_CONFIG_DLL_RST BIT(30) @@ -78,6 +79,8 @@ #define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14) #define ETHQOS_MAC_CTRL_PORT_SEL BIT(15) +#define SGMII_10M_RX_CLK_DVDR 0x31 struct ethqos_emac_por { unsigned int offset; unsigned int value; }; @@ -598,6 +601,9 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos) return 0; } +/* On interface toggle MAC registers get reset. 
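The sfc hunk above pairs kfree(channel->rps_flow_id) with an explicit NULL assignment so that a later teardown path freeing the same field acts on NULL, a defined no-op, instead of double-freeing. A self-contained illustration; the struct and function names are mocks, not the sfc code.

#include <stdlib.h>

struct channel {
        int *rps_flow_id;
};

static void probe_error_path(struct channel *ch)
{
        free(ch->rps_flow_id);
        ch->rps_flow_id = NULL;   /* the added line: disarm the later free */
}

static void remove_path(struct channel *ch)
{
        free(ch->rps_flow_id);    /* free(NULL) is a defined no-op */
}

int main(void)
{
        struct channel ch = { .rps_flow_id = malloc(sizeof(int)) };

        probe_error_path(&ch);
        remove_path(&ch);         /* safe: no double free */
        return 0;
}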
+ * Configure MAC block for SGMII on ethernet phy link up + */ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) { int val; @@ -617,6 +623,10 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) case SPEED_10: val |= ETHQOS_MAC_CTRL_PORT_SEL; val &= ~ETHQOS_MAC_CTRL_SPEED_MODE; + rgmii_updatel(ethqos, RGMII_CONFIG_SGMII_CLK_DVDR, + FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR, + SGMII_10M_RX_CLK_DVDR), + RGMII_IO_MACRO_CONFIG); break; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index e95d35f1e5a0..8fd167501fa0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -710,28 +710,22 @@ void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, } } -void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, +void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, bool enable) { u32 value; - if (!enable) { - value = readl(ioaddr + MAC_FPE_CTRL_STS); - - value &= ~EFPE; - - writel(value, ioaddr + MAC_FPE_CTRL_STS); - return; + if (enable) { + cfg->fpe_csr = EFPE; + value = readl(ioaddr + GMAC_RXQ_CTRL1); + value &= ~GMAC_RXQCTRL_FPRQ; + value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT; + writel(value, ioaddr + GMAC_RXQ_CTRL1); + } else { + cfg->fpe_csr = 0; } - - value = readl(ioaddr + GMAC_RXQ_CTRL1); - value &= ~GMAC_RXQCTRL_FPRQ; - value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT; - writel(value, ioaddr + GMAC_RXQ_CTRL1); - - value = readl(ioaddr + MAC_FPE_CTRL_STS); - value |= EFPE; - writel(value, ioaddr + MAC_FPE_CTRL_STS); + writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS); } int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) @@ -741,6 +735,9 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) status = FPE_EVENT_UNKNOWN; + /* Reads from the MAC_FPE_CTRL_STS register should only be performed + * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read" + */ value = readl(ioaddr + MAC_FPE_CTRL_STS); if (value & TRSP) { @@ -766,19 +763,15 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) return status; } -void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, enum stmmac_mpacket_type type) +void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + enum stmmac_mpacket_type type) { - u32 value; + u32 value = cfg->fpe_csr; - value = readl(ioaddr + MAC_FPE_CTRL_STS); - - if (type == MPACKET_VERIFY) { - value &= ~SRSP; + if (type == MPACKET_VERIFY) value |= SVER; - } else { - value &= ~SVER; + else if (type == MPACKET_RESPONSE) value |= SRSP; - } writel(value, ioaddr + MAC_FPE_CTRL_STS); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h index 53c138d0ff48..34e620790eb3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h @@ -153,9 +153,11 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, unsigned int ptp_rate); void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, struct stmmac_extra_stats *x, u32 txqcnt); -void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, +void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, bool enable); void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, + struct stmmac_fpe_cfg *cfg, enum stmmac_mpacket_type type); int 
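The dwmac5 FPE rework above stops read-modify-writing MAC_FPE_CTRL_STS and instead keeps the control bits in a software shadow (cfg->fpe_csr), because, per the comment added to dwmac5_fpe_irq_status(), the register's status flags are clear-on-read, so a read outside the IRQ handler could silently consume events. A stand-alone model of the shadow-register scheme; the bit positions are placeholders, not the real layout.

#include <stdint.h>
#include <stdio.h>

#define EFPE (1u << 0)   /* enable-FPE control bit (placeholder position) */
#define SVER (1u << 1)   /* send verify mPacket (placeholder) */
#define SRSP (1u << 2)   /* send response mPacket (placeholder) */

struct fpe_cfg {
        uint32_t fpe_csr;        /* software shadow of the control bits */
};

static uint32_t fake_reg;        /* mock MAC_FPE_CTRL_STS; never read back */

static void fpe_configure(struct fpe_cfg *cfg, int enable)
{
        cfg->fpe_csr = enable ? EFPE : 0;
        fake_reg = cfg->fpe_csr;
}

static void fpe_send_mpacket(struct fpe_cfg *cfg, int verify)
{
        /* write shadow | one-shot bit; no read of the live register */
        fake_reg = cfg->fpe_csr | (verify ? SVER : SRSP);
}

int main(void)
{
        struct fpe_cfg cfg = { 0 };

        fpe_configure(&cfg, 1);
        fpe_send_mpacket(&cfg, 1);
        printf("reg=0x%x\n", fake_reg);  /* EFPE stays set alongside SVER */
        return 0;
}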
dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index 453e88b75be0..a74e71db79f9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -1484,7 +1484,8 @@ static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, return 0; } -static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq, +static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, bool enable) { u32 value; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index b95d3e137813..68aa2d5ca6e5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -412,9 +412,11 @@ struct stmmac_ops { unsigned int ptp_rate); void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev, struct stmmac_extra_stats *x, u32 txqcnt); - void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, + void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, bool enable); void (*fpe_send_mpacket)(void __iomem *ioaddr, + struct stmmac_fpe_cfg *cfg, enum stmmac_mpacket_type type); int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev); }; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index ea4910ae0921..6a7c1d325c46 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -177,8 +177,10 @@ #define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4 #define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc +#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204 #define MMC_XGMAC_TX_FPE_FRAG 0x208 #define MMC_XGMAC_TX_HOLD_REQ 0x20c +#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224 #define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228 #define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c #define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230 @@ -352,6 +354,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr) { writel(0x0, mmcaddr + MMC_RX_INTR_MASK); writel(0x0, mmcaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK); writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 540f6a4ec0b8..f05bd757dfe5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -237,7 +237,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv) */ ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS); - if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN) + if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)) return; num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >> diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3e50fd53a617..37e64283f910 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -964,7 +964,8 @@ static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) bool *hs_enable = &fpe_cfg->hs_enable; if (is_up && *hs_enable) { - stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY); + 
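The stmmac_hwtstamp.c one-liner above inverts a guard that is easy to misread: the interrupt path must bail out when external snapshot support is not enabled, and the old test returned early in exactly the opposite case. A tiny demonstration; the flag value and the counter are mocks.

#include <stdio.h>

#define STMMAC_FLAG_EXT_SNAPSHOT_EN (1u << 0)

static int snapshots_read;

static void timestamp_interrupt(unsigned int flags)
{
        if (!(flags & STMMAC_FLAG_EXT_SNAPSHOT_EN))
                return;              /* fixed sense: skip only when disabled */
        snapshots_read++;            /* read num_snapshot, queue events... */
}

int main(void)
{
        timestamp_interrupt(0);
        timestamp_interrupt(STMMAC_FLAG_EXT_SNAPSHOT_EN);
        printf("snapshots_read=%d\n", snapshots_read);  /* prints 1 */
        return 0;
}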
stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg, + MPACKET_VERIFY); } else { *lo_state = FPE_STATE_OFF; *lp_state = FPE_STATE_OFF; @@ -5293,6 +5294,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) dma_dir = page_pool_get_dma_dir(rx_q->page_pool); buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; + limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); if (netif_msg_rx_status(priv)) { void *rx_head; @@ -5328,10 +5330,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) len = 0; } +read_again: if (count >= limit) break; -read_again: buf1_len = 0; buf2_len = 0; entry = next_entry; @@ -5838,6 +5840,7 @@ static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) /* If user has requested FPE enable, quickly response */ if (*hs_enable) stmmac_fpe_send_mpacket(priv, priv->ioaddr, + fpe_cfg, MPACKET_RESPONSE); } @@ -7262,6 +7265,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work) if (*lo_state == FPE_STATE_ENTERING_ON && *lp_state == FPE_STATE_ENTERING_ON) { stmmac_fpe_configure(priv, priv->ioaddr, + fpe_cfg, priv->plat->tx_queues_to_use, priv->plat->rx_queues_to_use, *enable); @@ -7280,6 +7284,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work) netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, *lo_state, *lp_state); stmmac_fpe_send_mpacket(priv, priv->ioaddr, + fpe_cfg, MPACKET_VERIFY); } /* Sleep then retry */ @@ -7294,6 +7299,7 @@ void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) if (priv->plat->fpe_cfg->hs_enable != enable) { if (enable) { stmmac_fpe_send_mpacket(priv, priv->ioaddr, + priv->plat->fpe_cfg, MPACKET_VERIFY); } else { priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; @@ -7754,6 +7760,7 @@ int stmmac_suspend(struct device *dev) if (priv->dma_cap.fpesel) { /* Disable FPE */ stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->fpe_cfg, priv->plat->tx_queues_to_use, priv->plat->rx_queues_to_use, false); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index fa9e7e7040b9..0542cfd1817e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -591,7 +591,11 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->parent = priv->device; err = of_mdiobus_register(new_bus, mdio_node); - if (err != 0) { + if (err == -ENODEV) { + err = 0; + dev_info(dev, "MDIO bus is disabled\n"); + goto bus_register_fail; + } else if (err) { dev_err_probe(dev, err, "Cannot register the MDIO bus\n"); goto bus_register_fail; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index ac41ef4cbd2f..6ad3e0a11936 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -1079,6 +1079,7 @@ disable: priv->plat->fpe_cfg->enable = false; stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->fpe_cfg, priv->plat->tx_queues_to_use, priv->plat->rx_queues_to_use, false); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 6c4b64227ac8..411898a4f38c 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -2063,7 +2063,7 @@ static int prueth_probe(struct platform_device *pdev) &prueth->shram); if (ret) { dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret); - pruss_put(prueth->pruss); + goto put_pruss; } prueth->sram_pool = 
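Two related stmmac_rx() changes appear above: the NAPI budget is clamped below the RX ring size, and the read_again label moves above the count >= limit check so that descriptor re-reads for multi-descriptor frames also respect the budget. A control-flow-only sketch of the resulting loop, using a made-up ring size.

#include <stdio.h>

#define RING_SIZE 16u   /* mock dma_rx_size */

static unsigned int rx_poll(unsigned int limit)
{
        unsigned int count = 0;

        if (limit > RING_SIZE - 1)
                limit = RING_SIZE - 1;   /* never consume the whole ring */

read_again:
        if (count >= limit)
                return count;            /* budget check now covers re-reads */
        count++;                         /* mock: consume one descriptor */
        goto read_again;                 /* mock: frame spans descriptors */
}

int main(void)
{
        printf("processed=%u\n", rx_poll(64));  /* clamped to 15 */
        return 0;
}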
of_gen_pool_get(np, "sram", 0); @@ -2105,10 +2105,7 @@ static int prueth_probe(struct platform_device *pdev) prueth->iep1 = icss_iep_get_idx(np, 1); if (IS_ERR(prueth->iep1)) { ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n"); - icss_iep_put(prueth->iep0); - prueth->iep0 = NULL; - prueth->iep1 = NULL; - goto free_pool; + goto put_iep0; } if (prueth->pdata.quirk_10m_link_issue) { @@ -2205,6 +2202,12 @@ netdev_exit: exit_iep: if (prueth->pdata.quirk_10m_link_issue) icss_iep_exit_fw(prueth->iep1); + icss_iep_put(prueth->iep1); + +put_iep0: + icss_iep_put(prueth->iep0); + prueth->iep0 = NULL; + prueth->iep1 = NULL; free_pool: gen_pool_free(prueth->sram_pool, @@ -2212,6 +2215,8 @@ free_pool: put_mem: pruss_release_mem_region(prueth->pruss, &prueth->shram); + +put_pruss: pruss_put(prueth->pruss); put_cores: diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index 0d98defb011e..0ec7412febc7 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h @@ -346,12 +346,6 @@ static inline void *port_priv(struct gelic_port *port) return port->priv; } -#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC -void udbg_shutdown_ps3gelic(void); -#else -static inline void udbg_shutdown_ps3gelic(void) {} -#endif - int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask); /* shared netdev ops */ void gelic_card_up(struct gelic_card *card); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index a3c5de9d547a..533e912af089 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1769,10 +1769,12 @@ int wx_sw_init(struct wx *wx) wx->subsystem_device_id = pdev->subsystem_device; } else { err = wx_flash_read_dword(wx, 0xfffdc, &ssid); - if (!err) - wx->subsystem_device_id = swab16((u16)ssid); + if (err < 0) { + wx_err(wx, "read of internal subsystem device id failed\n"); + return err; + } - return err; + wx->subsystem_device_id = swab16((u16)ssid); } wx->mac_table = kcalloc(wx->mac.num_rar_entries, diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 2823861e5a92..347d3cec02a3 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -160,60 +160,6 @@ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc, return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); } -static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer, - int rx_buffer_pgcnt) -{ - unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; - struct page *page = rx_buffer->page; - - /* avoid re-using remote and pfmemalloc pages */ - if (!dev_page_is_reusable(page)) - return false; - -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) - return false; -#endif - - /* If we have drained the page fragment pool we need to update - * the pagecnt_bias and page count so that we fully restock the - * number of references the driver holds. 
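The icssg_prueth probe fixes above restore the usual unwind discipline: every acquisition gets a label, and the labels run in exact reverse order of acquisition, so a failure at step N releases steps N-1 back to 1 and nothing else. A generic stand-alone model; malloc/free stand in for pruss_get(), the shared RAM region and the two IEP instances.

#include <stdlib.h>

static int probe(int fail_at)
{
        void *pruss, *shram, *iep0, *iep1;

        pruss = fail_at == 1 ? NULL : malloc(1);
        if (!pruss)
                return -1;
        shram = fail_at == 2 ? NULL : malloc(1);
        if (!shram)
                goto put_pruss;
        iep0 = fail_at == 3 ? NULL : malloc(1);
        if (!iep0)
                goto put_mem;
        iep1 = fail_at == 4 ? NULL : malloc(1);
        if (!iep1)
                goto put_iep0;

        free(iep1);               /* success: release in remove(), not here */
        free(iep0);
        free(shram);
        free(pruss);
        return 0;

put_iep0:
        free(iep0);
put_mem:
        free(shram);
put_pruss:
        free(pruss);
        return -1;
}

int main(void)
{
        for (int i = 0; i <= 4; i++)
                probe(i);         /* every failure point unwinds cleanly */
        return 0;
}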
- */ - if (unlikely(pagecnt_bias == 1)) { - page_ref_add(page, USHRT_MAX - 1); - rx_buffer->pagecnt_bias = USHRT_MAX; - } - - return true; -} - -/** - * wx_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: rx descriptor ring to store buffers on - * @old_buff: donor buffer to have page reused - * - * Synchronizes page for reuse by the adapter - **/ -static void wx_reuse_rx_page(struct wx_ring *rx_ring, - struct wx_rx_buffer *old_buff) -{ - u16 nta = rx_ring->next_to_alloc; - struct wx_rx_buffer *new_buff; - - new_buff = &rx_ring->rx_buffer_info[nta]; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - /* transfer page from old buffer to new buffer */ - new_buff->page = old_buff->page; - new_buff->page_dma = old_buff->page_dma; - new_buff->page_offset = old_buff->page_offset; - new_buff->pagecnt_bias = old_buff->pagecnt_bias; -} - static void wx_dma_sync_frag(struct wx_ring *rx_ring, struct wx_rx_buffer *rx_buffer) { @@ -270,8 +216,6 @@ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring, size, DMA_FROM_DEVICE); skip_sync: - rx_buffer->pagecnt_bias--; - return rx_buffer; } @@ -280,19 +224,9 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring, struct sk_buff *skb, int rx_buffer_pgcnt) { - if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { - /* hand second half of page back to the ring */ - wx_reuse_rx_page(rx_ring, rx_buffer); - } else { - if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) - /* the page has been released from the ring */ - WX_CB(skb)->page_released = true; - else - page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); - - __page_frag_cache_drain(rx_buffer->page, - rx_buffer->pagecnt_bias); - } + if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) + /* the page has been released from the ring */ + WX_CB(skb)->page_released = true; /* clear contents of rx_buffer */ rx_buffer->page = NULL; @@ -335,11 +269,12 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, if (size <= WX_RXBUFFER_256) { memcpy(__skb_put(skb, size), page_addr, ALIGN(size, sizeof(long))); - rx_buffer->pagecnt_bias++; - + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true); return skb; } + skb_mark_for_recycle(skb); + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) WX_CB(skb)->dma = rx_buffer->dma; @@ -382,8 +317,6 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring, bi->page_dma = dma; bi->page = page; bi->page_offset = 0; - page_ref_add(page, USHRT_MAX - 1); - bi->pagecnt_bias = USHRT_MAX; return true; } @@ -723,7 +656,6 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector, /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_rx_buff_failed++; - rx_buffer->pagecnt_bias++; break; } @@ -1972,11 +1904,11 @@ void wx_reset_interrupt_capability(struct wx *wx) if (!pdev->msi_enabled && !pdev->msix_enabled) return; - pci_free_irq_vectors(wx->pdev); if (pdev->msix_enabled) { kfree(wx->msix_entries); wx->msix_entries = NULL; } + pci_free_irq_vectors(wx->pdev); } EXPORT_SYMBOL(wx_reset_interrupt_capability); @@ -2248,8 +2180,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring) /* free resources associated with mapping */ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); - __page_frag_cache_drain(rx_buffer->page, - rx_buffer->pagecnt_bias); i++; rx_buffer++; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 165e82de772e..83f9bb7b3c22 
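The libwx rework above drops the hand-rolled pagecnt_bias accounting in favor of page_pool recycling: small frames are copied into the skb and the page returns to the pool immediately, while larger frames attach the page and rely on skb_mark_for_recycle() to hand it back when the skb is consumed. A minimal model of that branch; the pool and skb are mocked as counters and plain buffers.

#include <stdio.h>
#include <string.h>

#define WX_RXBUFFER_256 256u  /* copy-break threshold, as in the driver */

static int pool_returns, skb_recycles;

static void build_skb(unsigned int size, const char *page, char *linear)
{
        if (size <= WX_RXBUFFER_256) {
                memcpy(linear, page, size);  /* copy-break: skb owns a copy */
                pool_returns++;              /* page_pool_put_full_page()   */
                return;
        }
        skb_recycles++;                      /* skb_mark_for_recycle()      */
}

int main(void)
{
        char page[2048] = { 0 }, linear[2048];

        build_skb(64, page, linear);
        build_skb(1500, page, linear);
        printf("pool_returns=%d skb_recycles=%d\n", pool_returns, skb_recycles);
        return 0;
}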
100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -787,7 +787,6 @@ struct wx_rx_buffer { dma_addr_t page_dma; struct page *page; unsigned int page_offset; - u16 pagecnt_bias; }; struct wx_queue_stats { diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index 3d43f808c86b..8db804543e66 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -121,10 +121,8 @@ static int ngbe_sw_init(struct wx *wx) /* PCI config space info */ err = wx_sw_init(wx); - if (err < 0) { - wx_err(wx, "read of internal subsystem device id failed\n"); + if (err < 0) return err; - } /* mac type, phy type , oem type */ ngbe_init_type_code(wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 70f0b5c01dac..526250102db2 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -364,10 +364,8 @@ static int txgbe_sw_init(struct wx *wx) /* PCI config space info */ err = wx_sw_init(wx); - if (err < 0) { - wx_err(wx, "read of internal subsystem device id failed\n"); + if (err < 0) return err; - } txgbe_init_type_code(wx); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 82d0d44b2b02..bf6e33990490 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -822,7 +822,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { /* Tx Full Checksum Offload Enabled */ cur_p->app0 |= 2; - } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { + } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { csum_start_off = skb_transport_offset(skb); csum_index_off = csum_start_off + skb->csum_offset; /* Tx Partial Checksum Offload Enabled */ |
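The xilinx_axienet hunk above fixes a copy-paste bug: the partial-checksum transmit path tested the receive capability bit (XAE_FEATURE_PARTIAL_RX_CSUM) instead of the transmit one. A small demonstration of how the wrong bit misclassifies the offload mode; the bit positions are illustrative.

#include <stdio.h>

#define XAE_FEATURE_PARTIAL_TX_CSUM (1u << 0)  /* illustrative bit values */
#define XAE_FEATURE_PARTIAL_RX_CSUM (1u << 1)
#define XAE_FEATURE_FULL_TX_CSUM    (1u << 2)

static const char *tx_csum_mode(unsigned int features)
{
        if (features & XAE_FEATURE_FULL_TX_CSUM)
                return "full";
        if (features & XAE_FEATURE_PARTIAL_TX_CSUM)  /* was ..._RX_CSUM */
                return "partial";
        return "none";
}

int main(void)
{
        /* an RX-only capability must not enable partial TX offload */
        printf("%s\n", tx_csum_mode(XAE_FEATURE_PARTIAL_RX_CSUM)); /* none */
        printf("%s\n", tx_csum_mode(XAE_FEATURE_PARTIAL_TX_CSUM)); /* partial */
        return 0;
}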