Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_main.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 458
1 file changed, 303 insertions(+), 155 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 8c30eea61b6d..a9a7f8b52140 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -130,12 +130,17 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
 	ice_for_each_txq(vsi, i) {
 		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
+		struct ice_ring_stats *ring_stats;
 
 		if (!tx_ring)
 			continue;
 		if (ice_ring_ch_enabled(tx_ring))
 			continue;
 
+		ring_stats = tx_ring->ring_stats;
+		if (!ring_stats)
+			continue;
+
 		if (tx_ring->desc) {
 			/* If packet counter has not changed the queue is
 			 * likely stalled, so force an interrupt for this
@@ -144,8 +149,8 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
 			 * prev_pkt would be negative if there was no
 			 * pending work.
 			 */
-			packets = tx_ring->stats.pkts & INT_MAX;
-			if (tx_ring->tx_stats.prev_pkt == packets) {
+			packets = ring_stats->stats.pkts & INT_MAX;
+			if (ring_stats->tx_stats.prev_pkt == packets) {
 				/* Trigger sw interrupt to revive the queue */
 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
 				continue;
@@ -155,7 +160,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
 			 * to ice_get_tx_pending()
 			 */
 			smp_rmb();
-			tx_ring->tx_stats.prev_pkt =
+			ring_stats->tx_stats.prev_pkt =
 				ice_get_tx_pending(tx_ring) ? packets : -1;
 		}
 	}
@@ -299,20 +304,6 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
 }
 
 /**
- * ice_get_devlink_port - Get devlink port from netdev
- * @netdev: the netdevice structure
- */
-static struct devlink_port *ice_get_devlink_port(struct net_device *netdev)
-{
-	struct ice_pf *pf = ice_netdev_to_pf(netdev);
-
-	if (!ice_is_switchdev_running(pf))
-		return NULL;
-
-	return &pf->devlink_port;
-}
-
-/**
  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
 *
@@ -1120,8 +1111,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
 	if (link_up == old_link && link_speed == old_link_speed)
 		return 0;
 
-	if (!ice_is_e810(&pf->hw))
-		ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
 
 	if (ice_is_dcb_active(pf)) {
 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
@@ -2399,8 +2389,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
 		return -EBUSY;
 	}
 
-	ice_unplug_aux_dev(pf);
-
 	switch (reset) {
 	case ICE_RESET_PFR:
 		set_bit(ICE_PFR_REQ, pf->state);
@@ -2562,13 +2550,20 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
 	ice_for_each_xdp_txq(vsi, i) {
 		u16 xdp_q_idx = vsi->alloc_txq + i;
+		struct ice_ring_stats *ring_stats;
 		struct ice_tx_ring *xdp_ring;
 
 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
-
 		if (!xdp_ring)
 			goto free_xdp_rings;
 
+		ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+		if (!ring_stats) {
+			ice_free_tx_ring(xdp_ring);
+			goto free_xdp_rings;
+		}
+
+		xdp_ring->ring_stats = ring_stats;
 		xdp_ring->q_index = xdp_q_idx;
 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
 		xdp_ring->vsi = vsi;
@@ -2591,9 +2586,13 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
 	return 0;
 
 free_xdp_rings:
-	for (; i >= 0; i--)
-		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+	for (; i >= 0; i--) {
+		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
+			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
+			vsi->xdp_rings[i]->ring_stats = NULL;
 			ice_free_tx_ring(vsi->xdp_rings[i]);
+		}
+	}
 
 	return -ENOMEM;
 }
@@ -2794,6 +2793,8 @@ free_qmap:
 			synchronize_rcu();
 			ice_free_tx_ring(vsi->xdp_rings[i]);
 		}
+		kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
+		vsi->xdp_rings[i]->ring_stats = NULL;
 		kfree_rcu(vsi->xdp_rings[i], rcu);
 		vsi->xdp_rings[i] = NULL;
 	}
@@ -3095,7 +3096,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
 
 	if (oicr & PFINT_OICR_TSYN_TX_M) {
 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
-		ice_ptp_process_ts(pf);
+		if (!hw->reset_ongoing)
+			ret = IRQ_WAKE_THREAD;
 	}
 
 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3130,7 +3132,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
 			ice_service_task_schedule(pf);
 		}
 	}
-	ret = IRQ_HANDLED;
+	if (!ret)
+		ret = IRQ_HANDLED;
 
 	ice_service_task_schedule(pf);
 	ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -3139,6 +3142,24 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
 }
 
 /**
+ * ice_misc_intr_thread_fn - misc interrupt thread function
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
+{
+	struct ice_pf *pf = data;
+
+	if (ice_is_reset_in_progress(pf->state))
+		return IRQ_HANDLED;
+
+	while (!ice_ptp_process_ts(pf))
+		usleep_range(50, 100);
+
+	return IRQ_HANDLED;
+}
+
+/**
  * ice_dis_ctrlq_interrupts - disable control queue interrupts
  * @hw: pointer to HW structure
  */
@@ -3250,10 +3271,12 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
 	pf->num_avail_sw_msix -= 1;
 	pf->oicr_idx = (u16)oicr_idx;
 
-	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
-			       ice_misc_intr, 0, pf->int_name, pf);
+	err = devm_request_threaded_irq(dev,
+					pf->msix_entries[pf->oicr_idx].vector,
+					ice_misc_intr, ice_misc_intr_thread_fn,
+					0, pf->int_name, pf);
 	if (err) {
-		dev_err(dev, "devm_request_irq for %s failed: %d\n",
+		dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
 			pf->int_name, err);
 		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
 		pf->num_avail_sw_msix += 1;
@@ -3290,7 +3313,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
 	ice_for_each_q_vector(vsi, v_idx)
 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
-			       ice_napi_poll, NAPI_POLL_WEIGHT);
+			       ice_napi_poll);
 }
 
 /**
@@ -3393,6 +3416,11 @@ static void ice_set_netdev_features(struct net_device *netdev)
 	if (is_dvm_ena)
 		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
 				       NETIF_F_HW_VLAN_STAG_TX;
+
+	/* Leave CRC / FCS stripping enabled by default, but allow the value to
+	 * be changed at runtime
+	 */
+	netdev->hw_features |= NETIF_F_RXFCS;
 }
 
 /**
@@ -3925,87 +3953,134 @@ static int ice_init_pf(struct ice_pf *pf)
 }
 
 /**
+ * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
+ * @pf: board private structure
+ * @v_remain: number of remaining MSI-X vectors to be distributed
+ *
+ * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled.
+ * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
+ * remaining vectors.
+ */
+static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
+{
+	int v_rdma;
+
+	if (!ice_is_rdma_ena(pf)) {
+		pf->num_lan_msix = v_remain;
+		return;
+	}
+
+	/* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
+	v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
+
+	if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
+		dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
+		clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+
+		pf->num_rdma_msix = 0;
+		pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
+	} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
+		   (v_remain - v_rdma < v_rdma)) {
+		/* Support minimum RDMA and give remaining vectors to LAN MSIX */
+		pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
+		pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
+	} else {
+		/* Split remaining MSIX with RDMA after accounting for AEQ MSIX
+		 */
+		pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
+				    ICE_RDMA_NUM_AEQ_MSIX;
+		pf->num_lan_msix = v_remain - pf->num_rdma_msix;
+	}
+}
+
+/**
  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
  * @pf: board private structure
  *
- * compute the number of MSIX vectors required (v_budget) and request from
- * the OS. Return the number of vectors reserved or negative on failure
+ * Compute the number of MSIX vectors wanted and request from the OS. Adjust
+ * device usage if there are not enough vectors. Return the number of vectors
+ * reserved or negative on failure.
  */
 static int ice_ena_msix_range(struct ice_pf *pf)
 {
-	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
+	int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
 	struct device *dev = ice_pf_to_dev(pf);
-	int needed, err, i;
+	int err, i;
 
-	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
+	hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
 	num_cpus = num_online_cpus();
 
-	/* reserve for LAN miscellaneous handler */
-	needed = ICE_MIN_LAN_OICR_MSIX;
-	if (v_left < needed)
-		goto no_hw_vecs_left_err;
-	v_budget += needed;
-	v_left -= needed;
+	/* LAN miscellaneous handler */
+	v_other = ICE_MIN_LAN_OICR_MSIX;
 
-	/* reserve for flow director */
-	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
-		needed = ICE_FDIR_MSIX;
-		if (v_left < needed)
-			goto no_hw_vecs_left_err;
-		v_budget += needed;
-		v_left -= needed;
-	}
-
-	/* reserve for switchdev */
-	needed = ICE_ESWITCH_MSIX;
-	if (v_left < needed)
-		goto no_hw_vecs_left_err;
-	v_budget += needed;
-	v_left -= needed;
-
-	/* total used for non-traffic vectors */
-	v_other = v_budget;
-
-	/* reserve vectors for LAN traffic */
-	needed = num_cpus;
-	if (v_left < needed)
-		goto no_hw_vecs_left_err;
-	pf->num_lan_msix = needed;
-	v_budget += needed;
-	v_left -= needed;
-
-	/* reserve vectors for RDMA auxiliary driver */
+	/* Flow Director */
+	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
+		v_other += ICE_FDIR_MSIX;
+
+	/* switchdev */
+	v_other += ICE_ESWITCH_MSIX;
+
+	v_wanted = v_other;
+
+	/* LAN traffic */
+	pf->num_lan_msix = num_cpus;
+	v_wanted += pf->num_lan_msix;
+
+	/* RDMA auxiliary driver */
 	if (ice_is_rdma_ena(pf)) {
-		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
-		if (v_left < needed)
-			goto no_hw_vecs_left_err;
-		pf->num_rdma_msix = needed;
-		v_budget += needed;
-		v_left -= needed;
+		pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
+		v_wanted += pf->num_rdma_msix;
 	}
 
-	pf->msix_entries = devm_kcalloc(dev, v_budget,
+	if (v_wanted > hw_num_msix) {
+		int v_remain;
+
+		dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
+			 v_wanted, hw_num_msix);
+
+		if (hw_num_msix < ICE_MIN_MSIX) {
+			err = -ERANGE;
+			goto exit_err;
+		}
+
+		v_remain = hw_num_msix - v_other;
+		if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
+			v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
+			v_remain = ICE_MIN_LAN_TXRX_MSIX;
+		}
+
+		ice_reduce_msix_usage(pf, v_remain);
+		v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
+
+		dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
+			   pf->num_lan_msix);
+		if (ice_is_rdma_ena(pf))
+			dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
+				   pf->num_rdma_msix);
+	}
+
+	pf->msix_entries = devm_kcalloc(dev, v_wanted,
 					sizeof(*pf->msix_entries), GFP_KERNEL);
 	if (!pf->msix_entries) {
 		err = -ENOMEM;
 		goto exit_err;
 	}
 
-	for (i = 0; i < v_budget; i++)
+	for (i = 0; i < v_wanted; i++)
 		pf->msix_entries[i].entry = i;
 
 	/* actually reserve the vectors */
 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
-					 ICE_MIN_MSIX, v_budget);
+					 ICE_MIN_MSIX, v_wanted);
 	if (v_actual < 0) {
 		dev_err(dev, "unable to reserve MSI-X vectors\n");
 		err = v_actual;
 		goto msix_err;
 	}
 
-	if (v_actual < v_budget) {
+	if (v_actual < v_wanted) {
 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
-			 v_budget, v_actual);
+			 v_wanted, v_actual);
 
 		if (v_actual < ICE_MIN_MSIX) {
 			/* error if we can't get minimum vectors */
@@ -4014,38 +4089,11 @@ static int ice_ena_msix_range(struct ice_pf *pf)
 			goto msix_err;
 		} else {
 			int v_remain = v_actual - v_other;
-			int v_rdma = 0, v_min_rdma = 0;
 
-			if (ice_is_rdma_ena(pf)) {
-				/* Need at least 1 interrupt in addition to
-				 * AEQ MSIX
-				 */
-				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
-				v_min_rdma = ICE_MIN_RDMA_MSIX;
-			}
+			if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
+				v_remain = ICE_MIN_LAN_TXRX_MSIX;
 
-			if (v_actual == ICE_MIN_MSIX ||
-			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
-				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
-				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
-				pf->num_rdma_msix = 0;
-				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
-			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
-				   (v_remain - v_rdma < v_rdma)) {
-				/* Support minimum RDMA and give remaining
-				 * vectors to LAN MSIX
-				 */
-				pf->num_rdma_msix = v_min_rdma;
-				pf->num_lan_msix = v_remain - v_min_rdma;
-			} else {
-				/* Split remaining MSIX with RDMA after
-				 * accounting for AEQ MSIX
-				 */
-				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
-						    ICE_RDMA_NUM_AEQ_MSIX;
-				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
-			}
+			ice_reduce_msix_usage(pf, v_remain);
 
 			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
 				   pf->num_lan_msix);
@@ -4060,12 +4108,7 @@
 
 msix_err:
 	devm_kfree(dev, pf->msix_entries);
-	goto exit_err;
 
-no_hw_vecs_left_err:
-	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
-		needed, v_left);
-	err = -ERANGE;
 exit_err:
 	pf->num_rdma_msix = 0;
 	pf->num_lan_msix = 0;
@@ -4559,6 +4602,11 @@ static int ice_register_netdev(struct ice_pf *pf)
 	if (!vsi || !vsi->netdev)
 		return -EIO;
 
+	err = ice_devlink_create_pf_port(pf);
+	if (err)
+		goto err_devlink_create;
+
+	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
 	err = register_netdev(vsi->netdev);
 	if (err)
 		goto err_register_netdev;
@@ -4566,17 +4614,11 @@
 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
 	netif_carrier_off(vsi->netdev);
 	netif_tx_stop_all_queues(vsi->netdev);
-	err = ice_devlink_create_pf_port(pf);
-	if (err)
-		goto err_devlink_create;
-
-	devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
 
 	return 0;
 
-err_devlink_create:
-	unregister_netdev(vsi->netdev);
-	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
 err_register_netdev:
+	ice_devlink_destroy_pf_port(pf);
+err_devlink_create:
 	free_netdev(vsi->netdev);
 	vsi->netdev = NULL;
 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
@@ -4684,8 +4726,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 		ice_set_safe_mode_caps(hw);
 	}
 
-	hw->ucast_shared = true;
-
 	err = ice_init_pf(pf);
 	if (err) {
 		dev_err(dev, "ice_init_pf failed: %d\n", err);
@@ -4733,11 +4773,18 @@
 		goto err_init_pf_unroll;
 	}
 
+	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
+				     sizeof(*pf->vsi_stats), GFP_KERNEL);
+	if (!pf->vsi_stats) {
+		err = -ENOMEM;
+		goto err_init_vsi_unroll;
+	}
+
 	err = ice_init_interrupt_scheme(pf);
 	if (err) {
 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
 		err = -EIO;
-		goto err_init_vsi_unroll;
+		goto err_init_vsi_stats_unroll;
 	}
 
 	/* In case of MSIX we are going to setup the misc vector right here
@@ -4918,6 +4965,9 @@
 err_msix_misc_unroll:
 	ice_free_irq_msix_misc(pf);
 err_init_interrupt_unroll:
 	ice_clear_interrupt_scheme(pf);
+err_init_vsi_stats_unroll:
+	devm_kfree(dev, pf->vsi_stats);
+	pf->vsi_stats = NULL;
 err_init_vsi_unroll:
 	devm_kfree(dev, pf->vsi);
 err_init_pf_unroll:
@@ -5040,6 +5090,8 @@ static void ice_remove(struct pci_dev *pdev)
 			continue;
 		ice_vsi_free_q_vectors(pf->vsi[i]);
 	}
+	devm_kfree(&pdev->dev, pf->vsi_stats);
+	pf->vsi_stats = NULL;
 	ice_deinit_pf(pf);
 	ice_devlink_destroy_regions(pf);
 	ice_deinit_hw(&pf->hw);
@@ -5744,6 +5796,9 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
 					 NETIF_F_HW_VLAN_STAG_RX | \
 					 NETIF_F_HW_VLAN_STAG_TX)
 
+#define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
+					 NETIF_F_HW_VLAN_STAG_RX)
+
 #define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
 					 NETIF_F_HW_VLAN_STAG_FILTER)
 
@@ -5830,6 +5885,14 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features)
 					NETIF_F_HW_VLAN_STAG_TX);
 	}
 
+	if (!(netdev->features & NETIF_F_RXFCS) &&
+	    (features & NETIF_F_RXFCS) &&
+	    (features & NETIF_VLAN_STRIPPING_FEATURES) &&
+	    !ice_vsi_has_non_zero_vlans(np->vsi)) {
+		netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
+		features &= ~NETIF_VLAN_STRIPPING_FEATURES;
+	}
+
 	return features;
 }
 
@@ -5923,6 +5986,13 @@ ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
 	current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
 	requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
 	if (current_vlan_features ^ requested_vlan_features) {
+		if ((features & NETIF_F_RXFCS) &&
+		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+			dev_err(ice_pf_to_dev(vsi->back),
+				"To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
+			return -EIO;
+		}
+
 		err = ice_set_vlan_offload_features(vsi, features);
 		if (err)
 			return err;
@@ -6004,6 +6074,23 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
 	if (ret)
 		return ret;
 
+	/* Turn on receive of FCS aka CRC, and after setting this
+	 * flag the packet data will have the 4 byte CRC appended
+	 */
+	if (changed & NETIF_F_RXFCS) {
+		if ((features & NETIF_F_RXFCS) &&
+		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+			dev_err(ice_pf_to_dev(vsi->back),
+				"To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
+			return -EIO;
+		}
+
+		ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
+		ret = ice_down_up(vsi);
+		if (ret)
+			return ret;
+	}
+
 	if (changed & NETIF_F_NTUPLE) {
 		bool ena = !!(features & NETIF_F_NTUPLE);
 
@@ -6252,8 +6339,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
 		ice_print_link_msg(vsi, true);
 		netif_tx_start_all_queues(vsi->netdev);
 		netif_carrier_on(vsi->netdev);
-		if (!ice_is_e810(&pf->hw))
-			ice_ptp_link_change(pf, pf->hw.pf_id, true);
+		ice_ptp_link_change(pf, pf->hw.pf_id, true);
 	}
 
 	/* Perform an initial read of the statistics registers now to
@@ -6297,10 +6383,10 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
 	unsigned int start;
 
 	do {
-		start = u64_stats_fetch_begin_irq(syncp);
+		start = u64_stats_fetch_begin(syncp);
 		*pkts = stats.pkts;
 		*bytes = stats.bytes;
-	} while (u64_stats_fetch_retry_irq(syncp, start));
+	} while (u64_stats_fetch_retry(syncp, start));
 }
 
 /**
@@ -6322,14 +6408,16 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
 		u64 pkts = 0, bytes = 0;
 
 		ring = READ_ONCE(rings[i]);
-		if (!ring)
+		if (!ring || !ring->ring_stats)
 			continue;
-		ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+		ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
+					     ring->ring_stats->stats, &pkts,
+					     &bytes);
 		vsi_stats->tx_packets += pkts;
 		vsi_stats->tx_bytes += bytes;
-		vsi->tx_restart += ring->tx_stats.restart_q;
-		vsi->tx_busy += ring->tx_stats.tx_busy;
-		vsi->tx_linearize += ring->tx_stats.tx_linearize;
+		vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
+		vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
+		vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
 	}
 }
 
@@ -6339,6 +6427,7 @@
  */
 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
 {
+	struct rtnl_link_stats64 *net_stats, *stats_prev;
 	struct rtnl_link_stats64 *vsi_stats;
 	u64 pkts, bytes;
 	int i;
@@ -6363,12 +6452,16 @@
 	/* update Rx rings counters */
 	ice_for_each_rxq(vsi, i) {
 		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
+		struct ice_ring_stats *ring_stats;
 
-		ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+		ring_stats = ring->ring_stats;
+		ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
+					     ring_stats->stats, &pkts,
+					     &bytes);
 		vsi_stats->rx_packets += pkts;
 		vsi_stats->rx_bytes += bytes;
-		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
-		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
+		vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
+		vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
 	}
 
 	/* update XDP Tx rings counters */
@@ -6378,10 +6471,28 @@
 
 	rcu_read_unlock();
 
-	vsi->net_stats.tx_packets = vsi_stats->tx_packets;
-	vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
-	vsi->net_stats.rx_packets = vsi_stats->rx_packets;
-	vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
+	net_stats = &vsi->net_stats;
+	stats_prev = &vsi->net_stats_prev;
+
+	/* clear prev counters after reset */
+	if (vsi_stats->tx_packets < stats_prev->tx_packets ||
+	    vsi_stats->rx_packets < stats_prev->rx_packets) {
+		stats_prev->tx_packets = 0;
+		stats_prev->tx_bytes = 0;
+		stats_prev->rx_packets = 0;
+		stats_prev->rx_bytes = 0;
+	}
+
+	/* update netdev counters */
+	net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
+	net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
+	net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
+	net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
+
+	stats_prev->tx_packets = vsi_stats->tx_packets;
+	stats_prev->tx_bytes = vsi_stats->tx_bytes;
+	stats_prev->rx_packets = vsi_stats->rx_packets;
+	stats_prev->rx_bytes = vsi_stats->rx_bytes;
 
 	kfree(vsi_stats);
 }
@@ -6443,6 +6554,9 @@ void ice_update_pf_stats(struct ice_pf *pf)
 	prev_ps = &pf->stats_prev;
 	cur_ps = &pf->stats;
 
+	if (ice_is_reset_in_progress(pf->state))
+		pf->stat_prev_loaded = false;
+
 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
 			  &prev_ps->eth.rx_bytes,
 			  &cur_ps->eth.rx_bytes);
@@ -6651,14 +6765,13 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
 */
 int ice_down(struct ice_vsi *vsi)
 {
-	int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
+	int i, tx_err, rx_err, vlan_err = 0;
 
 	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
 
 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
 		vlan_err = ice_vsi_del_vlan_zero(vsi);
-		if (!ice_is_e810(&vsi->back->hw))
-			ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
+		ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
 		netif_carrier_off(vsi->netdev);
 		netif_tx_disable(vsi->netdev);
 	} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
@@ -6685,20 +6798,13 @@ int ice_down(struct ice_vsi *vsi)
 
 	ice_napi_disable_all(vsi);
 
-	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
-		link_err = ice_force_phys_link_state(vsi, false);
-		if (link_err)
-			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
-				   vsi->vsi_num, link_err);
-	}
-
 	ice_for_each_txq(vsi, i)
 		ice_clean_tx_ring(vsi->tx_rings[i]);
 
 	ice_for_each_rxq(vsi, i)
 		ice_clean_rx_ring(vsi->rx_rings[i]);
 
-	if (tx_err || rx_err || link_err || vlan_err) {
+	if (tx_err || rx_err || vlan_err) {
 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
 			   vsi->vsi_num, vsi->vsw->sw_id);
 		return -EIO;
@@ -6708,6 +6814,31 @@
 }
 
 /**
+ * ice_down_up - shutdown the VSI connection and bring it up
+ * @vsi: the VSI to be reconnected
+ */
+int ice_down_up(struct ice_vsi *vsi)
+{
+	int ret;
+
+	/* if DOWN already set, nothing to do */
+	if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
+		return 0;
+
+	ret = ice_down(vsi);
+	if (ret)
+		return ret;
+
+	ret = ice_up(vsi);
+	if (ret) {
+		netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
  * @vsi: VSI having resources allocated
  *
@@ -6860,6 +6991,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
 	if (err)
 		goto err_setup_rx;
 
+	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
 	if (vsi->type == ICE_VSI_PF) {
 		/* Notify the stack of the actual queue counts. */
 		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
@@ -8190,7 +8323,7 @@ static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
 		rule.rid = fltr->rid;
 		rule.rule_id = fltr->rule_id;
-		rule.vsi_handle = fltr->dest_id;
+		rule.vsi_handle = fltr->dest_vsi_handle;
 		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
 		if (status) {
 			if (status == -ENOENT)
@@ -8502,6 +8635,12 @@ static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
 	switch (mode) {
 	case TC_MQPRIO_MODE_CHANNEL:
 
+		if (pf->hw.port_info->is_custom_tx_enabled) {
+			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
+			return -EBUSY;
+		}
+		ice_tear_down_devlink_rate_tree(pf);
+
 		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
 		if (ret) {
 			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
@@ -8892,6 +9031,16 @@ int ice_stop(struct net_device *netdev)
 		return -EBUSY;
 	}
 
+	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
+		int link_err = ice_force_phys_link_state(vsi, false);
+
+		if (link_err) {
+			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+				   vsi->vsi_num, link_err);
+			return -EIO;
+		}
+	}
+
 	ice_vsi_close(vsi);
 
 	return 0;
@@ -9005,5 +9154,4 @@ static const struct net_device_ops ice_netdev_ops = {
 	.ndo_bpf = ice_xdp,
 	.ndo_xdp_xmit = ice_xdp_xmit,
 	.ndo_xsk_wakeup = ice_xsk_wakeup,
-	.ndo_get_devlink_port = ice_get_devlink_port,
 };
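Note on the ring_stats lifetime handling in the diff above: the per-ring statistics objects are reclaimed with kfree_rcu() so that ice_update_vsi_ring_stats(), which walks the rings inside an RCU read-side critical section, can never dereference freed memory. The following is a minimal sketch of that general publish/unpublish pattern, not code from the patch; the demo_* names are hypothetical, and only the RCU primitives (rcu_read_lock(), rcu_dereference(), RCU_INIT_POINTER(), kfree_rcu()) are real kernel APIs.

/* Sketch of RCU-deferred freeing of a per-ring stats object.
 * All demo_* identifiers are hypothetical illustrations.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_stats {
	u64 pkts;
	u64 bytes;
	struct rcu_head rcu;	/* required by kfree_rcu(ptr, rcu) */
};

struct demo_ring {
	struct demo_stats __rcu *stats;
};

/* Reader side: a stats dump may run concurrently with teardown. As long
 * as the dereference happens inside the RCU read-side critical section,
 * the object cannot be freed underneath it.
 */
static u64 demo_read_pkts(struct demo_ring *ring)
{
	struct demo_stats *s;
	u64 pkts = 0;

	rcu_read_lock();
	s = rcu_dereference(ring->stats);
	if (s)
		pkts = s->pkts;
	rcu_read_unlock();

	return pkts;
}

/* Writer side: unpublish the pointer, then defer the free until all
 * pre-existing readers have left their critical sections.
 */
static void demo_free_stats(struct demo_ring *ring)
{
	struct demo_stats *s = rcu_dereference_protected(ring->stats, true);

	RCU_INIT_POINTER(ring->stats, NULL);
	if (s)
		kfree_rcu(s, rcu);
}

The same discipline shows up in the XDP teardown paths of the diff, where each ring's ring_stats is handed to kfree_rcu() and the pointer cleared before the ring itself is reclaimed, keeping the RCU-protected readers in ice_update_vsi_ring_stats() safe across teardown and reset.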