Diffstat (limited to 'drivers/net/ethernet/intel/i40e')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h             |  1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h  |  8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c      | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c     | 12
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c        | 85
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h   |  3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c         | 17
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_trace.h       | 49
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c        | 27
9 files changed, 177 insertions(+), 51 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 9a60d6b207f7..60e351665c70 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -992,6 +992,7 @@ struct i40e_q_vector {
         struct rcu_head rcu;    /* to avoid race with update stats on free */
         char name[I40E_INT_NAME_STR_LEN];
         bool arm_wb_state;
+        int irq_num;            /* IRQ assigned to this q_vector */
 } ____cacheline_internodealigned_in_smp;
 
 /* lan device */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 60f9e0a6aaca..3357d65a906b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1795,9 +1795,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
 
 /* Set Loopback mode (0x0618) */
 struct i40e_aqc_set_lb_mode {
         __le16  lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL    0x01
-#define I40E_AQ_LB_PHY_REMOTE   0x02
-#define I40E_AQ_LB_MAC_LOCAL    0x04
+#define I40E_LEGACY_LOOPBACK_NVM_VER    0x6000
+#define I40E_AQ_LB_MAC_LOCAL            0x01
+#define I40E_AQ_LB_PHY_LOCAL            0x05
+#define I40E_AQ_LB_PHY_REMOTE           0x06
+#define I40E_AQ_LB_MAC_LOCAL_LEGACY     0x04
         u8      reserved[14];
 };
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 4f01e2a6b6bb..8f764ff5c990 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1831,6 +1831,32 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
 }
 
 /**
+ * i40e_aq_set_mac_loopback
+ * @hw: pointer to the HW struct
+ * @ena_lpbk: Enable or Disable loopback
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable/disable loopback on a given port
+ */
+i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
+                                     struct i40e_asq_cmd_details *cmd_details)
+{
+        struct i40e_aq_desc desc;
+        struct i40e_aqc_set_lb_mode *cmd =
+                (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+
+        i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
+        if (ena_lpbk) {
+                if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
+                        cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
+                else
+                        cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL);
+        }
+
+        return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+}
+
+/**
  * i40e_aq_set_phy_debug
  * @hw: pointer to the hw struct
  * @cmd_flags: debug command flags
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index f6fa63e4253c..887a735fe2a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -154,7 +154,7 @@ __i40e_add_ethtool_stats(u64 **data, void *pointer,
  * @ring: the ring to copy
  *
  * Queue statistics must be copied while protected by
- * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats.
+ * u64_stats_fetch_begin, so we can't directly use i40e_add_ethtool_stats.
  * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the
  * ring pointer is null, zero out the queue stat values and update the data
  * pointer. Otherwise safely copy the stats from the ring into the supplied
@@ -172,16 +172,16 @@ i40e_add_queue_stats(u64 **data, struct i40e_ring *ring)
 
         /* To avoid invalid statistics values, ensure that we keep retrying
          * the copy until we get a consistent value according to
-         * u64_stats_fetch_retry_irq. But first, make sure our ring is
+         * u64_stats_fetch_retry. But first, make sure our ring is
          * non-null before attempting to access its syncp.
          */
         do {
-                start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
+                start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
                 for (i = 0; i < size; i++) {
                         i40e_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
                 }
-        } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
+        } while (ring && u64_stats_fetch_retry(&ring->syncp, start));
 
         /* Once we successfully copy the stats in, update the data pointer */
         *data += size;
@@ -1287,8 +1287,10 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
          * trying to set something that we do not support.
          */
         if (memcmp(&copy_ks.base, &safe_ks.base,
-                   sizeof(struct ethtool_link_settings)))
+                   sizeof(struct ethtool_link_settings))) {
+                netdev_err(netdev, "Only speed and autoneg are supported.\n");
                 return -EOPNOTSUPP;
+        }
 
         while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
                 timeout--;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6416322d7c18..94feea3b2599 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -419,10 +419,10 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
         unsigned int start;
 
         do {
-                start = u64_stats_fetch_begin_irq(&ring->syncp);
+                start = u64_stats_fetch_begin(&ring->syncp);
                 packets = ring->stats.packets;
                 bytes = ring->stats.bytes;
-        } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+        } while (u64_stats_fetch_retry(&ring->syncp, start));
 
         stats->tx_packets += packets;
         stats->tx_bytes += bytes;
@@ -472,10 +472,10 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
                 if (!ring)
                         continue;
                 do {
-                        start = u64_stats_fetch_begin_irq(&ring->syncp);
+                        start = u64_stats_fetch_begin(&ring->syncp);
                         packets = ring->stats.packets;
                         bytes = ring->stats.bytes;
-                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+                } while (u64_stats_fetch_retry(&ring->syncp, start));
 
                 stats->rx_packets += packets;
                 stats->rx_bytes += bytes;
@@ -897,10 +897,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                         continue;
 
                 do {
-                        start = u64_stats_fetch_begin_irq(&p->syncp);
+                        start = u64_stats_fetch_begin(&p->syncp);
                         packets = p->stats.packets;
                         bytes = p->stats.bytes;
-                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+                } while (u64_stats_fetch_retry(&p->syncp, start));
                 tx_b += bytes;
                 tx_p += packets;
                 tx_restart += p->tx_stats.restart_queue;
@@ -915,10 +915,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                         continue;
 
                 do {
-                        start = u64_stats_fetch_begin_irq(&p->syncp);
+                        start = u64_stats_fetch_begin(&p->syncp);
                         packets = p->stats.packets;
                         bytes = p->stats.bytes;
-                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+                } while (u64_stats_fetch_retry(&p->syncp, start));
                 rx_b += bytes;
                 rx_p += packets;
                 rx_buf += p->rx_stats.alloc_buff_failed;
@@ -935,10 +935,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                         continue;
 
                 do {
-                        start = u64_stats_fetch_begin_irq(&p->syncp);
+                        start = u64_stats_fetch_begin(&p->syncp);
                         packets = p->stats.packets;
                         bytes = p->stats.bytes;
-                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+                } while (u64_stats_fetch_retry(&p->syncp, start));
                 tx_b += bytes;
                 tx_p += packets;
                 tx_restart += p->tx_stats.restart_queue;
@@ -3694,6 +3694,24 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
 }
 
 /**
+ * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
+ *
+ * @vsi: VSI to calculate rx_buf_len from
+ */
+static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
+{
+        if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+                return I40E_RXBUFFER_2048;
+
+#if (PAGE_SIZE < 8192)
+        if (!I40E_2K_TOO_SMALL_WITH_PADDING && vsi->netdev->mtu <= ETH_DATA_LEN)
+                return I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+#endif
+
+        return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
+}
+
+/**
  * i40e_vsi_configure_rx - Configure the VSI for Rx
  * @vsi: the VSI being configured
  *
@@ -3704,20 +3722,14 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
         int err = 0;
         u16 i;
 
-        if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
-                vsi->max_frame = I40E_MAX_RXBUFFER;
-                vsi->rx_buf_len = I40E_RXBUFFER_2048;
+        vsi->max_frame = I40E_MAX_RXBUFFER;
+        vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
+
 #if (PAGE_SIZE < 8192)
-        } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
-                   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+        if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING &&
+            vsi->netdev->mtu <= ETH_DATA_LEN)
                 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
-                vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
 #endif
-        } else {
-                vsi->max_frame = I40E_MAX_RXBUFFER;
-                vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
-                                  I40E_RXBUFFER_2048;
-        }
 
         /* set up individual rings */
         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -4123,6 +4135,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
                 }
 
                 /* register for affinity change notifications */
+                q_vector->irq_num = irq_num;
                 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
                 q_vector->affinity_notify.release = i40e_irq_affinity_release;
                 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
@@ -12937,6 +12950,29 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
 }
 
 /**
+ * i40e_set_loopback - turn on/off loopback mode on underlying PF
+ * @vsi: ptr to VSI
+ * @ena: flag to indicate the on/off setting
+ */
+static int i40e_set_loopback(struct i40e_vsi *vsi, bool ena)
+{
+        bool if_running = netif_running(vsi->netdev) &&
+                          !test_and_set_bit(__I40E_VSI_DOWN, vsi->state);
+        int ret;
+
+        if (if_running)
+                i40e_down(vsi);
+
+        ret = i40e_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
+        if (ret)
+                netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
+        if (if_running)
+                i40e_up(vsi);
+
+        return ret;
+}
+
+/**
  * i40e_set_features - set the netdev feature flags
  * @netdev: ptr to the netdev being adjusted
  * @features: the feature set that the stack is suggesting
@@ -12976,6 +13012,9 @@ static int i40e_set_features(struct net_device *netdev,
         if (need_reset)
                 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
 
+        if ((features ^ netdev->features) & NETIF_F_LOOPBACK)
+                return i40e_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
+
         return 0;
 }
 
@@ -13282,7 +13321,7 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
         int i;
 
         /* Don't allow frames that span over multiple buffers */
-        if (frame_size > vsi->rx_buf_len) {
+        if (frame_size > i40e_calculate_vsi_rx_buf_len(vsi)) {
                 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
                 return -EINVAL;
         }
@@ -13738,7 +13777,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
         if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
                 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
 
-        netdev->hw_features |= hw_features;
+        netdev->hw_features |= hw_features | NETIF_F_LOOPBACK;
 
         netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
         netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index ebdcde6f1aeb..9a71121420c3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -105,6 +105,9 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
                                 struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
                                   bool atomic_reset);
+i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw,
+                                     bool ena_lpbk,
+                                     struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
                                      struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index ffea0c9c82f1..c37abbb3cd06 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -347,23 +347,12 @@ static int i40e_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 {
         struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
         struct i40e_hw *hw = &pf->hw;
-        u64 adj, freq, diff;
-        int neg_adj = 0;
-
-        if (scaled_ppm < 0) {
-                neg_adj = 1;
-                scaled_ppm = -scaled_ppm;
-        }
+        u64 adj, base_adj;
 
         smp_mb(); /* Force any pending update before accessing. */
 
-        freq = I40E_PTP_40GB_INCVAL * READ_ONCE(pf->ptp_adj_mult);
-        diff = mul_u64_u64_div_u64(freq, (u64)scaled_ppm,
-                                   1000000ULL << 16);
+        base_adj = I40E_PTP_40GB_INCVAL * READ_ONCE(pf->ptp_adj_mult);
 
-        if (neg_adj)
-                adj = I40E_PTP_40GB_INCVAL - diff;
-        else
-                adj = I40E_PTP_40GB_INCVAL + diff;
+        adj = adjust_by_scaled_ppm(base_adj, scaled_ppm);
 
         wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
         wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index b5b12299931f..79d587ad5409 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -55,6 +55,55 @@
  * being built from shared code.
  */
 
+#define NO_DEV "(i40e no_device)"
+
+TRACE_EVENT(i40e_napi_poll,
+
+        TP_PROTO(struct napi_struct *napi, struct i40e_q_vector *q, int budget,
+                 int budget_per_ring, unsigned int rx_cleaned, unsigned int tx_cleaned,
+                 bool rx_clean_complete, bool tx_clean_complete),
+
+        TP_ARGS(napi, q, budget, budget_per_ring, rx_cleaned, tx_cleaned,
+                rx_clean_complete, tx_clean_complete),
+
+        TP_STRUCT__entry(
+                __field(int, budget)
+                __field(int, budget_per_ring)
+                __field(unsigned int, rx_cleaned)
+                __field(unsigned int, tx_cleaned)
+                __field(int, rx_clean_complete)
+                __field(int, tx_clean_complete)
+                __field(int, irq_num)
+                __field(int, curr_cpu)
+                __string(qname, q->name)
+                __string(dev_name, napi->dev ? napi->dev->name : NO_DEV)
+                __bitmask(irq_affinity, nr_cpumask_bits)
+        ),
+
+        TP_fast_assign(
+                __entry->budget = budget;
+                __entry->budget_per_ring = budget_per_ring;
+                __entry->rx_cleaned = rx_cleaned;
+                __entry->tx_cleaned = tx_cleaned;
+                __entry->rx_clean_complete = rx_clean_complete;
+                __entry->tx_clean_complete = tx_clean_complete;
+                __entry->irq_num = q->irq_num;
+                __entry->curr_cpu = get_cpu();
+                __assign_str(qname, q->name);
+                __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV);
+                __assign_bitmask(irq_affinity, cpumask_bits(&q->affinity_mask),
+                                 nr_cpumask_bits);
+        ),
+
+        TP_printk("i40e_napi_poll on dev %s q %s irq %d irq_mask %s curr_cpu %d "
+                  "budget %d bpr %d rx_cleaned %u tx_cleaned %u "
+                  "rx_clean_complete %d tx_clean_complete %d",
+                  __get_str(dev_name), __get_str(qname), __entry->irq_num,
+                  __get_bitmask(irq_affinity), __entry->curr_cpu, __entry->budget,
+                  __entry->budget_per_ring, __entry->rx_cleaned, __entry->tx_cleaned,
+                  __entry->rx_clean_complete, __entry->tx_clean_complete)
+);
+
 /* Events related to a vsi & ring */
 DECLARE_EVENT_CLASS(
         i40e_tx_template,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b97c95f89fa0..924f972b91fa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -923,11 +923,13 @@ void i40e_detect_recover_hung(struct i40e_vsi *vsi)
  * @vsi: the VSI we care about
  * @tx_ring: Tx ring to clean
  * @napi_budget: Used to determine if we are in netpoll
+ * @tx_cleaned: Out parameter set to the number of TXes cleaned
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
-                              struct i40e_ring *tx_ring, int napi_budget)
+                              struct i40e_ring *tx_ring, int napi_budget,
+                              unsigned int *tx_cleaned)
 {
         int i = tx_ring->next_to_clean;
         struct i40e_tx_buffer *tx_buf;
@@ -1048,6 +1050,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
                 }
         }
 
+        *tx_cleaned = total_packets;
         return !!budget;
 }
 
@@ -2422,6 +2425,7 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
  * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  * @rx_ring: rx descriptor ring to transact packets on
  * @budget: Total limit on number of packets to process
+ * @rx_cleaned: Out parameter of the number of packets processed
  *
  * This function provides a "bounce buffer" approach to Rx interrupt
  * processing. The advantage to this is that on systems that have
@@ -2430,7 +2434,8 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
  *
  * Returns amount of work completed
  **/
-static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
+                             unsigned int *rx_cleaned)
 {
         unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
         u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
@@ -2567,6 +2572,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 
         i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
 
+        *rx_cleaned = total_rx_packets;
+
         /* guarantee a trip back through this routine if there was a failure */
         return failure ? budget : (int)total_rx_packets;
 }
@@ -2689,6 +2696,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
                 container_of(napi, struct i40e_q_vector, napi);
         struct i40e_vsi *vsi = q_vector->vsi;
         struct i40e_ring *ring;
+        bool tx_clean_complete = true;
+        bool rx_clean_complete = true;
+        unsigned int tx_cleaned = 0;
+        unsigned int rx_cleaned = 0;
         bool clean_complete = true;
         bool arm_wb = false;
         int budget_per_ring;
@@ -2705,10 +2716,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
         i40e_for_each_ring(ring, q_vector->tx) {
                 bool wd = ring->xsk_pool ?
                           i40e_clean_xdp_tx_irq(vsi, ring) :
-                          i40e_clean_tx_irq(vsi, ring, budget);
+                          i40e_clean_tx_irq(vsi, ring, budget, &tx_cleaned);
 
                 if (!wd) {
-                        clean_complete = false;
+                        clean_complete = tx_clean_complete = false;
                         continue;
                 }
                 arm_wb |= ring->arm_wb;
@@ -2733,14 +2744,18 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
         i40e_for_each_ring(ring, q_vector->rx) {
                 int cleaned = ring->xsk_pool ?
                               i40e_clean_rx_irq_zc(ring, budget_per_ring) :
-                              i40e_clean_rx_irq(ring, budget_per_ring);
+                              i40e_clean_rx_irq(ring, budget_per_ring, &rx_cleaned);
 
                 work_done += cleaned;
                 /* if we clean as many as budgeted, we must not be done */
                 if (cleaned >= budget_per_ring)
-                        clean_complete = false;
+                        clean_complete = rx_clean_complete = false;
         }
 
+        if (!i40e_enabled_xdp_vsi(vsi))
+                trace_i40e_napi_poll(napi, q_vector, budget, budget_per_ring, rx_cleaned,
+                                     tx_cleaned, rx_clean_complete, tx_clean_complete);
+
         /* If work not completed, return budget and polling will return */
         if (!clean_complete) {
                 int cpu_id = smp_processor_id();
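
Two editor's notes on helpers used by the hunks above. First, the i40e_ptp.c hunk replaces the open-coded sign handling and mul_u64_u64_div_u64() scaling with adjust_by_scaled_ppm(). The stand-alone, user-space sketch below only illustrates the equivalent arithmetic; the function name with the _sketch suffix, the sample base value, and the use of a 128-bit intermediate are assumptions for the example, not the kernel implementation.

/*
 * Illustration only: scaled_ppm is a parts-per-million value in 16.16
 * fixed point, so the adjustment is base * |scaled_ppm| / (1000000 << 16),
 * added or subtracted according to the sign -- the same math the removed
 * neg_adj / mul_u64_u64_div_u64() code open-coded.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t adjust_by_scaled_ppm_sketch(uint64_t base, long scaled_ppm)
{
        int neg = scaled_ppm < 0;
        uint64_t diff;

        if (neg)
                scaled_ppm = -scaled_ppm;

        /* 128-bit intermediate (GCC/Clang extension) avoids overflow */
        diff = (uint64_t)((unsigned __int128)base * (uint64_t)scaled_ppm /
                          (1000000ULL << 16));

        return neg ? base - diff : base + diff;
}

int main(void)
{
        uint64_t base = 0x200000ULL;    /* stand-in for the base increment */

        printf("+10 ppm: %llu\n",
               (unsigned long long)adjust_by_scaled_ppm_sketch(base, 10L << 16));
        printf("-10 ppm: %llu\n",
               (unsigned long long)adjust_by_scaled_ppm_sketch(base, -(10L << 16)));
        return 0;
}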
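Second, the u64_stats_fetch_begin()/u64_stats_fetch_retry() calls that replace the *_irq variants throughout i40e_main.c and i40e_ethtool.c follow the kernel's generic u64_stats reader pattern. A minimal kernel-style sketch of that reader loop is shown below; the demo_* identifiers are invented for illustration and are not taken from the driver.

/* Sketch of the reader side of the u64_stats seqcount pattern;
 * demo_* names are illustrative only.
 */
#include <linux/u64_stats_sync.h>

struct demo_ring_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
};

static void demo_read_stats(const struct demo_ring_stats *s,
                            u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                /* begin a consistent snapshot (a seqcount read on 32-bit) */
                start = u64_stats_fetch_begin(&s->syncp);
                *packets = s->packets;
                *bytes = s->bytes;
                /* retry if a writer updated the counters in the meantime */
        } while (u64_stats_fetch_retry(&s->syncp, start));
}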