Diffstat (limited to 'drivers/net/ethernet/intel')
56 files changed, 1127 insertions, 868 deletions
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 4a8013f20152..36418b510dde 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2848,7 +2848,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
 	nic = netdev_priv(netdev);
-	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
+	netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
 	nic->netdev = netdev;
 	nic->pdev = pdev;
 	nic->msg_enable = (1 << debug) - 1;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index d60e2016d03c..e6c8e6d5234f 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
 {
 	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
 		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
-	u16 max_ltr_enc_d = 0;	/* maximum LTR decoded by platform */
-	u16 lat_enc_d = 0;	/* latency decoded */
+	u32 max_ltr_enc_d = 0;	/* maximum LTR decoded by platform */
+	u32 lat_enc_d = 0;	/* latency decoded */
 	u16 lat_enc = 0;	/* latency encoded */
 
 	if (link) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 55c6bce5da61..18558a019353 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -852,6 +852,7 @@ struct i40e_vsi {
 	u64 tx_busy;
 	u64 tx_linearize;
 	u64 tx_force_wb;
+	u64 tx_stopped;
 	u64 rx_buf_failed;
 	u64 rx_page_failed;
 	u64 rx_page_reuse;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 6aefffd83615..2819e261a126 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -47,6 +47,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
 		case I40E_DEV_ID_1G_BASE_T_X722:
 		case I40E_DEV_ID_10G_BASE_T_X722:
 		case I40E_DEV_ID_SFP_I_X722:
+		case I40E_DEV_ID_SFP_X722_A:
 			hw->mac.type = I40E_MAC_X722;
 			break;
 		default:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index be7c6f34d45c..c9dcd6d92c83 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -309,10 +309,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 			 tx_ring->stats.bytes,
 			 tx_ring->tx_stats.restart_queue);
 		dev_info(&pf->pdev->dev,
-			 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+			 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
 			 i,
 			 tx_ring->tx_stats.tx_busy,
-			 tx_ring->tx_stats.tx_done_old);
+			 tx_ring->tx_stats.tx_done_old,
+			 tx_ring->tx_stats.tx_stopped);
 		dev_info(&pf->pdev->dev,
 			 " tx_rings[%i]: size = %i\n",
 			 i, tx_ring->size);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 1bcb0ec0f0c0..2610338002fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -33,6 +33,7 @@
 #define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
 #define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
 #define I40E_DEV_ID_SFP_I_X722		0x37D3
+#define I40E_DEV_ID_SFP_X722_A		0x0DDA
 
 #endif /* _I40E_DEVIDS_H_ */
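The e100.c hunk above is part of the tree-wide netif_napi_add() API split: drivers that pass a non-default poll weight move to netif_napi_add_weight(), leaving plain netif_napi_add() for the default weight. A minimal sketch of the conversion against a hypothetical driver (names invented for illustration):

	#include <linux/netdevice.h>

	struct my_priv {
		struct napi_struct napi;
	};

	static int my_poll(struct napi_struct *napi, int budget)
	{
		return 0; /* Rx/Tx completion work would go here */
	}

	static void my_napi_register(struct net_device *netdev, struct my_priv *p)
	{
		/* was: netif_napi_add(netdev, &p->napi, my_poll, 16); */
		netif_napi_add_weight(netdev, &p->napi, my_poll, 16);
	}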
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index e48499624d22..610f00cbaff9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -293,12 +293,14 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
 	I40E_VSI_STAT("tx_linearize", tx_linearize),
 	I40E_VSI_STAT("tx_force_wb", tx_force_wb),
 	I40E_VSI_STAT("tx_busy", tx_busy),
+	I40E_VSI_STAT("tx_stopped", tx_stopped),
 	I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
 	I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
 	I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
 	I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
 	I40E_VSI_STAT("rx_cache_waive", rx_page_waive),
 	I40E_VSI_STAT("rx_cache_busy", rx_page_busy),
+	I40E_VSI_STAT("tx_restart", tx_restart),
 };
 
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6778df2177a1..332a608dbaa6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -77,6 +77,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
@@ -785,6 +786,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	unsigned int start;
 	u64 tx_linearize;
 	u64 tx_force_wb;
+	u64 tx_stopped;
 	u64 rx_p, rx_b;
 	u64 tx_p, tx_b;
 	u16 q;
@@ -804,6 +806,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	rx_b = rx_p = 0;
 	tx_b = tx_p = 0;
 	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
+	tx_stopped = 0;
 	rx_page = 0;
 	rx_buf = 0;
 	rx_reuse = 0;
@@ -828,6 +831,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 		tx_busy += p->tx_stats.tx_busy;
 		tx_linearize += p->tx_stats.tx_linearize;
 		tx_force_wb += p->tx_stats.tx_force_wb;
+		tx_stopped += p->tx_stats.tx_stopped;
 
 		/* locate Rx ring */
 		p = READ_ONCE(vsi->rx_rings[q]);
@@ -872,6 +876,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	vsi->tx_busy = tx_busy;
 	vsi->tx_linearize = tx_linearize;
 	vsi->tx_force_wb = tx_force_wb;
+	vsi->tx_stopped = tx_stopped;
 	vsi->rx_page_failed = rx_page;
 	vsi->rx_buf_failed = rx_buf;
 	vsi->rx_page_reuse = rx_reuse;
@@ -7549,42 +7554,43 @@ static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
 			    struct i40e_fwd_adapter *fwd)
 {
+	struct i40e_channel *ch = NULL, *ch_tmp, *iter;
 	int ret = 0, num_tc = 1,  i, aq_err;
-	struct i40e_channel *ch, *ch_tmp;
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
 
-	if (list_empty(&vsi->macvlan_list))
-		return -EINVAL;
-
 	/* Go through the list and find an available channel */
-	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
-		if (!i40e_is_channel_macvlan(ch)) {
-			ch->fwd = fwd;
+	list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
+		if (!i40e_is_channel_macvlan(iter)) {
+			iter->fwd = fwd;
 			/* record configuration for macvlan interface in vdev */
 			for (i = 0; i < num_tc; i++)
 				netdev_bind_sb_channel_queue(vsi->netdev, vdev,
 							     i,
-							     ch->num_queue_pairs,
-							     ch->base_queue);
-			for (i = 0; i < ch->num_queue_pairs; i++) {
+							     iter->num_queue_pairs,
+							     iter->base_queue);
+			for (i = 0; i < iter->num_queue_pairs; i++) {
 				struct i40e_ring *tx_ring, *rx_ring;
 				u16 pf_q;
 
-				pf_q = ch->base_queue + i;
+				pf_q = iter->base_queue + i;
 
 				/* Get to TX ring ptr */
 				tx_ring = vsi->tx_rings[pf_q];
-				tx_ring->ch = ch;
+				tx_ring->ch = iter;
 
 				/* Get the RX ring ptr */
 				rx_ring = vsi->rx_rings[pf_q];
-				rx_ring->ch = ch;
+				rx_ring->ch = iter;
 			}
+			ch = iter;
 			break;
 		}
 	}
 
+	if (!ch)
+		return -EINVAL;
+
 	/* Guarantee all rings are updated before we update the
 	 * MAC address filter.
 	 */
@@ -13436,8 +13442,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 	np->vsi = vsi;
 
 	hw_enc_features = NETIF_F_SG			|
-			  NETIF_F_IP_CSUM		|
-			  NETIF_F_IPV6_CSUM		|
+			  NETIF_F_HW_CSUM		|
 			  NETIF_F_HIGHDMA		|
 			  NETIF_F_SOFT_FEATURES		|
 			  NETIF_F_TSO			|
@@ -13468,6 +13473,23 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 	/* record features VLANs can make use of */
 	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
 
+#define I40E_GSO_PARTIAL_FEATURES	(NETIF_F_GSO_GRE |		\
+					 NETIF_F_GSO_GRE_CSUM |		\
+					 NETIF_F_GSO_IPXIP4 |		\
+					 NETIF_F_GSO_IPXIP6 |		\
+					 NETIF_F_GSO_UDP_TUNNEL |	\
+					 NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+	netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
+	netdev->features |= NETIF_F_GSO_PARTIAL |
+			    I40E_GSO_PARTIAL_FEATURES;
+
+	netdev->mpls_features |= NETIF_F_SG;
+	netdev->mpls_features |= NETIF_F_HW_CSUM;
+	netdev->mpls_features |= NETIF_F_TSO;
+	netdev->mpls_features |= NETIF_F_TSO6;
+	netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
+
 	/* enable macvlan offloads */
 	netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
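The i40e_fwd_ring_up() rewrite above follows the tree-wide rule that the list_for_each_entry() iterator variable must not be used after the loop: the search runs on a dedicated iter, and the result is published through a separate pointer whose NULL value replaces the old list_empty() check. The bare pattern, reduced to a hypothetical example:

	#include <linux/list.h>

	struct item {
		struct list_head list;
		bool busy;
	};

	static struct item *find_free(struct list_head *head)
	{
		struct item *found = NULL, *iter;

		list_for_each_entry(iter, head, list) {
			if (!iter->busy) {
				found = iter;
				break;
			}
		}

		return found; /* NULL here is what maps to -EINVAL above */
	}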
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0eae5858f2fe..7bc1174edf6b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3,6 +3,7 @@
 
 #include <linux/prefetch.h>
 #include <linux/bpf_trace.h>
+#include <net/mpls.h>
 #include <net/xdp.h>
 #include "i40e.h"
 #include "i40e_trace.h"
@@ -3015,6 +3016,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
 {
 	struct sk_buff *skb = first->skb;
 	u64 cd_cmd, cd_tso_len, cd_mss;
+	__be16 protocol;
 	union {
 		struct iphdr *v4;
 		struct ipv6hdr *v6;
@@ -3026,7 +3028,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
 		unsigned char *hdr;
 	} l4;
 	u32 paylen, l4_offset;
-	u16 gso_segs, gso_size;
+	u16 gso_size;
 	int err;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return 0;
@@ -3039,15 +3041,23 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
 	if (err < 0)
 		return err;
 
-	ip.hdr = skb_network_header(skb);
-	l4.hdr = skb_transport_header(skb);
+	protocol = vlan_get_protocol(skb);
+
+	if (eth_p_mpls(protocol))
+		ip.hdr = skb_inner_network_header(skb);
+	else
+		ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_checksum_start(skb);
 
 	/* initialize outer IP header fields */
 	if (ip.v4->version == 4) {
 		ip.v4->tot_len = 0;
 		ip.v4->check = 0;
+
+		first->tx_flags |= I40E_TX_FLAGS_TSO;
 	} else {
 		ip.v6->payload_len = 0;
+		first->tx_flags |= I40E_TX_FLAGS_TSO;
 	}
 
 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
@@ -3100,10 +3110,9 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
 
 	/* pull values out of skb_shinfo */
 	gso_size = skb_shinfo(skb)->gso_size;
-	gso_segs = skb_shinfo(skb)->gso_segs;
 
 	/* update GSO size and bytecount with header size */
-	first->gso_segs = gso_segs;
+	first->gso_segs = skb_shinfo(skb)->gso_segs;
 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
 	/* find the field values */
@@ -3187,13 +3196,27 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 	unsigned char *exthdr;
 	u32 offset, cmd = 0;
 	__be16 frag_off;
+	__be16 protocol;
 	u8 l4_proto = 0;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return 0;
 
-	ip.hdr = skb_network_header(skb);
-	l4.hdr = skb_transport_header(skb);
+	protocol = vlan_get_protocol(skb);
+
+	if (eth_p_mpls(protocol))
+		ip.hdr = skb_inner_network_header(skb);
+	else
+		ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_checksum_start(skb);
+
+	/* set the tx_flags to indicate the IP protocol type. this is
+	 * required so that checksum header computation below is accurate.
+	 */
+	if (ip.v4->version == 4)
+		*tx_flags |= I40E_TX_FLAGS_IPV4;
+	else
+		*tx_flags |= I40E_TX_FLAGS_IPV6;
 
 	/* compute outer L2 header size */
 	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
@@ -3373,6 +3396,8 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 	/* Memory barrier before checking head and tail */
 	smp_mb();
 
+	++tx_ring->tx_stats.tx_stopped;
+
 	/* Check again in a case another CPU has just made room available. */
 	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
 		return -EBUSY;
@@ -3749,7 +3774,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	struct i40e_tx_buffer *first;
 	u32 td_offset = 0;
 	u32 tx_flags = 0;
-	__be16 protocol;
 	u32 td_cmd = 0;
 	u8 hdr_len = 0;
 	int tso, count;
@@ -3791,15 +3815,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
 		goto out_drop;
 
-	/* obtain protocol of skb */
-	protocol = vlan_get_protocol(skb);
-
-	/* setup IPv4/IPv6 offloads */
-	if (protocol == htons(ETH_P_IP))
-		tx_flags |= I40E_TX_FLAGS_IPV4;
-	else if (protocol == htons(ETH_P_IPV6))
-		tx_flags |= I40E_TX_FLAGS_IPV6;
-
 	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
 
 	if (tso < 0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index c471c2da313c..41f86e9535a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -290,6 +290,7 @@ struct i40e_tx_queue_stats {
 	u64 tx_done_old;
 	u64 tx_linearize;
 	u64 tx_force_wb;
+	u64 tx_stopped;
 	int prev_pkt_ctr;
 };
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 19da3b22160f..8c5118c8baaf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -20,6 +20,7 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
 #define I40E_XDP_CONSUMED	BIT(0)
 #define I40E_XDP_TX		BIT(1)
 #define I40E_XDP_REDIR		BIT(2)
+#define I40E_XDP_EXIT		BIT(3)
 
 /*
  * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
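The I40E_XDP_* verdicts above are deliberately distinct bits: the Rx loop ORs the per-packet results together and then flushes redirects and bumps the XDP Tx tail once per NAPI poll rather than per packet. A condensed sketch of that aggregation step (assuming the helpers i40e already declares alongside these defines):

	static void finalize_rx(struct i40e_ring *xdp_ring, unsigned int xdp_res)
	{
		if (xdp_res & I40E_XDP_REDIR)
			xdp_do_flush();

		if (xdp_res & I40E_XDP_TX)
			i40e_xdp_ring_update_tail(xdp_ring);
	}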
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index c1d25b0b0ca2..af3e7e6afc85 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -161,9 +161,13 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 	if (likely(act == XDP_REDIRECT)) {
 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		if (err)
-			goto out_failure;
-		return I40E_XDP_REDIR;
+		if (!err)
+			return I40E_XDP_REDIR;
+		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+			result = I40E_XDP_EXIT;
+		else
+			result = I40E_XDP_CONSUMED;
+		goto out_failure;
 	}
 
 	switch (act) {
@@ -175,16 +179,16 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 		if (result == I40E_XDP_CONSUMED)
 			goto out_failure;
 		break;
+	case XDP_DROP:
+		result = I40E_XDP_CONSUMED;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
 		fallthrough;
 	case XDP_ABORTED:
+		result = I40E_XDP_CONSUMED;
out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-		fallthrough; /* handle aborts by dropping packet */
-	case XDP_DROP:
-		result = I40E_XDP_CONSUMED;
-		break;
 	}
 	return result;
 }
@@ -271,7 +275,8 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
 				      unsigned int *rx_packets,
 				      unsigned int *rx_bytes,
 				      unsigned int size,
-				      unsigned int xdp_res)
+				      unsigned int xdp_res,
+				      bool *failure)
 {
 	struct sk_buff *skb;
 
@@ -281,11 +286,15 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
 	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
 		return;
 
+	if (xdp_res == I40E_XDP_EXIT) {
+		*failure = true;
+		return;
+	}
+
 	if (xdp_res == I40E_XDP_CONSUMED) {
 		xsk_buff_free(xdp_buff);
 		return;
 	}
-
 	if (xdp_res == I40E_XDP_PASS) {
 		/* NB! We are not checking for errors using
 		 * i40e_test_staterr with
@@ -371,7 +380,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 
 		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
 		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
-					  &rx_bytes, size, xdp_res);
+					  &rx_bytes, size, xdp_res, &failure);
+		if (failure)
+			break;
 		total_rx_packets += rx_packets;
 		total_rx_bytes += rx_bytes;
 		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
@@ -382,7 +393,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
 
 	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
-		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+		failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
 
 	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
 	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
@@ -594,13 +605,13 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 		return -ENETDOWN;
 
 	if (!i40e_enabled_xdp_vsi(vsi))
-		return -ENXIO;
+		return -EINVAL;
 
 	if (queue_id >= vsi->num_queue_pairs)
-		return -ENXIO;
+		return -EINVAL;
 
 	if (!vsi->xdp_rings[queue_id]->xsk_pool)
-		return -ENXIO;
+		return -EINVAL;
 
 	ring = vsi->xdp_rings[queue_id];
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 190590d32faf..7dfcf78b57fb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2871,7 +2871,6 @@ continue_reset:
 	running = adapter->state == __IAVF_RUNNING;
 
 	if (running) {
-		netdev->flags &= ~IFF_UP;
 		netif_carrier_off(netdev);
 		netif_tx_stop_all_queues(netdev);
 		adapter->link_up = false;
@@ -2988,7 +2987,7 @@ continue_reset:
 		 * to __IAVF_RUNNING
 		 */
 		iavf_up_complete(adapter);
-		netdev->flags |= IFF_UP;
+
 		iavf_irq_enable(adapter, true);
 	} else {
 		iavf_change_state(adapter, __IAVF_DOWN);
@@ -3004,10 +3003,8 @@ continue_reset:
reset_err:
 	mutex_unlock(&adapter->client_lock);
 	mutex_unlock(&adapter->crit_lock);
-	if (running) {
+	if (running)
 		iavf_change_state(adapter, __IAVF_RUNNING);
-		netdev->flags |= IFF_UP;
-	}
 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
 	iavf_close(netdev);
 }
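The i40e_xsk.c change above gives -ENOBUFS from xdp_do_redirect() special treatment when the AF_XDP pool runs in need_wakeup mode: a full Rx queue is no longer treated as a per-packet failure but ends the poll early via I40E_XDP_EXIT, leaving the remaining descriptors for the next poll. Simplified to its core, under the same assumptions:

	static int run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
			   struct bpf_prog *xdp_prog)
	{
		int err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);

		if (!err)
			return I40E_XDP_REDIR;

		if (err == -ENOBUFS && xsk_uses_need_wakeup(rx_ring->xsk_pool))
			return I40E_XDP_EXIT; /* stop the loop, retry next poll */

		return I40E_XDP_CONSUMED; /* genuine failure: drop */
	}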
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index d4f1874df7d0..60453b3b8d23 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -301,7 +301,6 @@ enum ice_vsi_state {
 	ICE_VSI_NETDEV_REGISTERED,
 	ICE_VSI_UMAC_FLTR_CHANGED,
 	ICE_VSI_MMAC_FLTR_CHANGED,
-	ICE_VSI_VLAN_FLTR_CHANGED,
 	ICE_VSI_PROMISC_CHANGED,
 	ICE_VSI_STATE_NBITS		/* must be last */
 };
@@ -541,6 +540,7 @@ struct ice_pf {
 	struct mutex avail_q_mutex;	/* protects access to avail_[rx|tx]qs */
 	struct mutex sw_mutex;		/* lock for protecting VSI alloc flow */
 	struct mutex tc_mutex;		/* lock to protect TC changes */
+	struct mutex adev_mutex;	/* lock to protect aux device access */
 	u32 msg_enable;
 	struct ice_ptp ptp;
 	struct tty_driver *ice_gnss_tty_driver;
@@ -672,7 +672,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
 
 static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
 {
-	return !!vsi->xdp_prog;
+	return !!READ_ONCE(vsi->xdp_prog);
 }
 
 static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
@@ -759,6 +759,21 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
 }
 
 /**
+ * ice_find_vsi - Find the VSI from VSI ID
+ * @pf: The PF pointer to search in
+ * @vsi_num: The VSI ID to search for
+ */
+static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
+{
+	int i;
+
+	ice_for_each_vsi(pf, i)
+		if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
+			return pf->vsi[i];
+	return NULL;
+}
+
+/**
  * ice_is_switchdev_running - check if switchdev is configured
  * @pf: pointer to PF structure
  *
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index b25e27c4d887..05cb9dd7035a 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -601,12 +601,30 @@ struct ice_aqc_sw_rules {
 	__le32 addr_low;
 };
 
+/* Add switch rule response:
+ * Content of return buffer is same as the input buffer. The status field and
+ * LUT index are updated as part of the response
+ */
+struct ice_aqc_sw_rules_elem_hdr {
+	__le16 type; /* Switch rule type, one of T_... */
+#define ICE_AQC_SW_RULES_T_LKUP_RX		0x0
+#define ICE_AQC_SW_RULES_T_LKUP_TX		0x1
+#define ICE_AQC_SW_RULES_T_LG_ACT		0x2
+#define ICE_AQC_SW_RULES_T_VSI_LIST_SET		0x3
+#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR	0x4
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET	0x5
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR	0x6
+	__le16 status;
+} __packed __aligned(sizeof(__le16));
+
 /* Add/Update/Get/Remove lookup Rx/Tx command/response entry
  * This structures describes the lookup rules and associated actions. "index"
  * is returned as part of a response to a successful Add command, and can be
  * used to identify the rule for Update/Get/Remove commands.
  */
 struct ice_sw_rule_lkup_rx_tx {
+	struct ice_aqc_sw_rules_elem_hdr hdr;
+
 	__le16 recipe_id;
#define ICE_SW_RECIPE_LOGICAL_PORT_FWD		10
 	/* Source port for LOOKUP_RX and source VSI in case of LOOKUP_TX */
@@ -683,14 +701,16 @@ struct ice_sw_rule_lkup_rx_tx {
 	 * lookup-type
 	 */
 	__le16 hdr_len;
-	u8 hdr[];
-};
+	u8 hdr_data[];
+} __packed __aligned(sizeof(__le16));
 
 /* Add/Update/Remove large action command/response entry
  * "index" is returned as part of a response to a successful Add command, and
  * can be used to identify the action for Update/Get/Remove commands.
  */
 struct ice_sw_rule_lg_act {
+	struct ice_aqc_sw_rules_elem_hdr hdr;
+
 	__le16 index; /* Index in large action table */
 	__le16 size;
 	/* Max number of large actions */
@@ -744,45 +764,19 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_STAT_COUNT_S		3
#define ICE_LG_ACT_STAT_COUNT_M		(0x7F << ICE_LG_ACT_STAT_COUNT_S)
 	__le32 act[]; /* array of size for actions */
-};
+} __packed __aligned(sizeof(__le16));
 
 /* Add/Update/Remove VSI list command/response entry
  * "index" is returned as part of a response to a successful Add command, and
  * can be used to identify the VSI list for Update/Get/Remove commands.
 */
 struct ice_sw_rule_vsi_list {
+	struct ice_aqc_sw_rules_elem_hdr hdr;
+
 	__le16 index; /* Index of VSI/Prune list */
 	__le16 number_vsi;
 	__le16 vsi[]; /* Array of number_vsi VSI numbers */
-};
-
-/* Query VSI list command/response entry */
-struct ice_sw_rule_vsi_list_query {
-	__le16 index;
-	DECLARE_BITMAP(vsi_list, ICE_MAX_VSI);
-} __packed;
-
-/* Add switch rule response:
- * Content of return buffer is same as the input buffer. The status field and
- * LUT index are updated as part of the response
- */
-struct ice_aqc_sw_rules_elem {
-	__le16 type; /* Switch rule type, one of T_... */
-#define ICE_AQC_SW_RULES_T_LKUP_RX		0x0
-#define ICE_AQC_SW_RULES_T_LKUP_TX		0x1
-#define ICE_AQC_SW_RULES_T_LG_ACT		0x2
-#define ICE_AQC_SW_RULES_T_VSI_LIST_SET		0x3
-#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR	0x4
-#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET	0x5
-#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR	0x6
-	__le16 status;
-	union {
-		struct ice_sw_rule_lkup_rx_tx lkup_tx_rx;
-		struct ice_sw_rule_lg_act lg_act;
-		struct ice_sw_rule_vsi_list vsi_list;
-		struct ice_sw_rule_vsi_list_query vsi_list_query;
-	} __packed pdata;
-};
+} __packed __aligned(sizeof(__le16));
 
 /* Query PFC Mode (direct 0x0302)
  * Set PFC Mode (direct 0x0303)
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 5daade32ea62..fba178e07600 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -577,7 +577,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
 {
 	struct net_device *netdev;
 
-	if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
+	if (!vsi || vsi->type != ICE_VSI_PF)
 		return;
 
 	netdev = vsi->netdev;
@@ -599,7 +599,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
 	int base_idx, i;
 
 	if (!vsi || vsi->type != ICE_VSI_PF)
-		return -EINVAL;
+		return 0;
 
 	pf = vsi->back;
 	netdev = vsi->netdev;
@@ -636,7 +636,6 @@ void ice_remove_arfs(struct ice_pf *pf)
 	if (!pf_vsi)
 		return;
 
-	ice_free_cpu_rx_rmap(pf_vsi);
 	ice_clear_arfs(pf_vsi);
 }
 
@@ -653,9 +652,5 @@ void ice_rebuild_arfs(struct ice_pf *pf)
 		return;
 
 	ice_remove_arfs(pf);
-	if (ice_set_cpu_rx_rmap(pf_vsi)) {
-		dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
-		return;
-	}
 	ice_init_arfs(pf_vsi);
 }
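With the wrapping union (ice_aqc_sw_rules_elem) gone from ice_adminq_cmd.h above, every switch rule now starts with ice_aqc_sw_rules_elem_hdr and is sized from its own trailing flexible array. A sketch of how a caller might allocate a lookup rule under that layout (helper name hypothetical; struct_size() is from <linux/overflow.h>):

	static struct ice_sw_rule_lkup_rx_tx *alloc_lkup_rule(u16 hdr_len)
	{
		struct ice_sw_rule_lkup_rx_tx *rule;

		rule = kzalloc(struct_size(rule, hdr_data, hdr_len), GFP_KERNEL);
		if (!rule)
			return NULL;

		rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		rule->hdr_len = cpu_to_le16(hdr_len);
		return rule;
	}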
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index a230edb38466..3991d62473bf 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -647,6 +647,23 @@ void ice_devlink_unregister(struct ice_pf *pf)
 	devlink_unregister(priv_to_devlink(pf));
 }
 
+/**
+ * ice_devlink_set_switch_id - Set unique switch id based on pci dsn
+ * @pf: the PF to create a devlink port for
+ * @ppid: struct with switch id information
+ */
+static void
+ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
+{
+	struct pci_dev *pdev = pf->pdev;
+	u64 id;
+
+	id = pci_get_dsn(pdev);
+
+	ppid->id_len = sizeof(id);
+	put_unaligned_be64(id, &ppid->id);
+}
+
 int ice_devlink_register_params(struct ice_pf *pf)
 {
 	struct devlink *devlink = priv_to_devlink(pf);
@@ -704,6 +721,9 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
 
 	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
 	attrs.phys.port_number = pf->hw.bus.func;
+
+	ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
 	devlink_port_attrs_set(devlink_port, &attrs);
 	devlink = priv_to_devlink(pf);
@@ -753,13 +773,18 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
 	pf = vf->pf;
 	dev = ice_pf_to_dev(pf);
-	vsi = ice_get_vf_vsi(vf);
 	devlink_port = &vf->devlink_port;
 
+	vsi = ice_get_vf_vsi(vf);
+	if (!vsi)
+		return -EINVAL;
+
 	attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
 	attrs.pci_vf.pf = pf->hw.bus.func;
 	attrs.pci_vf.vf = vf->vf_id;
 
+	ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
 	devlink_port_attrs_set(devlink_port, &attrs);
 	devlink = priv_to_devlink(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 9a84d746a6c4..6a463b242c7d 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -361,7 +361,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	np = netdev_priv(netdev);
 	vsi = np->vsi;
 
-	if (ice_is_reset_in_progress(vsi->back->state))
+	if (ice_is_reset_in_progress(vsi->back->state) ||
+	    test_bit(ICE_VF_DIS, vsi->back->state))
 		return NETDEV_TX_BUSY;
 
 	repr = ice_netdev_to_repr(netdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index bd58d9d2e565..6a413331572b 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -52,7 +52,7 @@ static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
 
 static inline int ice_eswitch_configure(struct ice_pf *pf)
 {
-	return -EOPNOTSUPP;
+	return 0;
 }
 
 static inline int ice_eswitch_rebuild(struct ice_pf *pf)
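ice_devlink_set_switch_id() above stamps the PCI device serial number into every port's attributes, which is how userspace decides that the PF and its VF ports belong to one switch: ports match when their physical item IDs compare equal. A hypothetical helper showing the comparison:

	#include <linux/netdevice.h>
	#include <linux/string.h>

	static bool same_switch(const struct netdev_phys_item_id *a,
				const struct netdev_phys_item_id *b)
	{
		return a->id_len == b->id_len &&
		       !memcmp(a->id, b->id, a->id_len);
	}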
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 24cda7e1f916..1e71b70f0e52 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -190,19 +190,17 @@ __ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
 		 "%x.%02x 0x%x %d.%d.%d",
 		 nvm->major, nvm->minor, nvm->eetrack, orom->major,
 		 orom->build, orom->patch);
+
+	strscpy(drvinfo->bus_info, pci_name(pf->pdev),
+		sizeof(drvinfo->bus_info));
 }
 
 static void
 ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
-	struct ice_pf *pf = np->vsi->back;
 
 	__ice_get_drvinfo(netdev, drvinfo, np->vsi);
-
-	strscpy(drvinfo->bus_info, pci_name(pf->pdev),
-		sizeof(drvinfo->bus_info));
-
 	drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
 }
 
@@ -3113,36 +3111,47 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
 	return np->vsi->rss_table_size;
 }
 
-/**
- * ice_get_rxfh - get the Rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
- *
- * Reads the indirection table directly from the hardware.
- */
 static int
-ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
+		     u8 *key, u8 *hfunc, u32 rss_context)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_pf *pf = vsi->back;
-	int err, i;
+	u16 qcount, offset;
+	int err, num_tc, i;
 	u8 *lut;
 
+	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+		netdev_warn(netdev, "RSS is not supported on this VSI!\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (rss_context && !ice_is_adq_active(pf)) {
+		netdev_err(netdev, "RSS context cannot be non-zero when ADQ is not configured.\n");
+		return -EINVAL;
+	}
+
+	qcount = vsi->mqprio_qopt.qopt.count[rss_context];
+	offset = vsi->mqprio_qopt.qopt.offset[rss_context];
+
+	if (rss_context && ice_is_adq_active(pf)) {
+		num_tc = vsi->mqprio_qopt.qopt.num_tc;
+		if (rss_context >= num_tc) {
+			netdev_err(netdev, "RSS context:%d > num_tc:%d\n",
+				   rss_context, num_tc);
+			return -EINVAL;
+		}
+		/* Use channel VSI of given TC */
+		vsi = vsi->tc_map_vsi[rss_context];
+	}
+
 	if (hfunc)
 		*hfunc = ETH_RSS_HASH_TOP;
 
 	if (!indir)
 		return 0;
 
-	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
-		/* RSS not supported return error here */
-		netdev_warn(netdev, "RSS is not configured on this VSI!\n");
-		return -EIO;
-	}
-
 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
 	if (!lut)
 		return -ENOMEM;
@@ -3155,8 +3164,14 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
 	if (err)
 		goto out;
 
+	if (ice_is_adq_active(pf)) {
+		for (i = 0; i < vsi->rss_table_size; i++)
+			indir[i] = offset + lut[i] % qcount;
+		goto out;
+	}
+
 	for (i = 0; i < vsi->rss_table_size; i++)
-		indir[i] = (u32)(lut[i]);
+		indir[i] = lut[i];
 
out:
 	kfree(lut);
@@ -3164,6 +3179,21 @@ out:
 }
 
 /**
+ * ice_get_rxfh - get the Rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function
+ *
+ * Reads the indirection table directly from the hardware.
+ */
+static int
+ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+{
+	return ice_get_rxfh_context(netdev, indir, key, hfunc, 0);
+}
+
+/**
  * ice_set_rxfh - set the Rx flow hash indirection table
  * @netdev: network interface device structure
  * @indir: indirection table
@@ -4104,6 +4134,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
 	.set_pauseparam		= ice_set_pauseparam,
 	.get_rxfh_key_size	= ice_get_rxfh_key_size,
 	.get_rxfh_indir_size	= ice_get_rxfh_indir_size,
+	.get_rxfh_context	= ice_get_rxfh_context,
 	.get_rxfh		= ice_get_rxfh,
 	.set_rxfh		= ice_set_rxfh,
 	.get_channels		= ice_get_channels,
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index af57eb114966..85a94483c2ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -58,7 +58,16 @@ int
 ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
 			      u8 promisc_mask)
 {
-	return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+	struct ice_pf *pf = hw->back;
+	int result;
+
+	result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+	if (result)
+		dev_err(ice_pf_to_dev(pf),
+			"Error setting promisc mode on VSI %i (rc=%d)\n",
+			vsi->vsi_num, result);
+
+	return result;
 }
 
 /**
@@ -73,7 +82,16 @@ int
 ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
 				u8 promisc_mask)
 {
-	return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+	struct ice_pf *pf = hw->back;
+	int result;
+
+	result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+	if (result)
+		dev_err(ice_pf_to_dev(pf),
+			"Error clearing promisc mode on VSI %i (rc=%d)\n",
+			vsi->vsi_num, result);
+
+	return result;
 }
 
 /**
@@ -87,7 +105,16 @@ int
 ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
 			   u16 vid)
 {
-	return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+	struct ice_pf *pf = hw->back;
+	int result;
+
+	result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+	if (result)
+		dev_err(ice_pf_to_dev(pf),
+			"Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
+			ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+	return result;
 }
 
 /**
@@ -101,7 +128,16 @@ int
 ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
 			 u16 vid)
 {
-	return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+	struct ice_pf *pf = hw->back;
+	int result;
+
+	result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+	if (result)
+		dev_err(ice_pf_to_dev(pf),
+			"Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
+			ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+	return result;
 }
 
 /**
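In ice_get_rxfh_context() above, a non-zero rss_context selects an ADQ traffic class whose queues occupy the window [offset, offset + qcount), so each LUT entry is folded into that window before being reported. The remapping arithmetic in isolation (hypothetical standalone helper):

	static void remap_lut(u32 *indir, const u8 *lut, u16 size,
			      u16 offset, u16 qcount)
	{
		int i;

		for (i = 0; i < size; i++)
			indir[i] = offset + lut[i] % qcount;
	}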
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index 35579cf4283f..57586a2e6dec 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -76,8 +76,7 @@ static void ice_gnss_read(struct kthread_work *work)
 	for (i = 0; i < data_len; i += bytes_read) {
 		u16 bytes_left = data_len - i;
 
-		bytes_read = bytes_left < ICE_MAX_I2C_DATA_SIZE ? bytes_left :
-				ICE_MAX_I2C_DATA_SIZE;
+		bytes_read = min_t(typeof(bytes_left), bytes_left, ICE_MAX_I2C_DATA_SIZE);
 
 		err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
 				      cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA),
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 25a436d342c2..895c32bcc8b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -37,29 +37,17 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
 	if (WARN_ON_ONCE(!in_task()))
 		return;
 
+	mutex_lock(&pf->adev_mutex);
 	if (!pf->adev)
-		return;
+		goto finish;
 
 	device_lock(&pf->adev->dev);
 	iadrv = ice_get_auxiliary_drv(pf);
 	if (iadrv && iadrv->event_handler)
 		iadrv->event_handler(pf, event);
 	device_unlock(&pf->adev->dev);
-}
-
-/**
- * ice_find_vsi - Find the VSI from VSI ID
- * @pf: The PF pointer to search in
- * @vsi_num: The VSI ID to search for
- */
-static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
-{
-	int i;
-
-	ice_for_each_vsi(pf, i)
-		if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
-			return pf->vsi[i];
-	return NULL;
+finish:
+	mutex_unlock(&pf->adev_mutex);
 }
 
 /**
@@ -290,7 +278,6 @@ int ice_plug_aux_dev(struct ice_pf *pf)
 		return -ENOMEM;
 
 	adev = &iadev->adev;
-	pf->adev = adev;
 	iadev->pf = pf;
 
 	adev->id = pf->aux_idx;
@@ -300,18 +287,20 @@ int ice_plug_aux_dev(struct ice_pf *pf)
 
 	ret = auxiliary_device_init(adev);
 	if (ret) {
-		pf->adev = NULL;
 		kfree(iadev);
 		return ret;
 	}
 
 	ret = auxiliary_device_add(adev);
 	if (ret) {
-		pf->adev = NULL;
 		auxiliary_device_uninit(adev);
 		return ret;
 	}
 
+	mutex_lock(&pf->adev_mutex);
+	pf->adev = adev;
+	mutex_unlock(&pf->adev_mutex);
+
 	return 0;
 }
 
@@ -320,12 +309,17 @@ int ice_plug_aux_dev(struct ice_pf *pf)
 */
 void ice_unplug_aux_dev(struct ice_pf *pf)
 {
-	if (!pf->adev)
-		return;
+	struct auxiliary_device *adev;
 
-	auxiliary_device_delete(pf->adev);
-	auxiliary_device_uninit(pf->adev);
+	mutex_lock(&pf->adev_mutex);
+	adev = pf->adev;
 	pf->adev = NULL;
+	mutex_unlock(&pf->adev_mutex);
+
+	if (adev) {
+		auxiliary_device_delete(adev);
+		auxiliary_device_uninit(adev);
+	}
 }
 
 /**
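The ice_idc.c changes above all enforce one rule: pf->adev is published and retracted only under the new adev_mutex, and the auxiliary bus calls happen after the pointer has been detached, so unplug cannot race with event delivery. Distilled (and slightly simplified) from the unplug path:

	static void unplug(struct ice_pf *pf)
	{
		struct auxiliary_device *adev;

		mutex_lock(&pf->adev_mutex);
		adev = pf->adev;	/* detach under the lock */
		pf->adev = NULL;
		mutex_unlock(&pf->adev_mutex);

		if (adev) {		/* tear down outside the lock */
			auxiliary_device_delete(adev);
			auxiliary_device_uninit(adev);
		}
	}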
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index b897926f817d..454e01ae09b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1480,6 +1480,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 		ring->tx_tstamps = &pf->ptp.port.tx;
 		ring->dev = dev;
 		ring->count = vsi->num_tx_desc;
+		ring->txq_teid = ICE_INVAL_TEID;
 		if (dvm_ena)
 			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
 		else
@@ -2688,6 +2689,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
 		return;
 
 	vsi->irqs_ready = false;
+	ice_free_cpu_rx_rmap(vsi);
+
 	ice_for_each_q_vector(vsi, i) {
 		u16 vector = i + base;
 		int irq_num;
@@ -2701,7 +2704,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
 			continue;
 
 		/* clear the affinity notifier in the IRQ descriptor */
-		irq_set_affinity_notifier(irq_num, NULL);
+		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+			irq_set_affinity_notifier(irq_num, NULL);
 
 		/* clear the affinity_mask in the IRQ descriptor */
 		irq_set_affinity_hint(irq_num, NULL);
@@ -2983,6 +2987,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
 		}
 	}
 
+	if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
+		ice_clear_dflt_vsi(pf->first_sw);
 	ice_fltr_remove_all(vsi);
 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
 	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
@@ -3037,8 +3043,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
 	ice_for_each_q_vector(vsi, i) {
 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
 
-		coalesce[i].itr_tx = q_vector->tx.itr_setting;
-		coalesce[i].itr_rx = q_vector->rx.itr_setting;
+		coalesce[i].itr_tx = q_vector->tx.itr_settings;
+		coalesce[i].itr_rx = q_vector->rx.itr_settings;
 		coalesce[i].intrl = q_vector->intrl;
 
 		if (i < vsi->num_txq)
@@ -3094,21 +3100,21 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
 		 */
 		if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
 			rc = &vsi->q_vectors[i]->rx;
-			rc->itr_setting = coalesce[i].itr_rx;
+			rc->itr_settings = coalesce[i].itr_rx;
 			ice_write_itr(rc, rc->itr_setting);
 		} else if (i < vsi->alloc_rxq) {
 			rc = &vsi->q_vectors[i]->rx;
-			rc->itr_setting = coalesce[0].itr_rx;
+			rc->itr_settings = coalesce[0].itr_rx;
 			ice_write_itr(rc, rc->itr_setting);
 		}
 
 		if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
 			rc = &vsi->q_vectors[i]->tx;
-			rc->itr_setting = coalesce[i].itr_tx;
+			rc->itr_settings = coalesce[i].itr_tx;
 			ice_write_itr(rc, rc->itr_setting);
 		} else if (i < vsi->alloc_txq) {
 			rc = &vsi->q_vectors[i]->tx;
-			rc->itr_setting = coalesce[0].itr_tx;
+			rc->itr_settings = coalesce[0].itr_tx;
 			ice_write_itr(rc, rc->itr_setting);
 		}
 
@@ -3122,12 +3128,12 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
 	for (; i < vsi->num_q_vectors; i++) {
 		/* transmit */
 		rc = &vsi->q_vectors[i]->tx;
-		rc->itr_setting = coalesce[0].itr_tx;
+		rc->itr_settings = coalesce[0].itr_tx;
 		ice_write_itr(rc, rc->itr_setting);
 
 		/* receive */
 		rc = &vsi->q_vectors[i]->rx;
-		rc->itr_setting = coalesce[0].itr_rx;
+		rc->itr_settings = coalesce[0].itr_rx;
 		ice_write_itr(rc, rc->itr_setting);
 
 		vsi->q_vectors[i]->intrl = coalesce[0].intrl;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index b588d7995631..e1cae253412c 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -243,8 +243,7 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
 {
 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
-	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
-	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
+	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 }
 
 /**
@@ -260,10 +259,15 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
 	if (vsi->type != ICE_VSI_PF)
 		return 0;
 
-	if (ice_vsi_has_non_zero_vlans(vsi))
-		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
-	else
-		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+	if (ice_vsi_has_non_zero_vlans(vsi)) {
+		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
+		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
+						       promisc_m);
+	} else {
+		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+						  promisc_m, 0);
+	}
+
 	return status;
 }
 
@@ -280,14 +284,33 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
 	if (vsi->type != ICE_VSI_PF)
 		return 0;
 
-	if (ice_vsi_has_non_zero_vlans(vsi))
-		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
-	else
-		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+	if (ice_vsi_has_non_zero_vlans(vsi)) {
+		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
+		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
+							 promisc_m);
+	} else {
+		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+						    promisc_m, 0);
+	}
+
 	return status;
 }
 
 /**
+ * ice_get_devlink_port - Get devlink port from netdev
+ * @netdev: the netdevice
+ * @netdev: the netdevice structure
+ */
+static struct devlink_port *ice_get_devlink_port(struct net_device *netdev)
+{
+	struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+	if (!ice_is_switchdev_running(pf))
+		return NULL;
+
+	return &pf->devlink_port;
+}
+
+/**
  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
 *
@@ -302,7 +325,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 	struct ice_pf *pf = vsi->back;
 	struct ice_hw *hw = &pf->hw;
 	u32 changed_flags = 0;
-	u8 promisc_m;
 	int err;
 
 	if (!vsi->netdev)
@@ -320,7 +342,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 	if (ice_vsi_fltr_changed(vsi)) {
 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
-		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
 
 		/* grab the netdev's addr_list_lock */
 		netif_addr_lock_bh(netdev);
@@ -369,29 +390,15 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 	/* check for changes in promiscuous modes */
 	if (changed_flags & IFF_ALLMULTI) {
 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
-			if (ice_vsi_has_non_zero_vlans(vsi))
-				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
-			else
-				promisc_m = ICE_MCAST_PROMISC_BITS;
-
-			err = ice_set_promisc(vsi, promisc_m);
+			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
 			if (err) {
-				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
-					   vsi->vsi_num);
 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
 				goto out_promisc;
 			}
 		} else {
 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
-			if (ice_vsi_has_non_zero_vlans(vsi))
-				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
-			else
-				promisc_m = ICE_MCAST_PROMISC_BITS;
-
-			err = ice_clear_promisc(vsi, promisc_m);
+			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
 			if (err) {
-				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
-					   vsi->vsi_num);
 				vsi->current_netdev_flags |= IFF_ALLMULTI;
 				goto out_promisc;
 			}
@@ -2517,6 +2524,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
 	}
 
+	err = ice_set_cpu_rx_rmap(vsi);
+	if (err) {
+		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
+			   vsi->vsi_num, ERR_PTR(err));
+		goto free_q_irqs;
+	}
+
 	vsi->irqs_ready = true;
 	return 0;
 
@@ -2569,7 +2583,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
 		spin_lock_init(&xdp_ring->tx_lock);
 		for (j = 0; j < xdp_ring->count; j++) {
 			tx_desc = ICE_TX_DESC(xdp_ring, j);
-			tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
+			tx_desc->cmd_type_offset_bsz = 0;
 		}
 	}
 
@@ -2765,8 +2779,10 @@ free_qmap:
 	ice_for_each_xdp_txq(vsi, i)
 		if (vsi->xdp_rings[i]) {
-			if (vsi->xdp_rings[i]->desc)
+			if (vsi->xdp_rings[i]->desc) {
+				synchronize_rcu();
 				ice_free_tx_ring(vsi->xdp_rings[i]);
+			}
 			kfree_rcu(vsi->xdp_rings[i], rcu);
 			vsi->xdp_rings[i] = NULL;
 		}
@@ -3334,7 +3350,9 @@ static void ice_set_netdev_features(struct net_device *netdev)
 			    vlano_features | tso_features;
 
 	/* add support for HW_CSUM on packets with MPLS header */
-	netdev->mpls_features = NETIF_F_HW_CSUM;
+	netdev->mpls_features = NETIF_F_HW_CSUM |
+				NETIF_F_TSO     |
+				NETIF_F_TSO6;
 
 	/* enable features */
 	netdev->features |= netdev->hw_features;
@@ -3488,6 +3506,20 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	if (!vid)
 		return 0;
 
+	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
+		usleep_range(1000, 2000);
+
+	/* Add multicast promisc rule for the VLAN ID to be added if
+	 * all-multicast is currently enabled.
+	 */
+	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+					       ICE_MCAST_VLAN_PROMISC_BITS,
+					       vid);
+		if (ret)
+			goto finish;
+	}
+
 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 
 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
@@ -3495,8 +3527,23 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	 */
 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
 	ret = vlan_ops->add_vlan(vsi, &vlan);
-	if (!ret)
-		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
+	if (ret)
+		goto finish;
+
+	/* If all-multicast is currently enabled and this VLAN ID is only one
+	 * besides VLAN-0 we have to update look-up type of multicast promisc
+	 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
+	 */
+	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
+	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
+		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+					   ICE_MCAST_PROMISC_BITS, 0);
+		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
+	}
+
+finish:
+	clear_bit(ICE_CFG_BUSY, vsi->state);
 
 	return ret;
 }
@@ -3522,6 +3569,9 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	if (!vid)
 		return 0;
 
+	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
+		usleep_range(1000, 2000);
+
 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 
 	/* Make sure VLAN delete is successful before updating VLAN
@@ -3530,10 +3580,33 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
 	ret = vlan_ops->del_vlan(vsi, &vlan);
 	if (ret)
-		return ret;
+		goto finish;
 
-	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
-	return 0;
+	/* Remove multicast promisc rule for the removed VLAN ID if
+	 * all-multicast is enabled.
+	 */
+	if (vsi->current_netdev_flags & IFF_ALLMULTI)
+		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+					   ICE_MCAST_VLAN_PROMISC_BITS, vid);
+
+	if (!ice_vsi_has_non_zero_vlans(vsi)) {
+		/* Update look-up type of multicast promisc rule for VLAN 0
+		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
+		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
+		 */
+		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+						   ICE_MCAST_VLAN_PROMISC_BITS,
+						   0);
+			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+						 ICE_MCAST_PROMISC_BITS, 0);
+		}
+	}
+
+finish:
+	clear_bit(ICE_CFG_BUSY, vsi->state);
+
+	return ret;
 }
 
 /**
@@ -3642,20 +3715,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
 	 */
 	ice_napi_add(vsi);
 
-	status = ice_set_cpu_rx_rmap(vsi);
-	if (status) {
-		dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
-			vsi->vsi_num, status);
-		goto unroll_napi_add;
-	}
 	status = ice_init_mac_fltr(pf);
 	if (status)
-		goto free_cpu_rx_map;
+		goto unroll_napi_add;
 
 	return 0;
 
-free_cpu_rx_map:
-	ice_free_cpu_rx_rmap(vsi);
unroll_napi_add:
 	ice_tc_indir_block_unregister(vsi);
unroll_cfg_netdev:
@@ -3720,6 +3785,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
 static void ice_deinit_pf(struct ice_pf *pf)
 {
 	ice_service_task_stop(pf);
+	mutex_destroy(&pf->adev_mutex);
 	mutex_destroy(&pf->sw_mutex);
 	mutex_destroy(&pf->tc_mutex);
 	mutex_destroy(&pf->avail_q_mutex);
@@ -3798,6 +3864,7 @@ static int ice_init_pf(struct ice_pf *pf)
 
 	mutex_init(&pf->sw_mutex);
 	mutex_init(&pf->tc_mutex);
+	mutex_init(&pf->adev_mutex);
 
 	INIT_HLIST_HEAD(&pf->aq_wait_list);
 	spin_lock_init(&pf->aq_wait_lock);
@@ -5117,7 +5184,6 @@ static int __maybe_unused ice_suspend(struct device *dev)
 			continue;
 		ice_vsi_free_q_vectors(pf->vsi[v]);
 	}
-	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
 	ice_clear_interrupt_scheme(pf);
 
 	pci_save_state(pdev);
@@ -5475,16 +5541,19 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
 
 	/* Add filter for new MAC. If filter exists, return success */
 	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
-	if (err == -EEXIST)
+	if (err == -EEXIST) {
 		/* Although this MAC filter is already present in hardware it's
 		 * possible in some cases (e.g. bonding) that dev_addr was
 		 * modified outside of the driver and needs to be restored back
 		 * to this value.
 		 */
 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
-	else if (err)
+
+		return 0;
+	} else if (err) {
 		/* error if the new filter addition failed */
 		err = -EADDRNOTAVAIL;
+	}
 
err_update_filters:
 	if (err) {
@@ -5621,11 +5690,12 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
  * @dev: the net device pointer
  * @addr: the MAC address entry being added
  * @vid: VLAN ID
+ * @extack: netlink extended ack
 */
 static int
 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
 	    struct net_device *dev, const unsigned char *addr,
-	    __always_unused u16 vid)
+	    __always_unused u16 vid, struct netlink_ext_ack *extack)
 {
 	int err;
@@ -6119,9 +6189,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
 			ice_ptp_link_change(pf, pf->hw.pf_id, true);
 	}
 
-	/* clear this now, and the first stats read will be used as baseline */
-	vsi->stat_offsets_loaded = false;
-
+	/* Perform an initial read of the statistics registers now to
+	 * set the baseline so counters are ready when interface is up
+	 */
+	ice_update_eth_stats(vsi);
 	ice_service_task_schedule(pf);
 
 	return 0;
@@ -6878,12 +6949,15 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
 
 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
 
+#define ICE_EMP_RESET_SLEEP_MS 5000
 	if (reset_type == ICE_RESET_EMPR) {
 		/* If an EMP reset has occurred, any previously pending flash
 		 * update will have completed. We no longer know whether or
 		 * not the NVM update EMP reset is restricted.
 		 */
 		pf->fw_emp_reset_disabled = false;
+
+		msleep(ICE_EMP_RESET_SLEEP_MS);
 	}
 
 	err = ice_init_all_ctrlq(hw);
@@ -8870,4 +8944,5 @@ static const struct net_device_ops ice_netdev_ops = {
 	.ndo_bpf = ice_xdp,
 	.ndo_xdp_xmit = ice_xdp_xmit,
 	.ndo_xsk_wakeup = ice_xsk_wakeup,
+	.ndo_get_devlink_port = ice_get_devlink_port,
 };
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 4eb0599714f4..13cdb5ea594d 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -641,6 +641,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
 	status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
 				       orom_data, hw->flash.banks.orom_size);
 	if (status) {
+		vfree(orom_data);
 		ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
 		return status;
 	}
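The one-line ice_nvm.c hunk above is a leak fix: orom_data is allocated (with vmalloc, per the surrounding function) before ice_read_flash_module(), so the early return on a read failure must release it. The shape of the rule, with hypothetical names and an assumed bank selector:

	static int read_orom(struct ice_hw *hw, u8 **out, u32 size)
	{
		u8 *buf = vmalloc(size);
		int status;

		if (!buf)
			return -ENOMEM;

		status = ice_read_flash_module(hw, ICE_ACTIVE_FLASH_BANK,
					       ICE_SR_1ST_OROM_BANK_PTR, 0,
					       buf, size);
		if (status) {
			vfree(buf); /* the free the fix above adds */
			return status;
		}

		*out = buf;
		return 0;
	}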
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index a1cd33273ca4..662947c882e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -500,12 +500,19 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old. It must also be called whenever the PHC
 * time has been changed.
+ *
+ * Return:
+ * * 0 - OK, successfully updated
+ * * -EAGAIN - PF was busy, need to reschedule the update
 */
-static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
+static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
 {
 	u64 systime;
 	int i;
 
+	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
+		return -EAGAIN;
+
 	/* Read the current PHC time */
 	systime = ice_ptp_read_src_clk_reg(pf, NULL);
 
@@ -528,6 +535,9 @@ static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
 			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
 		}
 	}
+	clear_bit(ICE_CFG_BUSY, pf->state);
+
+	return 0;
 }
 
 /**
@@ -2287,6 +2297,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
 
 /**
  * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
+ * @hw: pointer to the hw struct
  * @tx: PTP Tx tracker to clean up
 *
 * Loop through the Tx timestamp requests and see if any of them have been
@@ -2295,7 +2306,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
 * timestamp will never be captured. This might happen if the packet gets
 * discarded before it reaches the PHY timestamping block.
 */
-static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
+static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx)
 {
 	u8 idx;
 
@@ -2304,11 +2315,16 @@ static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
 
 	for_each_set_bit(idx, tx->in_use, tx->len) {
 		struct sk_buff *skb;
+		u64 raw_tstamp;
 
 		/* Check if this SKB has been waiting for too long */
 		if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
 			continue;
 
+		/* Read tstamp to be able to use this register again */
+		ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
+				    &raw_tstamp);
+
 		spin_lock(&tx->lock);
 		skb = tx->tstamps[idx].skb;
 		tx->tstamps[idx].skb = NULL;
@@ -2324,17 +2340,18 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
 {
 	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
 	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+	int err;
 
 	if (!test_bit(ICE_FLAG_PTP, pf->flags))
 		return;
 
-	ice_ptp_update_cached_phctime(pf);
+	err = ice_ptp_update_cached_phctime(pf);
 
-	ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
+	ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
 
-	/* Run twice a second */
+	/* Run twice a second or reschedule if phc update failed */
 	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
-				   msecs_to_jiffies(500));
+				   msecs_to_jiffies(err ? 10 : 500));
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 848f2adea563..0dac67cd9c77 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -293,8 +293,13 @@ static int ice_repr_add(struct ice_vf *vf)
 	struct ice_q_vector *q_vector;
 	struct ice_netdev_priv *np;
 	struct ice_repr *repr;
+	struct ice_vsi *vsi;
 	int err;
 
+	vsi = ice_get_vf_vsi(vf);
+	if (!vsi)
+		return -EINVAL;
+
 	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
 	if (!repr)
 		return -ENOMEM;
@@ -313,7 +318,7 @@ static int ice_repr_add(struct ice_vf *vf)
 		goto err_alloc;
 	}
 
-	repr->src_vsi = ice_get_vf_vsi(vf);
+	repr->src_vsi = vsi;
 	repr->vf = vf;
 	vf->repr = repr;
 	np = netdev_priv(repr->netdev);
@@ -333,6 +338,7 @@ static int ice_repr_add(struct ice_vf *vf)
 	repr->netdev->min_mtu = ETH_MIN_MTU;
 	repr->netdev->max_mtu = ICE_MAX_MTU;
 
+	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
 	err = ice_repr_reg_netdev(repr->netdev);
 	if (err)
 		goto err_netdev;
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 8915a9d39e36..bb1721f1321d 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -46,7 +46,12 @@ static void ice_free_vf_entries(struct ice_pf *pf)
 */
 static void ice_vf_vsi_release(struct ice_vf *vf)
 {
-	ice_vsi_release(ice_get_vf_vsi(vf));
+	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+	if (WARN_ON(!vsi))
+		return;
+
+	ice_vsi_release(vsi);
 	ice_vf_invalidate_vsi(vf);
 }
 
@@ -104,6 +109,8 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
 
 	hw = &pf->hw;
 	vsi = ice_get_vf_vsi(vf);
+	if (WARN_ON(!vsi))
+		return;
 
 	dev = ice_pf_to_dev(pf);
 	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
@@ -341,6 +348,9 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
 	struct ice_hw *hw = &vf->pf->hw;
 	u32 reg;
 
+	if (WARN_ON(!vsi))
+		return;
+
 	/* set regardless of mapping mode */
 	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
 
@@ -386,6 +396,9 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
 {
 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 
+	if (WARN_ON(!vsi))
+		return;
+
 	ice_ena_vf_msix_mappings(vf);
 	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
 }
 
@@ -1046,8 +1059,8 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
 
 	if (!num_vfs) {
 		if (!pci_vfs_assigned(pdev)) {
-			ice_mbx_deinit_snapshot(&pf->hw);
 			ice_free_vfs(pf);
+			ice_mbx_deinit_snapshot(&pf->hw);
 			if (pf->lag)
 				ice_enable_lag(pf->lag);
 			return 0;
@@ -1128,6 +1141,8 @@ static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
 		u16 rxq_idx;
 
 		vsi = ice_get_vf_vsi(vf);
+		if (!vsi)
+			continue;
 
 		ice_for_each_rxq(vsi, rxq_idx)
 			if (vsi->rxq_map[rxq_idx] == pfq) {
@@ -1521,8 +1536,15 @@ static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
 static bool
 ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
 {
-	int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
-	int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
+	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+	int all_vfs_min_tx_rate;
+	int link_speed_mbps;
+
+	if (WARN_ON(!vsi))
+		return false;
+
+	link_speed_mbps = ice_get_link_speed_mbps(vsi);
+	all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
 
 	/* this VF's previous rate is being overwritten */
 	all_vfs_min_tx_rate -= vf->min_tx_rate;
@@ -1566,6 +1588,10 @@ ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 		goto out_put_vf;
 
 	vsi = ice_get_vf_vsi(vf);
+	if (!vsi) {
+		ret = -EINVAL;
+		goto out_put_vf;
+	}
 
 	/* when max_tx_rate is zero that means no max Tx rate limiting, so only
	 * check if max_tx_rate is non-zero
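The ice_switch.c refactor that follows replaces ad-hoc dummy-packet selection with a table of ICE_PKT_PROFILE() entries keyed by the ICE_PKT_* match bits. Selection then becomes a linear walk that stops at the first profile whose bits are all present in the computed match, with a catch-all entry (match == 0) terminating the table. A sketch of that walk, assuming most-specific-first ordering (helper name hypothetical):

	static const struct ice_dummy_pkt_profile *
	find_profile(const struct ice_dummy_pkt_profile *profiles, u32 match)
	{
		const struct ice_dummy_pkt_profile *p = profiles;

		while (p->match && (match & p->match) != p->match)
			p++;

		return p; /* the last entry matches everything */
	}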
12 }, { ICE_IPV4_OFOS, 14 }, @@ -94,7 +128,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_gre_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(gre_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -126,7 +160,7 @@ static const u8 dummy_gre_udp_packet[] = { 0x00, 0x08, 0x00, 0x00, }; -static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -141,7 +175,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_udp_tun_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -179,7 +213,7 @@ static const u8 dummy_udp_tun_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00 }; -static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -194,7 +228,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_udp_tun_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -229,8 +263,7 @@ static const u8 dummy_udp_tun_udp_packet[] = { 0x00, 0x08, 0x00, 0x00, }; -static const struct ice_dummy_pkt_offsets -dummy_gre_ipv6_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -242,7 +275,7 @@ dummy_gre_ipv6_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_gre_ipv6_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -282,8 +315,7 @@ static const u8 dummy_gre_ipv6_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00 }; -static const struct ice_dummy_pkt_offsets -dummy_gre_ipv6_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -295,7 +327,7 @@ dummy_gre_ipv6_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_gre_ipv6_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -332,8 +364,7 @@ static const u8 dummy_gre_ipv6_udp_packet[] = { 0x00, 0x08, 0x00, 0x00, }; -static const struct ice_dummy_pkt_offsets -dummy_udp_tun_ipv6_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -348,7 +379,7 @@ dummy_udp_tun_ipv6_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_udp_tun_ipv6_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -391,8 +422,7 @@ static const u8 dummy_udp_tun_ipv6_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00 }; -static const struct ice_dummy_pkt_offsets -dummy_udp_tun_ipv6_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -407,7 +437,7 @@ 
dummy_udp_tun_ipv6_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_udp_tun_ipv6_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -448,7 +478,7 @@ static const u8 dummy_udp_tun_ipv6_udp_packet[] = { }; /* offset info for MAC + IPv4 + UDP dummy packet */ -static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -457,7 +487,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = { }; /* Dummy packet for MAC + IPv4 + UDP */ -static const u8 dummy_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -477,7 +507,7 @@ static const u8 dummy_udp_packet[] = { }; /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */ -static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(vlan_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_VLAN_OFOS, 12 }, { ICE_ETYPE_OL, 16 }, @@ -487,7 +517,7 @@ static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = { }; /* C-tag (801.1Q), IPv4:UDP dummy packet */ -static const u8 dummy_vlan_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(vlan_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -509,7 +539,7 @@ static const u8 dummy_vlan_udp_packet[] = { }; /* offset info for MAC + IPv4 + TCP dummy packet */ -static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -518,7 +548,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = { }; /* Dummy packet for MAC + IPv4 + TCP */ -static const u8 dummy_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -541,7 +571,7 @@ static const u8 dummy_tcp_packet[] = { }; /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */ -static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(vlan_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_VLAN_OFOS, 12 }, { ICE_ETYPE_OL, 16 }, @@ -551,7 +581,7 @@ static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = { }; /* C-tag (801.1Q), IPv4:TCP dummy packet */ -static const u8 dummy_vlan_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(vlan_tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -575,7 +605,7 @@ static const u8 dummy_vlan_tcp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV6_OFOS, 14 }, @@ -583,7 +613,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_tcp_ipv6_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -611,8 +641,7 @@ static const u8 dummy_tcp_ipv6_packet[] = { }; /* C-tag (802.1Q): IPv6 + TCP */ -static const struct ice_dummy_pkt_offsets -dummy_vlan_tcp_ipv6_packet_offsets[] = { 
+ICE_DECLARE_PKT_OFFSETS(vlan_tcp_ipv6) = { { ICE_MAC_OFOS, 0 }, { ICE_VLAN_OFOS, 12 }, { ICE_ETYPE_OL, 16 }, @@ -622,7 +651,7 @@ dummy_vlan_tcp_ipv6_packet_offsets[] = { }; /* C-tag (802.1Q), IPv6 + TCP dummy packet */ -static const u8 dummy_vlan_tcp_ipv6_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(vlan_tcp_ipv6) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -652,7 +681,7 @@ static const u8 dummy_vlan_tcp_ipv6_packet[] = { }; /* IPv6 + UDP */ -static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV6_OFOS, 14 }, @@ -661,7 +690,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = { }; /* IPv6 + UDP dummy packet */ -static const u8 dummy_udp_ipv6_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -689,8 +718,7 @@ static const u8 dummy_udp_ipv6_packet[] = { }; /* C-tag (802.1Q): IPv6 + UDP */ -static const struct ice_dummy_pkt_offsets -dummy_vlan_udp_ipv6_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(vlan_udp_ipv6) = { { ICE_MAC_OFOS, 0 }, { ICE_VLAN_OFOS, 12 }, { ICE_ETYPE_OL, 16 }, @@ -700,7 +728,7 @@ dummy_vlan_udp_ipv6_packet_offsets[] = { }; /* C-tag (802.1Q), IPv6 + UDP dummy packet */ -static const u8 dummy_vlan_udp_ipv6_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(vlan_udp_ipv6) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -727,8 +755,7 @@ static const u8 dummy_vlan_udp_ipv6_packet[] = { }; /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */ -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV4_OFOS, 14 }, { ICE_UDP_OF, 34 }, @@ -738,7 +765,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -776,8 +803,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = { }; /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */ -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV4_OFOS, 14 }, { ICE_UDP_OF, 34 }, @@ -787,7 +813,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -822,8 +848,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = { }; /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */ -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV4_OFOS, 14 }, { ICE_UDP_OF, 34 }, @@ -833,7 +858,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ 
-875,8 +900,7 @@ static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV4_OFOS, 14 }, { ICE_UDP_OF, 34 }, @@ -886,7 +910,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -925,8 +949,7 @@ static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -936,7 +959,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -978,8 +1001,7 @@ static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -989,7 +1011,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1028,8 +1050,7 @@ static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -1039,7 +1060,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1086,8 +1107,7 @@ static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -1097,7 +1117,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1141,7 +1161,15 @@ static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const u8 dummy_ipv4_gtpu_ipv4_packet[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_GTP_NO_PAY, 42 }, + { 
ICE_PROTOCOL_LAST, 0 }, +}; + +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1171,17 +1199,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_packet[] = { 0x00, 0x00, }; -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = { - { ICE_MAC_OFOS, 0 }, - { ICE_IPV4_OFOS, 14 }, - { ICE_UDP_OF, 34 }, - { ICE_GTP_NO_PAY, 42 }, - { ICE_PROTOCOL_LAST, 0 }, -}; - -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -1189,7 +1207,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1215,18 +1233,62 @@ static const u8 dummy_ipv6_gtp_packet[] = { 0x00, 0x00, }; -#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \ - (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \ - (DUMMY_ETH_HDR_LEN * \ - sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0]))) -#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \ - (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr)) -#define ICE_SW_RULE_LG_ACT_SIZE(n) \ - (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \ - ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0]))) -#define ICE_SW_RULE_VSI_LIST_SIZE(n) \ - (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \ - ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0]))) +static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = { + ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 | + ICE_PKT_GTP_NOPAY), + ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU | + ICE_PKT_OUTER_IPV6 | + ICE_PKT_INNER_IPV6 | + ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU | + ICE_PKT_OUTER_IPV6 | + ICE_PKT_INNER_IPV6), + ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU | + ICE_PKT_OUTER_IPV6 | + ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU | + ICE_PKT_OUTER_IPV6), + ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY), + ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU | + ICE_PKT_INNER_IPV6 | + ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU | + ICE_PKT_INNER_IPV6), + ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU | + ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU), + ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6), + ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC), + ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 | + ICE_PKT_INNER_TCP), + ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP), + ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6), + ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE), + ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP | + ICE_PKT_INNER_IPV6 | + ICE_PKT_INNER_TCP), + ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP), + ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP | + ICE_PKT_INNER_IPV6), + ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP), + ICE_PKT_PROFILE(vlan_udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP | + ICE_PKT_VLAN), + ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(vlan_udp, ICE_PKT_INNER_UDP | 
ICE_PKT_VLAN), + ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(vlan_tcp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_VLAN), + ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6), + ICE_PKT_PROFILE(vlan_tcp, ICE_PKT_VLAN), + ICE_PKT_PROFILE(tcp, 0), +}; + +#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l)) +#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \ + ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN) +#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \ + ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0) +#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n)) +#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n)) /* this is a recipe to profile association bitmap */ static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES], @@ -2309,7 +2371,8 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi) */ static void ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, - struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc) + struct ice_sw_rule_lkup_rx_tx *s_rule, + enum ice_adminq_opc opc) { u16 vlan_id = ICE_MAX_VLAN_ID + 1; u16 vlan_tpid = ETH_P_8021Q; @@ -2321,15 +2384,14 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, u8 q_rgn; if (opc == ice_aqc_opc_remove_sw_rules) { - s_rule->pdata.lkup_tx_rx.act = 0; - s_rule->pdata.lkup_tx_rx.index = - cpu_to_le16(f_info->fltr_rule_id); - s_rule->pdata.lkup_tx_rx.hdr_len = 0; + s_rule->act = 0; + s_rule->index = cpu_to_le16(f_info->fltr_rule_id); + s_rule->hdr_len = 0; return; } eth_hdr_sz = sizeof(dummy_eth_header); - eth_hdr = s_rule->pdata.lkup_tx_rx.hdr; + eth_hdr = s_rule->hdr_data; /* initialize the ether header with a dummy header */ memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz); @@ -2414,14 +2476,14 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, break; } - s_rule->type = (f_info->flag & ICE_FLTR_RX) ? + s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ? cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) : cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); /* Recipe set depending on lookup type */ - s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type); - s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src); - s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); + s_rule->recipe_id = cpu_to_le16(f_info->lkup_type); + s_rule->src = cpu_to_le16(f_info->src); + s_rule->act = cpu_to_le32(act); if (daddr) ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr); @@ -2435,7 +2497,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, /* Create the switch rule with the final dummy Ethernet header */ if (opc != ice_aqc_opc_update_sw_rules) - s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz); + s_rule->hdr_len = cpu_to_le16(eth_hdr_sz); } /** @@ -2452,7 +2514,8 @@ static int ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, u16 sw_marker, u16 l_id) { - struct ice_aqc_sw_rules_elem *lg_act, *rx_tx; + struct ice_sw_rule_lkup_rx_tx *rx_tx; + struct ice_sw_rule_lg_act *lg_act; /* For software marker we need 3 large actions * 1. FWD action: FWD TO VSI or VSI LIST * 2. GENERIC VALUE action to hold the profile ID @@ -2473,18 +2536,18 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, * 1. Large Action * 2. 
Look up Tx Rx */ - lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts); - rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; + lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts); + rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx); lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL); if (!lg_act) return -ENOMEM; - rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size); + rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size); /* Fill in the first switch rule i.e. large action */ - lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT); - lg_act->pdata.lg_act.index = cpu_to_le16(l_id); - lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts); + lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT); + lg_act->index = cpu_to_le16(l_id); + lg_act->size = cpu_to_le16(num_lg_acts); /* First action VSI forwarding or VSI list forwarding depending on how * many VSIs @@ -2496,13 +2559,13 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M; if (m_ent->vsi_count > 1) act |= ICE_LG_ACT_VSI_LIST; - lg_act->pdata.lg_act.act[0] = cpu_to_le32(act); + lg_act->act[0] = cpu_to_le32(act); /* Second action descriptor type */ act = ICE_LG_ACT_GENERIC; act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; - lg_act->pdata.lg_act.act[1] = cpu_to_le32(act); + lg_act->act[1] = cpu_to_le32(act); act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M; @@ -2512,24 +2575,22 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; - lg_act->pdata.lg_act.act[2] = cpu_to_le32(act); + lg_act->act[2] = cpu_to_le32(act); /* call the fill switch rule to fill the lookup Tx Rx structure */ ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx, ice_aqc_opc_update_sw_rules); /* Update the action to point to the large action ID */ - rx_tx->pdata.lkup_tx_rx.act = - cpu_to_le32(ICE_SINGLE_ACT_PTR | - ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) & - ICE_SINGLE_ACT_PTR_VAL_M)); + rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR | + ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) & + ICE_SINGLE_ACT_PTR_VAL_M)); /* Use the filter rule ID of the previously created rule with single * act. 
Once the update happens, hardware will treat this as large * action */ - rx_tx->pdata.lkup_tx_rx.index = - cpu_to_le16(m_ent->fltr_info.fltr_rule_id); + rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id); status = ice_aq_sw_rules(hw, lg_act, rules_size, 2, ice_aqc_opc_update_sw_rules, NULL); @@ -2591,7 +2652,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, enum ice_sw_lkup_type lkup_type) { - struct ice_aqc_sw_rules_elem *s_rule; + struct ice_sw_rule_vsi_list *s_rule; u16 s_rule_size; u16 rule_type; int status; @@ -2614,7 +2675,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, else return -EINVAL; - s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); + s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi); s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) return -ENOMEM; @@ -2624,13 +2685,13 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, goto exit; } /* AQ call requires hw_vsi_id(s) */ - s_rule->pdata.vsi_list.vsi[i] = + s_rule->vsi[i] = cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i])); } - s_rule->type = cpu_to_le16(rule_type); - s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi); - s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); + s_rule->hdr.type = cpu_to_le16(rule_type); + s_rule->number_vsi = cpu_to_le16(num_vsi); + s_rule->index = cpu_to_le16(vsi_list_id); status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL); @@ -2678,13 +2739,14 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) { struct ice_fltr_mgmt_list_entry *fm_entry; - struct ice_aqc_sw_rules_elem *s_rule; + struct ice_sw_rule_lkup_rx_tx *s_rule; enum ice_sw_lkup_type l_type; struct ice_sw_recipe *recp; int status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), - ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), + GFP_KERNEL); if (!s_rule) return -ENOMEM; fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), @@ -2705,17 +2767,16 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw, ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule, ice_aqc_opc_add_sw_rules); - status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, + status = ice_aq_sw_rules(hw, s_rule, + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1, ice_aqc_opc_add_sw_rules, NULL); if (status) { devm_kfree(ice_hw_to_dev(hw), fm_entry); goto ice_create_pkt_fwd_rule_exit; } - f_entry->fltr_info.fltr_rule_id = - le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); - fm_entry->fltr_info.fltr_rule_id = - le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); + f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index); + fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index); /* The book keeping entries will get removed when base driver * calls remove filter AQ command @@ -2740,20 +2801,22 @@ ice_create_pkt_fwd_rule_exit: static int ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) { - struct ice_aqc_sw_rules_elem *s_rule; + struct ice_sw_rule_lkup_rx_tx *s_rule; int status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), - ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), + GFP_KERNEL); if (!s_rule) return -ENOMEM; ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); - s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id); + s_rule->index = cpu_to_le16(f_info->fltr_rule_id); /* Update switch rule with 
new rule set to forward VSI list */ - status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, + status = ice_aq_sw_rules(hw, s_rule, + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1, ice_aqc_opc_update_sw_rules, NULL); devm_kfree(ice_hw_to_dev(hw), s_rule); @@ -3037,17 +3100,17 @@ static int ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, enum ice_sw_lkup_type lkup_type) { - struct ice_aqc_sw_rules_elem *s_rule; + struct ice_sw_rule_vsi_list *s_rule; u16 s_rule_size; int status; - s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); + s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0); s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) return -ENOMEM; - s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); - s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); + s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); + s_rule->index = cpu_to_le16(vsi_list_id); /* Free the vsi_list resource that we allocated. It is assumed that the * list is empty at this point. @@ -3207,10 +3270,10 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, if (remove_rule) { /* Remove the lookup rule */ - struct ice_aqc_sw_rules_elem *s_rule; + struct ice_sw_rule_lkup_rx_tx *s_rule; s_rule = devm_kzalloc(ice_hw_to_dev(hw), - ICE_SW_RULE_RX_TX_NO_HDR_SIZE, + ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule), GFP_KERNEL); if (!s_rule) { status = -ENOMEM; @@ -3221,8 +3284,8 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, ice_aqc_opc_remove_sw_rules); status = ice_aq_sw_rules(hw, s_rule, - ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, - ice_aqc_opc_remove_sw_rules, NULL); + ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule), + 1, ice_aqc_opc_remove_sw_rules, NULL); /* Remove a book keeping from the list */ devm_kfree(ice_hw_to_dev(hw), s_rule); @@ -3370,7 +3433,7 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle) */ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list) { - struct ice_aqc_sw_rules_elem *s_rule, *r_iter; + struct ice_sw_rule_lkup_rx_tx *s_rule, *r_iter; struct ice_fltr_list_entry *m_list_itr; struct list_head *rule_head; u16 total_elem_left, s_rule_size; @@ -3434,7 +3497,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list) rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; /* Allocate switch rule buffer for the bulk update for unicast */ - s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; + s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule); s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, GFP_KERNEL); if (!s_rule) { @@ -3450,8 +3513,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list) if (is_unicast_ether_addr(mac_addr)) { ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter, ice_aqc_opc_add_sw_rules); - r_iter = (struct ice_aqc_sw_rules_elem *) - ((u8 *)r_iter + s_rule_size); + r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size); } } @@ -3460,7 +3522,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list) /* Call AQ switch rule in AQ_MAX chunk */ for (total_elem_left = num_unicast; total_elem_left > 0; total_elem_left -= elem_sent) { - struct ice_aqc_sw_rules_elem *entry = r_iter; + struct ice_sw_rule_lkup_rx_tx *entry = r_iter; elem_sent = min_t(u8, total_elem_left, (ICE_AQ_MAX_BUF_LEN / s_rule_size)); @@ -3469,7 +3531,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list) NULL); if (status) goto ice_add_mac_exit; - r_iter = (struct ice_aqc_sw_rules_elem *) + r_iter = (typeof(s_rule)) ((u8 *)r_iter + (elem_sent * s_rule_size)); } @@ -3481,8 
+3543,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list) struct ice_fltr_mgmt_list_entry *fm_entry; if (is_unicast_ether_addr(mac_addr)) { - f_info->fltr_rule_id = - le16_to_cpu(r_iter->pdata.lkup_tx_rx.index); + f_info->fltr_rule_id = le16_to_cpu(r_iter->index); f_info->fltr_act = ICE_FWD_TO_VSI; /* Create an entry to track this MAC address */ fm_entry = devm_kzalloc(ice_hw_to_dev(hw), @@ -3498,8 +3559,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list) */ list_add(&fm_entry->list_entry, rule_head); - r_iter = (struct ice_aqc_sw_rules_elem *) - ((u8 *)r_iter + s_rule_size); + r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size); } } @@ -3798,7 +3858,7 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head) */ int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) { - struct ice_aqc_sw_rules_elem *s_rule; + struct ice_sw_rule_lkup_rx_tx *s_rule; struct ice_fltr_info f_info; enum ice_adminq_opc opcode; u16 s_rule_size; @@ -3809,8 +3869,8 @@ int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) return -EINVAL; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); - s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : - ICE_SW_RULE_RX_TX_NO_HDR_SIZE; + s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule) : + ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule); s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) @@ -3848,7 +3908,7 @@ int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) if (status || !(f_info.flag & ICE_FLTR_TX_RX)) goto out; if (set) { - u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); + u16 index = le16_to_cpu(s_rule->index); if (f_info.flag & ICE_FLTR_TX) { hw->port_info->dflt_tx_vsi_num = hw_vsi_id; @@ -5501,212 +5561,66 @@ err_free_lkup_exts: * structure per protocol header * @lkups_cnt: number of protocols * @tun_type: tunnel type - * @pkt: dummy packet to fill according to filter match criteria - * @pkt_len: packet length of dummy packet - * @offsets: pointer to receive the pointer to the offsets for the packet + * + * Returns the &ice_dummy_pkt_profile corresponding to these lookup params. 
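A hedged model of the table-driven matching that replaces the removed if/else ladder below: build a bit set from the lookup elements and tunnel type, then scan a most-specific-first table until every bit an entry requires is present; the zero-match catch-all terminates the scan. The PKT_* flags and pkt_profile type here are illustrative stand-ins for the driver's ICE_PKT_* enum and struct ice_dummy_pkt_profile:

#include <stdint.h>
#include <stdio.h>

enum {
	PKT_VLAN       = 1u << 0,
	PKT_OUTER_IPV6 = 1u << 1,
	PKT_INNER_UDP  = 1u << 2,
};

struct pkt_profile {
	uint32_t match;		/* bits this template requires */
	const char *name;
};

/* Most specific entries first; the zero-match catch-all ends the table. */
static const struct pkt_profile profiles[] = {
	{ PKT_VLAN | PKT_OUTER_IPV6 | PKT_INNER_UDP, "vlan_udp_ipv6" },
	{ PKT_OUTER_IPV6 | PKT_INNER_UDP,            "udp_ipv6"      },
	{ PKT_VLAN | PKT_INNER_UDP,                  "vlan_udp"      },
	{ PKT_INNER_UDP,                             "udp"           },
	{ 0,                                         "tcp"           },
};

static const struct pkt_profile *find_profile(uint32_t match)
{
	const struct pkt_profile *p = profiles;

	/* same loop shape as the driver: skip any entry that requires
	 * bits the caller did not set; entry with match == 0 always hits */
	while (p->match && (match & p->match) != p->match)
		p++;
	return p;
}

int main(void)
{
	printf("%s\n", find_profile(PKT_OUTER_IPV6 | PKT_INNER_UDP)->name);
	return 0;
}

The ordering of ice_dummy_pkt_profiles above follows the same rule: GTP-U entries with the most flag bits come first, and the plain TCP template (match of zero) acts as the fallback, which is why the function can no longer fail to find a packet.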
*/ -static void +static const struct ice_dummy_pkt_profile * ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, - enum ice_sw_tunnel_type tun_type, - const u8 **pkt, u16 *pkt_len, - const struct ice_dummy_pkt_offsets **offsets) + enum ice_sw_tunnel_type tun_type) { - bool inner_tcp = false, inner_udp = false, outer_ipv6 = false; - bool vlan = false, inner_ipv6 = false, gtp_no_pay = false; + const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles; + u32 match = 0; u16 i; + switch (tun_type) { + case ICE_SW_TUN_GTPC: + match |= ICE_PKT_TUN_GTPC; + break; + case ICE_SW_TUN_GTPU: + match |= ICE_PKT_TUN_GTPU; + break; + case ICE_SW_TUN_NVGRE: + match |= ICE_PKT_TUN_NVGRE; + break; + case ICE_SW_TUN_GENEVE: + case ICE_SW_TUN_VXLAN: + match |= ICE_PKT_TUN_UDP; + break; + default: + break; + } + for (i = 0; i < lkups_cnt; i++) { if (lkups[i].type == ICE_UDP_ILOS) - inner_udp = true; + match |= ICE_PKT_INNER_UDP; else if (lkups[i].type == ICE_TCP_IL) - inner_tcp = true; + match |= ICE_PKT_INNER_TCP; else if (lkups[i].type == ICE_IPV6_OFOS) - outer_ipv6 = true; + match |= ICE_PKT_OUTER_IPV6; else if (lkups[i].type == ICE_VLAN_OFOS) - vlan = true; + match |= ICE_PKT_VLAN; else if (lkups[i].type == ICE_ETYPE_OL && lkups[i].h_u.ethertype.ethtype_id == cpu_to_be16(ICE_IPV6_ETHER_ID) && lkups[i].m_u.ethertype.ethtype_id == cpu_to_be16(0xFFFF)) - outer_ipv6 = true; + match |= ICE_PKT_OUTER_IPV6; else if (lkups[i].type == ICE_ETYPE_IL && lkups[i].h_u.ethertype.ethtype_id == cpu_to_be16(ICE_IPV6_ETHER_ID) && lkups[i].m_u.ethertype.ethtype_id == cpu_to_be16(0xFFFF)) - inner_ipv6 = true; + match |= ICE_PKT_INNER_IPV6; else if (lkups[i].type == ICE_IPV6_IL) - inner_ipv6 = true; + match |= ICE_PKT_INNER_IPV6; else if (lkups[i].type == ICE_GTP_NO_PAY) - gtp_no_pay = true; - } - - if (tun_type == ICE_SW_TUN_GTPU) { - if (outer_ipv6) { - if (gtp_no_pay) { - *pkt = dummy_ipv6_gtp_packet; - *pkt_len = sizeof(dummy_ipv6_gtp_packet); - *offsets = dummy_ipv6_gtp_no_pay_packet_offsets; - } else if (inner_ipv6) { - if (inner_udp) { - *pkt = dummy_ipv6_gtpu_ipv6_udp_packet; - *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet); - *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets; - } else { - *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet; - *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet); - *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets; - } - } else { - if (inner_udp) { - *pkt = dummy_ipv6_gtpu_ipv4_udp_packet; - *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet); - *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets; - } else { - *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet; - *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet); - *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets; - } - } - } else { - if (gtp_no_pay) { - *pkt = dummy_ipv4_gtpu_ipv4_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet); - *offsets = dummy_ipv4_gtp_no_pay_packet_offsets; - } else if (inner_ipv6) { - if (inner_udp) { - *pkt = dummy_ipv4_gtpu_ipv6_udp_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet); - *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets; - } else { - *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet); - *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets; - } - } else { - if (inner_udp) { - *pkt = dummy_ipv4_gtpu_ipv4_udp_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet); - *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets; - } else { - *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet); - 
*offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets; - } - } - } - return; - } - - if (tun_type == ICE_SW_TUN_GTPC) { - if (outer_ipv6) { - *pkt = dummy_ipv6_gtp_packet; - *pkt_len = sizeof(dummy_ipv6_gtp_packet); - *offsets = dummy_ipv6_gtp_no_pay_packet_offsets; - } else { - *pkt = dummy_ipv4_gtpu_ipv4_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet); - *offsets = dummy_ipv4_gtp_no_pay_packet_offsets; - } - return; + match |= ICE_PKT_GTP_NOPAY; } - if (tun_type == ICE_SW_TUN_NVGRE) { - if (inner_tcp && inner_ipv6) { - *pkt = dummy_gre_ipv6_tcp_packet; - *pkt_len = sizeof(dummy_gre_ipv6_tcp_packet); - *offsets = dummy_gre_ipv6_tcp_packet_offsets; - return; - } - if (inner_tcp) { - *pkt = dummy_gre_tcp_packet; - *pkt_len = sizeof(dummy_gre_tcp_packet); - *offsets = dummy_gre_tcp_packet_offsets; - return; - } - if (inner_ipv6) { - *pkt = dummy_gre_ipv6_udp_packet; - *pkt_len = sizeof(dummy_gre_ipv6_udp_packet); - *offsets = dummy_gre_ipv6_udp_packet_offsets; - return; - } - *pkt = dummy_gre_udp_packet; - *pkt_len = sizeof(dummy_gre_udp_packet); - *offsets = dummy_gre_udp_packet_offsets; - return; - } + while (ret->match && (match & ret->match) != ret->match) + ret++; - if (tun_type == ICE_SW_TUN_VXLAN || - tun_type == ICE_SW_TUN_GENEVE) { - if (inner_tcp && inner_ipv6) { - *pkt = dummy_udp_tun_ipv6_tcp_packet; - *pkt_len = sizeof(dummy_udp_tun_ipv6_tcp_packet); - *offsets = dummy_udp_tun_ipv6_tcp_packet_offsets; - return; - } - if (inner_tcp) { - *pkt = dummy_udp_tun_tcp_packet; - *pkt_len = sizeof(dummy_udp_tun_tcp_packet); - *offsets = dummy_udp_tun_tcp_packet_offsets; - return; - } - if (inner_ipv6) { - *pkt = dummy_udp_tun_ipv6_udp_packet; - *pkt_len = sizeof(dummy_udp_tun_ipv6_udp_packet); - *offsets = dummy_udp_tun_ipv6_udp_packet_offsets; - return; - } - *pkt = dummy_udp_tun_udp_packet; - *pkt_len = sizeof(dummy_udp_tun_udp_packet); - *offsets = dummy_udp_tun_udp_packet_offsets; - return; - } - - if (inner_udp && !outer_ipv6) { - if (vlan) { - *pkt = dummy_vlan_udp_packet; - *pkt_len = sizeof(dummy_vlan_udp_packet); - *offsets = dummy_vlan_udp_packet_offsets; - return; - } - *pkt = dummy_udp_packet; - *pkt_len = sizeof(dummy_udp_packet); - *offsets = dummy_udp_packet_offsets; - return; - } else if (inner_udp && outer_ipv6) { - if (vlan) { - *pkt = dummy_vlan_udp_ipv6_packet; - *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet); - *offsets = dummy_vlan_udp_ipv6_packet_offsets; - return; - } - *pkt = dummy_udp_ipv6_packet; - *pkt_len = sizeof(dummy_udp_ipv6_packet); - *offsets = dummy_udp_ipv6_packet_offsets; - return; - } else if ((inner_tcp && outer_ipv6) || outer_ipv6) { - if (vlan) { - *pkt = dummy_vlan_tcp_ipv6_packet; - *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet); - *offsets = dummy_vlan_tcp_ipv6_packet_offsets; - return; - } - *pkt = dummy_tcp_ipv6_packet; - *pkt_len = sizeof(dummy_tcp_ipv6_packet); - *offsets = dummy_tcp_ipv6_packet_offsets; - return; - } - - if (vlan) { - *pkt = dummy_vlan_tcp_packet; - *pkt_len = sizeof(dummy_vlan_tcp_packet); - *offsets = dummy_vlan_tcp_packet_offsets; - } else { - *pkt = dummy_tcp_packet; - *pkt_len = sizeof(dummy_tcp_packet); - *offsets = dummy_tcp_packet_offsets; - } + return ret; } /** @@ -5716,15 +5630,12 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * structure per protocol header * @lkups_cnt: number of protocols * @s_rule: stores rule information from the match criteria - * @dummy_pkt: dummy packet to fill according to filter match criteria - * @pkt_len: packet length of dummy packet - * @offsets: 
offset info for the dummy packet + * @profile: dummy packet profile (the template, its size and header offsets) */ static int ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, - struct ice_aqc_sw_rules_elem *s_rule, - const u8 *dummy_pkt, u16 pkt_len, - const struct ice_dummy_pkt_offsets *offsets) + struct ice_sw_rule_lkup_rx_tx *s_rule, + const struct ice_dummy_pkt_profile *profile) { u8 *pkt; u16 i; @@ -5732,11 +5643,12 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, /* Start with a packet with a pre-defined/dummy content. Then, fill * in the header values to be looked up or matched. */ - pkt = s_rule->pdata.lkup_tx_rx.hdr; + pkt = s_rule->hdr_data; - memcpy(pkt, dummy_pkt, pkt_len); + memcpy(pkt, profile->pkt, profile->pkt_len); for (i = 0; i < lkups_cnt; i++) { + const struct ice_dummy_pkt_offsets *offsets = profile->offsets; enum ice_protocol_type type; u16 offset = 0, len = 0, j; bool found = false; @@ -5810,16 +5722,18 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * indicated by the mask to make sure we don't improperly write * over any significant packet data. */ - for (j = 0; j < len / sizeof(u16); j++) - if (((u16 *)&lkups[i].m_u)[j]) - ((u16 *)(pkt + offset))[j] = - (((u16 *)(pkt + offset))[j] & - ~((u16 *)&lkups[i].m_u)[j]) | - (((u16 *)&lkups[i].h_u)[j] & - ((u16 *)&lkups[i].m_u)[j]); + for (j = 0; j < len / sizeof(u16); j++) { + u16 *ptr = (u16 *)(pkt + offset); + u16 mask = lkups[i].m_raw[j]; + + if (!mask) + continue; + + ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask); + } } - s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len); + s_rule->hdr_len = cpu_to_le16(profile->pkt_len); return 0; } @@ -6042,12 +5956,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, struct ice_rule_query_data *added_entry) { struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL; - u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle; - const struct ice_dummy_pkt_offsets *pkt_offsets; - struct ice_aqc_sw_rules_elem *s_rule = NULL; + struct ice_sw_rule_lkup_rx_tx *s_rule = NULL; + const struct ice_dummy_pkt_profile *profile; + u16 rid = 0, i, rule_buf_sz, vsi_handle; struct list_head *rule_head; struct ice_switch_info *sw; - const u8 *pkt = NULL; u16 word_cnt; u32 act = 0; int status; @@ -6065,24 +5978,21 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, /* get # of words we need to match */ word_cnt = 0; for (i = 0; i < lkups_cnt; i++) { - u16 j, *ptr; + u16 j; - ptr = (u16 *)&lkups[i].m_u; - for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++) - if (ptr[j] != 0) + for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++) + if (lkups[i].m_raw[j]) word_cnt++; } - if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS) + if (!word_cnt) return -EINVAL; - /* make sure that we can locate a dummy packet */ - ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len, - &pkt_offsets); - if (!pkt) { - status = -EINVAL; - goto err_ice_add_adv_rule; - } + if (word_cnt > ICE_MAX_CHAIN_WORDS) + return -ENOSPC; + + /* locate a dummy packet */ + profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type); if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI || rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || @@ -6123,7 +6033,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } return status; } - rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len; + rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len); s_rule = kzalloc(rule_buf_sz, 
GFP_KERNEL); if (!s_rule) return -ENOMEM; @@ -6172,27 +6082,25 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, * by caller) */ if (rinfo->rx) { - s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); - s_rule->pdata.lkup_tx_rx.src = - cpu_to_le16(hw->port_info->lport); + s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); + s_rule->src = cpu_to_le16(hw->port_info->lport); } else { - s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); - s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src); + s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); + s_rule->src = cpu_to_le16(rinfo->sw_act.src); } - s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid); - s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); + s_rule->recipe_id = cpu_to_le16(rid); + s_rule->act = cpu_to_le32(act); - status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, - pkt_len, pkt_offsets); + status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile); if (status) goto err_ice_add_adv_rule; if (rinfo->tun_type != ICE_NON_TUN && rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) { status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, - s_rule->pdata.lkup_tx_rx.hdr, - pkt_offsets); + s_rule->hdr_data, + profile->offsets); if (status) goto err_ice_add_adv_rule; } @@ -6219,8 +6127,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, adv_fltr->lkups_cnt = lkups_cnt; adv_fltr->rule_info = *rinfo; - adv_fltr->rule_info.fltr_rule_id = - le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); + adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index); sw = hw->switch_info; sw->recp_list[rid].adv_rule = true; rule_head = &sw->recp_list[rid].filt_rules; @@ -6468,17 +6375,16 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } mutex_unlock(rule_lock); if (remove_rule) { - struct ice_aqc_sw_rules_elem *s_rule; + struct ice_sw_rule_lkup_rx_tx *s_rule; u16 rule_buf_sz; - rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; + rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule); s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); if (!s_rule) return -ENOMEM; - s_rule->pdata.lkup_tx_rx.act = 0; - s_rule->pdata.lkup_tx_rx.index = - cpu_to_le16(list_elem->rule_info.fltr_rule_id); - s_rule->pdata.lkup_tx_rx.hdr_len = 0; + s_rule->act = 0; + s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id); + s_rule->hdr_len = 0; status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, rule_buf_sz, 1, ice_aqc_opc_remove_sw_rules, NULL); diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index ed3d1d03befa..eb641e5512d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -23,9 +23,6 @@ #define ICE_PROFID_IPV6_GTPU_TEID 46 #define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70 -#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \ - (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr)) - /* VSI context structure for add/get/update/free operations */ struct ice_vsi_ctx { u16 vsi_num; @@ -138,8 +135,16 @@ struct ice_update_recipe_lkup_idx_params { struct ice_adv_lkup_elem { enum ice_protocol_type type; - union ice_prot_hdr h_u; /* Header values */ - union ice_prot_hdr m_u; /* Mask of header values to match */ + union { + union ice_prot_hdr h_u; /* Header values */ + /* Used to iterate over the headers */ + u16 h_raw[sizeof(union ice_prot_hdr) / sizeof(u16)]; + }; + union { + union ice_prot_hdr m_u; /* Mask of header values to match */ + /* Used to iterate over header mask 
*/ + u16 m_raw[sizeof(union ice_prot_hdr) / sizeof(u16)]; + }; }; struct ice_sw_act_ctrl { diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index 3acd9f921c44..0a0c55fb8699 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -622,7 +622,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, } else if (ret) { NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter due to error"); - ret = -EIO; goto exit; } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index f9bf008471c9..3f8b7274ed2f 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -8,6 +8,7 @@ #include <linux/prefetch.h> #include <linux/bpf_trace.h> #include <net/dsfield.h> +#include <net/mpls.h> #include <net/xdp.h> #include "ice_txrx_lib.h" #include "ice_lib.h" @@ -1748,18 +1749,24 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); + protocol = vlan_get_protocol(skb); + + if (eth_p_mpls(protocol)) + ip.hdr = skb_inner_network_header(skb); + else + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); /* compute outer L2 header size */ l2_len = ip.hdr - skb->data; offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; - protocol = vlan_get_protocol(skb); - - if (protocol == htons(ETH_P_IP)) + /* set the tx_flags to indicate the IP protocol type. this is + * required so that checksum header computation below is accurate. + */ + if (ip.v4->version == 4) first->tx_flags |= ICE_TX_FLAGS_IPV4; - else if (protocol == htons(ETH_P_IPV6)) + else if (ip.v6->version == 6) first->tx_flags |= ICE_TX_FLAGS_IPV6; if (skb->encapsulation) { @@ -1957,6 +1964,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) unsigned char *hdr; } l4; u64 cd_mss, cd_tso_len; + __be16 protocol; u32 paylen; u8 l4_start; int err; @@ -1972,8 +1980,13 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) return err; /* cppcheck-suppress unreadVariable */ - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); + protocol = vlan_get_protocol(skb); + + if (eth_p_mpls(protocol)) + ip.hdr = skb_inner_network_header(skb); + else + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); /* initialize outer IP header fields */ if (ip.v4->version == 4) { diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index cead3eb149bd..ca902af54bb4 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -133,6 +133,7 @@ static inline int ice_skb_pad(void) #define ICE_XDP_CONSUMED BIT(0) #define ICE_XDP_TX BIT(1) #define ICE_XDP_REDIR BIT(2) +#define ICE_XDP_EXIT BIT(3) #define ICE_RX_DMA_ATTR \ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) @@ -384,9 +385,14 @@ struct ice_ring_container { /* this matches the maximum number of ITR bits, but in usec * values, so it is shifted left one bit (bit zero is ignored) */ - u16 itr_setting:13; - u16 itr_reserved:2; - u16 itr_mode:1; + union { + struct { + u16 itr_setting:13; + u16 itr_reserved:2; + u16 itr_mode:1; + }; + u16 itr_settings; + }; enum ice_container_type type; }; diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 
6578059d9479..cd8e6b50968c 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -220,8 +220,10 @@ static void ice_vf_clear_counters(struct ice_vf *vf) { struct ice_vsi *vsi = ice_get_vf_vsi(vf); + if (vsi) + vsi->num_vlan = 0; + vf->num_mac = 0; - vsi->num_vlan = 0; memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); } @@ -251,6 +253,9 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf) struct ice_vsi *vsi = ice_get_vf_vsi(vf); struct ice_pf *pf = vf->pf; + if (WARN_ON(!vsi)) + return -EINVAL; + if (ice_vsi_rebuild(vsi, true)) { dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n", vf->vf_id); @@ -354,12 +359,12 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) * ice_reset_all_vfs - reset all allocated VFs in one go * @pf: pointer to the PF structure * + * Reset all VFs at once, in response to a PF or other device reset. + * * First, tell the hardware to reset each VF, then do all the waiting in one * chunk, and finally finish restoring each VF after the wait. This is useful * during PF routines which need to reset all VFs, as otherwise it must perform * these resets in a serialized fashion. - * - * Returns true if any VFs were reset, and false otherwise. */ void ice_reset_all_vfs(struct ice_pf *pf) { @@ -472,8 +477,8 @@ static void ice_notify_vf_reset(struct ice_vf *vf) * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting * - * Returns 0 if the VF is currently in reset, if the resets are disabled, or - * if the VF resets successfully. Returns an error code if the VF fails to + * Returns 0 if the VF is currently in reset, if resets are disabled, or if + * the VF resets successfully. Returns an error code if the VF fails to * rebuild. */ int ice_reset_vf(struct ice_vf *vf, u32 flags) @@ -514,6 +519,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false); vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) { + err = -EIO; + goto out_unlock; + } ice_dis_vf_qs(vf); @@ -572,6 +581,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) vf->vf_ops->post_vsi_rebuild(vf); vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) { + err = -EINVAL; + goto out_unlock; + } + ice_eswitch_update_repr(vsi); ice_eswitch_replay_vf_mac_rule(vf); @@ -610,6 +624,9 @@ void ice_dis_vf_qs(struct ice_vf *vf) { struct ice_vsi *vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) + return; + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); ice_vsi_stop_all_rx_rings(vsi); ice_set_vf_state_qs_dis(vf); @@ -640,6 +657,13 @@ struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf) return vf->pf->hw.port_info; } +/** + * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior + * @vsi: the VSI to configure + * @enable: whether to enable or disable the spoof checking + * + * Configure a VSI to enable (or disable) spoof checking behavior. 
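Both raw-view unions in this series — the h_raw/m_raw arrays added to ice_adv_lkup_elem and the itr_setting/itr_settings overlay just above — use the same idiom: a union exposes structured fields as one plain word (or word array) so they can be iterated, saved, and restored wholesale. A hedged stand-alone illustration; bitfield layout is implementation-defined in general, though the driver can rely on its target ABI:

#include <stdint.h>
#include <stdio.h>

struct ring_container {
	union {
		struct {
			/* uint16_t bitfields are a common extension the
			 * kernel relies on; mirrored here for fidelity */
			uint16_t itr_setting  : 13;
			uint16_t itr_reserved : 2;
			uint16_t itr_mode     : 1;
		};
		uint16_t itr_settings;	/* all three fields as one word */
	};
};

int main(void)
{
	struct ring_container rc = { .itr_setting = 50, .itr_mode = 1 };
	uint16_t saved = rc.itr_settings;	/* snapshot in one load */

	rc.itr_setting = 0;			/* ...reconfigure... */
	rc.itr_settings = saved;		/* restore in one store */
	printf("setting=%d mode=%d\n", rc.itr_setting, rc.itr_mode);
	return 0;
}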
+ */ static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable) { struct ice_vsi_ctx *ctx; @@ -790,6 +814,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) u8 broadcast[ETH_ALEN]; int status; + if (WARN_ON(!vsi)) + return -EINVAL; + if (ice_is_eswitch_mode_switchdev(vf->pf)) return 0; @@ -875,6 +902,9 @@ static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) struct ice_vsi *vsi = ice_get_vf_vsi(vf); int err; + if (WARN_ON(!vsi)) + return -EINVAL; + if (vf->min_tx_rate) { err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); if (err) { @@ -938,6 +968,9 @@ void ice_vf_rebuild_host_cfg(struct ice_vf *vf) struct device *dev = ice_pf_to_dev(vf->pf); struct ice_vsi *vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) + return; + ice_vf_set_host_trust_cfg(vf); if (ice_vf_rebuild_host_mac_cfg(vf)) diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 831b667dc5b2..1b4380d6d949 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -176,7 +176,7 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf) * ice_for_each_vf - Iterate over each VF entry * @pf: pointer to the PF private structure * @bkt: bucket index used for iteration - * @vf: pointer to the VF entry currently being processed in the loop. + * @vf: pointer to the VF entry currently being processed in the loop * * The bkt variable is an unsigned integer iterator used to traverse the VF * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is. @@ -192,7 +192,7 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf) * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU * @pf: pointer to the PF private structure * @bkt: bucket index used for iteration - * @vf: pointer to the VF entry currently being processed in the loop. + * @vf: pointer to the VF entry currently being processed in the loop * * The bkt variable is an unsigned integer iterator used to traverse the VF * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is. 
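Running through nearly every ice_vf_lib.c and ice_sriov.c hunk above is one defensive pattern: ice_get_vf_vsi() may return NULL, so each caller now checks the result — usually under WARN_ON() — and bails out with an error instead of dereferencing. A hedged user-space reduction of that shape (vf_get_vsi() and the structs are stand-ins, not the driver's types):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct vsi { int num_vlan; };
struct vf  { struct vsi *vsi; };

static struct vsi *vf_get_vsi(struct vf *vf)
{
	return vf->vsi;	/* may legitimately be NULL mid-teardown */
}

static int vf_rebuild_cfg(struct vf *vf)
{
	struct vsi *vsi = vf_get_vsi(vf);

	if (!vsi) {
		/* stand-in for WARN_ON(!vsi): loud, but non-fatal */
		fprintf(stderr, "WARN: VF has no VSI\n");
		return -EINVAL;
	}
	vsi->num_vlan = 0;	/* safe: vsi was checked above */
	return 0;
}

int main(void)
{
	struct vf vf = { .vsi = NULL };

	printf("rc=%d\n", vf_rebuild_cfg(&vf));
	return 0;
}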
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 3f1a63815bac..1d9b84c3937a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -515,24 +515,6 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf) } /** - * ice_find_vsi_from_id - * @pf: the PF structure to search for the VSI - * @id: ID of the VSI it is searching for - * - * searches for the VSI with the given ID - */ -static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) -{ - int i; - - ice_for_each_vsi(pf, i) - if (pf->vsi[i] && pf->vsi[i]->vsi_num == id) - return pf->vsi[i]; - - return NULL; -} - -/** * ice_vc_isvalid_vsi_id * @vf: pointer to the VF info * @vsi_id: VF relative VSI ID @@ -544,7 +526,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; - vsi = ice_find_vsi_from_id(pf, vsi_id); + vsi = ice_find_vsi(pf, vsi_id); return (vsi && (vsi->vf == vf)); } @@ -559,7 +541,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) */ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) { - struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id); + struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id); /* allocated Tx and Rx queues should be always equal for VF VSI */ return (vsi && (qid < vsi->alloc_txq)); } @@ -1308,12 +1290,51 @@ error_param: } /** + * ice_vf_vsi_dis_single_txq - disable a single Tx queue + * @vf: VF to disable queue for + * @vsi: VSI for the VF + * @q_id: VF relative (0-based) queue ID + * + * Attempt to disable the Tx queue passed in. If the Tx queue was successfully + * disabled then clear q_id bit in the enabled queues bitmap and return + * success. Otherwise return error. 
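A hedged model of the refactor this kerneldoc describes: single-queue disable logic, including the enabled-bit bookkeeping, moves into one helper so the bulk VIRTCHNL_OP_DISABLE_QUEUES loop and the queue-reconfigure path (which must quiesce a queue before reprogramming its ring) can share it. All names below are illustrative:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_Q 16

static uint32_t txq_ena;	/* bitmap of enabled queues */

static int hw_stop_txq(unsigned int q)	/* stand-in for the ring-stop call */
{
	printf("stopping txq %u\n", q);
	return 0;
}

static int dis_single_txq(unsigned int q)
{
	if (q >= MAX_Q)
		return -EINVAL;
	if (!(txq_ena & (1u << q)))
		printf("txq %u not enabled, stopping anyway\n", q);
	if (hw_stop_txq(q))
		return -EIO;
	txq_ena &= ~(1u << q);	/* bookkeeping now lives in one place */
	return 0;
}

int main(void)
{
	unsigned int q;

	txq_ena = 0x3;
	/* bulk-disable path */
	for (q = 0; q < 2; q++)
		dis_single_txq(q);
	/* reconfigure path: quiesce first, then reprogram the ring */
	dis_single_txq(5);
	return 0;
}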
+ */ +static int +ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id) +{ + struct ice_txq_meta txq_meta = { 0 }; + struct ice_tx_ring *ring; + int err; + + if (!test_bit(q_id, vf->txq_ena)) + dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n", + q_id, vsi->vsi_num); + + ring = vsi->tx_rings[q_id]; + if (!ring) + return -EINVAL; + + ice_fill_txq_meta(vsi, ring, &txq_meta); + + err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta); + if (err) { + dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n", + q_id, vsi->vsi_num); + return err; + } + + /* Clear enabled queues flag */ + clear_bit(q_id, vf->txq_ena); + + return 0; +} + +/** * ice_vc_dis_qs_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * - * called from the VF to disable all or specific - * queue(s) + * called from the VF to disable all or specific queue(s) */ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) { @@ -1350,30 +1371,15 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) q_map = vqs->tx_queues; for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { - struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id]; - struct ice_txq_meta txq_meta = { 0 }; - if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - /* Skip queue if not enabled */ - if (!test_bit(vf_q_id, vf->txq_ena)) - continue; - - ice_fill_txq_meta(vsi, ring, &txq_meta); - - if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, - ring, &txq_meta)) { - dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n", - vf_q_id, vsi->vsi_num); + if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - - /* Clear enabled queues flag */ - clear_bit(vf_q_id, vf->txq_ena); } } @@ -1622,6 +1628,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) if (qpi->txq.ring_len > 0) { vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; vsi->tx_rings[i]->count = qpi->txq.ring_len; + + /* Disable any existing queue first */ + if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Configure a queue with the requested settings */ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -2360,6 +2374,11 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) } vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -3625,6 +3644,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) return; } + mutex_lock(&vf->cfg_lock); + /* Check if VF is disabled. 
*/ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) { err = -EPERM; @@ -3642,32 +3663,20 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) err = -EINVAL; } - if (!ice_vc_is_opcode_allowed(vf, v_opcode)) { - ice_vc_send_msg_to_vf(vf, v_opcode, - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, - 0); - ice_put_vf(vf); - return; - } - error_handler: if (err) { ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, NULL, 0); dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n", vf_id, v_opcode, msglen, err); - ice_put_vf(vf); - return; + goto finish; } - /* VF is being configured in another context that triggers a VFR, so no - * need to process this message - */ - if (!mutex_trylock(&vf->cfg_lock)) { - dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n", - vf->vf_id); - ice_put_vf(vf); - return; + if (!ice_vc_is_opcode_allowed(vf, v_opcode)) { + ice_vc_send_msg_to_vf(vf, v_opcode, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, + 0); + goto finish; } switch (v_opcode) { @@ -3780,6 +3789,7 @@ error_handler: vf_id, v_opcode, err); } +finish: mutex_unlock(&vf->cfg_lock); ice_put_vf(vf); } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index 8e38ee2faf58..c6a58343d81d 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -1344,12 +1344,17 @@ static void ice_vf_fdir_dump_info(struct ice_vf *vf) pf = vf->pf; hw = &pf->hw; dev = ice_pf_to_dev(pf); - vf_vsi = pf->vsi[vf->lan_vsi_idx]; + vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id); + return; + } + vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx); fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num)); fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num)); - dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x", + dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n", vf->vf_id, (fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S, (fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S, diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index dfbcaf08520e..49ba8bfdbf04 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -41,8 +41,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) { ice_clean_tx_ring(vsi->tx_rings[q_idx]); - if (ice_is_xdp_ena_vsi(vsi)) + if (ice_is_xdp_ena_vsi(vsi)) { + synchronize_rcu(); ice_clean_tx_ring(vsi->xdp_rings[q_idx]); + } ice_clean_rx_ring(vsi->rx_rings[q_idx]); } @@ -413,8 +415,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp, */ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) { + u32 nb_buffs_extra = 0, nb_buffs = 0; union ice_32b_rx_flex_desc *rx_desc; - u32 nb_buffs_extra = 0, nb_buffs; u16 ntu = rx_ring->next_to_use; u16 total_count = count; struct xdp_buff **xdp; @@ -426,6 +428,10 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, rx_ring->count - ntu); + if (nb_buffs_extra != rx_ring->count - ntu) { + ntu += nb_buffs_extra; + goto exit; + } rx_desc = ICE_RX_DESC(rx_ring, 0); xdp = ice_xdp_buf(rx_ring, 
0); ntu = 0; @@ -439,6 +445,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) if (ntu == rx_ring->count) ntu = 0; +exit: if (rx_ring->next_to_use != ntu) ice_release_rx_desc(rx_ring, ntu); @@ -538,9 +545,13 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, if (likely(act == XDP_REDIRECT)) { err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) - goto out_failure; - return ICE_XDP_REDIR; + if (!err) + return ICE_XDP_REDIR; + if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) + result = ICE_XDP_EXIT; + else + result = ICE_XDP_CONSUMED; + goto out_failure; } switch (act) { @@ -551,15 +562,16 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, if (result == ICE_XDP_CONSUMED) goto out_failure; break; + case XDP_DROP: + result = ICE_XDP_CONSUMED; + break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: + result = ICE_XDP_CONSUMED; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; - case XDP_DROP: - result = ICE_XDP_CONSUMED; break; } @@ -580,6 +592,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) unsigned int xdp_xmit = 0; struct bpf_prog *xdp_prog; bool failure = false; + int entries_to_alloc; /* ZC patch is enabled only when XDP program is set, * so here it can not be NULL @@ -627,18 +640,23 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool); xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring); - if (xdp_res) { - if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) - xdp_xmit |= xdp_res; - else - xsk_buff_free(xdp); + if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) { + xdp_xmit |= xdp_res; + } else if (xdp_res == ICE_XDP_EXIT) { + failure = true; + break; + } else if (xdp_res == ICE_XDP_CONSUMED) { + xsk_buff_free(xdp); + } else if (xdp_res == ICE_XDP_PASS) { + goto construct_skb; + } - total_rx_bytes += size; - total_rx_packets++; + total_rx_bytes += size; + total_rx_packets++; + + ice_bump_ntc(rx_ring); + continue; - ice_bump_ntc(rx_ring); - continue; - } construct_skb: /* XDP_PASS path */ skb = ice_construct_skb_zc(rx_ring, xdp); @@ -666,7 +684,9 @@ construct_skb: ice_receive_skb(rx_ring, skb, vlan_tag); } - failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring)); + entries_to_alloc = ICE_DESC_UNUSED(rx_ring); + if (entries_to_alloc > ICE_RING_QUARTER(rx_ring)) + failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc); ice_finalize_xdp_rx(xdp_ring, xdp_xmit); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); @@ -918,17 +938,17 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, struct ice_vsi *vsi = np->vsi; struct ice_tx_ring *ring; - if (test_bit(ICE_DOWN, vsi->state)) + if (test_bit(ICE_VSI_DOWN, vsi->state)) return -ENETDOWN; if (!ice_is_xdp_ena_vsi(vsi)) - return -ENXIO; + return -EINVAL; if (queue_id >= vsi->num_txq) - return -ENXIO; + return -EINVAL; if (!vsi->xdp_rings[queue_id]->xsk_pool) - return -ENXIO; + return -EINVAL; ring = vsi->xdp_rings[queue_id]; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 2a5782063f4c..c14fc871dd41 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1798,14 +1798,14 @@ static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer, frame_size >>= 1; - data = kmap(rx_buffer->page); + data = kmap_local_page(rx_buffer->page); if 
(data[3] != 0xFF || data[frame_size + 10] != 0xBE || data[frame_size + 12] != 0xAF) match = false; - kunmap(rx_buffer->page); + kunmap_local(data); return match; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 34b33b21e0dc..68be2976f539 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -5505,7 +5505,8 @@ static void igb_watchdog_task(struct work_struct *work) break; } - if (adapter->link_speed != SPEED_1000) + if (adapter->link_speed != SPEED_1000 || + !hw->phy.ops.read_reg) goto no_wait; /* wait for Remote receiver status OK */ diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 3e386c38d016..1e7e7071f64d 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -264,7 +264,6 @@ int igc_reinit_queues(struct igc_adapter *adapter); void igc_write_rss_indir_tbl(struct igc_adapter *adapter); bool igc_has_link(struct igc_adapter *adapter); void igc_reset(struct igc_adapter *adapter); -int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx); void igc_update_stats(struct igc_adapter *adapter); void igc_disable_rx_ring(struct igc_ring *ring); void igc_enable_rx_ring(struct igc_ring *ring); diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index f068b66b8025..a15927e77272 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -182,8 +182,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) igc_check_for_copper_link(hw); - phy->type = igc_phy_i225; - out: return ret_val; } diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index b1e72ec5f131..360644f33d5f 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -53,11 +53,6 @@ enum igc_mac_type { igc_num_macs /* List is 1-based, so subtract 1 for true count. */ }; -enum igc_phy_type { - igc_phy_unknown = 0, - igc_phy_i225, -}; - enum igc_media_type { igc_media_type_unknown = 0, igc_media_type_copper = 1, @@ -138,8 +133,6 @@ struct igc_nvm_info { struct igc_phy_info { struct igc_phy_operations ops; - enum igc_phy_type type; - u32 addr; u32 id; u32 reset_delay_us; /* in usec */ diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index 66ea566488d1..59d5c467ea6e 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) { u32 swfw_sync; - while (igc_get_hw_semaphore_i225(hw)) - ; /* Empty */ + /* Releasing the resource requires first getting the HW semaphore. + * If we fail to get the semaphore, there is nothing we can do, + * except log an error and quit. We are not allowed to hang here + * indefinitely, as it may cause denial of service or system crash. 
+ */ + if (igc_get_hw_semaphore_i225(hw)) { + hw_dbg("Failed to release SW_FW_SYNC.\n"); + return; + } swfw_sync = rd32(IGC_SW_FW_SYNC); swfw_sync &= ~mask; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 74b2c590ed5d..ae17af44fe02 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6187,56 +6187,6 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg) return value; } -int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) -{ - struct igc_mac_info *mac = &adapter->hw.mac; - - mac->autoneg = false; - - /* Make sure dplx is at most 1 bit and lsb of speed is not set - * for the switch() below to work - */ - if ((spd & 1) || (dplx & ~1)) - goto err_inval; - - switch (spd + dplx) { - case SPEED_10 + DUPLEX_HALF: - mac->forced_speed_duplex = ADVERTISE_10_HALF; - break; - case SPEED_10 + DUPLEX_FULL: - mac->forced_speed_duplex = ADVERTISE_10_FULL; - break; - case SPEED_100 + DUPLEX_HALF: - mac->forced_speed_duplex = ADVERTISE_100_HALF; - break; - case SPEED_100 + DUPLEX_FULL: - mac->forced_speed_duplex = ADVERTISE_100_FULL; - break; - case SPEED_1000 + DUPLEX_FULL: - mac->autoneg = true; - adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; - break; - case SPEED_1000 + DUPLEX_HALF: /* not supported */ - goto err_inval; - case SPEED_2500 + DUPLEX_FULL: - mac->autoneg = true; - adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; - break; - case SPEED_2500 + DUPLEX_HALF: /* not supported */ - default: - goto err_inval; - } - - /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ - adapter->hw.phy.mdix = AUTO_ALL_MODES; - - return 0; - -err_inval: - netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n"); - return -EINVAL; -} - /** * igc_probe - Device Initialization Routine * @pdev: PCI device information struct diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 40dbf4b43234..53b77c969c85 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -141,24 +141,14 @@ void igc_power_down_phy_copper(struct igc_hw *hw) * igc_check_downshift - Checks whether a downshift in speed occurred * @hw: pointer to the HW structure * - * Success returns 0, Failure returns 1 - * * A downshift is detected by querying the PHY link health. 
*/ -s32 igc_check_downshift(struct igc_hw *hw) +void igc_check_downshift(struct igc_hw *hw) { struct igc_phy_info *phy = &hw->phy; - s32 ret_val; - switch (phy->type) { - case igc_phy_i225: - default: - /* speed downshift not supported */ - phy->speed_downgraded = false; - ret_val = 0; - } - - return ret_val; + /* speed downshift not supported */ + phy->speed_downgraded = false; } /** @@ -581,7 +571,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) * the lower time out */ for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { - usleep_range(500, 1000); + udelay(50); mdic = rd32(IGC_MDIC); if (mdic & IGC_MDIC_READY) break; @@ -638,7 +628,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) * the lower time out */ for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { - usleep_range(500, 1000); + udelay(50); mdic = rd32(IGC_MDIC); if (mdic & IGC_MDIC_READY) break; diff --git a/drivers/net/ethernet/intel/igc/igc_phy.h b/drivers/net/ethernet/intel/igc/igc_phy.h index 1b031372d206..832a7e359f18 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.h +++ b/drivers/net/ethernet/intel/igc/igc_phy.h @@ -11,7 +11,7 @@ s32 igc_phy_hw_reset(struct igc_hw *hw); s32 igc_get_phy_id(struct igc_hw *hw); s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, u32 usec_interval, bool *success); -s32 igc_check_downshift(struct igc_hw *hw); +void igc_check_downshift(struct igc_hw *hw); s32 igc_setup_copper_link(struct igc_hw *hw); void igc_power_up_phy_copper(struct igc_hw *hw); void igc_power_down_phy_copper(struct igc_hw *hw); diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 0d6e3215e98f..653e9f1e35b5 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -992,6 +992,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter) igc_ptp_write_i225(adapter, &ts); } +static void igc_ptm_stop(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_EN; + + wr32(IGC_PTM_CTRL, ctrl); +} + /** * igc_ptp_suspend - Disable PTP work items and prepare for suspend * @adapter: Board private structure @@ -1009,8 +1020,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter) adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); - if (pci_device_is_present(adapter->pdev)) + if (pci_device_is_present(adapter->pdev)) { igc_ptp_time_save(adapter); + igc_ptm_stop(adapter); + } } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index e596e1a9fc75..774de63dd93a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -585,7 +585,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) return -EINVAL; } - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { struct rx_sa rsa; if (xs->calg) { @@ -757,7 +757,7 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) u32 zerobuf[4] = {0, 0, 0, 0}; u16 sa_idx; - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { struct rx_sa *rsa; u8 ipi; @@ -903,7 +903,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) /* Tx IPsec offload doesn't seem to work on this * device, so block these requests for now. 
*/ - if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) { + if (sam->dir != XFRM_DEV_OFFLOAD_IN) { err = -EOPNOTSUPP; goto err_out; } @@ -914,7 +914,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) goto err_out; } - xs->xso.flags = sam->flags; + xs->xso.dir = sam->dir; xs->id.spi = sam->spi; xs->id.proto = sam->proto; xs->props.family = sam->family; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h index d2b64ff8eb4e..809ab51a7842 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h @@ -74,7 +74,7 @@ struct ixgbe_ipsec { struct sa_mbx_msg { __be32 spi; - u8 flags; + u8 dir; u8 proto; u16 family; __be32 addr[4]; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c4a4954aa317..77c2e70b0860 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -151,8 +151,8 @@ MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)"); #endif /* CONFIG_PCI_IOV */ -static unsigned int allow_unsupported_sfp; -module_param(allow_unsupported_sfp, uint, 0); +static bool allow_unsupported_sfp; +module_param(allow_unsupported_sfp, bool, 0); MODULE_PARM_DESC(allow_unsupported_sfp, "Allow unsupported and untested SFP+ modules on 82599-based adapters"); @@ -2344,6 +2344,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, hard_start = page_address(rx_buffer->page) + rx_buffer->page_offset - offset; xdp_prepare_buff(&xdp, hard_start, offset, size, true); + xdp_buff_clear_frags_flag(&xdp); #if (PAGE_SIZE > 4096) /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size); @@ -5051,12 +5052,12 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { if (hw->mac.type == ixgbe_mac_82598EB) - netif_set_gso_max_size(adapter->netdev, 65536); + netif_set_tso_max_size(adapter->netdev, 65536); return; } if (hw->mac.type == ixgbe_mac_82598EB) - netif_set_gso_max_size(adapter->netdev, 32768); + netif_set_tso_max_size(adapter->netdev, 32768); #ifdef IXGBE_FCOE if (adapter->netdev->features & NETIF_F_FCOE_MTU) @@ -8571,57 +8572,83 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring, struct xdp_frame *xdpf) { - struct ixgbe_tx_buffer *tx_buffer; - union ixgbe_adv_tx_desc *tx_desc; - u32 len, cmd_type; - dma_addr_t dma; - u16 i; - - len = xdpf->len; + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
sinfo->nr_frags : 0; + u16 i = 0, index = ring->next_to_use; + struct ixgbe_tx_buffer *tx_head = &ring->tx_buffer_info[index]; + struct ixgbe_tx_buffer *tx_buff = tx_head; + union ixgbe_adv_tx_desc *tx_desc = IXGBE_TX_DESC(ring, index); + u32 cmd_type, len = xdpf->len; + void *data = xdpf->data; - if (unlikely(!ixgbe_desc_unused(ring))) + if (unlikely(ixgbe_desc_unused(ring) < 1 + nr_frags)) return IXGBE_XDP_CONSUMED; - dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE); - if (dma_mapping_error(ring->dev, dma)) - return IXGBE_XDP_CONSUMED; + tx_head->bytecount = xdp_get_frame_len(xdpf); + tx_head->gso_segs = 1; + tx_head->xdpf = xdpf; - /* record the location of the first descriptor for this packet */ - tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; - tx_buffer->bytecount = len; - tx_buffer->gso_segs = 1; - tx_buffer->protocol = 0; + tx_desc->read.olinfo_status = + cpu_to_le32(tx_head->bytecount << IXGBE_ADVTXD_PAYLEN_SHIFT); - i = ring->next_to_use; - tx_desc = IXGBE_TX_DESC(ring, i); + for (;;) { + dma_addr_t dma; - dma_unmap_len_set(tx_buffer, len, len); - dma_unmap_addr_set(tx_buffer, dma, dma); - tx_buffer->xdpf = xdpf; + dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) + goto unmap; - tx_desc->read.buffer_addr = cpu_to_le64(dma); + dma_unmap_len_set(tx_buff, len, len); + dma_unmap_addr_set(tx_buff, dma, dma); + + cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT | + IXGBE_ADVTXD_DCMD_IFCS | len; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_buff->protocol = 0; + + if (++index == ring->count) + index = 0; + + if (i == nr_frags) + break; + + tx_buff = &ring->tx_buffer_info[index]; + tx_desc = IXGBE_TX_DESC(ring, index); + tx_desc->read.olinfo_status = 0; + data = skb_frag_address(&sinfo->frags[i]); + len = skb_frag_size(&sinfo->frags[i]); + i++; + } /* put descriptor type bits */ - cmd_type = IXGBE_ADVTXD_DTYP_DATA | - IXGBE_ADVTXD_DCMD_DEXT | - IXGBE_ADVTXD_DCMD_IFCS; - cmd_type |= len | IXGBE_TXD_CMD; - tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); - tx_desc->read.olinfo_status = - cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT); + tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD); /* Avoid any potential race with xdp_xmit and cleanup */ smp_wmb(); - /* set next_to_watch value indicating a packet is present */ - i++; - if (i == ring->count) - i = 0; - - tx_buffer->next_to_watch = tx_desc; - ring->next_to_use = i; + tx_head->next_to_watch = tx_desc; + ring->next_to_use = index; return IXGBE_XDP_TX; + +unmap: + for (;;) { + tx_buff = &ring->tx_buffer_info[index]; + if (dma_unmap_len(tx_buff, len)) + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buff, dma), + dma_unmap_len(tx_buff, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buff, len, 0); + if (tx_buff == tx_head) + break; + + if (!index) + index += ring->count; + index--; + } + + return IXGBE_XDP_CONSUMED; } netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index bba3feaf3318..f1f69ce67420 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -8,6 +8,7 @@ #define IXGBE_XDP_CONSUMED BIT(0) #define IXGBE_XDP_TX BIT(1) #define IXGBE_XDP_REDIR BIT(2) +#define IXGBE_XDP_EXIT BIT(3) #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ IXGBE_TXD_CMD_RS) diff --git 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index dd7ff66d422f..1703c640a434 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -109,9 +109,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, if (likely(act == XDP_REDIRECT)) { err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) - goto out_failure; - return IXGBE_XDP_REDIR; + if (!err) + return IXGBE_XDP_REDIR; + if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) + result = IXGBE_XDP_EXIT; + else + result = IXGBE_XDP_CONSUMED; + goto out_failure; } switch (act) { @@ -130,16 +134,16 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, if (result == IXGBE_XDP_CONSUMED) goto out_failure; break; + case XDP_DROP: + result = IXGBE_XDP_CONSUMED; + break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: + result = IXGBE_XDP_CONSUMED; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; /* handle aborts by dropping packet */ - case XDP_DROP: - result = IXGBE_XDP_CONSUMED; - break; } return result; } @@ -303,21 +307,26 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool); xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp); - if (xdp_res) { - if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) - xdp_xmit |= xdp_res; - else - xsk_buff_free(bi->xdp); + if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) { + xdp_xmit |= xdp_res; + } else if (xdp_res == IXGBE_XDP_EXIT) { + failure = true; + break; + } else if (xdp_res == IXGBE_XDP_CONSUMED) { + xsk_buff_free(bi->xdp); + } else if (xdp_res == IXGBE_XDP_PASS) { + goto construct_skb; + } - bi->xdp = NULL; - total_rx_packets++; - total_rx_bytes += size; + bi->xdp = NULL; + total_rx_packets++; + total_rx_bytes += size; - cleaned_count++; - ixgbe_inc_ntc(rx_ring); - continue; - } + cleaned_count++; + ixgbe_inc_ntc(rx_ring); + continue; +construct_skb: /* XDP_PASS path */ skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp); if (!skb) { @@ -516,10 +525,10 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) return -ENETDOWN; if (!READ_ONCE(adapter->xdp_prog)) - return -ENXIO; + return -EINVAL; if (qid >= adapter->num_xdp_queues) - return -ENXIO; + return -EINVAL; ring = adapter->xdp_ring[qid]; @@ -527,7 +536,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) return -ENETDOWN; if (!ring->xsk_pool) - return -ENXIO; + return -EINVAL; if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { u64 eics = BIT_ULL(ring->q_vector->v_idx); diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index e763cee0695e..9984ebc62d78 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -25,7 +25,7 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter, /* send the important bits to the PF */ sam = (struct sa_mbx_msg *)(&msgbuf[1]); - sam->flags = xs->xso.flags; + sam->dir = xs->xso.dir; sam->spi = xs->id.spi; sam->proto = xs->id.proto; sam->family = xs->props.family; @@ -280,7 +280,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) return -EINVAL; } - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { struct rx_sa rsa; if (xs->calg) { @@ -394,7 +394,7 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) adapter = netdev_priv(dev); 
ipsec = adapter->ipsec; - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; if (!ipsec->rx_tbl[sa_idx].used) { diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.h b/drivers/net/ethernet/intel/ixgbevf/ipsec.h index 3740725041c3..d22990165353 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.h +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.h @@ -57,7 +57,7 @@ struct ixgbevf_ipsec { struct sa_mbx_msg { __be32 spi; - u8 flags; + u8 dir; u8 proto; u16 family; __be32 addr[4];
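A change repeated across the ixgbe and ixgbevf IPsec hunks is the move from testing a flag bit, xs->xso.flags & XFRM_OFFLOAD_INBOUND, to comparing an explicit direction field, xs->xso.dir == XFRM_DEV_OFFLOAD_IN; the VF-to-PF mailbox message (struct sa_mbx_msg) likewise carries dir instead of flags. A hedged sketch of the consumer side, with the SA-programming helpers stubbed out as hypothetical names:

static int example_ipsec_add_sa(struct xfrm_state *xs)
{
	/* xso.dir is an enum (XFRM_DEV_OFFLOAD_IN / XFRM_DEV_OFFLOAD_OUT),
	 * so the offload direction is compared, not bit-tested.
	 */
	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN)
		return example_program_rx_sa(xs);	/* hypothetical */

	return example_program_tx_sa(xs);		/* hypothetical */
}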
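Both zero-copy Rx paths (ice_run_xdp_zc() and ixgbe_run_xdp_zc()) now separate a full AF_XDP Rx queue from other redirect failures: when xdp_do_redirect() fails with -ENOBUFS and the pool uses need_wakeup, the driver returns a new EXIT verdict so the NAPI loop stops cleaning and lets user space catch up, rather than dropping the frame. A simplified sketch of that decision, assuming similar verdict constants and omitting the trace_xdp_exception() call the real code issues on failure:

static int example_run_xdp_redirect(struct example_rx_ring *rx_ring,
				    struct xdp_buff *xdp,
				    struct bpf_prog *xdp_prog)
{
	int err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);

	if (!err)
		return EXAMPLE_XDP_REDIR;

	/* -ENOBUFS with need_wakeup means the XSK Rx ring is full;
	 * exit the Rx loop early instead of silently consuming.
	 */
	if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
		return EXAMPLE_XDP_EXIT;

	return EXAMPLE_XDP_CONSUMED;	/* any other error: drop */
}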
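Finally, the reworked ixgbe_xmit_xdp_ring() maps the head buffer plus each fragment of a multi-buffer XDP frame, and on a DMA mapping error walks backwards from the failing slot to the head, unmapping whatever was already mapped before reporting the frame as consumed. A condensed sketch of that rollback walk, with the ring and buffer types reduced to the fields the loop touches:

/* Mirrors the 'unmap:' label in the hunk above; the example_* types
 * are illustrative stand-ins for the driver's ring structures.
 */
static void example_unmap_partial_frame(struct example_tx_ring *ring,
					struct example_tx_buffer *head,
					u16 index)
{
	struct example_tx_buffer *buf;

	for (;;) {
		buf = &ring->tx_buffer_info[index];
		if (dma_unmap_len(buf, len))
			dma_unmap_page(ring->dev, dma_unmap_addr(buf, dma),
				       dma_unmap_len(buf, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(buf, len, 0);
		if (buf == head)
			break;

		/* step back one slot, wrapping at the ring start */
		if (!index)
			index += ring->count;
		index--;
	}
}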