Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h                |  15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c        |  27
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c        |   8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_gnss.c           |   3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc.c            |  15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c           |  22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.c           |   8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c          |  32
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c         | 494
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h         |  12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c         |   1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c           |  29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h           |   1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c         |  43
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.h         |   4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c       |  27
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c  |   9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c            |  53
18 files changed, 425 insertions(+), 378 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index a895e3a8e988..60453b3b8d23 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -759,6 +759,21 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
}
/**
+ * ice_find_vsi - Find the VSI from VSI ID
+ * @pf: The PF pointer to search in
+ * @vsi_num: The VSI ID to search for
+ */
+static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
+{
+ int i;
+
+ ice_for_each_vsi(pf, i)
+ if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
+ return pf->vsi[i];
+ return NULL;
+}
+
+/**
* ice_is_switchdev_running - check if switchdev is configured
* @pf: pointer to PF structure
*
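
The hunk above promotes ice_find_vsi() to a shared static inline in ice.h; the private copies in ice_idc.c and ice_virtchnl.c are removed further down. A minimal standalone sketch of the same scan-table-by-ID pattern, using made-up types rather than the driver's real ice_pf/ice_vsi structures:

#include <stddef.h>
#include <stdio.h>

struct vsi { unsigned short vsi_num; };

/* Mirror of the new helper: scan a fixed table and return the first
 * non-NULL slot whose vsi_num matches, or NULL if nothing matches. */
static struct vsi *find_vsi(struct vsi **table, size_t n, unsigned short vsi_num)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (table[i] && table[i]->vsi_num == vsi_num)
			return table[i];
	return NULL;
}

int main(void)
{
	struct vsi a = { .vsi_num = 5 }, b = { .vsi_num = 9 };
	struct vsi *tbl[] = { &a, NULL, &b };

	printf("found %p\n", (void *)find_vsi(tbl, 3, 9));   /* &b */
	printf("found %p\n", (void *)find_vsi(tbl, 3, 42));  /* NULL */
	return 0;
}
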
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index a230edb38466..3991d62473bf 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -647,6 +647,23 @@ void ice_devlink_unregister(struct ice_pf *pf)
devlink_unregister(priv_to_devlink(pf));
}
+/**
+ * ice_devlink_set_switch_id - Set unique switch id based on pci dsn
+ * @pf: the PF to create a devlink port for
+ * @ppid: struct with switch id information
+ */
+static void
+ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
+{
+ struct pci_dev *pdev = pf->pdev;
+ u64 id;
+
+ id = pci_get_dsn(pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
+
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
@@ -704,6 +721,9 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = pf->hw.bus.func;
+
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
devlink_port_attrs_set(devlink_port, &attrs);
devlink = priv_to_devlink(pf);
@@ -753,13 +773,18 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
pf = vf->pf;
dev = ice_pf_to_dev(pf);
- vsi = ice_get_vf_vsi(vf);
devlink_port = &vf->devlink_port;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
attrs.pci_vf.pf = pf->hw.bus.func;
attrs.pci_vf.vf = vf->vf_id;
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
devlink_port_attrs_set(devlink_port, &attrs);
devlink = priv_to_devlink(pf);
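
Both the PF and VF devlink ports now carry a switch id derived from the PCI device serial number, so devlink/switchdev tooling can tell the ports belong to the same switch. A standalone sketch of the big-endian packing that put_unaligned_be64() performs, using a userspace stand-in helper and a made-up DSN value:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for put_unaligned_be64(): store a 64-bit value
 * most-significant byte first into an arbitrarily aligned buffer. */
static void store_be64(uint64_t val, uint8_t *dst)
{
	int i;

	for (i = 0; i < 8; i++)
		dst[i] = (uint8_t)(val >> (56 - 8 * i));
}

int main(void)
{
	uint8_t id[8];
	uint64_t dsn = 0x0011223344556677ULL;	/* made-up device serial number */

	store_be64(dsn, id);
	for (int i = 0; i < 8; i++)
		printf("%02x", id[i]);		/* prints 0011223344556677 */
	printf("\n");
	return 0;
}
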
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 24cda7e1f916..476bd1c83c87 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -190,19 +190,17 @@ __ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
nvm->eetrack, orom->major, orom->build, orom->patch);
+
+ strscpy(drvinfo->bus_info, pci_name(pf->pdev),
+ sizeof(drvinfo->bus_info));
}
static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
__ice_get_drvinfo(netdev, drvinfo, np->vsi);
-
- strscpy(drvinfo->bus_info, pci_name(pf->pdev),
- sizeof(drvinfo->bus_info));
-
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index 35579cf4283f..57586a2e6dec 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -76,8 +76,7 @@ static void ice_gnss_read(struct kthread_work *work)
for (i = 0; i < data_len; i += bytes_read) {
u16 bytes_left = data_len - i;
- bytes_read = bytes_left < ICE_MAX_I2C_DATA_SIZE ? bytes_left :
- ICE_MAX_I2C_DATA_SIZE;
+ bytes_read = min_t(typeof(bytes_left), bytes_left, ICE_MAX_I2C_DATA_SIZE);
err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA),
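
The GNSS read loop now clamps each I2C transfer with min_t() instead of an open-coded ternary; casting both operands to the type of bytes_left keeps the comparison within one unsigned type. A runnable sketch of the same clamping loop (the chunk size below is an assumed stand-in, not the driver's real ICE_MAX_I2C_DATA_SIZE):

#include <stdio.h>

/* Userspace stand-in for the kernel's min_t(): cast both operands to
 * the named type before comparing, so u16 vs int doesn't mix. */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

#define MAX_CHUNK 15	/* illustrative chunk size only */

int main(void)
{
	unsigned short data_len = 40, i, bytes_read;

	for (i = 0; i < data_len; i += bytes_read) {
		unsigned short bytes_left = data_len - i;

		bytes_read = min_t(unsigned short, bytes_left, MAX_CHUNK);
		printf("read %u bytes at offset %u\n", bytes_read, i);
	}
	return 0;
}
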
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 3e3b2ed4cd5d..895c32bcc8b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -51,21 +51,6 @@ finish:
}
/**
- * ice_find_vsi - Find the VSI from VSI ID
- * @pf: The PF pointer to search in
- * @vsi_num: The VSI ID to search for
- */
-static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
-{
- int i;
-
- ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
- return pf->vsi[i];
- return NULL;
-}
-
-/**
* ice_add_rdma_qset - Add Leaf Node for RDMA Qset
* @pf: PF struct
* @qset: Resource to be allocated
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 949669fed7d6..4a5d4d971161 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -297,6 +297,20 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
}
/**
+ * ice_get_devlink_port - Get devlink port from netdev
+ * @netdev: the netdevice structure
+ */
+static struct devlink_port *ice_get_devlink_port(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ if (!ice_is_switchdev_running(pf))
+ return NULL;
+
+ return &pf->devlink_port;
+}
+
+/**
* ice_vsi_sync_fltr - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -3336,7 +3350,9 @@ static void ice_set_netdev_features(struct net_device *netdev)
vlano_features | tso_features;
/* add support for HW_CSUM on packets with MPLS header */
- netdev->mpls_features = NETIF_F_HW_CSUM;
+ netdev->mpls_features = NETIF_F_HW_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
/* enable features */
netdev->features |= netdev->hw_features;
@@ -5674,11 +5690,12 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
* @dev: the net device pointer
* @addr: the MAC address entry being added
* @vid: VLAN ID
+ * @extack: netlink extended ack
*/
static int
ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
- __always_unused u16 vid)
+ __always_unused u16 vid, struct netlink_ext_ack *extack)
{
int err;
@@ -8926,4 +8943,5 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
+ .ndo_get_devlink_port = ice_get_devlink_port,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 848f2adea563..0dac67cd9c77 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -293,8 +293,13 @@ static int ice_repr_add(struct ice_vf *vf)
struct ice_q_vector *q_vector;
struct ice_netdev_priv *np;
struct ice_repr *repr;
+ struct ice_vsi *vsi;
int err;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
repr = kzalloc(sizeof(*repr), GFP_KERNEL);
if (!repr)
return -ENOMEM;
@@ -313,7 +318,7 @@ static int ice_repr_add(struct ice_vf *vf)
goto err_alloc;
}
- repr->src_vsi = ice_get_vf_vsi(vf);
+ repr->src_vsi = vsi;
repr->vf = vf;
vf->repr = repr;
np = netdev_priv(repr->netdev);
@@ -333,6 +338,7 @@ static int ice_repr_add(struct ice_vf *vf)
repr->netdev->min_mtu = ETH_MIN_MTU;
repr->netdev->max_mtu = ICE_MAX_MTU;
+ SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
err = ice_repr_reg_netdev(repr->netdev);
if (err)
goto err_netdev;
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 0c438219f7a3..bb1721f1321d 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -46,7 +46,12 @@ static void ice_free_vf_entries(struct ice_pf *pf)
*/
static void ice_vf_vsi_release(struct ice_vf *vf)
{
- ice_vsi_release(ice_get_vf_vsi(vf));
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_vsi_release(vsi);
ice_vf_invalidate_vsi(vf);
}
@@ -104,6 +109,8 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
hw = &pf->hw;
vsi = ice_get_vf_vsi(vf);
+ if (WARN_ON(!vsi))
+ return;
dev = ice_pf_to_dev(pf);
wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
@@ -341,6 +348,9 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
struct ice_hw *hw = &vf->pf->hw;
u32 reg;
+ if (WARN_ON(!vsi))
+ return;
+
/* set regardless of mapping mode */
wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
@@ -386,6 +396,9 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
{
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ if (WARN_ON(!vsi))
+ return;
+
ice_ena_vf_msix_mappings(vf);
ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}
@@ -1128,6 +1141,8 @@ static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
u16 rxq_idx;
vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ continue;
ice_for_each_rxq(vsi, rxq_idx)
if (vsi->rxq_map[rxq_idx] == pfq) {
@@ -1521,8 +1536,15 @@ static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
static bool
ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
{
- int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
- int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ int all_vfs_min_tx_rate;
+ int link_speed_mbps;
+
+ if (WARN_ON(!vsi))
+ return false;
+
+ link_speed_mbps = ice_get_link_speed_mbps(vsi);
+ all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
/* this VF's previous rate is being overwritten */
all_vfs_min_tx_rate -= vf->min_tx_rate;
@@ -1566,6 +1588,10 @@ ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
goto out_put_vf;
vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
/* when max_tx_rate is zero that means no max Tx rate limiting, so only
* check if max_tx_rate is non-zero
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 25b8f6f726eb..9f0a4dfb4818 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -30,12 +30,46 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
+enum {
+ ICE_PKT_VLAN = BIT(0),
+ ICE_PKT_OUTER_IPV6 = BIT(1),
+ ICE_PKT_TUN_GTPC = BIT(2),
+ ICE_PKT_TUN_GTPU = BIT(3),
+ ICE_PKT_TUN_NVGRE = BIT(4),
+ ICE_PKT_TUN_UDP = BIT(5),
+ ICE_PKT_INNER_IPV6 = BIT(6),
+ ICE_PKT_INNER_TCP = BIT(7),
+ ICE_PKT_INNER_UDP = BIT(8),
+ ICE_PKT_GTP_NOPAY = BIT(9),
+};
+
struct ice_dummy_pkt_offsets {
enum ice_protocol_type type;
u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
-static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
+struct ice_dummy_pkt_profile {
+ const struct ice_dummy_pkt_offsets *offsets;
+ const u8 *pkt;
+ u32 match;
+ u16 pkt_len;
+};
+
+#define ICE_DECLARE_PKT_OFFSETS(type) \
+ static const struct ice_dummy_pkt_offsets \
+ ice_dummy_##type##_packet_offsets[]
+
+#define ICE_DECLARE_PKT_TEMPLATE(type) \
+ static const u8 ice_dummy_##type##_packet[]
+
+#define ICE_PKT_PROFILE(type, m) { \
+ .match = (m), \
+ .pkt = ice_dummy_##type##_packet, \
+ .pkt_len = sizeof(ice_dummy_##type##_packet), \
+ .offsets = ice_dummy_##type##_packet_offsets, \
+}
+
+ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -47,7 +81,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -82,7 +116,7 @@ static const u8 dummy_gre_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -94,7 +128,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -126,7 +160,7 @@ static const u8 dummy_gre_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
-static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -141,7 +175,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -179,7 +213,7 @@ static const u8 dummy_udp_tun_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -194,7 +228,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -229,8 +263,7 @@ static const u8 dummy_udp_tun_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
-static const struct ice_dummy_pkt_offsets
-dummy_gre_ipv6_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -242,7 +275,7 @@ dummy_gre_ipv6_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_ipv6_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -282,8 +315,7 @@ static const u8 dummy_gre_ipv6_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets
-dummy_gre_ipv6_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -295,7 +327,7 @@ dummy_gre_ipv6_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_ipv6_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -332,8 +364,7 @@ static const u8 dummy_gre_ipv6_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
-static const struct ice_dummy_pkt_offsets
-dummy_udp_tun_ipv6_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -348,7 +379,7 @@ dummy_udp_tun_ipv6_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_ipv6_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -391,8 +422,7 @@ static const u8 dummy_udp_tun_ipv6_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets
-dummy_udp_tun_ipv6_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -407,7 +437,7 @@ dummy_udp_tun_ipv6_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_ipv6_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -448,7 +478,7 @@ static const u8 dummy_udp_tun_ipv6_udp_packet[] = {
};
/* offset info for MAC + IPv4 + UDP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -457,7 +487,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
};
/* Dummy packet for MAC + IPv4 + UDP */
-static const u8 dummy_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -477,7 +507,7 @@ static const u8 dummy_udp_packet[] = {
};
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(vlan_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_VLAN_OFOS, 12 },
{ ICE_ETYPE_OL, 16 },
@@ -487,7 +517,7 @@ static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
};
/* C-tag (801.1Q), IPv4:UDP dummy packet */
-static const u8 dummy_vlan_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(vlan_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -509,7 +539,7 @@ static const u8 dummy_vlan_udp_packet[] = {
};
/* offset info for MAC + IPv4 + TCP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -518,7 +548,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
};
/* Dummy packet for MAC + IPv4 + TCP */
-static const u8 dummy_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -541,7 +571,7 @@ static const u8 dummy_tcp_packet[] = {
};
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(vlan_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_VLAN_OFOS, 12 },
{ ICE_ETYPE_OL, 16 },
@@ -551,7 +581,7 @@ static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
};
/* C-tag (801.1Q), IPv4:TCP dummy packet */
-static const u8 dummy_vlan_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(vlan_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -575,7 +605,7 @@ static const u8 dummy_vlan_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV6_OFOS, 14 },
@@ -583,7 +613,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_tcp_ipv6_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -611,8 +641,7 @@ static const u8 dummy_tcp_ipv6_packet[] = {
};
/* C-tag (802.1Q): IPv6 + TCP */
-static const struct ice_dummy_pkt_offsets
-dummy_vlan_tcp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(vlan_tcp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_VLAN_OFOS, 12 },
{ ICE_ETYPE_OL, 16 },
@@ -622,7 +651,7 @@ dummy_vlan_tcp_ipv6_packet_offsets[] = {
};
/* C-tag (802.1Q), IPv6 + TCP dummy packet */
-static const u8 dummy_vlan_tcp_ipv6_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(vlan_tcp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -652,7 +681,7 @@ static const u8 dummy_vlan_tcp_ipv6_packet[] = {
};
/* IPv6 + UDP */
-static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV6_OFOS, 14 },
@@ -661,7 +690,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
};
/* IPv6 + UDP dummy packet */
-static const u8 dummy_udp_ipv6_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -689,8 +718,7 @@ static const u8 dummy_udp_ipv6_packet[] = {
};
/* C-tag (802.1Q): IPv6 + UDP */
-static const struct ice_dummy_pkt_offsets
-dummy_vlan_udp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(vlan_udp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_VLAN_OFOS, 12 },
{ ICE_ETYPE_OL, 16 },
@@ -700,7 +728,7 @@ dummy_vlan_udp_ipv6_packet_offsets[] = {
};
/* C-tag (802.1Q), IPv6 + UDP dummy packet */
-static const u8 dummy_vlan_udp_ipv6_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(vlan_udp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -727,8 +755,7 @@ static const u8 dummy_vlan_udp_ipv6_packet[] = {
};
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_OF, 34 },
@@ -738,7 +765,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -776,8 +803,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
};
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_OF, 34 },
@@ -787,7 +813,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -822,8 +848,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
};
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_OF, 34 },
@@ -833,7 +858,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -875,8 +900,7 @@ static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_OF, 34 },
@@ -886,7 +910,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -925,8 +949,7 @@ static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -936,7 +959,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -978,8 +1001,7 @@ static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -989,7 +1011,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1028,8 +1050,7 @@ static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -1039,7 +1060,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1086,8 +1107,7 @@ static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -1097,7 +1117,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1141,7 +1161,15 @@ static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP_NO_PAY, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1171,17 +1199,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
0x00, 0x00,
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
- { ICE_MAC_OFOS, 0 },
- { ICE_IPV4_OFOS, 14 },
- { ICE_UDP_OF, 34 },
- { ICE_GTP_NO_PAY, 42 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -1189,7 +1207,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1215,6 +1233,55 @@ static const u8 dummy_ipv6_gtp_packet[] = {
0x00, 0x00,
};
+static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
+ ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_GTP_NOPAY),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
+ ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
+ ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
+ ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
+ ICE_PKT_PROFILE(vlan_udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP |
+ ICE_PKT_VLAN),
+ ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(vlan_udp, ICE_PKT_INNER_UDP | ICE_PKT_VLAN),
+ ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(vlan_tcp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_VLAN),
+ ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(vlan_tcp, ICE_PKT_VLAN),
+ ICE_PKT_PROFILE(tcp, 0),
+};
+
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
(DUMMY_ETH_HDR_LEN * \
@@ -5501,212 +5568,66 @@ err_free_lkup_exts:
* structure per protocol header
* @lkups_cnt: number of protocols
* @tun_type: tunnel type
- * @pkt: dummy packet to fill according to filter match criteria
- * @pkt_len: packet length of dummy packet
- * @offsets: pointer to receive the pointer to the offsets for the packet
+ *
+ * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
*/
-static void
+static const struct ice_dummy_pkt_profile *
ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
- enum ice_sw_tunnel_type tun_type,
- const u8 **pkt, u16 *pkt_len,
- const struct ice_dummy_pkt_offsets **offsets)
+ enum ice_sw_tunnel_type tun_type)
{
- bool inner_tcp = false, inner_udp = false, outer_ipv6 = false;
- bool vlan = false, inner_ipv6 = false, gtp_no_pay = false;
+ const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
+ u32 match = 0;
u16 i;
+ switch (tun_type) {
+ case ICE_SW_TUN_GTPC:
+ match |= ICE_PKT_TUN_GTPC;
+ break;
+ case ICE_SW_TUN_GTPU:
+ match |= ICE_PKT_TUN_GTPU;
+ break;
+ case ICE_SW_TUN_NVGRE:
+ match |= ICE_PKT_TUN_NVGRE;
+ break;
+ case ICE_SW_TUN_GENEVE:
+ case ICE_SW_TUN_VXLAN:
+ match |= ICE_PKT_TUN_UDP;
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < lkups_cnt; i++) {
if (lkups[i].type == ICE_UDP_ILOS)
- inner_udp = true;
+ match |= ICE_PKT_INNER_UDP;
else if (lkups[i].type == ICE_TCP_IL)
- inner_tcp = true;
+ match |= ICE_PKT_INNER_TCP;
else if (lkups[i].type == ICE_IPV6_OFOS)
- outer_ipv6 = true;
+ match |= ICE_PKT_OUTER_IPV6;
else if (lkups[i].type == ICE_VLAN_OFOS)
- vlan = true;
+ match |= ICE_PKT_VLAN;
else if (lkups[i].type == ICE_ETYPE_OL &&
lkups[i].h_u.ethertype.ethtype_id ==
cpu_to_be16(ICE_IPV6_ETHER_ID) &&
lkups[i].m_u.ethertype.ethtype_id ==
cpu_to_be16(0xFFFF))
- outer_ipv6 = true;
+ match |= ICE_PKT_OUTER_IPV6;
else if (lkups[i].type == ICE_ETYPE_IL &&
lkups[i].h_u.ethertype.ethtype_id ==
cpu_to_be16(ICE_IPV6_ETHER_ID) &&
lkups[i].m_u.ethertype.ethtype_id ==
cpu_to_be16(0xFFFF))
- inner_ipv6 = true;
+ match |= ICE_PKT_INNER_IPV6;
else if (lkups[i].type == ICE_IPV6_IL)
- inner_ipv6 = true;
+ match |= ICE_PKT_INNER_IPV6;
else if (lkups[i].type == ICE_GTP_NO_PAY)
- gtp_no_pay = true;
+ match |= ICE_PKT_GTP_NOPAY;
}
- if (tun_type == ICE_SW_TUN_GTPU) {
- if (outer_ipv6) {
- if (gtp_no_pay) {
- *pkt = dummy_ipv6_gtp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtp_packet);
- *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
- } else if (inner_ipv6) {
- if (inner_udp) {
- *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
- *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
- } else {
- *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
- *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
- }
- } else {
- if (inner_udp) {
- *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
- *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
- } else {
- *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
- *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
- }
- }
- } else {
- if (gtp_no_pay) {
- *pkt = dummy_ipv4_gtpu_ipv4_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
- *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
- } else if (inner_ipv6) {
- if (inner_udp) {
- *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
- *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
- } else {
- *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
- *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
- }
- } else {
- if (inner_udp) {
- *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
- *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
- } else {
- *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
- *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
- }
- }
- }
- return;
- }
-
- if (tun_type == ICE_SW_TUN_GTPC) {
- if (outer_ipv6) {
- *pkt = dummy_ipv6_gtp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtp_packet);
- *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
- } else {
- *pkt = dummy_ipv4_gtpu_ipv4_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
- *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
- }
- return;
- }
-
- if (tun_type == ICE_SW_TUN_NVGRE) {
- if (inner_tcp && inner_ipv6) {
- *pkt = dummy_gre_ipv6_tcp_packet;
- *pkt_len = sizeof(dummy_gre_ipv6_tcp_packet);
- *offsets = dummy_gre_ipv6_tcp_packet_offsets;
- return;
- }
- if (inner_tcp) {
- *pkt = dummy_gre_tcp_packet;
- *pkt_len = sizeof(dummy_gre_tcp_packet);
- *offsets = dummy_gre_tcp_packet_offsets;
- return;
- }
- if (inner_ipv6) {
- *pkt = dummy_gre_ipv6_udp_packet;
- *pkt_len = sizeof(dummy_gre_ipv6_udp_packet);
- *offsets = dummy_gre_ipv6_udp_packet_offsets;
- return;
- }
- *pkt = dummy_gre_udp_packet;
- *pkt_len = sizeof(dummy_gre_udp_packet);
- *offsets = dummy_gre_udp_packet_offsets;
- return;
- }
-
- if (tun_type == ICE_SW_TUN_VXLAN ||
- tun_type == ICE_SW_TUN_GENEVE) {
- if (inner_tcp && inner_ipv6) {
- *pkt = dummy_udp_tun_ipv6_tcp_packet;
- *pkt_len = sizeof(dummy_udp_tun_ipv6_tcp_packet);
- *offsets = dummy_udp_tun_ipv6_tcp_packet_offsets;
- return;
- }
- if (inner_tcp) {
- *pkt = dummy_udp_tun_tcp_packet;
- *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
- *offsets = dummy_udp_tun_tcp_packet_offsets;
- return;
- }
- if (inner_ipv6) {
- *pkt = dummy_udp_tun_ipv6_udp_packet;
- *pkt_len = sizeof(dummy_udp_tun_ipv6_udp_packet);
- *offsets = dummy_udp_tun_ipv6_udp_packet_offsets;
- return;
- }
- *pkt = dummy_udp_tun_udp_packet;
- *pkt_len = sizeof(dummy_udp_tun_udp_packet);
- *offsets = dummy_udp_tun_udp_packet_offsets;
- return;
- }
+ while (ret->match && (match & ret->match) != ret->match)
+ ret++;
- if (inner_udp && !outer_ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_udp_packet;
- *pkt_len = sizeof(dummy_vlan_udp_packet);
- *offsets = dummy_vlan_udp_packet_offsets;
- return;
- }
- *pkt = dummy_udp_packet;
- *pkt_len = sizeof(dummy_udp_packet);
- *offsets = dummy_udp_packet_offsets;
- return;
- } else if (inner_udp && outer_ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_udp_ipv6_packet;
- *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
- *offsets = dummy_vlan_udp_ipv6_packet_offsets;
- return;
- }
- *pkt = dummy_udp_ipv6_packet;
- *pkt_len = sizeof(dummy_udp_ipv6_packet);
- *offsets = dummy_udp_ipv6_packet_offsets;
- return;
- } else if ((inner_tcp && outer_ipv6) || outer_ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_tcp_ipv6_packet;
- *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
- *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
- return;
- }
- *pkt = dummy_tcp_ipv6_packet;
- *pkt_len = sizeof(dummy_tcp_ipv6_packet);
- *offsets = dummy_tcp_ipv6_packet_offsets;
- return;
- }
-
- if (vlan) {
- *pkt = dummy_vlan_tcp_packet;
- *pkt_len = sizeof(dummy_vlan_tcp_packet);
- *offsets = dummy_vlan_tcp_packet_offsets;
- } else {
- *pkt = dummy_tcp_packet;
- *pkt_len = sizeof(dummy_tcp_packet);
- *offsets = dummy_tcp_packet_offsets;
- }
+ return ret;
}
/**
@@ -5716,15 +5637,12 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* structure per protocol header
* @lkups_cnt: number of protocols
* @s_rule: stores rule information from the match criteria
- * @dummy_pkt: dummy packet to fill according to filter match criteria
- * @pkt_len: packet length of dummy packet
- * @offsets: offset info for the dummy packet
+ * @profile: dummy packet profile (the template, its size and header offsets)
*/
static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
struct ice_aqc_sw_rules_elem *s_rule,
- const u8 *dummy_pkt, u16 pkt_len,
- const struct ice_dummy_pkt_offsets *offsets)
+ const struct ice_dummy_pkt_profile *profile)
{
u8 *pkt;
u16 i;
@@ -5734,9 +5652,10 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
*/
pkt = s_rule->pdata.lkup_tx_rx.hdr;
- memcpy(pkt, dummy_pkt, pkt_len);
+ memcpy(pkt, profile->pkt, profile->pkt_len);
for (i = 0; i < lkups_cnt; i++) {
+ const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
enum ice_protocol_type type;
u16 offset = 0, len = 0, j;
bool found = false;
@@ -5810,16 +5729,18 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* indicated by the mask to make sure we don't improperly write
* over any significant packet data.
*/
- for (j = 0; j < len / sizeof(u16); j++)
- if (((u16 *)&lkups[i].m_u)[j])
- ((u16 *)(pkt + offset))[j] =
- (((u16 *)(pkt + offset))[j] &
- ~((u16 *)&lkups[i].m_u)[j]) |
- (((u16 *)&lkups[i].h_u)[j] &
- ((u16 *)&lkups[i].m_u)[j]);
+ for (j = 0; j < len / sizeof(u16); j++) {
+ u16 *ptr = (u16 *)(pkt + offset);
+ u16 mask = lkups[i].m_raw[j];
+
+ if (!mask)
+ continue;
+
+ ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
+ }
}
- s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);
+ s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(profile->pkt_len);
return 0;
}
@@ -6042,12 +5963,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_rule_query_data *added_entry)
{
struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
- u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
- const struct ice_dummy_pkt_offsets *pkt_offsets;
struct ice_aqc_sw_rules_elem *s_rule = NULL;
+ const struct ice_dummy_pkt_profile *profile;
+ u16 rid = 0, i, rule_buf_sz, vsi_handle;
struct list_head *rule_head;
struct ice_switch_info *sw;
- const u8 *pkt = NULL;
u16 word_cnt;
u32 act = 0;
int status;
@@ -6065,24 +5985,21 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* get # of words we need to match */
word_cnt = 0;
for (i = 0; i < lkups_cnt; i++) {
- u16 j, *ptr;
+ u16 j;
- ptr = (u16 *)&lkups[i].m_u;
- for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
- if (ptr[j] != 0)
+ for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
+ if (lkups[i].m_raw[j])
word_cnt++;
}
- if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
+ if (!word_cnt)
return -EINVAL;
- /* make sure that we can locate a dummy packet */
- ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
- &pkt_offsets);
- if (!pkt) {
- status = -EINVAL;
- goto err_ice_add_adv_rule;
- }
+ if (word_cnt > ICE_MAX_CHAIN_WORDS)
+ return -ENOSPC;
+
+ /* locate a dummy packet */
+ profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
@@ -6123,7 +6040,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
return status;
}
- rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + profile->pkt_len;
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
@@ -6183,8 +6100,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
- status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
- pkt_len, pkt_offsets);
+ status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
if (status)
goto err_ice_add_adv_rule;
@@ -6192,7 +6108,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
s_rule->pdata.lkup_tx_rx.hdr,
- pkt_offsets);
+ profile->offsets);
if (status)
goto err_ice_add_adv_rule;
}
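
The main change in ice_switch.c replaces the long if/else ladder in ice_find_dummy_packet() with a table of ICE_PKT_PROFILE entries ordered most-specific first: a request bitmask is built from the tunnel type and the lookup elements, and the first profile whose required bits are all present wins, with a zero-match entry as the guaranteed fallback. A compact standalone sketch of that selection loop (flag names shortened and the table trimmed to a few illustrative rows):

#include <stdio.h>
#include <stdint.h>

/* Flag bits, mirroring the spirit of the ICE_PKT_* enum. */
enum {
	PKT_VLAN      = 1 << 0,
	PKT_OUTER_V6  = 1 << 1,
	PKT_INNER_UDP = 1 << 2,
};

struct profile {
	uint32_t match;		/* every bit here must be present in the request */
	const char *name;	/* stand-in for the dummy packet template */
};

/* Ordered most-specific first; the terminating entry has match == 0,
 * so the scan below always finds something. */
static const struct profile profiles[] = {
	{ PKT_VLAN | PKT_OUTER_V6 | PKT_INNER_UDP, "vlan_udp_ipv6" },
	{ PKT_OUTER_V6 | PKT_INNER_UDP,            "udp_ipv6"      },
	{ PKT_VLAN,                                "vlan_tcp"      },
	{ 0,                                       "tcp"           },
};

static const struct profile *find_profile(uint32_t match)
{
	const struct profile *p = profiles;

	/* Walk until every bit required by the profile is set in match. */
	while (p->match && (match & p->match) != p->match)
		p++;
	return p;
}

int main(void)
{
	printf("%s\n", find_profile(PKT_OUTER_V6 | PKT_INNER_UDP)->name); /* udp_ipv6 */
	printf("%s\n", find_profile(PKT_VLAN)->name);                      /* vlan_tcp */
	printf("%s\n", find_profile(0)->name);                             /* tcp */
	return 0;
}
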
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index ed3d1d03befa..ecac75e71395 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -138,8 +138,16 @@ struct ice_update_recipe_lkup_idx_params {
struct ice_adv_lkup_elem {
enum ice_protocol_type type;
- union ice_prot_hdr h_u; /* Header values */
- union ice_prot_hdr m_u; /* Mask of header values to match */
+ union {
+ union ice_prot_hdr h_u; /* Header values */
+ /* Used to iterate over the headers */
+ u16 h_raw[sizeof(union ice_prot_hdr) / sizeof(u16)];
+ };
+ union {
+ union ice_prot_hdr m_u; /* Mask of header values to match */
+ /* Used to iterate over header mask */
+ u16 m_raw[sizeof(union ice_prot_hdr) / sizeof(u16)];
+ };
};
struct ice_sw_act_ctrl {
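
The anonymous unions above let ice_fill_adv_dummy_packet() and ice_add_adv_rule() walk the header and mask as arrays of u16 words (h_raw/m_raw) instead of casting &h_u/&m_u. A standalone sketch of that aliasing and of the masked-merge expression used when writing match words into the dummy packet (field names are made up; the real union ice_prot_hdr covers many protocol headers):

#include <stdio.h>
#include <stdint.h>

/* Conceptual mirror of the anonymous-union layout: the same storage can
 * be viewed as a struct or as an array of 16-bit words. */
union hdr {
	struct { uint16_t dst_hi, dst_mid, dst_lo; } mac;	/* made-up fields */
	uint16_t raw[3];
};

int main(void)
{
	uint16_t pkt[3] = { 0x1111, 0x2222, 0x3333 };		/* dummy packet words */
	union hdr h = { .mac = { 0xaaaa, 0xbbbb, 0xcccc } };	/* header values */
	union hdr m = { .raw = { 0xffff, 0x0000, 0x00ff } };	/* match mask */

	for (int j = 0; j < 3; j++) {
		uint16_t mask = m.raw[j];

		if (!mask)
			continue;
		/* Only the masked bits are overwritten with the header value. */
		pkt[j] = (pkt[j] & ~mask) | (h.raw[j] & mask);
	}
	printf("%04x %04x %04x\n", pkt[0], pkt[1], pkt[2]);	/* aaaa 2222 33cc */
	return 0;
}
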
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 3acd9f921c44..0a0c55fb8699 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -622,7 +622,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
} else if (ret) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
"Unable to add filter due to error");
- ret = -EIO;
goto exit;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index f9bf008471c9..3f8b7274ed2f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -8,6 +8,7 @@
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
+#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
@@ -1748,18 +1749,24 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* compute outer L2 header size */
l2_len = ip.hdr - skb->data;
offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
- protocol = vlan_get_protocol(skb);
-
- if (protocol == htons(ETH_P_IP))
+ /* set the tx_flags to indicate the IP protocol type. this is
+ * required so that checksum header computation below is accurate.
+ */
+ if (ip.v4->version == 4)
first->tx_flags |= ICE_TX_FLAGS_IPV4;
- else if (protocol == htons(ETH_P_IPV6))
+ else if (ip.v6->version == 6)
first->tx_flags |= ICE_TX_FLAGS_IPV6;
if (skb->encapsulation) {
@@ -1957,6 +1964,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
+ __be16 protocol;
u32 paylen;
u8 l4_start;
int err;
@@ -1972,8 +1980,13 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
return err;
/* cppcheck-suppress unreadVariable */
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
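
For MPLS-encapsulated traffic the checksum and TSO paths now start from skb_inner_network_header() and skb_checksum_start() rather than the outer network/transport headers, since for such frames the outer network header covers the MPLS stack. Toy offset arithmetic for an Ethernet + one-label MPLS + IPv4 + TCP frame, assuming fixed header sizes:

#include <stdio.h>

#define ETH_HLEN   14
#define MPLS_HLEN   4	/* one label assumed */
#define IPV4_HLEN  20

int main(void)
{
	unsigned int network_hdr       = ETH_HLEN;			/* MPLS stack */
	unsigned int inner_network_hdr = ETH_HLEN + MPLS_HLEN;		/* IPv4 */
	unsigned int csum_start        = inner_network_hdr + IPV4_HLEN; /* TCP */

	/* MACLEN programmed into the Tx descriptor counts everything in
	 * front of the L3 header, so for MPLS it must include the label. */
	printf("l2_len (with MPLS) = %u\n", inner_network_hdr);
	printf("l3 header offset   = %u\n", inner_network_hdr);
	printf("l4/checksum start  = %u\n", csum_start);
	printf("l2_len (old code)  = %u (stops before the MPLS label)\n",
	       network_hdr);
	return 0;
}
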
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index cead3eb149bd..f5a906c03669 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -133,6 +133,7 @@ static inline int ice_skb_pad(void)
#define ICE_XDP_CONSUMED BIT(0)
#define ICE_XDP_TX BIT(1)
#define ICE_XDP_REDIR BIT(2)
+#define ICE_XDP_EXIT BIT(3)
#define ICE_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 6578059d9479..cd8e6b50968c 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -220,8 +220,10 @@ static void ice_vf_clear_counters(struct ice_vf *vf)
{
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ if (vsi)
+ vsi->num_vlan = 0;
+
vf->num_mac = 0;
- vsi->num_vlan = 0;
memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
@@ -251,6 +253,9 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
struct ice_pf *pf = vf->pf;
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
if (ice_vsi_rebuild(vsi, true)) {
dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
vf->vf_id);
@@ -354,12 +359,12 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
* ice_reset_all_vfs - reset all allocated VFs in one go
* @pf: pointer to the PF structure
*
+ * Reset all VFs at once, in response to a PF or other device reset.
+ *
* First, tell the hardware to reset each VF, then do all the waiting in one
* chunk, and finally finish restoring each VF after the wait. This is useful
* during PF routines which need to reset all VFs, as otherwise it must perform
* these resets in a serialized fashion.
- *
- * Returns true if any VFs were reset, and false otherwise.
*/
void ice_reset_all_vfs(struct ice_pf *pf)
{
@@ -472,8 +477,8 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
* ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
* ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
*
- * Returns 0 if the VF is currently in reset, if the resets are disabled, or
- * if the VF resets successfully. Returns an error code if the VF fails to
+ * Returns 0 if the VF is currently in reset, if resets are disabled, or if
+ * the VF resets successfully. Returns an error code if the VF fails to
* rebuild.
*/
int ice_reset_vf(struct ice_vf *vf, u32 flags)
@@ -514,6 +519,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);
vsi = ice_get_vf_vsi(vf);
+ if (WARN_ON(!vsi)) {
+ err = -EIO;
+ goto out_unlock;
+ }
ice_dis_vf_qs(vf);
@@ -572,6 +581,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
vf->vf_ops->post_vsi_rebuild(vf);
vsi = ice_get_vf_vsi(vf);
+ if (WARN_ON(!vsi)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
ice_eswitch_update_repr(vsi);
ice_eswitch_replay_vf_mac_rule(vf);
@@ -610,6 +624,9 @@ void ice_dis_vf_qs(struct ice_vf *vf)
{
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ if (WARN_ON(!vsi))
+ return;
+
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
ice_vsi_stop_all_rx_rings(vsi);
ice_set_vf_state_qs_dis(vf);
@@ -640,6 +657,13 @@ struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
return vf->pf->hw.port_info;
}
+/**
+ * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
+ * @vsi: the VSI to configure
+ * @enable: whether to enable or disable the spoof checking
+ *
+ * Configure a VSI to enable (or disable) spoof checking behavior.
+ */
static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
{
struct ice_vsi_ctx *ctx;
@@ -790,6 +814,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
u8 broadcast[ETH_ALEN];
int status;
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
if (ice_is_eswitch_mode_switchdev(vf->pf))
return 0;
@@ -875,6 +902,9 @@ static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
int err;
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
if (vf->min_tx_rate) {
err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
if (err) {
@@ -938,6 +968,9 @@ void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
struct device *dev = ice_pf_to_dev(vf->pf);
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ if (WARN_ON(!vsi))
+ return;
+
ice_vf_set_host_trust_cfg(vf);
if (ice_vf_rebuild_host_mac_cfg(vf))
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 831b667dc5b2..1b4380d6d949 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -176,7 +176,7 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
* ice_for_each_vf - Iterate over each VF entry
* @pf: pointer to the PF private structure
* @bkt: bucket index used for iteration
- * @vf: pointer to the VF entry currently being processed in the loop.
+ * @vf: pointer to the VF entry currently being processed in the loop
*
* The bkt variable is an unsigned integer iterator used to traverse the VF
* entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
@@ -192,7 +192,7 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
* ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU
* @pf: pointer to the PF private structure
* @bkt: bucket index used for iteration
- * @vf: pointer to the VF entry currently being processed in the loop.
+ * @vf: pointer to the VF entry currently being processed in the loop
*
* The bkt variable is an unsigned integer iterator used to traverse the VF
* entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 2889e050a4c9..1d9b84c3937a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -515,24 +515,6 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
}
/**
- * ice_find_vsi_from_id
- * @pf: the PF structure to search for the VSI
- * @id: ID of the VSI it is searching for
- *
- * searches for the VSI with the given ID
- */
-static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
-{
- int i;
-
- ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
- return pf->vsi[i];
-
- return NULL;
-}
-
-/**
* ice_vc_isvalid_vsi_id
* @vf: pointer to the VF info
* @vsi_id: VF relative VSI ID
@@ -544,7 +526,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_find_vsi_from_id(pf, vsi_id);
+ vsi = ice_find_vsi(pf, vsi_id);
return (vsi && (vsi->vf == vf));
}
@@ -559,7 +541,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
*/
static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
{
- struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+ struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
/* allocated Tx and Rx queues should be always equal for VF VSI */
return (vsi && (qid < vsi->alloc_txq));
}
@@ -2392,6 +2374,11 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
}
vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 8e38ee2faf58..c6a58343d81d 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -1344,12 +1344,17 @@ static void ice_vf_fdir_dump_info(struct ice_vf *vf)
pf = vf->pf;
hw = &pf->hw;
dev = ice_pf_to_dev(pf);
- vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
+ return;
+ }
+
vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
- dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x",
+ dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
vf->vf_id,
(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 9dd38f667059..49ba8bfdbf04 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -545,9 +545,13 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return ICE_XDP_REDIR;
+ if (!err)
+ return ICE_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = ICE_XDP_EXIT;
+ else
+ result = ICE_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
@@ -558,15 +562,16 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (result == ICE_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = ICE_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough;
- case XDP_DROP:
- result = ICE_XDP_CONSUMED;
break;
}
@@ -587,6 +592,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
bool failure = false;
+ int entries_to_alloc;
/* ZC patch is enabled only when XDP program is set,
* so here it can not be NULL
@@ -634,18 +640,23 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
- if (xdp_res) {
- if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
- xdp_xmit |= xdp_res;
- else
- xsk_buff_free(xdp);
+ if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == ICE_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == ICE_XDP_CONSUMED) {
+ xsk_buff_free(xdp);
+ } else if (xdp_res == ICE_XDP_PASS) {
+ goto construct_skb;
+ }
- total_rx_bytes += size;
- total_rx_packets++;
+ total_rx_bytes += size;
+ total_rx_packets++;
+
+ ice_bump_ntc(rx_ring);
+ continue;
- ice_bump_ntc(rx_ring);
- continue;
- }
construct_skb:
/* XDP_PASS path */
skb = ice_construct_skb_zc(rx_ring, xdp);
@@ -673,7 +684,9 @@ construct_skb:
ice_receive_skb(rx_ring, skb, vlan_tag);
}
- failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
+ entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
+ if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
+ failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -929,13 +942,13 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
return -ENETDOWN;
if (!ice_is_xdp_ena_vsi(vsi))
- return -ENXIO;
+ return -EINVAL;
if (queue_id >= vsi->num_txq)
- return -ENXIO;
+ return -EINVAL;
if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
ring = vsi->xdp_rings[queue_id];
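
The zero-copy Rx rework introduces ICE_XDP_EXIT: when xdp_do_redirect() returns -ENOBUFS on a pool that uses need_wakeup, cleaning stops early and the descriptor is left in place for the next poll instead of being dropped. A toy loop mirroring how each verdict is handled after the change:

#include <stdio.h>

/* Result bits, mirroring ICE_XDP_* (PASS is 0, the rest are flags). */
#define XDP_RES_PASS     0
#define XDP_RES_CONSUMED (1 << 0)
#define XDP_RES_TX       (1 << 1)
#define XDP_RES_REDIR    (1 << 2)
#define XDP_RES_EXIT     (1 << 3)	/* new: back off, retry next poll */

/* Toy Rx loop showing the verdict dispatch order. */
static void rx_loop(const int *verdicts, int n)
{
	int failure = 0;

	for (int i = 0; i < n; i++) {
		int res = verdicts[i];

		if (res & (XDP_RES_TX | XDP_RES_REDIR)) {
			printf("desc %d: forwarded\n", i);
		} else if (res == XDP_RES_EXIT) {
			/* Redirect target is full: stop cleaning and keep
			 * this descriptor so it is retried later. */
			failure = 1;
			break;
		} else if (res == XDP_RES_CONSUMED) {
			printf("desc %d: dropped\n", i);
		} else { /* XDP_RES_PASS */
			printf("desc %d: build skb\n", i);
		}
	}
	printf("failure=%d\n", failure);
}

int main(void)
{
	int verdicts[] = { XDP_RES_REDIR, XDP_RES_CONSUMED, XDP_RES_EXIT,
			   XDP_RES_PASS };

	rx_loop(verdicts, 4);
	return 0;
}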