-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c      | 52
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c      | 80
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h      | 24
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c  | 97
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c   |  1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h        |  5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c    | 46
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c     |  2
-rw-r--r--  drivers/net/phy/micrel.c                              | 27
-rw-r--r--  drivers/net/xen-netfront.c                            |  2
10 files changed, 279 insertions(+), 57 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 626b491f7674..0d6c98a9e07b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15376,27 +15376,47 @@ static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
return 0;
}
+#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
+#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
+#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
+#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
+#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
+#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
+#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
+#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
+#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+
int bnx2x_configure_ptp_filters(struct bnx2x *bp)
{
int port = BP_PORT(bp);
+ u32 param, rule;
int rc;
if (!bp->hwtstamp_ioctl_called)
return 0;
+ param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK;
+ rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK;
switch (bp->tx_type) {
case HWTSTAMP_TX_ON:
bp->flags |= TX_TIMESTAMPING_EN;
- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
- NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
- NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
+ REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
BNX2X_ERR("One-step timestamping is not supported\n");
return -ERANGE;
}
+ param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK;
+ rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK;
switch (bp->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
@@ -15410,30 +15430,24 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
+ REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
+ REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
/* Initialize PTP detection L2 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
+ REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
@@ -15441,10 +15455,8 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
+ REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
break;
}
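
The bnx2x change is not a pure refactor: besides hoisting the port-dependent register selection into the param/rule locals, each per-mode constant is now AND-ed with a common P2P detect mask, so the values programmed into the NIG registers change (presumably to stop matching peer-to-peer event messages, judging by the macro names). A standalone userspace sketch, not kernel code, that prints the old literals next to the new effective values:

#include <stdio.h>

#define BNX2X_P2P_DETECT_PARAM_MASK	0x5F5
#define BNX2X_P2P_DETECT_RULE_MASK	0x3DBB

int main(void)
{
	/* old literal -> effective value once the P2P bits are masked out */
	printf("TX_ON param 0x6AA  -> 0x%X\n", BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA);  /* 0x4A0 */
	printf("TX_ON rule  0x3EEE -> 0x%X\n", BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE);  /* 0x3CAA */
	printf("V1_L4 param 0x7EE  -> 0x%X\n", BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE);  /* 0x5E4 */
	printf("V1_L4 rule  0x3FFE -> 0x%X\n", BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE);  /* 0x3DBA */
	printf("V2_L4 param 0x7EA  -> 0x%X\n", BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA);  /* 0x5E0 */
	printf("V2_L4 rule  0x3FEE -> 0x%X\n", BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE);  /* 0x3DAA */
	printf("V2_L2 param 0x6BF  -> 0x%X\n", BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF);  /* 0x4B5 */
	printf("V2_L2 rule  0x3EFF -> 0x%X\n", BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF);  /* 0x3CBB */
	return 0;
}
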
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 2055c97dc22b..63b1ecc18c26 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2571,10 +2571,12 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
.rxnfc_field = RXH_L2DA,
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_DA,
+ .id = DPAA2_ETH_DIST_ETHDST,
.size = 6,
}, {
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_SA,
+ .id = DPAA2_ETH_DIST_ETHSRC,
.size = 6,
}, {
/* This is the last ethertype field parsed:
@@ -2583,28 +2585,33 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
*/
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_TYPE,
+ .id = DPAA2_ETH_DIST_ETHTYPE,
.size = 2,
}, {
/* VLAN header */
.rxnfc_field = RXH_VLAN,
.cls_prot = NET_PROT_VLAN,
.cls_field = NH_FLD_VLAN_TCI,
+ .id = DPAA2_ETH_DIST_VLAN,
.size = 2,
}, {
/* IP header */
.rxnfc_field = RXH_IP_SRC,
.cls_prot = NET_PROT_IP,
.cls_field = NH_FLD_IP_SRC,
+ .id = DPAA2_ETH_DIST_IPSRC,
.size = 4,
}, {
.rxnfc_field = RXH_IP_DST,
.cls_prot = NET_PROT_IP,
.cls_field = NH_FLD_IP_DST,
+ .id = DPAA2_ETH_DIST_IPDST,
.size = 4,
}, {
.rxnfc_field = RXH_L3_PROTO,
.cls_prot = NET_PROT_IP,
.cls_field = NH_FLD_IP_PROTO,
+ .id = DPAA2_ETH_DIST_IPPROTO,
.size = 1,
}, {
/* Using UDP ports, this is functionally equivalent to raw
@@ -2613,11 +2620,13 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
.rxnfc_field = RXH_L4_B_0_1,
.cls_prot = NET_PROT_UDP,
.cls_field = NH_FLD_UDP_PORT_SRC,
+ .id = DPAA2_ETH_DIST_L4SRC,
.size = 2,
}, {
.rxnfc_field = RXH_L4_B_2_3,
.cls_prot = NET_PROT_UDP,
.cls_field = NH_FLD_UDP_PORT_DST,
+ .id = DPAA2_ETH_DIST_L4DST,
.size = 2,
},
};
@@ -2683,12 +2692,15 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
}
/* Size of the Rx flow classification key */
-int dpaa2_eth_cls_key_size(void)
+int dpaa2_eth_cls_key_size(u64 fields)
{
int i, size = 0;
- for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ if (!(fields & dist_fields[i].id))
+ continue;
size += dist_fields[i].size;
+ }
return size;
}
@@ -2709,6 +2721,24 @@ int dpaa2_eth_cls_fld_off(int prot, int field)
return 0;
}
+/* Prune unused fields from the classification rule.
+ * Used when masking is not supported
+ */
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
+{
+ int off = 0, new_off = 0;
+ int i, size;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ size = dist_fields[i].size;
+ if (dist_fields[i].id & fields) {
+ memcpy(key_mem + new_off, key_mem + off, size);
+ new_off += size;
+ }
+ off += size;
+ }
+}
+
/* Set Rx distribution (hash or flow classification) key
* flags is a combination of RXH_ bits
*/
@@ -2730,14 +2760,13 @@ static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
struct dpkg_extract *key =
&cls_cfg.extracts[cls_cfg.num_extracts];
- /* For Rx hashing key we set only the selected fields.
- * For Rx flow classification key we set all supported fields
+ /* For both Rx hashing and classification keys
+ * we set only the selected fields.
*/
- if (type == DPAA2_ETH_RX_DIST_HASH) {
- if (!(flags & dist_fields[i].rxnfc_field))
- continue;
+ if (!(flags & dist_fields[i].id))
+ continue;
+ if (type == DPAA2_ETH_RX_DIST_HASH)
rx_hash_fields |= dist_fields[i].rxnfc_field;
- }
if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
dev_err(dev, "error adding key extraction rule, too many rules?\n");
@@ -2792,16 +2821,28 @@ free_key:
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u64 key = 0;
+ int i;
if (!dpaa2_eth_hash_enabled(priv))
return -EOPNOTSUPP;
- return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+ if (dist_fields[i].rxnfc_field & flags)
+ key |= dist_fields[i].id;
+
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
}
-static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
+{
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
+}
+
+static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
+ int err;
/* Check if we actually support Rx flow classification */
if (dpaa2_eth_has_legacy_dist(priv)) {
@@ -2809,8 +2850,7 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
return -EOPNOTSUPP;
}
- if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
- !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
+ if (!dpaa2_eth_fs_enabled(priv)) {
dev_dbg(dev, "Rx cls disabled in DPNI options\n");
return -EOPNOTSUPP;
}
@@ -2820,9 +2860,21 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
return -EOPNOTSUPP;
}
+ /* If there is no support for masking in the classification table,
+ * we don't set a default key, as it will depend on the rules
+ * added by the user at runtime.
+ */
+ if (!dpaa2_eth_fs_mask_enabled(priv))
+ goto out;
+
+ err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
+ if (err)
+ return err;
+
+out:
priv->rx_cls_enabled = 1;
- return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
+ return 0;
}
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
@@ -2857,7 +2909,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
/* Configure the flow classification key; it includes all
* supported header fields and cannot be modified at runtime
*/
- err = dpaa2_eth_set_cls(priv);
+ err = dpaa2_eth_set_default_cls(priv);
if (err && err != -EOPNOTSUPP)
dev_err(dev, "Failed to configure Rx classification key\n");
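
Two helpers in this file do the heavy lifting when key masking is unavailable: dpaa2_eth_cls_key_size() now sums only the selected fields, and dpaa2_eth_cls_trim_rule() compacts a key that was laid out for the full dist_fields table so that only the selected fields remain, in their original order. A minimal userspace sketch of the same compaction over a toy three-field table (illustrative code, not the driver; memmove is used here since the copies can overlap):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct field { uint64_t id; int size; };

/* toy layout mirroring dist_fields: ETHTYPE (2B), IPSRC (4B), IPDST (4B) */
static const struct field fields[] = {
	{ 1ULL << 2, 2 }, { 1ULL << 4, 4 }, { 1ULL << 5, 4 },
};

static void trim(uint8_t *key, uint64_t sel)
{
	int off = 0, new_off = 0;

	for (size_t i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		if (fields[i].id & sel) {
			memmove(key + new_off, key + off, fields[i].size);
			new_off += fields[i].size;
		}
		off += fields[i].size;
	}
}

int main(void)
{
	uint8_t key[10] = { 0x08, 0x00,		/* ethertype = IPv4 */
			    10, 0, 0, 1,	/* IP src */
			    10, 0, 0, 2 };	/* IP dst */

	trim(key, (1ULL << 2) | (1ULL << 5));	/* keep ethertype + IP dst */
	for (int i = 0; i < 6; i++)
		printf("%02x ", key[i]);	/* prints: 08 00 0a 00 00 02 */
	printf("\n");
	return 0;
}
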
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index a11ebfdc4a23..5fb8f5c0dc9f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -342,6 +342,7 @@ struct dpaa2_eth_dist_fields {
enum net_prot cls_prot;
int cls_field;
int size;
+ u64 id;
};
struct dpaa2_eth_cls_rule {
@@ -394,6 +395,7 @@ struct dpaa2_eth_priv {
/* enabled ethtool hashing bits */
u64 rx_hash_fields;
+ u64 rx_cls_fields;
struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled;
struct bpf_prog *xdp_prog;
@@ -437,6 +439,12 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
(dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \
DPNI_RX_DIST_KEY_VER_MINOR) < 0)
+#define dpaa2_eth_fs_enabled(priv) \
+ (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
+
+#define dpaa2_eth_fs_mask_enabled(priv) \
+ ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
+
#define dpaa2_eth_fs_count(priv) \
((priv)->dpni_attrs.fs_entries)
@@ -449,6 +457,18 @@ enum dpaa2_eth_rx_dist {
DPAA2_ETH_RX_DIST_CLS
};
+/* Unique IDs for the supported Rx classification header fields */
+#define DPAA2_ETH_DIST_ETHDST BIT(0)
+#define DPAA2_ETH_DIST_ETHSRC BIT(1)
+#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
+#define DPAA2_ETH_DIST_VLAN BIT(3)
+#define DPAA2_ETH_DIST_IPSRC BIT(4)
+#define DPAA2_ETH_DIST_IPDST BIT(5)
+#define DPAA2_ETH_DIST_IPPROTO BIT(6)
+#define DPAA2_ETH_DIST_L4SRC BIT(7)
+#define DPAA2_ETH_DIST_L4DST BIT(8)
+#define DPAA2_ETH_DIST_ALL (~0U)
+
static inline
unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
@@ -483,7 +503,9 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
}
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
-int dpaa2_eth_cls_key_size(void);
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
+int dpaa2_eth_cls_key_size(u64 key);
int dpaa2_eth_cls_fld_off(int prot, int field);
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
#endif /* __DPAA2_H */
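
One subtlety in the new header macros: DPAA2_ETH_DIST_ALL is defined as ~0U, an unsigned int, while the fields arguments it is passed to are u64. Because only nine field bits are defined, the zero-extended 32-bit constant still selects every field. A quick standalone check (illustrative only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t all = ~0U;			/* DPAA2_ETH_DIST_ALL, widened */
	uint64_t defined = (1ULL << 9) - 1;	/* bits 0..8 are in use */

	printf("%d\n", (all & defined) == defined);	/* 1: all fields covered */
	return 0;
}
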
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 591dfcf76adb..76bd8d2872cc 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -264,7 +264,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
}
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
- void *key, void *mask)
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -272,18 +272,21 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = eth_value->h_proto;
*(__be16 *)(mask + off) = eth_mask->h_proto;
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
}
if (!is_zero_ether_addr(eth_mask->h_source)) {
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
ether_addr_copy(key + off, eth_value->h_source);
ether_addr_copy(mask + off, eth_mask->h_source);
+ *fields |= DPAA2_ETH_DIST_ETHSRC;
}
if (!is_zero_ether_addr(eth_mask->h_dest)) {
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
ether_addr_copy(key + off, eth_value->h_dest);
ether_addr_copy(mask + off, eth_mask->h_dest);
+ *fields |= DPAA2_ETH_DIST_ETHDST;
}
return 0;
@@ -291,7 +294,7 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
struct ethtool_usrip4_spec *uip_mask,
- void *key, void *mask)
+ void *key, void *mask, u64 *fields)
{
int off;
u32 tmp_value, tmp_mask;
@@ -303,18 +306,21 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
*(__be32 *)(key + off) = uip_value->ip4src;
*(__be32 *)(mask + off) = uip_mask->ip4src;
+ *fields |= DPAA2_ETH_DIST_IPSRC;
}
if (uip_mask->ip4dst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
*(__be32 *)(key + off) = uip_value->ip4dst;
*(__be32 *)(mask + off) = uip_mask->ip4dst;
+ *fields |= DPAA2_ETH_DIST_IPDST;
}
if (uip_mask->proto) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
*(u8 *)(key + off) = uip_value->proto;
*(u8 *)(mask + off) = uip_mask->proto;
+ *fields |= DPAA2_ETH_DIST_IPPROTO;
}
if (uip_mask->l4_4_bytes) {
@@ -324,23 +330,26 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
*(__be16 *)(key + off) = htons(tmp_value >> 16);
*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
+ *fields |= DPAA2_ETH_DIST_L4SRC;
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
+ *fields |= DPAA2_ETH_DIST_L4DST;
}
/* Only apply the rule for IPv4 frames */
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = htons(ETH_P_IP);
*(__be16 *)(mask + off) = htons(0xFFFF);
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
return 0;
}
static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
struct ethtool_tcpip4_spec *l4_mask,
- void *key, void *mask, u8 l4_proto)
+ void *key, void *mask, u8 l4_proto, u64 *fields)
{
int off;
@@ -351,41 +360,47 @@ static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
*(__be32 *)(key + off) = l4_value->ip4src;
*(__be32 *)(mask + off) = l4_mask->ip4src;
+ *fields |= DPAA2_ETH_DIST_IPSRC;
}
if (l4_mask->ip4dst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
*(__be32 *)(key + off) = l4_value->ip4dst;
*(__be32 *)(mask + off) = l4_mask->ip4dst;
+ *fields |= DPAA2_ETH_DIST_IPDST;
}
if (l4_mask->psrc) {
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
*(__be16 *)(key + off) = l4_value->psrc;
*(__be16 *)(mask + off) = l4_mask->psrc;
+ *fields |= DPAA2_ETH_DIST_L4SRC;
}
if (l4_mask->pdst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
*(__be16 *)(key + off) = l4_value->pdst;
*(__be16 *)(mask + off) = l4_mask->pdst;
+ *fields |= DPAA2_ETH_DIST_L4DST;
}
/* Only apply the rule for IPv4 frames with the specified L4 proto */
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = htons(ETH_P_IP);
*(__be16 *)(mask + off) = htons(0xFFFF);
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
*(u8 *)(key + off) = l4_proto;
*(u8 *)(mask + off) = 0xFF;
+ *fields |= DPAA2_ETH_DIST_IPPROTO;
return 0;
}
static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
struct ethtool_flow_ext *ext_mask,
- void *key, void *mask)
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -396,6 +411,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
*(__be16 *)(key + off) = ext_value->vlan_tci;
*(__be16 *)(mask + off) = ext_mask->vlan_tci;
+ *fields |= DPAA2_ETH_DIST_VLAN;
}
return 0;
@@ -403,7 +419,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
struct ethtool_flow_ext *ext_mask,
- void *key, void *mask)
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -411,36 +427,38 @@ static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
ether_addr_copy(key + off, ext_value->h_dest);
ether_addr_copy(mask + off, ext_mask->h_dest);
+ *fields |= DPAA2_ETH_DIST_ETHDST;
}
return 0;
}
-static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
+static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
+ u64 *fields)
{
int err;
switch (fs->flow_type & 0xFF) {
case ETHER_FLOW:
err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
- key, mask);
+ key, mask, fields);
break;
case IP_USER_FLOW:
err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
- &fs->m_u.usr_ip4_spec, key, mask);
+ &fs->m_u.usr_ip4_spec, key, mask, fields);
break;
case TCP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
- key, mask, IPPROTO_TCP);
+ key, mask, IPPROTO_TCP, fields);
break;
case UDP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
- key, mask, IPPROTO_UDP);
+ key, mask, IPPROTO_UDP, fields);
break;
case SCTP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
&fs->m_u.sctp_ip4_spec, key, mask,
- IPPROTO_SCTP);
+ IPPROTO_SCTP, fields);
break;
default:
return -EOPNOTSUPP;
@@ -450,13 +468,14 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
return err;
if (fs->flow_type & FLOW_EXT) {
- err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+ err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
if (err)
return err;
}
if (fs->flow_type & FLOW_MAC_EXT) {
- err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+ err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
+ fields);
if (err)
return err;
}
@@ -473,6 +492,7 @@ static int do_cls_rule(struct net_device *net_dev,
struct dpni_rule_cfg rule_cfg = { 0 };
struct dpni_fs_action_cfg fs_act = { 0 };
dma_addr_t key_iova;
+ u64 fields = 0;
void *key_buf;
int err;
@@ -480,7 +500,7 @@ static int do_cls_rule(struct net_device *net_dev,
fs->ring_cookie >= dpaa2_eth_queue_count(priv))
return -EINVAL;
- rule_cfg.key_size = dpaa2_eth_cls_key_size();
+ rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
/* allocate twice the key size, for the actual key and for mask */
key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
@@ -488,10 +508,36 @@ static int do_cls_rule(struct net_device *net_dev,
return -ENOMEM;
/* Fill the key and mask memory areas */
- err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size);
+ err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
if (err)
goto free_mem;
+ if (!dpaa2_eth_fs_mask_enabled(priv)) {
+ /* Masking allows us to configure a maximal key during init and
+ * use it for all flow steering rules. Without it, we include
+ * in the key only the fields actually used, so we need to
+ * extract the others from the final key buffer.
+ *
+ * Program the FS key if needed, or return error if previously
+ * set key can't be used for the current rule. User needs to
+ * delete existing rules in this case to allow for the new one.
+ */
+ if (!priv->rx_cls_fields) {
+ err = dpaa2_eth_set_cls(net_dev, fields);
+ if (err)
+ goto free_mem;
+
+ priv->rx_cls_fields = fields;
+ } else if (priv->rx_cls_fields != fields) {
+ netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
+ err = -EOPNOTSUPP;
+ goto free_mem;
+ }
+
+ dpaa2_eth_cls_trim_rule(key_buf, fields);
+ rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
+ }
+
key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_iova)) {
@@ -500,7 +546,8 @@ static int do_cls_rule(struct net_device *net_dev,
}
rule_cfg.key_iova = key_iova;
- rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
+ if (dpaa2_eth_fs_mask_enabled(priv))
+ rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
if (add) {
if (fs->ring_cookie == RX_CLS_FLOW_DISC)
@@ -522,6 +569,17 @@ free_mem:
return err;
}
+static int num_rules(struct dpaa2_eth_priv *priv)
+{
+ int i, rules = 0;
+
+ for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
+ if (priv->cls_rules[i].in_use)
+ rules++;
+
+ return rules;
+}
+
static int update_cls_rule(struct net_device *net_dev,
struct ethtool_rx_flow_spec *new_fs,
int location)
@@ -545,6 +603,9 @@ static int update_cls_rule(struct net_device *net_dev,
return err;
rule->in_use = 0;
+
+ if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
+ priv->rx_cls_fields = 0;
}
/* If no new entry to add, return here */
@@ -581,9 +642,7 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
break;
case ETHTOOL_GRXCLSRLCNT:
rxnfc->rule_cnt = 0;
- for (i = 0; i < max_rules; i++)
- if (priv->cls_rules[i].in_use)
- rxnfc->rule_cnt++;
+ rxnfc->rule_cnt = num_rules(priv);
rxnfc->data = max_rules;
break;
case ETHTOOL_GRXCLSRULE:
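
The ethtool side enforces a single key composition when the DPNI lacks masking support: the first rule added (e.g. `ethtool -N eth0 flow-type udp4 dst-port 319 action 1`) programs the FS key from the fields it actually uses, later rules must use exactly the same field set, and deleting the last rule (tracked via num_rules()) clears rx_cls_fields so a differently shaped rule can be accepted again. A condensed, runnable model of that policy (illustrative, not driver code):

#include <stdio.h>
#include <stdint.h>

static uint64_t cls_fields;	/* mirrors priv->rx_cls_fields */
static int rules_in_use;	/* mirrors num_rules(priv) */

static int add_rule(uint64_t fields)
{
	if (!cls_fields)
		cls_fields = fields;	/* first rule programs the key */
	else if (cls_fields != fields)
		return -1;		/* -EOPNOTSUPP in the driver */
	rules_in_use++;
	return 0;
}

static void del_rule(void)
{
	if (rules_in_use && !--rules_in_use)
		cls_fields = 0;		/* key may be reprogrammed */
}

int main(void)
{
	printf("%d\n", add_rule(0x30));	/* 0: IPSRC|IPDST key accepted */
	printf("%d\n", add_rule(0x90));	/* -1: different field set */
	del_rule();
	printf("%d\n", add_rule(0x90));	/* 0: key was reset first */
	return 0;
}
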
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index df81d86ee16b..aefe211da82c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -629,6 +629,7 @@ nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
if (tunnel_act)
*tunnel_act = true;
+ /* fall through */
case NFP_FL_ACTION_OPCODE_PRE_LAG:
memcpy(act_dst + act_off, act_src + act_off, act_len);
break;
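
The one-line nfp change annotates an intentional switch fallthrough. GCC 7's -Wimplicit-fallthrough, which the kernel was enabling tree-wide around this time, recognizes comments of this form at its default level and suppresses the warning (the kernel later replaced these comments with the `fallthrough;` pseudo-keyword). A minimal example of the pattern, compiled with `gcc -Wimplicit-fallthrough`; note the comment must sit immediately before the next case label:

#include <stdio.h>

static void classify(int n)
{
	switch (n) {
	case 0:
		printf("zero, ");
		/* fall through */
	case 1:
		printf("small\n");
		break;
	default:
		printf("large\n");
	}
}

int main(void)
{
	classify(0);	/* prints "zero, small" */
	classify(2);	/* prints "large" */
	return 0;
}
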
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 37d5e6fe7473..085b700a4994 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -143,6 +143,11 @@
#define XGMAC_RSF BIT(5)
#define XGMAC_RTC GENMASK(1, 0)
#define XGMAC_RTC_SHIFT 0
+#define XGMAC_MTL_RXQ_FLOW_CONTROL(x) (0x00001150 + (0x80 * (x)))
+#define XGMAC_RFD GENMASK(31, 17)
+#define XGMAC_RFD_SHIFT 17
+#define XGMAC_RFA GENMASK(15, 1)
+#define XGMAC_RFA_SHIFT 1
#define XGMAC_MTL_QINTEN(x) (0x00001170 + (0x80 * (x)))
#define XGMAC_RXOIE BIT(16)
#define XGMAC_MTL_QINT_STATUS(x) (0x00001174 + (0x80 * (x)))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 2ba712b48a89..e79037f511e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -147,6 +147,52 @@ static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
value &= ~XGMAC_RQS;
value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;
+ if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
+ u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
+ unsigned int rfd, rfa;
+
+ value |= XGMAC_EHFC;
+
+ /* Set Threshold for Activating Flow Control to min 2 frames,
+ * i.e. 1500 * 2 = 3000 bytes.
+ *
+ * Set Threshold for Deactivating Flow Control to min 1 frame,
+ * i.e. 1500 bytes.
+ */
+ switch (fifosz) {
+ case 4096:
+ /* This violates the above formula because of the FIFO size
+ * limit, so overflow may still occur despite flow control.
+ */
+ rfd = 0x03; /* Full-2.5K */
+ rfa = 0x01; /* Full-1.5K */
+ break;
+
+ case 8192:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x0a; /* Full-6K */
+ break;
+
+ case 16384:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x12; /* Full-10K */
+ break;
+
+ default:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x1e; /* Full-16K */
+ break;
+ }
+
+ flow &= ~XGMAC_RFD;
+ flow |= rfd << XGMAC_RFD_SHIFT;
+
+ flow &= ~XGMAC_RFA;
+ flow |= rfa << XGMAC_RFA_SHIFT;
+
+ writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
+ }
+
writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
/* Enable MTL RX overflow */
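
The RFA (activate) and RFD (deactivate) comments are all consistent with the usual XGMAC threshold encoding, in which a field value of 0 means "full minus 1 KB" and each increment subtracts a further 512 bytes. That encoding is inferred from the six comments rather than stated in the patch, but a quick check bears it out:

#include <stdio.h>

/* inferred encoding: free-space threshold below "FIFO full", in bytes */
static unsigned int thresh_bytes(unsigned int field)
{
	return 1024 + field * 512;
}

int main(void)
{
	printf("0x03 -> %5u\n", thresh_bytes(0x03));	/*  2560, Full-2.5K */
	printf("0x01 -> %5u\n", thresh_bytes(0x01));	/*  1536, Full-1.5K */
	printf("0x06 -> %5u\n", thresh_bytes(0x06));	/*  4096, Full-4K   */
	printf("0x0a -> %5u\n", thresh_bytes(0x0a));	/*  6144, Full-6K   */
	printf("0x12 -> %5u\n", thresh_bytes(0x12));	/* 10240, Full-10K  */
	printf("0x1e -> %5u\n", thresh_bytes(0x1e));	/* 16384, Full-16K  */
	return 0;
}
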
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a26e36dbb5df..7a895a2889e3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -74,7 +74,7 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
-static int flow_ctrl = FLOW_OFF;
+static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
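
For context, stmmac's common.h defines the flow control modes roughly as follows (paraphrased here, so treat the exact values as an assumption):

	#define FLOW_OFF	0
	#define FLOW_RX		1
	#define FLOW_TX		2
	#define FLOW_AUTO	(FLOW_TX | FLOW_RX)

so the new default requests pause in both directions, subject to what autonegotiation resolves; the "[on/off]" wording in the parameter description now slightly understates the available settings.
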
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index d6807b5bcd97..ddd6b6374d8c 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -738,6 +738,31 @@ static int ksz8873mll_read_status(struct phy_device *phydev)
return 0;
}
+static int ksz9031_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_read_abilities(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Silicon Errata Sheet (DS80000691D or DS80000692D):
+ * Whenever the device's Asymmetric Pause capability is set to 1,
+ * link-up may fail after a link-up to link-down transition.
+ *
+ * Workaround:
+ * Do not enable the Asymmetric Pause capability bit.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+
+ /* We force setting the Pause capability as the core will force the
+ * Asymmetric Pause capability to 1 otherwise.
+ */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+
+ return 0;
+}
+
static int ksz9031_read_status(struct phy_device *phydev)
{
int err;
@@ -1052,9 +1077,9 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9031,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
- /* PHY_GBIT_FEATURES */
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
+ .get_features = ksz9031_get_features,
.config_init = ksz9031_config_init,
.soft_reset = genphy_soft_reset,
.read_status = ksz9031_read_status,
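
Dropping the static PHY_GBIT_FEATURES entry is safe because phylib only falls back to reading abilities from the hardware when the driver supplies no feature information; with .get_features populated, the callback takes precedence. The probe-time selection in drivers/net/phy/phy_device.c of this era looks roughly like the following (a simplified paraphrase, not verbatim kernel code):

	if (phydrv->features)
		linkmode_copy(phydev->supported, phydrv->features);
	else if (phydrv->get_features)
		err = phydrv->get_features(phydev);
	else if (phydev->is_c45)
		err = genphy_c45_pma_read_abilities(phydev);
	else
		err = genphy_read_abilities(phydev);

This is why ksz9031_get_features() starts from genphy_read_abilities() itself and only then adjusts the pause bits for the erratum.
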
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 80c30321de41..8d33970a2950 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -2037,7 +2037,7 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* Missed the backend's CLOSING state -- fallthrough */
+ /* Fall through - Missed the backend's CLOSING state. */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;