 drivers/net/Space.c                                        |   3
 drivers/net/ethernet/freescale/dpaa2/Makefile              |   2
 drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c | 492
 drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c        | 363
 drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h        |  64
 drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h            |   1
 drivers/net/ethernet/freescale/dpaa2/dpsw.c                |  35
 drivers/net/ethernet/freescale/dpaa2/dpsw.h                |   3
 drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c          |  10
 drivers/net/ethernet/stmicro/stmmac/hwif.h                 |   5
 drivers/net/ethernet/stmicro/stmmac/stmmac.h               |   3
 drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c      |  39
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c          |   2
 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c           |  40
 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h           |   1
 drivers/net/phy/marvell-88x2222.c                          | 314
 include/linux/stmmac.h                                     |   2
 17 files changed, 1203 insertions(+), 176 deletions(-)
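The dpaa2-switch portion below wires tc-flower and tc-matchall hardware offload into the per-port ACL tables, including sharing one ACL table between ports bound to the same tc block. As a rough illustration only (the interface names swp0/swp1, the block index and the chosen matches are assumptions, not taken from the patch), the new code paths could be exercised from user space along these lines:

  # bind two switch ports to the same shared ingress block
  tc qdisc add dev swp0 ingress_block 1 clsact
  tc qdisc add dev swp1 ingress_block 1 clsact

  # flower rule offloaded into the shared ACL table: drop UDP to port 319
  tc filter add block 1 protocol ip pref 10 flower skip_sw \
          ip_proto udp dst_port 319 action drop

  # matchall rule: trap remaining traffic to the CPU
  tc filter add block 1 pref 20 matchall skip_sw action trap

Each FLOW_CLS_REPLACE / TC_CLSMATCHALL_REPLACE request lands in dpaa2_switch_acl_tbl_add_entry(), which keeps the entries sorted by tc priority and re-programs their hardware precedence to match.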
diff --git a/drivers/net/Space.c b/drivers/net/Space.c index 890c86e11bcc..df79e7370bcc 100644 --- a/drivers/net/Space.c +++ b/drivers/net/Space.c @@ -59,9 +59,6 @@ static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe) * look for EISA/PCI cards in addition to ISA cards). */ static struct devprobe2 isa_probes[] __initdata = { -#if defined(CONFIG_HP100) && defined(CONFIG_ISA) /* ISA, EISA */ - {hp100_probe, 0}, -#endif #ifdef CONFIG_3C515 {tc515_probe, 0}, #endif diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile index 644ef9ae02a3..c2ef74052ef8 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Makefile +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -11,7 +11,7 @@ fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpa fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o -fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o +fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o # Needed by the tracing framework CFLAGS_dpaa2-eth.o := -I$(src) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c new file mode 100644 index 000000000000..f9451ec5f2cb --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -0,0 +1,492 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DPAA2 Ethernet Switch flower support + * + * Copyright 2021 NXP + * + */ + +#include "dpaa2-switch.h" + +static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls, + struct dpsw_acl_key *acl_key) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + struct netlink_ext_ack *extack = cls->common.extack; + struct dpsw_acl_fields *acl_h, *acl_m; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_IP) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported keys used"); + return -EOPNOTSUPP; + } + + acl_h = &acl_key->match; + acl_m = &acl_key->mask; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + acl_h->l3_protocol = match.key->ip_proto; + acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto); + acl_m->l3_protocol = match.mask->ip_proto; + acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]); + ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]); + ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]); + ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + acl_h->l2_vlan_id = match.key->vlan_id; + acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid); + acl_h->l2_pcp_dei = match.key->vlan_priority << 1 | + match.key->vlan_dei; + + acl_m->l2_vlan_id = match.mask->vlan_id; + acl_m->l2_tpid = 
be16_to_cpu(match.mask->vlan_tpid); + acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 | + match.mask->vlan_dei; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + acl_h->l3_source_ip = be32_to_cpu(match.key->src); + acl_h->l3_dest_ip = be32_to_cpu(match.key->dst); + acl_m->l3_source_ip = be32_to_cpu(match.mask->src); + acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + acl_h->l4_source_port = be16_to_cpu(match.key->src); + acl_h->l4_dest_port = be16_to_cpu(match.key->dst); + acl_m->l4_source_port = be16_to_cpu(match.mask->src); + acl_m->l4_dest_port = be16_to_cpu(match.mask->dst); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + struct flow_match_ip match; + + flow_rule_match_ip(rule, &match); + if (match.mask->ttl != 0) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on TTL not supported"); + return -EOPNOTSUPP; + } + + if ((match.mask->tos & 0x3) != 0) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on ECN not supported, only DSCP"); + return -EOPNOTSUPP; + } + + acl_h->l3_dscp = match.key->tos >> 2; + acl_m->l3_dscp = match.mask->tos >> 2; + } + + return 0; +} + +int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct dpsw_acl_key *acl_key = &entry->key; + struct device *dev = ethsw->dev; + u8 *cmd_buff; + int err; + + cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL); + if (!cmd_buff) + return -ENOMEM; + + dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff); + + acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff, + DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) { + dev_err(dev, "DMA mapping failed\n"); + return -EFAULT; + } + + err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, acl_entry_cfg); + + dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), + DMA_TO_DEVICE); + if (err) { + dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err); + return err; + } + + kfree(cmd_buff); + + return 0; +} + +static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg; + struct dpsw_acl_key *acl_key = &entry->key; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct device *dev = ethsw->dev; + u8 *cmd_buff; + int err; + + cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL); + if (!cmd_buff) + return -ENOMEM; + + dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff); + + acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff, + DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) { + dev_err(dev, "DMA mapping failed\n"); + return -EFAULT; + } + + err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, acl_entry_cfg); + + dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), + DMA_TO_DEVICE); + if (err) { + dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err); + return err; + } + + kfree(cmd_buff); + + return 0; +} + +static int +dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry 
*entry) +{ + struct dpaa2_switch_acl_entry *tmp; + struct list_head *pos, *n; + int index = 0; + + if (list_empty(&acl_tbl->entries)) { + list_add(&entry->list, &acl_tbl->entries); + return index; + } + + list_for_each_safe(pos, n, &acl_tbl->entries) { + tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list); + if (entry->prio < tmp->prio) + break; + index++; + } + list_add(&entry->list, pos->prev); + return index; +} + +static struct dpaa2_switch_acl_entry* +dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl, + int index) +{ + struct dpaa2_switch_acl_entry *tmp; + int i = 0; + + list_for_each_entry(tmp, &acl_tbl->entries, list) { + if (i == index) + return tmp; + ++i; + } + + return NULL; +} + +static int +dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry, + int precedence) +{ + int err; + + err = dpaa2_switch_acl_entry_remove(acl_tbl, entry); + if (err) + return err; + + entry->cfg.precedence = precedence; + return dpaa2_switch_acl_entry_add(acl_tbl, entry); +} + +static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp; + int index, i, precedence, err; + + /* Add the new ACL entry to the linked list and get its index */ + index = dpaa2_switch_acl_entry_add_to_list(acl_tbl, entry); + + /* Move up in priority the ACL entries to make space + * for the new filter. + */ + precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - acl_tbl->num_rules - 1; + for (i = 0; i < index; i++) { + tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i); + + err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp, + precedence); + if (err) + return err; + + precedence++; + } + + /* Add the new entry to hardware */ + entry->cfg.precedence = precedence; + err = dpaa2_switch_acl_entry_add(acl_tbl, entry); + acl_tbl->num_rules++; + + return err; +} + +static struct dpaa2_switch_acl_entry * +dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl, + unsigned long cookie) +{ + struct dpaa2_switch_acl_entry *tmp, *n; + + list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) { + if (tmp->cookie == cookie) + return tmp; + } + return NULL; +} + +static int +dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp, *n; + int index = 0; + + list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) { + if (tmp->cookie == entry->cookie) + return index; + index++; + } + return -ENOENT; +} + +static int +dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp; + int index, i, precedence, err; + + index = dpaa2_switch_acl_entry_get_index(acl_tbl, entry); + + /* Remove from hardware the ACL entry */ + err = dpaa2_switch_acl_entry_remove(acl_tbl, entry); + if (err) + return err; + + acl_tbl->num_rules--; + + /* Remove it from the list also */ + list_del(&entry->list); + + /* Move down in priority the entries over the deleted one */ + precedence = entry->cfg.precedence; + for (i = index - 1; i >= 0; i--) { + tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i); + err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp, + precedence); + if (err) + return err; + + precedence--; + } + + kfree(entry); + + return 0; +} + +static int dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw, + struct flow_action_entry *cls_act, + struct 
dpsw_acl_result *dpsw_act, + struct netlink_ext_ack *extack) +{ + int err = 0; + + switch (cls_act->id) { + case FLOW_ACTION_TRAP: + dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; + break; + case FLOW_ACTION_REDIRECT: + if (!dpaa2_switch_port_dev_check(cls_act->dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Destination not a DPAA2 switch port"); + return -EOPNOTSUPP; + } + + dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev); + dpsw_act->action = DPSW_ACL_ACTION_REDIRECT; + break; + case FLOW_ACTION_DROP: + dpsw_act->action = DPSW_ACL_ACTION_DROP; + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Action not supported"); + err = -EOPNOTSUPP; + goto out; + } + +out: + return err; +} + +int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct dpaa2_switch_acl_entry *acl_entry; + struct flow_action_entry *act; + int err; + + if (!flow_offload_has_one_action(&rule->action)) { + NL_SET_ERR_MSG(extack, "Only singular actions are supported"); + return -EOPNOTSUPP; + } + + if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) { + NL_SET_ERR_MSG(extack, "Maximum filter capacity reached"); + return -ENOMEM; + } + + acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL); + if (!acl_entry) + return -ENOMEM; + + err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key); + if (err) + goto free_acl_entry; + + act = &rule->action.entries[0]; + err = dpaa2_switch_tc_parse_action(ethsw, act, + &acl_entry->cfg.result, extack); + if (err) + goto free_acl_entry; + + acl_entry->prio = cls->common.prio; + acl_entry->cookie = cls->cookie; + + err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry); + if (err) + goto free_acl_entry; + + return 0; + +free_acl_entry: + kfree(acl_entry); + + return err; +} + +int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls) +{ + struct dpaa2_switch_acl_entry *entry; + + entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie); + if (!entry) + return 0; + + return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry); +} + +int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct dpaa2_switch_acl_entry *acl_entry; + struct flow_action_entry *act; + int err; + + if (!flow_offload_has_one_action(&cls->rule->action)) { + NL_SET_ERR_MSG(extack, "Only singular actions are supported"); + return -EOPNOTSUPP; + } + + if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) { + NL_SET_ERR_MSG(extack, "Maximum filter capacity reached"); + return -ENOMEM; + } + + acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL); + if (!acl_entry) + return -ENOMEM; + + act = &cls->rule->action.entries[0]; + err = dpaa2_switch_tc_parse_action(ethsw, act, + &acl_entry->cfg.result, extack); + if (err) + goto free_acl_entry; + + acl_entry->prio = cls->common.prio; + acl_entry->cookie = cls->cookie; + + err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry); + if (err) + goto free_acl_entry; + + return 0; + +free_acl_entry: + kfree(acl_entry); + + return err; +} + +int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls) +{ + struct dpaa2_switch_acl_entry *entry; + + entry = 
dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie); + if (!entry) + return 0; + + return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 80efc8116963..5250d51d783c 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -14,6 +14,7 @@ #include <linux/kthread.h> #include <linux/workqueue.h> #include <linux/iommu.h> +#include <net/pkt_cls.h> #include <linux/fsl/mc.h> @@ -40,6 +41,17 @@ static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *e return NULL; } +static struct dpaa2_switch_acl_tbl * +dpaa2_switch_acl_tbl_get_unused(struct ethsw_core *ethsw) +{ + int i; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) + if (!ethsw->acls[i].in_use) + return ðsw->acls[i]; + return NULL; +} + static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv, struct net_device *bridge_dev) { @@ -1114,6 +1126,259 @@ err_exit: return NETDEV_TX_OK; } +static int +dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *f) +{ + switch (f->command) { + case FLOW_CLS_REPLACE: + return dpaa2_switch_cls_flower_replace(acl_tbl, f); + case FLOW_CLS_DESTROY: + return dpaa2_switch_cls_flower_destroy(acl_tbl, f); + default: + return -EOPNOTSUPP; + } +} + +static int +dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *f) +{ + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: + return dpaa2_switch_cls_matchall_replace(acl_tbl, f); + case TC_CLSMATCHALL_DESTROY: + return dpaa2_switch_cls_matchall_destroy(acl_tbl, f); + default: + return -EOPNOTSUPP; + } +} + +static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + switch (type) { + case TC_SETUP_CLSFLOWER: + return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data); + case TC_SETUP_CLSMATCHALL: + return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(dpaa2_switch_block_cb_list); + +static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_acl_if_cfg acl_if_cfg; + int err; + + if (port_priv->acl_tbl) + return -EINVAL; + + acl_if_cfg.if_id[0] = port_priv->idx; + acl_if_cfg.num_ifs = 1; + err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, &acl_if_cfg); + if (err) { + netdev_err(netdev, "dpsw_acl_add_if err %d\n", err); + return err; + } + + acl_tbl->ports |= BIT(port_priv->idx); + port_priv->acl_tbl = acl_tbl; + + return 0; +} + +static int +dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_acl_if_cfg acl_if_cfg; + int err; + + if (port_priv->acl_tbl != acl_tbl) + return -EINVAL; + + acl_if_cfg.if_id[0] = port_priv->idx; + acl_if_cfg.num_ifs = 1; + err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, &acl_if_cfg); + if (err) { + netdev_err(netdev, "dpsw_acl_add_if err %d\n", err); + return err; + } + + acl_tbl->ports &= ~BIT(port_priv->idx); + port_priv->acl_tbl = NULL; + return 0; +} + +static int 
dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl; + int err; + + /* If the port is already bound to this ACL table then do nothing. This + * can happen when this port is the first one to join a tc block + */ + if (port_priv->acl_tbl == acl_tbl) + return 0; + + err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl); + if (err) + return err; + + /* Mark the previous ACL table as being unused if this was the last + * port that was using it. + */ + if (old_acl_tbl->ports == 0) + old_acl_tbl->in_use = false; + + return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl); +} + +static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *new_acl_tbl; + int err; + + /* We are the last port that leaves a block (an ACL table). + * We'll continue to use this table. + */ + if (acl_tbl->ports == BIT(port_priv->idx)) + return 0; + + err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl); + if (err) + return err; + + if (acl_tbl->ports == 0) + acl_tbl->in_use = false; + + new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw); + new_acl_tbl->in_use = true; + return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl); +} + +static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev, + struct flow_block_offload *f) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *acl_tbl; + struct flow_block_cb *block_cb; + bool register_block = false; + int err; + + block_cb = flow_block_cb_lookup(f->block, + dpaa2_switch_port_setup_tc_block_cb_ig, + ethsw); + + if (!block_cb) { + /* If the ACL table is not already known, then this port must + * be the first to join it. 
In this case, we can just continue + * to use our private table + */ + acl_tbl = port_priv->acl_tbl; + + block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig, + ethsw, acl_tbl, NULL); + if (IS_ERR(block_cb)) + return PTR_ERR(block_cb); + + register_block = true; + } else { + acl_tbl = flow_block_cb_priv(block_cb); + } + + flow_block_cb_incref(block_cb); + err = dpaa2_switch_port_block_bind(port_priv, acl_tbl); + if (err) + goto err_block_bind; + + if (register_block) { + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, + &dpaa2_switch_block_cb_list); + } + + return 0; + +err_block_bind: + if (!flow_block_cb_decref(block_cb)) + flow_block_cb_free(block_cb); + return err; +} + +static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev, + struct flow_block_offload *f) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *acl_tbl; + struct flow_block_cb *block_cb; + int err; + + block_cb = flow_block_cb_lookup(f->block, + dpaa2_switch_port_setup_tc_block_cb_ig, + ethsw); + if (!block_cb) + return; + + acl_tbl = flow_block_cb_priv(block_cb); + err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl); + if (!err && !flow_block_cb_decref(block_cb)) { + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + } +} + +static int dpaa2_switch_setup_tc_block(struct net_device *netdev, + struct flow_block_offload *f) +{ + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + f->driver_block_list = &dpaa2_switch_block_cb_list; + + switch (f->command) { + case FLOW_BLOCK_BIND: + return dpaa2_switch_setup_tc_block_bind(netdev, f); + case FLOW_BLOCK_UNBIND: + dpaa2_switch_setup_tc_block_unbind(netdev, f); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int dpaa2_switch_port_setup_tc(struct net_device *netdev, + enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: { + return dpaa2_switch_setup_tc_block(netdev, type_data); + } + default: + return -EOPNOTSUPP; + } + + return 0; +} + static const struct net_device_ops dpaa2_switch_port_ops = { .ndo_open = dpaa2_switch_port_open, .ndo_stop = dpaa2_switch_port_stop, @@ -1130,6 +1395,7 @@ static const struct net_device_ops dpaa2_switch_port_ops = { .ndo_start_xmit = dpaa2_switch_port_tx, .ndo_get_port_parent_id = dpaa2_switch_port_parent_id, .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name, + .ndo_setup_tc = dpaa2_switch_port_setup_tc, }; bool dpaa2_switch_port_dev_check(const struct net_device *netdev) @@ -2676,61 +2942,17 @@ err_close: static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv, const char *mac) { - struct net_device *netdev = port_priv->netdev; - struct dpsw_acl_entry_cfg acl_entry_cfg; - struct dpsw_acl_fields *acl_h; - struct dpsw_acl_fields *acl_m; - struct dpsw_acl_key acl_key; - struct device *dev; - u8 *cmd_buff; - int err; - - dev = port_priv->netdev->dev.parent; - acl_h = &acl_key.match; - acl_m = &acl_key.mask; - - if (port_priv->acl_num_rules >= DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES) { - netdev_err(netdev, "ACL full\n"); - return -ENOMEM; - } - - memset(&acl_entry_cfg, 0, sizeof(acl_entry_cfg)); - memset(&acl_key, 0, sizeof(acl_key)); + struct dpaa2_switch_acl_entry acl_entry = {0}; /* Match on the destination MAC address */ - ether_addr_copy(acl_h->l2_dest_mac, mac); - eth_broadcast_addr(acl_m->l2_dest_mac); + ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac); + 
eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac); - cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL); - if (!cmd_buff) - return -ENOMEM; - dpsw_acl_prepare_entry_cfg(&acl_key, cmd_buff); - - memset(&acl_entry_cfg, 0, sizeof(acl_entry_cfg)); - acl_entry_cfg.precedence = port_priv->acl_num_rules; - acl_entry_cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; - acl_entry_cfg.key_iova = dma_map_single(dev, cmd_buff, - DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, acl_entry_cfg.key_iova))) { - netdev_err(netdev, "DMA mapping failed\n"); - return -EFAULT; - } + /* Trap to CPU */ + acl_entry.cfg.precedence = 0; + acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; - err = dpsw_acl_add_entry(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->acl_tbl, &acl_entry_cfg); - - dma_unmap_single(dev, acl_entry_cfg.key_iova, sizeof(cmd_buff), - DMA_TO_DEVICE); - if (err) { - netdev_err(netdev, "dpsw_acl_add_entry() failed %d\n", err); - return err; - } - - port_priv->acl_num_rules++; - - return 0; + return dpaa2_switch_acl_entry_add(port_priv->acl_tbl, &acl_entry); } static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) @@ -2743,12 +2965,12 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) }; struct net_device *netdev = port_priv->netdev; struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *acl_tbl; struct dpsw_fdb_cfg fdb_cfg = {0}; - struct dpsw_acl_if_cfg acl_if_cfg; struct dpsw_if_attr dpsw_if_attr; struct dpaa2_switch_fdb *fdb; struct dpsw_acl_cfg acl_cfg; - u16 fdb_id; + u16 fdb_id, acl_tbl_id; int err; /* Get the Tx queue for this specific port */ @@ -2792,21 +3014,22 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) /* Create an ACL table to be used by this switch port */ acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES; err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle, - &port_priv->acl_tbl, &acl_cfg); + &acl_tbl_id, &acl_cfg); if (err) { netdev_err(netdev, "dpsw_acl_add err %d\n", err); return err; } - acl_if_cfg.if_id[0] = port_priv->idx; - acl_if_cfg.num_ifs = 1; - err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, - port_priv->acl_tbl, &acl_if_cfg); - if (err) { - netdev_err(netdev, "dpsw_acl_add_if err %d\n", err); - dpsw_acl_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, - port_priv->acl_tbl); - } + acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw); + acl_tbl->ethsw = ethsw; + acl_tbl->id = acl_tbl_id; + acl_tbl->in_use = true; + acl_tbl->num_rules = 0; + INIT_LIST_HEAD(&acl_tbl->entries); + + err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl); + if (err) + return err; err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa); if (err) @@ -2858,6 +3081,7 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) } kfree(ethsw->fdbs); + kfree(ethsw->acls); kfree(ethsw->ports); dpaa2_switch_takedown(sw_dev); @@ -2915,7 +3139,9 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, /* The DPAA2 switch's ingress path depends on the VLAN table, * thus we are not able to disable VLAN filtering. 
*/ - port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER; + port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_FILTER | + NETIF_F_HW_TC; err = dpaa2_switch_port_init(port_priv, port_idx); if (err) @@ -2983,6 +3209,13 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) goto err_free_ports; } + ethsw->acls = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->acls), + GFP_KERNEL); + if (!ethsw->acls) { + err = -ENOMEM; + goto err_free_fdbs; + } + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { err = dpaa2_switch_probe_port(ethsw, i); if (err) @@ -3031,6 +3264,8 @@ err_stop: err_free_netdev: for (i--; i >= 0; i--) free_netdev(ethsw->ports[i]->netdev); + kfree(ethsw->acls); +err_free_fdbs: kfree(ethsw->fdbs); err_free_ports: kfree(ethsw->ports); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h index 0ae1d27c811e..bdef71f234cb 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h @@ -18,6 +18,7 @@ #include <net/switchdev.h> #include <linux/if_bridge.h> #include <linux/fsl/mc.h> +#include <net/pkt_cls.h> #include <soc/fsl/dpaa2-io.h> #include "dpsw.h" @@ -80,6 +81,8 @@ (DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN) #define DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES 16 +#define DPAA2_ETHSW_PORT_DEFAULT_TRAPS 1 + #define DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE 256 extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops; @@ -101,6 +104,34 @@ struct dpaa2_switch_fdb { bool in_use; }; +struct dpaa2_switch_acl_entry { + struct list_head list; + u16 prio; + unsigned long cookie; + + struct dpsw_acl_entry_cfg cfg; + struct dpsw_acl_key key; +}; + +struct dpaa2_switch_acl_tbl { + struct list_head entries; + struct ethsw_core *ethsw; + u64 ports; + + u16 id; + u8 num_rules; + bool in_use; +}; + +static inline bool +dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl) +{ + if ((acl_tbl->num_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >= + DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES) + return true; + return false; +} + /* Per port private data */ struct ethsw_port_priv { struct net_device *netdev; @@ -118,8 +149,7 @@ struct ethsw_port_priv { bool ucast_flood; bool learn_ena; - u16 acl_tbl; - u8 acl_num_rules; + struct dpaa2_switch_acl_tbl *acl_tbl; }; /* Switch data */ @@ -145,8 +175,21 @@ struct ethsw_core { int napi_users; struct dpaa2_switch_fdb *fdbs; + struct dpaa2_switch_acl_tbl *acls; }; +static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw, + struct net_device *netdev) +{ + int i; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) + if (ethsw->ports[i]->netdev == netdev) + return ethsw->ports[i]->idx; + + return -EINVAL; +} + static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw) { if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) { @@ -183,4 +226,21 @@ int dpaa2_switch_port_vlans_del(struct net_device *netdev, typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv, struct fdb_dump_entry *fdb_entry, void *data); + +/* TC offload */ + +int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls); + +int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls); + +int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls); + +int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct 
tc_cls_matchall_offload *cls); + +int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry); #endif /* __ETHSW_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h index 1747cee19a72..cb13e740f72b 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h @@ -77,6 +77,7 @@ #define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090) #define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091) #define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092) +#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093) #define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094) #define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c index 6704efe89bc1..6352d6d1ecba 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpsw.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c @@ -1544,3 +1544,38 @@ int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, return mc_send_command(mc_io, &cmd); } + +/** + * dpsw_acl_remove_entry() - Removes an entry from ACL. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @acl_id: ACL ID + * @cfg: Entry configuration + * + * warning: This function has to be called after dpsw_acl_set_entry_cfg() + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_entry_cfg *cfg) +{ + struct dpsw_cmd_acl_entry *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params; + cmd_params->acl_id = cpu_to_le16(acl_id); + cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id); + cmd_params->precedence = cpu_to_le32(cfg->precedence); + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + dpsw_set_field(cmd_params->result_action, + RESULT_ACTION, + cfg->result.action); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h index 08e37c475ae8..5ef221a25b02 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpsw.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h @@ -749,4 +749,7 @@ void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key, int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 acl_id, const struct dpsw_acl_entry_cfg *cfg); + +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_entry_cfg *cfg); #endif /* __FSL_DPSW_H */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 60566598d644..ec140fc4a0f5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -296,6 +296,13 @@ static int intel_crosststamp(ktime_t *device, intel_priv = priv->plat->bsp_priv; + /* Both internal crosstimestamping and external triggered event + * timestamping cannot be run concurrently. 
+ */ + if (priv->plat->ext_snapshot_en) + return -EBUSY; + + mutex_lock(&priv->aux_ts_lock); /* Enable Internal snapshot trigger */ acr_value = readl(ptpaddr + PTP_ACR); acr_value &= ~PTP_ACR_MASK; @@ -321,6 +328,8 @@ static int intel_crosststamp(ktime_t *device, acr_value = readl(ptpaddr + PTP_ACR); acr_value |= PTP_ACR_ATSFC; writel(acr_value, ptpaddr + PTP_ACR); + /* Release the mutex */ + mutex_unlock(&priv->aux_ts_lock); /* Trigger Internal snapshot signal * Create a rising edge by just toggle the GPO1 to low @@ -520,6 +529,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR; plat->int_snapshot_num = AUX_SNAPSHOT1; + plat->ext_snapshot_num = AUX_SNAPSHOT0; plat->has_crossts = true; plat->crosststamp = intel_crosststamp; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 2b5022ef1e52..2cc91759b91f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -504,6 +504,8 @@ struct stmmac_ops { #define stmmac_fpe_irq_status(__priv, __args...) \ stmmac_do_callback(__priv, mac, fpe_irq_status, __args) +struct stmmac_priv; + /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); @@ -515,6 +517,7 @@ struct stmmac_hwtimestamp { int add_sub, int gmac4); void (*get_systime) (void __iomem *ioaddr, u64 *systime); void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time); + void (*timestamp_interrupt)(struct stmmac_priv *priv); }; #define stmmac_config_hw_tstamping(__priv, __args...) \ @@ -531,6 +534,8 @@ struct stmmac_hwtimestamp { stmmac_do_void_callback(__priv, ptp, get_systime, __args) #define stmmac_get_ptptime(__priv, __args...) \ stmmac_do_void_callback(__priv, ptp, get_ptptime, __args) +#define stmmac_timestamp_interrupt(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args) /* Helpers to manage the descriptors for chain and ring modes */ struct stmmac_mode_ops { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index b8a42260066d..b6cd43eda7ac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -250,6 +250,9 @@ struct stmmac_priv { int use_riwt; int irq_wake; spinlock_t ptp_lock; + /* Protects auxiliary snapshot registers from concurrent access. 
*/ + struct mutex aux_ts_lock; + void __iomem *mmcaddr; void __iomem *ptpaddr; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 113c51bcc0b5..074e2cdfb0fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -12,8 +12,11 @@ #include <linux/io.h> #include <linux/iopoll.h> #include <linux/delay.h> +#include <linux/ptp_clock_kernel.h> #include "common.h" #include "stmmac_ptp.h" +#include "dwmac4.h" +#include "stmmac.h" static void config_hw_tstamping(void __iomem *ioaddr, u32 data) { @@ -163,6 +166,41 @@ static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) *ptp_time = ns; } +static void timestamp_interrupt(struct stmmac_priv *priv) +{ + u32 num_snapshot, ts_status, tsync_int; + struct ptp_clock_event event; + unsigned long flags; + u64 ptp_time; + int i; + + tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE; + + if (!tsync_int) + return; + + /* Read timestamp status to clear interrupt from either external + * timestamp or start/end of PPS. + */ + ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS); + + if (!priv->plat->ext_snapshot_en) + return; + + num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >> + GMAC_TIMESTAMP_ATSNS_SHIFT; + + for (i = 0; i < num_snapshot; i++) { + spin_lock_irqsave(&priv->ptp_lock, flags); + get_ptptime(priv->ptpaddr, &ptp_time); + spin_unlock_irqrestore(&priv->ptp_lock, flags); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ptp_time; + ptp_clock_event(priv->ptp_clock, &event); + } +} + const struct stmmac_hwtimestamp stmmac_ptp = { .config_hw_tstamping = config_hw_tstamping, .init_systime = init_systime, @@ -171,4 +209,5 @@ const struct stmmac_hwtimestamp stmmac_ptp = { .adjust_systime = adjust_systime, .get_systime = get_systime, .get_ptptime = get_ptptime, + .timestamp_interrupt = timestamp_interrupt, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e3e22200a4fd..3a5ca5833ce1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -5687,6 +5687,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) else netif_carrier_off(priv->dev); } + + stmmac_timestamp_interrupt(priv, priv); } } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index b164ae22e35f..4e86cdf2bc9f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -135,7 +135,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp, { struct stmmac_priv *priv = container_of(ptp, struct stmmac_priv, ptp_clock_ops); + void __iomem *ptpaddr = priv->ptpaddr; + void __iomem *ioaddr = priv->hw->pcsr; struct stmmac_pps_cfg *cfg; + u32 intr_value, acr_value; int ret = -EOPNOTSUPP; unsigned long flags; @@ -159,6 +162,37 @@ static int stmmac_enable(struct ptp_clock_info *ptp, priv->systime_flags); spin_unlock_irqrestore(&priv->ptp_lock, flags); break; + case PTP_CLK_REQ_EXTTS: + priv->plat->ext_snapshot_en = on; + mutex_lock(&priv->aux_ts_lock); + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + if (on) { + /* Enable External snapshot trigger */ + acr_value |= priv->plat->ext_snapshot_num; + acr_value |= PTP_ACR_ATSFC; + netdev_dbg(priv->dev, "Auxiliary 
Snapshot %d enabled.\n", + priv->plat->ext_snapshot_num >> + PTP_ACR_ATSEN_SHIFT); + /* Enable Timestamp Interrupt */ + intr_value = readl(ioaddr + GMAC_INT_EN); + intr_value |= GMAC_INT_TSIE; + writel(intr_value, ioaddr + GMAC_INT_EN); + + } else { + netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n", + priv->plat->ext_snapshot_num >> + PTP_ACR_ATSEN_SHIFT); + /* Disable Timestamp Interrupt */ + intr_value = readl(ioaddr + GMAC_INT_EN); + intr_value &= ~GMAC_INT_TSIE; + writel(intr_value, ioaddr + GMAC_INT_EN); + } + writel(acr_value, ptpaddr + PTP_ACR); + mutex_unlock(&priv->aux_ts_lock); + ret = 0; + break; + default: break; } @@ -202,7 +236,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = { .name = "stmmac ptp", .max_adj = 62500000, .n_alarm = 0, - .n_ext_ts = 0, + .n_ext_ts = 0, /* will be overwritten in stmmac_ptp_register */ .n_per_out = 0, /* will be overwritten in stmmac_ptp_register */ .n_pins = 0, .pps = 0, @@ -237,8 +271,10 @@ void stmmac_ptp_register(struct stmmac_priv *priv) stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj; stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; + stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n; spin_lock_init(&priv->ptp_lock); + mutex_init(&priv->aux_ts_lock); priv->ptp_clock_ops = stmmac_ptp_clock_ops; priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, @@ -264,4 +300,6 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv) pr_debug("Removed PTP HW clock successfully on %s\n", priv->dev->name); } + + mutex_destroy(&priv->aux_ts_lock); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index f88727ce4d30..53172a439810 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -73,6 +73,7 @@ #define PTP_ACR_ATSEN1 BIT(5) /* Auxiliary Snapshot 1 Enable */ #define PTP_ACR_ATSEN2 BIT(6) /* Auxiliary Snapshot 2 Enable */ #define PTP_ACR_ATSEN3 BIT(7) /* Auxiliary Snapshot 3 Enable */ +#define PTP_ACR_ATSEN_SHIFT 5 /* Auxiliary Snapshot shift */ #define PTP_ACR_MASK GENMASK(7, 4) /* Aux Snapshot Mask */ #define PMC_ART_VALUE0 0x01 /* PMC_ART[15:0] timer value */ #define PMC_ART_VALUE1 0x02 /* PMC_ART[31:16] timer value */ diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c index eca8c2f20684..9b9ac3ef735d 100644 --- a/drivers/net/phy/marvell-88x2222.c +++ b/drivers/net/phy/marvell-88x2222.c @@ -32,6 +32,10 @@ #define MV_HOST_RST_SW BIT(7) #define MV_PORT_RST_SW (MV_LINE_RST_SW | MV_HOST_RST_SW) +/* PMD Receive Signal Detect */ +#define MV_RX_SIGNAL_DETECT 0x000A +#define MV_RX_SIGNAL_DETECT_GLOBAL BIT(0) + /* 1000Base-X/SGMII Control Register */ #define MV_1GBX_CTRL (0x2000 + MII_BMCR) @@ -48,9 +52,12 @@ #define MV_1GBX_PHY_STAT_SPEED100 BIT(14) #define MV_1GBX_PHY_STAT_SPEED1000 BIT(15) +#define AUTONEG_TIMEOUT 3 + struct mv2222_data { phy_interface_t line_interface; __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + bool sfp_link; }; /* SFI PMA transmit enable */ @@ -81,86 +88,6 @@ static int mv2222_soft_reset(struct phy_device *phydev) 5000, 1000000, true); } -/* Returns negative on error, 0 if link is down, 1 if link is up */ -static int mv2222_read_status_10g(struct phy_device *phydev) -{ - int val, link = 0; - - val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1); - if (val < 0) - return val; - - if (val & MDIO_STAT1_LSTATUS) { - link = 1; - - /* 10GBASE-R do not support auto-negotiation */ - phydev->autoneg = AUTONEG_DISABLE; - phydev->speed = 
SPEED_10000; - phydev->duplex = DUPLEX_FULL; - } - - return link; -} - -/* Returns negative on error, 0 if link is down, 1 if link is up */ -static int mv2222_read_status_1g(struct phy_device *phydev) -{ - int val, link = 0; - - val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_STAT); - if (val < 0) - return val; - - if (!(val & BMSR_LSTATUS) || - (phydev->autoneg == AUTONEG_ENABLE && - !(val & BMSR_ANEGCOMPLETE))) - return 0; - - link = 1; - - val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_PHY_STAT); - if (val < 0) - return val; - - if (val & MV_1GBX_PHY_STAT_AN_RESOLVED) { - if (val & MV_1GBX_PHY_STAT_DUPLEX) - phydev->duplex = DUPLEX_FULL; - else - phydev->duplex = DUPLEX_HALF; - - if (val & MV_1GBX_PHY_STAT_SPEED1000) - phydev->speed = SPEED_1000; - else if (val & MV_1GBX_PHY_STAT_SPEED100) - phydev->speed = SPEED_100; - else - phydev->speed = SPEED_10; - } - - return link; -} - -static int mv2222_read_status(struct phy_device *phydev) -{ - struct mv2222_data *priv = phydev->priv; - int link; - - phydev->link = 0; - phydev->speed = SPEED_UNKNOWN; - phydev->duplex = DUPLEX_UNKNOWN; - - if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER) - link = mv2222_read_status_10g(phydev); - else - link = mv2222_read_status_1g(phydev); - - if (link < 0) - return link; - - phydev->link = link; - - return 0; -} - static int mv2222_disable_aneg(struct phy_device *phydev) { int ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_CTRL, @@ -248,6 +175,24 @@ static bool mv2222_is_1gbx_capable(struct phy_device *phydev) priv->supported); } +static bool mv2222_is_sgmii_capable(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + + return (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + priv->supported)); +} + static int mv2222_config_line(struct phy_device *phydev) { struct mv2222_data *priv = phydev->priv; @@ -267,7 +212,8 @@ static int mv2222_config_line(struct phy_device *phydev) } } -static int mv2222_setup_forced(struct phy_device *phydev) +/* Switch between 1G (1000Base-X/SGMII) and 10G (10GBase-R) modes */ +static int mv2222_swap_line_type(struct phy_device *phydev) { struct mv2222_data *priv = phydev->priv; bool changed = false; @@ -275,25 +221,23 @@ static int mv2222_setup_forced(struct phy_device *phydev) switch (priv->line_interface) { case PHY_INTERFACE_MODE_10GBASER: - if (phydev->speed == SPEED_1000 && - mv2222_is_1gbx_capable(phydev)) { + if (mv2222_is_1gbx_capable(phydev)) { priv->line_interface = PHY_INTERFACE_MODE_1000BASEX; changed = true; } - break; - case PHY_INTERFACE_MODE_1000BASEX: - if (phydev->speed == SPEED_10000 && - mv2222_is_10g_capable(phydev)) { - priv->line_interface = PHY_INTERFACE_MODE_10GBASER; + if (mv2222_is_sgmii_capable(phydev)) { + priv->line_interface = PHY_INTERFACE_MODE_SGMII; changed = true; } break; + case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_SGMII: - ret = mv2222_set_sgmii_speed(phydev); - if (ret < 0) - return ret; + if (mv2222_is_10g_capable(phydev)) { + priv->line_interface = PHY_INTERFACE_MODE_10GBASER; + changed = true; + } break; default: @@ -306,6 +250,29 @@ static int mv2222_setup_forced(struct phy_device 
*phydev) return ret; } + return 0; +} + +static int mv2222_setup_forced(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + int ret; + + if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER) { + if (phydev->speed < SPEED_10000 && + phydev->speed != SPEED_UNKNOWN) { + ret = mv2222_swap_line_type(phydev); + if (ret < 0) + return ret; + } + } + + if (priv->line_interface == PHY_INTERFACE_MODE_SGMII) { + ret = mv2222_set_sgmii_speed(phydev); + if (ret < 0) + return ret; + } + return mv2222_disable_aneg(phydev); } @@ -319,17 +286,9 @@ static int mv2222_config_aneg(struct phy_device *phydev) return 0; if (phydev->autoneg == AUTONEG_DISABLE || - phydev->speed == SPEED_10000) + priv->line_interface == PHY_INTERFACE_MODE_10GBASER) return mv2222_setup_forced(phydev); - if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER && - mv2222_is_1gbx_capable(phydev)) { - priv->line_interface = PHY_INTERFACE_MODE_1000BASEX; - ret = mv2222_config_line(phydev); - if (ret < 0) - return ret; - } - adv = linkmode_adv_to_mii_adv_x(priv->supported, ETHTOOL_LINK_MODE_1000baseX_Full_BIT); @@ -363,6 +322,135 @@ static int mv2222_aneg_done(struct phy_device *phydev) return (ret & BMSR_ANEGCOMPLETE); } +/* Returns negative on error, 0 if link is down, 1 if link is up */ +static int mv2222_read_status_10g(struct phy_device *phydev) +{ + static int timeout; + int val, link = 0; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1); + if (val < 0) + return val; + + if (val & MDIO_STAT1_LSTATUS) { + link = 1; + + /* 10GBASE-R do not support auto-negotiation */ + phydev->autoneg = AUTONEG_DISABLE; + phydev->speed = SPEED_10000; + phydev->duplex = DUPLEX_FULL; + } else { + if (phydev->autoneg == AUTONEG_ENABLE) { + timeout++; + + if (timeout > AUTONEG_TIMEOUT) { + timeout = 0; + + val = mv2222_swap_line_type(phydev); + if (val < 0) + return val; + + return mv2222_config_aneg(phydev); + } + } + } + + return link; +} + +/* Returns negative on error, 0 if link is down, 1 if link is up */ +static int mv2222_read_status_1g(struct phy_device *phydev) +{ + static int timeout; + int val, link = 0; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_STAT); + if (val < 0) + return val; + + if (phydev->autoneg == AUTONEG_ENABLE && + !(val & BMSR_ANEGCOMPLETE)) { + timeout++; + + if (timeout > AUTONEG_TIMEOUT) { + timeout = 0; + + val = mv2222_swap_line_type(phydev); + if (val < 0) + return val; + + return mv2222_config_aneg(phydev); + } + + return 0; + } + + if (!(val & BMSR_LSTATUS)) + return 0; + + link = 1; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_PHY_STAT); + if (val < 0) + return val; + + if (val & MV_1GBX_PHY_STAT_AN_RESOLVED) { + if (val & MV_1GBX_PHY_STAT_DUPLEX) + phydev->duplex = DUPLEX_FULL; + else + phydev->duplex = DUPLEX_HALF; + + if (val & MV_1GBX_PHY_STAT_SPEED1000) + phydev->speed = SPEED_1000; + else if (val & MV_1GBX_PHY_STAT_SPEED100) + phydev->speed = SPEED_100; + else + phydev->speed = SPEED_10; + } + + return link; +} + +static bool mv2222_link_is_operational(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_RX_SIGNAL_DETECT); + if (val < 0 || !(val & MV_RX_SIGNAL_DETECT_GLOBAL)) + return false; + + if (phydev->sfp_bus && !priv->sfp_link) + return false; + + return true; +} + +static int mv2222_read_status(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + int link; + + phydev->link = 0; + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + + if 
(!mv2222_link_is_operational(phydev)) + return 0; + + if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER) + link = mv2222_read_status_10g(phydev); + else + link = mv2222_read_status_1g(phydev); + + if (link < 0) + return link; + + phydev->link = link; + + return 0; +} + static int mv2222_resume(struct phy_device *phydev) { return mv2222_tx_enable(phydev); @@ -424,11 +512,7 @@ static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) return ret; if (mutex_trylock(&phydev->lock)) { - if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER) - ret = mv2222_setup_forced(phydev); - else - ret = mv2222_config_aneg(phydev); - + ret = mv2222_config_aneg(phydev); mutex_unlock(&phydev->lock); } @@ -446,9 +530,29 @@ static void mv2222_sfp_remove(void *upstream) linkmode_zero(priv->supported); } +static void mv2222_sfp_link_up(void *upstream) +{ + struct phy_device *phydev = upstream; + struct mv2222_data *priv; + + priv = phydev->priv; + priv->sfp_link = true; +} + +static void mv2222_sfp_link_down(void *upstream) +{ + struct phy_device *phydev = upstream; + struct mv2222_data *priv; + + priv = phydev->priv; + priv->sfp_link = false; +} + static const struct sfp_upstream_ops sfp_phy_ops = { .module_insert = mv2222_sfp_insert, .module_remove = mv2222_sfp_remove, + .link_up = mv2222_sfp_link_up, + .link_down = mv2222_sfp_link_down, .attach = phy_sfp_attach, .detach = phy_sfp_detach, }; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index e338ef7abc00..97edb31d6310 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -238,6 +238,8 @@ struct plat_stmmacenet_data { struct pci_dev *pdev; bool has_crossts; int int_snapshot_num; + int ext_snapshot_num; + bool ext_snapshot_en; bool multi_msi_en; int msi_mac_vec; int msi_wol_vec; |
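
The stmmac changes add external timestamping (EXTTS) support through the auxiliary snapshot registers, serialized by the new aux_ts_lock and reported to the PTP core from timestamp_interrupt(). As a quick sanity check, and assuming the stmmac PHC is exposed as /dev/ptp0 (the device node number is an assumption), the kernel's testptp tool from tools/testing/selftests/ptp can be used:

  # arm auxiliary snapshot 0 and wait for three external timestamp events
  ./testptp -d /dev/ptp0 -i 0 -e 3

This exercises the new PTP_CLK_REQ_EXTTS branch in stmmac_enable(), which programs PTP_ACR and unmasks GMAC_INT_TSIE; each captured snapshot is then delivered to user space as a PTP_CLOCK_EXTTS event.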