author | David S. Miller <davem@davemloft.net> | 2022-03-03 14:15:31 +0000
committer | David S. Miller <davem@davemloft.net> | 2022-03-03 14:15:31 +0000
commit | 6fb8661c8f97bb062d2ecc7a57591d38e6c4f8c8 (patch)
tree | 30247092495e3758a688f96dc30c4bf53eeaa0d0 /net/dsa/slave.c
parent | 43113ff73453c231af2c8e700cb4ff8bca97cff6 (diff)
parent | ac45520960234cfa4be89b5aa072ac0fc7e57c07 (diff)
Merge branch 'dsa-unicast-filtering'
Vladimir Oltean says:
====================
DSA unicast filtering
This series doesn't attempt anything extremely brave, it just changes
the way in which standalone ports that support FDB isolation work.
Up until now, DSA has recommended that switch drivers configure
standalone ports in a separate VID/FID with learning disabled, and with
the CPU port as the only destination, reached trivially via flooding.
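As a sketch, that recommended setup would look something like this in a
driver (the foo_* helpers are hypothetical stand-ins for a driver's own
register accessors):

static int foo_port_setup_standalone(struct dsa_switch *ds, int port)
{
        struct dsa_port *cpu_dp = dsa_to_port(ds, port)->cpu_dp;
        int err;

        /* Learning is useless while standalone; host addresses are
         * installed explicitly by the CPU.
         */
        err = foo_port_set_learning(ds, port, false);
        if (err)
                return err;

        /* A private FID per port keeps its entries isolated */
        err = foo_port_set_fid(ds, port, foo_standalone_fid(port));
        if (err)
                return err;

        /* The CPU port is the only valid destination */
        return foo_port_set_forward_mask(ds, port, BIT(cpu_dp->index));
}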
That works, except that standalone ports will deliver all packets to the
CPU. We can leverage the hardware FDB as a MAC DA filter, and disable
flooding towards the CPU port, to force the dropping of packets with
unknown MAC DA.
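Whether DSA can do this for a given switch is gated by a predicate along
the following lines (a sketch of dsa_switch_supports_uc_filtering(); the
authoritative definition lives in include/net/dsa.h and may check more
corner cases):

static inline bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
{
        /* Host-FDB-based unicast filtering requires the FDB ops and the
         * ability to keep standalone entries isolated from bridge ones.
         */
        return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
               ds->fdb_isolation && !ds->vlan_filtering_is_global &&
               !ds->needs_standalone_vlan_filtering;
}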
We handle port promiscuity by re-enabling flooding towards the CPU port.
This is relevant because the bridge puts its automatic (learning +
flooding) ports in promiscuous mode, and this makes some things work
automagically, like for example bridging with a foreign interface.
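In the diff below, dsa_port_manage_cpu_flood() implements this by
aggregating IFF_ALLMULTI/IFF_PROMISC across all user ports served by a
CPU port and pushing the result through the bridge flags path. A
hypothetical driver-side receiver of that call could look like this
(the foo_set_*_flood() accessors are made up for illustration):

static int foo_port_bridge_flags(struct dsa_switch *ds, int port,
                                 struct switchdev_brport_flags flags,
                                 struct netlink_ext_ack *extack)
{
        /* DSA calls this for the CPU port with the aggregated flags */
        if (flags.mask & BR_FLOOD)
                foo_set_uc_flood(ds, port, !!(flags.val & BR_FLOOD));
        if (flags.mask & BR_MCAST_FLOOD)
                foo_set_mc_flood(ds, port, !!(flags.val & BR_MCAST_FLOOD));

        return 0;
}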
We don't delve yet into the territory of managing CPU flooding more
aggressively while under a bridge.
The only switch driver that benefits from this work right now is the
NXP LS1028A switch (felix). The others need to implement FDB isolation
first, before DSA will install entries into a port's standalone
database. Otherwise, these entries might collide with bridge FDB/MDB
entries.
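For reference, FDB isolation means the driver's .port_fdb_add() and
.port_fdb_del() ops take a database argument describing who owns the
entry, roughly along these lines (the foo_* helpers are hypothetical):

static int foo_port_fdb_add(struct dsa_switch *ds, int port,
                            const unsigned char *addr, u16 vid,
                            struct dsa_db db)
{
        /* A standalone port's host addresses live in its private FID,
         * so they cannot collide with bridge FDB/MDB entries.
         */
        if (db.type == DSA_DB_PORT)
                return foo_fdb_write(ds, addr, vid, foo_standalone_fid(port));

        return foo_fdb_write(ds, addr, vid, foo_bridge_fid(db.bridge.num));
}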
This work was done mainly to have all the required features in place
before somebody starts seriously architecting DSA support for multiple
CPU ports. Otherwise it would be much more difficult to bolt these
features on top of multiple CPU ports.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/dsa/slave.c')
-rw-r--r-- | net/dsa/slave.c | 261
1 file changed, 206 insertions(+), 55 deletions(-)
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 089616206b11..42436ac6993b 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -23,6 +23,114 @@
 
 #include "dsa_priv.h"
 
+static void dsa_slave_standalone_event_work(struct work_struct *work)
+{
+        struct dsa_standalone_event_work *standalone_work =
+                container_of(work, struct dsa_standalone_event_work, work);
+        const unsigned char *addr = standalone_work->addr;
+        struct net_device *dev = standalone_work->dev;
+        struct dsa_port *dp = dsa_slave_to_port(dev);
+        struct switchdev_obj_port_mdb mdb;
+        struct dsa_switch *ds = dp->ds;
+        u16 vid = standalone_work->vid;
+        int err;
+
+        switch (standalone_work->event) {
+        case DSA_UC_ADD:
+                err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
+                if (err) {
+                        dev_err(ds->dev,
+                                "port %d failed to add %pM vid %d to fdb: %d\n",
+                                dp->index, addr, vid, err);
+                        break;
+                }
+                break;
+
+        case DSA_UC_DEL:
+                err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
+                if (err) {
+                        dev_err(ds->dev,
+                                "port %d failed to delete %pM vid %d from fdb: %d\n",
+                                dp->index, addr, vid, err);
+                }
+
+                break;
+        case DSA_MC_ADD:
+                ether_addr_copy(mdb.addr, addr);
+                mdb.vid = vid;
+
+                err = dsa_port_standalone_host_mdb_add(dp, &mdb);
+                if (err) {
+                        dev_err(ds->dev,
+                                "port %d failed to add %pM vid %d to mdb: %d\n",
+                                dp->index, addr, vid, err);
+                        break;
+                }
+                break;
+        case DSA_MC_DEL:
+                ether_addr_copy(mdb.addr, addr);
+                mdb.vid = vid;
+
+                err = dsa_port_standalone_host_mdb_del(dp, &mdb);
+                if (err) {
+                        dev_err(ds->dev,
+                                "port %d failed to delete %pM vid %d from mdb: %d\n",
+                                dp->index, addr, vid, err);
+                }
+
+                break;
+        }
+
+        kfree(standalone_work);
+}
+
+static int dsa_slave_schedule_standalone_work(struct net_device *dev,
+                                              enum dsa_standalone_event event,
+                                              const unsigned char *addr,
+                                              u16 vid)
+{
+        struct dsa_standalone_event_work *standalone_work;
+
+        standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
+        if (!standalone_work)
+                return -ENOMEM;
+
+        INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
+        standalone_work->event = event;
+        standalone_work->dev = dev;
+
+        ether_addr_copy(standalone_work->addr, addr);
+        standalone_work->vid = vid;
+
+        dsa_schedule_work(&standalone_work->work);
+
+        return 0;
+}
+
+static int dsa_slave_sync_uc(struct net_device *dev,
+                             const unsigned char *addr)
+{
+        return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
+}
+
+static int dsa_slave_unsync_uc(struct net_device *dev,
+                               const unsigned char *addr)
+{
+        return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
+}
+
+static int dsa_slave_sync_mc(struct net_device *dev,
+                             const unsigned char *addr)
+{
+        return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
+}
+
+static int dsa_slave_unsync_mc(struct net_device *dev,
+                               const unsigned char *addr)
+{
+        return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
+}
+
 /* slave mii_bus handling ***************************************************/
 static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
 {
@@ -67,6 +175,7 @@ static int dsa_slave_open(struct net_device *dev)
 {
         struct net_device *master = dsa_slave_to_master(dev);
         struct dsa_port *dp = dsa_slave_to_port(dev);
+        struct dsa_switch *ds = dp->ds;
         int err;
 
         err = dev_open(master, NULL);
@@ -75,38 +184,30 @@ static int dsa_slave_open(struct net_device *dev)
                 goto out;
         }
 
-        if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
-                err = dev_uc_add(master, dev->dev_addr);
-                if (err < 0)
+        if (dsa_switch_supports_uc_filtering(ds)) {
+                err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
+                if (err)
                         goto out;
         }
 
-        if (dev->flags & IFF_ALLMULTI) {
-                err = dev_set_allmulti(master, 1);
-                if (err < 0)
-                        goto del_unicast;
-        }
-        if (dev->flags & IFF_PROMISC) {
-                err = dev_set_promiscuity(master, 1);
+        if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
+                err = dev_uc_add(master, dev->dev_addr);
                 if (err < 0)
-                        goto clear_allmulti;
+                        goto del_host_addr;
         }
 
         err = dsa_port_enable_rt(dp, dev->phydev);
         if (err)
-                goto clear_promisc;
+                goto del_unicast;
 
         return 0;
 
-clear_promisc:
-        if (dev->flags & IFF_PROMISC)
-                dev_set_promiscuity(master, -1);
-clear_allmulti:
-        if (dev->flags & IFF_ALLMULTI)
-                dev_set_allmulti(master, -1);
 del_unicast:
         if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
                 dev_uc_del(master, dev->dev_addr);
+del_host_addr:
+        if (dsa_switch_supports_uc_filtering(ds))
+                dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
 out:
         return err;
 }
@@ -115,68 +216,122 @@ static int dsa_slave_close(struct net_device *dev)
 {
         struct net_device *master = dsa_slave_to_master(dev);
         struct dsa_port *dp = dsa_slave_to_port(dev);
+        struct dsa_switch *ds = dp->ds;
 
         dsa_port_disable_rt(dp);
 
-        dev_mc_unsync(master, dev);
-        dev_uc_unsync(master, dev);
-        if (dev->flags & IFF_ALLMULTI)
-                dev_set_allmulti(master, -1);
-        if (dev->flags & IFF_PROMISC)
-                dev_set_promiscuity(master, -1);
-
         if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
                 dev_uc_del(master, dev->dev_addr);
 
+        if (dsa_switch_supports_uc_filtering(ds))
+                dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
+
         return 0;
 }
 
+/* Keep flooding enabled towards this port's CPU port as long as it serves at
+ * least one port in the tree that requires it.
+ */
+static void dsa_port_manage_cpu_flood(struct dsa_port *dp)
+{
+        struct switchdev_brport_flags flags = {
+                .mask = BR_FLOOD | BR_MCAST_FLOOD,
+        };
+        struct dsa_switch_tree *dst = dp->ds->dst;
+        struct dsa_port *cpu_dp = dp->cpu_dp;
+        struct dsa_port *other_dp;
+        int err;
+
+        list_for_each_entry(other_dp, &dst->ports, list) {
+                if (!dsa_port_is_user(other_dp))
+                        continue;
+
+                if (other_dp->cpu_dp != cpu_dp)
+                        continue;
+
+                if (other_dp->slave->flags & IFF_ALLMULTI)
+                        flags.val |= BR_MCAST_FLOOD;
+                if (other_dp->slave->flags & IFF_PROMISC)
+                        flags.val |= BR_FLOOD;
+        }
+
+        err = dsa_port_pre_bridge_flags(dp, flags, NULL);
+        if (err)
+                return;
+
+        dsa_port_bridge_flags(cpu_dp, flags, NULL);
+}
+
 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
 {
         struct net_device *master = dsa_slave_to_master(dev);
-
-        if (dev->flags & IFF_UP) {
-                if (change & IFF_ALLMULTI)
-                        dev_set_allmulti(master,
-                                         dev->flags & IFF_ALLMULTI ? 1 : -1);
-                if (change & IFF_PROMISC)
-                        dev_set_promiscuity(master,
-                                            dev->flags & IFF_PROMISC ? 1 : -1);
-        }
+        struct dsa_port *dp = dsa_slave_to_port(dev);
+        struct dsa_switch *ds = dp->ds;
+
+        if (change & IFF_ALLMULTI)
+                dev_set_allmulti(master,
+                                 dev->flags & IFF_ALLMULTI ? 1 : -1);
+        if (change & IFF_PROMISC)
+                dev_set_promiscuity(master,
+                                    dev->flags & IFF_PROMISC ? 1 : -1);
+
+        if (dsa_switch_supports_uc_filtering(ds) &&
+            dsa_switch_supports_mc_filtering(ds))
+                dsa_port_manage_cpu_flood(dp);
 }
 
 static void dsa_slave_set_rx_mode(struct net_device *dev)
 {
         struct net_device *master = dsa_slave_to_master(dev);
+        struct dsa_port *dp = dsa_slave_to_port(dev);
+        struct dsa_switch *ds = dp->ds;
 
         dev_mc_sync(master, dev);
         dev_uc_sync(master, dev);
+        if (dsa_switch_supports_mc_filtering(ds))
+                __dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
+        if (dsa_switch_supports_uc_filtering(ds))
+                __dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
 }
 
 static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
 {
         struct net_device *master = dsa_slave_to_master(dev);
+        struct dsa_port *dp = dsa_slave_to_port(dev);
+        struct dsa_switch *ds = dp->ds;
         struct sockaddr *addr = a;
         int err;
 
         if (!is_valid_ether_addr(addr->sa_data))
                 return -EADDRNOTAVAIL;
 
-        if (!(dev->flags & IFF_UP))
-                goto out;
+        if (dsa_switch_supports_uc_filtering(ds)) {
+                err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
+                if (err)
+                        return err;
+        }
 
         if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
                 err = dev_uc_add(master, addr->sa_data);
                 if (err < 0)
-                        return err;
+                        goto del_unicast;
         }
 
         if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
                 dev_uc_del(master, dev->dev_addr);
 
-out:
+        if (dsa_switch_supports_uc_filtering(ds))
+                dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
+
         eth_hw_addr_set(dev, addr->sa_data);
 
         return 0;
+
+del_unicast:
+        if (dsa_switch_supports_uc_filtering(ds))
+                dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);
+
+        return err;
 }
 
 struct dsa_slave_dump_ctx {
@@ -426,7 +581,7 @@ static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
                 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
                         return -EOPNOTSUPP;
 
-                err = dsa_port_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
+                err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
                 break;
         case SWITCHDEV_OBJ_ID_PORT_VLAN:
                 if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
@@ -507,7 +662,7 @@ static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
                 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
                         return -EOPNOTSUPP;
 
-                err = dsa_port_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
+                err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
                 break;
         case SWITCHDEV_OBJ_ID_PORT_VLAN:
                 if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
@@ -1948,6 +2103,8 @@ int dsa_slave_create(struct dsa_port *port)
         else
                 eth_hw_addr_inherit(slave_dev, master);
         slave_dev->priv_flags |= IFF_NO_QUEUE;
+        if (dsa_switch_supports_uc_filtering(ds))
+                slave_dev->priv_flags |= IFF_UNICAST_FLT;
         slave_dev->netdev_ops = &dsa_slave_netdev_ops;
         if (ds->ops->port_max_mtu)
                 slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
@@ -2385,7 +2542,9 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
 {
         struct dsa_switchdev_event_work *switchdev_work =
                 container_of(work, struct dsa_switchdev_event_work, work);
+        const unsigned char *addr = switchdev_work->addr;
         struct net_device *dev = switchdev_work->dev;
+        u16 vid = switchdev_work->vid;
         struct dsa_switch *ds;
         struct dsa_port *dp;
         int err;
@@ -2396,19 +2555,15 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
         switch (switchdev_work->event) {
         case SWITCHDEV_FDB_ADD_TO_DEVICE:
                 if (switchdev_work->host_addr)
-                        err = dsa_port_host_fdb_add(dp, switchdev_work->addr,
-                                                    switchdev_work->vid);
+                        err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
                 else if (dp->lag)
-                        err = dsa_port_lag_fdb_add(dp, switchdev_work->addr,
-                                                   switchdev_work->vid);
+                        err = dsa_port_lag_fdb_add(dp, addr, vid);
                 else
-                        err = dsa_port_fdb_add(dp, switchdev_work->addr,
-                                               switchdev_work->vid);
+                        err = dsa_port_fdb_add(dp, addr, vid);
                 if (err) {
                         dev_err(ds->dev,
                                 "port %d failed to add %pM vid %d to fdb: %d\n",
-                                dp->index, switchdev_work->addr,
-                                switchdev_work->vid, err);
+                                dp->index, addr, vid, err);
                         break;
                 }
                 dsa_fdb_offload_notify(switchdev_work);
@@ -2416,19 +2571,15 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
 
         case SWITCHDEV_FDB_DEL_TO_DEVICE:
                 if (switchdev_work->host_addr)
-                        err = dsa_port_host_fdb_del(dp, switchdev_work->addr,
-                                                    switchdev_work->vid);
+                        err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
                 else if (dp->lag)
-                        err = dsa_port_lag_fdb_del(dp, switchdev_work->addr,
-                                                   switchdev_work->vid);
+                        err = dsa_port_lag_fdb_del(dp, addr, vid);
                 else
-                        err = dsa_port_fdb_del(dp, switchdev_work->addr,
-                                               switchdev_work->vid);
+                        err = dsa_port_fdb_del(dp, addr, vid);
                 if (err) {
                         dev_err(ds->dev,
                                 "port %d failed to delete %pM vid %d from fdb: %d\n",
-                                dp->index, switchdev_work->addr,
-                                switchdev_work->vid, err);
+                                dp->index, addr, vid, err);
                 }
 
                 break;