38 files changed, 1736 insertions, 469 deletions
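Reader's note on the mlxsw hunks that follow: the CQE and HPKT trap_id fields are widened from 9 to 10 bits because the new MIRROR_SESSION0..7 trap IDs (0x220..0x227) no longer fit under the old 0x1FF maximum, and the Rx listener lookup additionally matches on a per-session mirror_reason. Below is a minimal standalone sketch of the bitfield-extraction idiom that the MLXSW_ITEM32() macros generate; the helper name cqe_bits() and the demo values are illustrative assumptions, not driver symbols.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the getter MLXSW_ITEM32() generates:
 * read a 32-bit big-endian word at byte offset `off` inside the CQE
 * and extract `width` bits starting at bit position `shift`.
 */
static uint32_t cqe_bits(const uint8_t *cqe, unsigned int off,
			 unsigned int shift, unsigned int width)
{
	uint32_t word = (uint32_t)cqe[off] << 24 |
			(uint32_t)cqe[off + 1] << 16 |
			(uint32_t)cqe[off + 2] << 8 |
			(uint32_t)cqe[off + 3];

	return (word >> shift) & ((1u << width) - 1);
}

int main(void)
{
	uint8_t cqe[0x20] = {0};

	/* Pretend the device reported trap ID 0x220 (MIRROR_SESSION0)
	 * in the low bits of the word at offset 0x08.
	 */
	cqe[0x0a] = 0x02;
	cqe[0x0b] = 0x20;

	printf("9-bit trap_id:  0x%x\n",
	       (unsigned)cqe_bits(cqe, 0x08, 0, 9));  /* 0x020 - truncated */
	printf("10-bit trap_id: 0x%x\n",
	       (unsigned)cqe_bits(cqe, 0x08, 0, 10)); /* 0x220 - correct */
	return 0;
}

With the old 9-bit getter, trap ID 0x220 would alias to 0x020; widening the field to 10 bits (and raising MLXSW_TRAP_ID_MAX from 0x1FF to 0x3FF, as the trap.h hunk below does) avoids that.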
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 8b3791d73c99..1363168b3c82 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1524,7 +1524,8 @@ static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a, { return (rxl_a->func == rxl_b->func && rxl_a->local_port == rxl_b->local_port && - rxl_a->trap_id == rxl_b->trap_id); + rxl_a->trap_id == rxl_b->trap_id && + rxl_a->mirror_reason == rxl_b->mirror_reason); } static struct mlxsw_rx_listener_item * @@ -2044,7 +2045,8 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, rxl = &rxl_item->rxl; if ((rxl->local_port == MLXSW_PORT_DONT_CARE || rxl->local_port == local_port) && - rxl->trap_id == rx_info->trap_id) { + rxl->trap_id == rx_info->trap_id && + rxl->mirror_reason == rx_info->mirror_reason) { if (rxl_item->enabled) found = true; break; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 7d6b0a232789..c1c1e039323a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -61,6 +61,7 @@ void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core, struct mlxsw_rx_listener { void (*func)(struct sk_buff *skb, u8 local_port, void *priv); u8 local_port; + u8 mirror_reason; u16 trap_id; }; @@ -176,6 +177,7 @@ struct mlxsw_rx_info { u16 lag_id; } u; u8 lag_port_index; + u8 mirror_reason; int trap_id; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index c04ec1a92826..1c64b03ff48e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -547,9 +547,9 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, { struct pci_dev *pdev = mlxsw_pci->pdev; struct mlxsw_pci_queue_elem_info *elem_info; + struct mlxsw_rx_info rx_info = {}; char *wqe; struct sk_buff *skb; - struct mlxsw_rx_info rx_info; u16 byte_count; int err; @@ -582,6 +582,10 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe); mlxsw_skb_cb(skb)->cookie_index = cookie_index; + } else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 && + rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 && + mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) { + rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe); } byte_count = mlxsw_pci_cqe_byte_count_get(cqe); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index 32c7cabfb261..a2c1fbd3e0d1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -176,7 +176,7 @@ MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14); /* pci_cqe_trap_id * Trap ID that captured the packet. */ -MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9); +MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 10); /* pci_cqe_crc * Length include CRC. Indicates the length field includes @@ -213,6 +213,11 @@ mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12); */ MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20); +/* pci_cqe_mirror_reason + * Mirror reason. + */ +MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8); + /* pci_cqe_owner * Ownership bit. 
*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 76f61bef03f8..408003520602 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5778,7 +5778,7 @@ MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6); * Note: A trap ID can only be associated with a single trap group. The device * will associate the trap ID with the last trap group configured. */ -MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9); +MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 10); enum { MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT, @@ -8662,6 +8662,13 @@ MLXSW_REG_DEFINE(mpat, MLXSW_REG_MPAT_ID, MLXSW_REG_MPAT_LEN); */ MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4); +/* reg_mpat_session_id + * Mirror Session ID. + * Used for MIRROR_SESSION<i> trap. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, session_id, 0x00, 24, 4); + /* reg_mpat_system_port * A unique port identifier for the final destination of the packet. * Access: RW @@ -8719,6 +8726,18 @@ enum mlxsw_reg_mpat_span_type { */ MLXSW_ITEM32(reg, mpat, span_type, 0x04, 0, 4); +/* reg_mpat_pide + * Policer enable. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, pide, 0x0C, 15, 1); + +/* reg_mpat_pid + * Policer ID. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, pid, 0x0C, 0, 14); + /* Remote SPAN - Ethernet VLAN * - - - - - - - - - - - - - - */ @@ -9502,6 +9521,14 @@ MLXSW_ITEM32(reg, mogcr, ptp_iftc, 0x00, 1, 1); */ MLXSW_ITEM32(reg, mogcr, ptp_eftc, 0x00, 0, 1); +/* reg_mogcr_mirroring_pid_base + * Base policer id for mirroring policers. + * Must have an even value (e.g. 1000, not 1001). + * Reserved when SwitchX/-2, Switch-IB/2, Spectrum-1 and Quantum. + * Access: RW + */ +MLXSW_ITEM32(reg, mogcr, mirroring_pid_base, 0x0C, 0, 14); + /* MPAGR - Monitoring Port Analyzer Global Register * ------------------------------------------------ * This register is used for global port analyzer configurations. 
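The MPAT pide/pid fields and the MOGCR mirroring_pid_base added above work together: per the spectrum_span.c comment later in this diff, policers bound to SPAN agents must lie in the range policer_id_base .. policer_id_base + max_span_agents - 1, and the base must be even. Below is a hedged standalone sketch of that bookkeeping; struct span_policer_state and span_policer_base_claim() are illustrative names, and the real logic is mlxsw_sp_span_policer_id_base_set() further down.

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the SPAN policer-base bookkeeping: the first
 * policer-backed agent picks the base (rounded down to an even value,
 * as MOGCR requires); later agents must fit inside the window.
 */
struct span_policer_state {
	uint16_t base;           /* would be written to mirroring_pid_base */
	unsigned int users;      /* models the driver's refcount */
	unsigned int max_agents; /* models span->entries_count */
};

static int span_policer_base_claim(struct span_policer_state *s,
				   uint16_t policer_id)
{
	if (s->users) {
		/* Base already programmed: new policer must be in range. */
		if (policer_id < s->base ||
		    policer_id >= s->base + s->max_agents)
			return -1; /* -EINVAL in the driver */
		s->users++;
		return 0;
	}

	s->base = policer_id & ~1u; /* even base: 1001 -> 1000 */
	s->users = 1;               /* here the driver writes MOGCR */
	return 0;
}

int main(void)
{
	struct span_policer_state s = { .max_agents = 3 };

	printf("claim 1001: %d (base=%u)\n",
	       span_policer_base_claim(&s, 1001), (unsigned)s.base); /* 0, 1000 */
	printf("claim 1002: %d\n", span_policer_base_claim(&s, 1002)); /* 0 */
	printf("claim 1003: %d\n", span_policer_base_claim(&s, 1003)); /* -1 */
	return 0;
}

Rounding an odd ID down (1001 -> 1000) mirrors the driver's `policer_id % 2 == 0 ? policer_id : policer_id - 1`, satisfying the "must have an even value" note on reg_mogcr_mirroring_pid_base above.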
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 73d56012654b..18444f675100 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -136,11 +136,13 @@ mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, const struct net_device *out_dev, bool ingress, int *p_span_id) { + struct mlxsw_sp_span_agent_parms agent_parms = {}; struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp = priv; int err; - err = mlxsw_sp_span_agent_get(mlxsw_sp, out_dev, p_span_id); + agent_parms.to_dev = out_dev; + err = mlxsw_sp_span_agent_get(mlxsw_sp, p_span_id, &agent_parms); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c index 195e28ab8e65..f30599ad6019 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c @@ -27,6 +27,7 @@ mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_mall_entry *mall_entry) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_span_agent_parms agent_parms = {}; struct mlxsw_sp_span_trigger_parms parms; enum mlxsw_sp_span_trigger trigger; int err; @@ -36,8 +37,9 @@ mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port, return -EINVAL; } - err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev, - &mall_entry->mirror.span_id); + agent_parms.to_dev = mall_entry->mirror.to_dev; + err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id, + &agent_parms); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index 901acd87353f..a5ce1eec5418 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -1295,10 +1295,13 @@ static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port; struct mlxsw_sp_span_trigger_parms trigger_parms = {}; + struct mlxsw_sp_span_agent_parms agent_parms = { + .to_dev = mall_entry->mirror.to_dev, + }; int span_id; int err; - err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev, &span_id); + err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, &agent_parms); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 6374765a112d..323eaf979aea 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -22,9 +22,13 @@ struct mlxsw_sp_span { struct work_struct work; struct mlxsw_sp *mlxsw_sp; const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr; + const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr; + size_t span_entry_ops_arr_size; struct list_head analyzed_ports_list; struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */ struct list_head trigger_entries_list; + u16 policer_id_base; + refcount_t policer_id_base_ref_count; atomic_t active_entries_count; int entries_count; struct mlxsw_sp_span_entry entries[]; @@ -86,6 +90,7 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL); if (!span) return -ENOMEM; + 
refcount_set(&span->policer_id_base_ref_count, 0); span->entries_count = entries_count; atomic_set(&span->active_entries_count, 0); mutex_init(&span->analyzed_ports_lock); @@ -126,8 +131,41 @@ void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->span); } +static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev) +{ + return !dev; +} + +static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp) +{ + return -EOPNOTSUPP; +} + static int -mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev, +mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + return -EOPNOTSUPP; +} + +static void +mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ +} + +static const +struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = { + .can_handle = mlxsw_sp1_span_cpu_can_handle, + .parms_set = mlxsw_sp1_span_entry_cpu_parms, + .configure = mlxsw_sp1_span_entry_cpu_configure, + .deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure, +}; + +static int +mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp) { sparmsp->dest_port = netdev_priv(to_dev); @@ -147,6 +185,8 @@ mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry, /* Create a new port analayzer entry for local_port. */ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH); + mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); + mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); } @@ -403,7 +443,8 @@ out: } static int -mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev, +mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp) { struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev); @@ -442,6 +483,8 @@ mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry, /* Create a new port analayzer entry for local_port. */ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); + mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); + mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, @@ -504,7 +547,8 @@ out: } static int -mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev, +mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp) { struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev); @@ -543,6 +587,8 @@ mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry, /* Create a new port analayzer entry for local_port. 
*/ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); + mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); + mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, @@ -578,7 +624,8 @@ mlxsw_sp_span_vlan_can_handle(const struct net_device *dev) } static int -mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev, +mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp) { struct net_device *real_dev; @@ -605,6 +652,8 @@ mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry, mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH); + mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); + mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); @@ -626,7 +675,61 @@ struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = { }; static const -struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = { +struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = { + &mlxsw_sp1_span_entry_ops_cpu, + &mlxsw_sp_span_entry_ops_phys, +#if IS_ENABLED(CONFIG_NET_IPGRE) + &mlxsw_sp_span_entry_ops_gretap4, +#endif +#if IS_ENABLED(CONFIG_IPV6_GRE) + &mlxsw_sp_span_entry_ops_gretap6, +#endif + &mlxsw_sp_span_entry_ops_vlan, +}; + +static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev) +{ + return !dev; +} + +static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp) +{ + sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; + return 0; +} + +static int +mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + /* Mirroring to the CPU port is like mirroring to any other physical + * port. Its local port is used instead of that of the physical port. 
+ */ + return mlxsw_sp_span_entry_phys_configure(span_entry, sparms); +} + +static void +mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ + enum mlxsw_reg_mpat_span_type span_type; + + span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH; + mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type); +} + +static const +struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = { + .can_handle = mlxsw_sp2_span_cpu_can_handle, + .parms_set = mlxsw_sp2_span_entry_cpu_parms, + .configure = mlxsw_sp2_span_entry_cpu_configure, + .deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure, +}; + +static const +struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = { + &mlxsw_sp2_span_entry_ops_cpu, &mlxsw_sp_span_entry_ops_phys, #if IS_ENABLED(CONFIG_NET_IPGRE) &mlxsw_sp_span_entry_ops_gretap4, @@ -638,7 +741,8 @@ struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = { }; static int -mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev, +mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp) { return mlxsw_sp_span_entry_unoffloadable(sparmsp); @@ -673,16 +777,15 @@ mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp, goto set_parms; if (sparms.dest_port->mlxsw_sp != mlxsw_sp) { - netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance", - sparms.dest_port->dev->name); + dev_err(mlxsw_sp->bus_info->dev, + "Cannot mirror to a port which belongs to a different mlxsw instance\n"); sparms.dest_port = NULL; goto set_parms; } err = span_entry->ops->configure(span_entry, sparms); if (err) { - netdev_err(span_entry->to_dev, "Failed to offload mirror to %s", - sparms.dest_port->dev->name); + dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n"); sparms.dest_port = NULL; goto set_parms; } @@ -698,6 +801,45 @@ mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry) span_entry->ops->deconfigure(span_entry); } +static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span, + u16 policer_id) +{ + struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp; + u16 policer_id_base; + int err; + + /* Policers set on SPAN agents must be in the range of + * `policer_id_base .. policer_id_base + max_span_agents - 1`. If the + * base is set and the new policer is not within the range, then we + * must error out. + */ + if (refcount_read(&span->policer_id_base_ref_count)) { + if (policer_id < span->policer_id_base || + policer_id >= span->policer_id_base + span->entries_count) + return -EINVAL; + + refcount_inc(&span->policer_id_base_ref_count); + return 0; + } + + /* Base must be even. */ + policer_id_base = policer_id % 2 == 0 ? 
policer_id : policer_id - 1; + err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp, + policer_id_base); + if (err) + return err; + + span->policer_id_base = policer_id_base; + refcount_set(&span->policer_id_base_ref_count, 1); + + return 0; +} + +static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span) +{ + refcount_dec(&span->policer_id_base_ref_count); +} + static struct mlxsw_sp_span_entry * mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp, const struct net_device *to_dev, @@ -717,6 +859,15 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp, if (!span_entry) return NULL; + if (sparms.policer_enable) { + int err; + + err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span, + sparms.policer_id); + if (err) + return NULL; + } + atomic_inc(&mlxsw_sp->span->active_entries_count); span_entry->ops = ops; refcount_set(&span_entry->ref_count, 1); @@ -731,6 +882,8 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp, { mlxsw_sp_span_entry_deconfigure(span_entry); atomic_dec(&mlxsw_sp->span->active_entries_count); + if (span_entry->parms.policer_enable) + mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span); } struct mlxsw_sp_span_entry * @@ -770,6 +923,24 @@ mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id) } static struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, + const struct mlxsw_sp_span_parms *sparms) +{ + int i; + + for (i = 0; i < mlxsw_sp->span->entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i]; + + if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev && + curr->parms.policer_enable == sparms->policer_enable && + curr->parms.policer_id == sparms->policer_id) + return curr; + } + return NULL; +} + +static struct mlxsw_sp_span_entry * mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp, const struct net_device *to_dev, const struct mlxsw_sp_span_entry_ops *ops, @@ -777,7 +948,8 @@ mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_span_entry *span_entry; - span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev); + span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev, + &sparms); if (span_entry) { /* Already exists, just take a reference */ refcount_inc(&span_entry->ref_count); @@ -894,11 +1066,12 @@ static const struct mlxsw_sp_span_entry_ops * mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp, const struct net_device *to_dev) { + struct mlxsw_sp_span *span = mlxsw_sp->span; size_t i; - for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i) - if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev)) - return mlxsw_sp_span_entry_types[i]; + for (i = 0; i < span->span_entry_ops_arr_size; ++i) + if (span->span_entry_ops_arr[i]->can_handle(to_dev)) + return span->span_entry_ops_arr[i]; return NULL; } @@ -920,7 +1093,7 @@ static void mlxsw_sp_span_respin_work(struct work_struct *work) if (!refcount_read(&curr->ref_count)) continue; - err = curr->ops->parms_set(curr->to_dev, &sparms); + err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms); if (err) continue; @@ -939,9 +1112,10 @@ void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp) mlxsw_core_schedule_work(&mlxsw_sp->span->work); } -int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, - const struct net_device *to_dev, int *p_span_id) +int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id, + const struct mlxsw_sp_span_agent_parms *parms) { + const struct net_device *to_dev = parms->to_dev; const 
struct mlxsw_sp_span_entry_ops *ops; struct mlxsw_sp_span_entry *span_entry; struct mlxsw_sp_span_parms sparms; @@ -956,10 +1130,12 @@ int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, } memset(&sparms, 0, sizeof(sparms)); - err = ops->parms_set(to_dev, &sparms); + err = ops->parms_set(mlxsw_sp, to_dev, &sparms); if (err) return err; + sparms.policer_id = parms->policer_id; + sparms.policer_enable = parms->policer_enable; span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms); if (!span_entry) return -ENOBUFS; @@ -1519,7 +1695,18 @@ void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp) { + size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr); + + /* Must be first to avoid NULL pointer dereference by subsequent + * can_handle() callbacks. + */ + if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] != + &mlxsw_sp1_span_entry_ops_cpu)) + return -EINVAL; + mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr; + mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr; + mlxsw_sp->span->span_entry_ops_arr_size = arr_size; return 0; } @@ -1529,14 +1716,32 @@ static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed) return mtu * 5 / 2; } +static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp, + u16 policer_id_base) +{ + return -EOPNOTSUPP; +} + const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { .init = mlxsw_sp1_span_init, .buffsize_get = mlxsw_sp1_span_buffsize_get, + .policer_id_base_set = mlxsw_sp1_span_policer_id_base_set, }; static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp) { + size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr); + + /* Must be first to avoid NULL pointer dereference by subsequent + * can_handle() callbacks. 
+ */ + if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] != + &mlxsw_sp2_span_entry_ops_cpu)) + return -EINVAL; + mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr; + mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr; + mlxsw_sp->span->span_entry_ops_arr_size = arr_size; return 0; } @@ -1556,9 +1761,24 @@ static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed) return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); } +static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp, + u16 policer_id_base) +{ + char mogcr_pl[MLXSW_REG_MOGCR_LEN]; + int err; + + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl); + if (err) + return err; + + mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl); +} + const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { .init = mlxsw_sp2_span_init, .buffsize_get = mlxsw_sp2_span_buffsize_get, + .policer_id_base_set = mlxsw_sp2_span_policer_id_base_set, }; static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) @@ -1571,4 +1791,5 @@ static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { .init = mlxsw_sp2_span_init, .buffsize_get = mlxsw_sp3_span_buffsize_get, + .policer_id_base_set = mlxsw_sp2_span_policer_id_base_set, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index 29b96b222e25..1c746dd3b1bd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -21,6 +21,8 @@ struct mlxsw_sp_span_parms { union mlxsw_sp_l3addr daddr; union mlxsw_sp_l3addr saddr; u16 vid; + u16 policer_id; + bool policer_enable; }; enum mlxsw_sp_span_trigger { @@ -35,11 +37,19 @@ struct mlxsw_sp_span_trigger_parms { int span_id; }; +struct mlxsw_sp_span_agent_parms { + const struct net_device *to_dev; + u16 policer_id; + bool policer_enable; +}; + struct mlxsw_sp_span_entry_ops; struct mlxsw_sp_span_ops { int (*init)(struct mlxsw_sp *mlxsw_sp); u32 (*buffsize_get)(int mtu, u32 speed); + int (*policer_id_base_set)(struct mlxsw_sp *mlxsw_sp, + u16 policer_id_base); }; struct mlxsw_sp_span_entry { @@ -52,7 +62,8 @@ struct mlxsw_sp_span_entry { struct mlxsw_sp_span_entry_ops { bool (*can_handle)(const struct net_device *to_dev); - int (*parms_set)(const struct net_device *to_dev, + int (*parms_set)(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp); int (*configure)(struct mlxsw_sp_span_entry *span_entry, struct mlxsw_sp_span_parms sparms); @@ -73,8 +84,8 @@ void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu); void mlxsw_sp_span_speed_update_work(struct work_struct *work); -int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, - const struct net_device *to_dev, int *p_span_id); +int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id, + const struct mlxsw_sp_span_agent_parms *parms); void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id); int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress); diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 28e60697d14e..33909887d0ac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -107,8 +107,16 @@ enum { 
MLXSW_TRAP_ID_ACL2 = 0x1C2, MLXSW_TRAP_ID_DISCARD_INGRESS_ACL = 0x1C3, MLXSW_TRAP_ID_DISCARD_EGRESS_ACL = 0x1C4, + MLXSW_TRAP_ID_MIRROR_SESSION0 = 0x220, + MLXSW_TRAP_ID_MIRROR_SESSION1 = 0x221, + MLXSW_TRAP_ID_MIRROR_SESSION2 = 0x222, + MLXSW_TRAP_ID_MIRROR_SESSION3 = 0x223, + MLXSW_TRAP_ID_MIRROR_SESSION4 = 0x224, + MLXSW_TRAP_ID_MIRROR_SESSION5 = 0x225, + MLXSW_TRAP_ID_MIRROR_SESSION6 = 0x226, + MLXSW_TRAP_ID_MIRROR_SESSION7 = 0x227, - MLXSW_TRAP_ID_MAX = 0x1FF + MLXSW_TRAP_ID_MAX = 0x3FF, }; enum mlxsw_event_trap_id { diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 2ce7304d3753..bb646b65cc95 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -4390,9 +4390,9 @@ static int ksz_alloc_desc(struct dev_info *adapter) DESC_ALIGNMENT; adapter->desc_pool.alloc_virt = - pci_zalloc_consistent(adapter->pdev, - adapter->desc_pool.alloc_size, - &adapter->desc_pool.dma_addr); + dma_alloc_coherent(&adapter->pdev->dev, + adapter->desc_pool.alloc_size, + &adapter->desc_pool.dma_addr, GFP_KERNEL); if (adapter->desc_pool.alloc_virt == NULL) { adapter->desc_pool.alloc_size = 0; return 1; @@ -4431,7 +4431,8 @@ static int ksz_alloc_desc(struct dev_info *adapter) static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf, int direction) { - pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction); + dma_unmap_single(&adapter->pdev->dev, dma_buf->dma, dma_buf->len, + direction); dev_kfree_skb(dma_buf->skb); dma_buf->skb = NULL; dma_buf->dma = 0; @@ -4456,16 +4457,15 @@ static void ksz_init_rx_buffers(struct dev_info *adapter) dma_buf = DMA_BUFFER(desc); if (dma_buf->skb && dma_buf->len != adapter->mtu) - free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE); + free_dma_buf(adapter, dma_buf, DMA_FROM_DEVICE); dma_buf->len = adapter->mtu; if (!dma_buf->skb) dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC); if (dma_buf->skb && !dma_buf->dma) - dma_buf->dma = pci_map_single( - adapter->pdev, - skb_tail_pointer(dma_buf->skb), - dma_buf->len, - PCI_DMA_FROMDEVICE); + dma_buf->dma = dma_map_single(&adapter->pdev->dev, + skb_tail_pointer(dma_buf->skb), + dma_buf->len, + DMA_FROM_DEVICE); /* Set descriptor. */ set_rx_buf(desc, dma_buf->dma); @@ -4543,11 +4543,10 @@ static void ksz_free_desc(struct dev_info *adapter) /* Free memory. */ if (adapter->desc_pool.alloc_virt) - pci_free_consistent( - adapter->pdev, - adapter->desc_pool.alloc_size, - adapter->desc_pool.alloc_virt, - adapter->desc_pool.dma_addr); + dma_free_coherent(&adapter->pdev->dev, + adapter->desc_pool.alloc_size, + adapter->desc_pool.alloc_virt, + adapter->desc_pool.dma_addr); /* Reset resource pool. */ adapter->desc_pool.alloc_size = 0; @@ -4590,12 +4589,10 @@ static void ksz_free_buffers(struct dev_info *adapter, static void ksz_free_mem(struct dev_info *adapter) { /* Free transmit buffers. */ - ksz_free_buffers(adapter, &adapter->hw.tx_desc_info, - PCI_DMA_TODEVICE); + ksz_free_buffers(adapter, &adapter->hw.tx_desc_info, DMA_TO_DEVICE); /* Free receive buffers. */ - ksz_free_buffers(adapter, &adapter->hw.rx_desc_info, - PCI_DMA_FROMDEVICE); + ksz_free_buffers(adapter, &adapter->hw.rx_desc_info, DMA_FROM_DEVICE); /* Free descriptors. 
*/ ksz_free_desc(adapter); @@ -4657,9 +4654,8 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev) dma_buf->len = skb_headlen(skb); - dma_buf->dma = pci_map_single( - hw_priv->pdev, skb->data, dma_buf->len, - PCI_DMA_TODEVICE); + dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data, + dma_buf->len, DMA_TO_DEVICE); set_tx_buf(desc, dma_buf->dma); set_tx_len(desc, dma_buf->len); @@ -4676,11 +4672,10 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev) dma_buf = DMA_BUFFER(desc); dma_buf->len = skb_frag_size(this_frag); - dma_buf->dma = pci_map_single( - hw_priv->pdev, - skb_frag_address(this_frag), - dma_buf->len, - PCI_DMA_TODEVICE); + dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, + skb_frag_address(this_frag), + dma_buf->len, + DMA_TO_DEVICE); set_tx_buf(desc, dma_buf->dma); set_tx_len(desc, dma_buf->len); @@ -4700,9 +4695,8 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev) } else { dma_buf->len = len; - dma_buf->dma = pci_map_single( - hw_priv->pdev, skb->data, dma_buf->len, - PCI_DMA_TODEVICE); + dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data, + dma_buf->len, DMA_TO_DEVICE); set_tx_buf(desc, dma_buf->dma); set_tx_len(desc, dma_buf->len); } @@ -4756,9 +4750,8 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal) } dma_buf = DMA_BUFFER(desc); - pci_unmap_single( - hw_priv->pdev, dma_buf->dma, dma_buf->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&hw_priv->pdev->dev, dma_buf->dma, + dma_buf->len, DMA_TO_DEVICE); /* This descriptor contains the last buffer in the packet. */ if (dma_buf->skb) { @@ -4991,9 +4984,8 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw, packet_len = status.rx.frame_len - 4; dma_buf = DMA_BUFFER(desc); - pci_dma_sync_single_for_cpu( - hw_priv->pdev, dma_buf->dma, packet_len + 4, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&hw_priv->pdev->dev, dma_buf->dma, + packet_len + 4, DMA_FROM_DEVICE); do { /* skb->data != skb->head */ @@ -6935,8 +6927,8 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id) result = -ENODEV; - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || - pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) || + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) return result; reg_base = pci_resource_start(pdev, 0); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 2373e72d2d29..f6479384dc4f 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1742,10 +1742,9 @@ done: static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx) { if (tx->head_cpu_ptr) { - pci_free_consistent(tx->adapter->pdev, - sizeof(*tx->head_cpu_ptr), - (void *)(tx->head_cpu_ptr), - tx->head_dma_ptr); + dma_free_coherent(&tx->adapter->pdev->dev, + sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr, + tx->head_dma_ptr); tx->head_cpu_ptr = NULL; tx->head_dma_ptr = 0; } @@ -1753,10 +1752,9 @@ static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx) tx->buffer_info = NULL; if (tx->ring_cpu_ptr) { - pci_free_consistent(tx->adapter->pdev, - tx->ring_allocation_size, - tx->ring_cpu_ptr, - tx->ring_dma_ptr); + dma_free_coherent(&tx->adapter->pdev->dev, + tx->ring_allocation_size, tx->ring_cpu_ptr, + tx->ring_dma_ptr); tx->ring_allocation_size = 0; tx->ring_cpu_ptr = NULL; tx->ring_dma_ptr = 0; @@ -1780,8 +1778,8 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx) 
sizeof(struct lan743x_tx_descriptor), PAGE_SIZE); dma_ptr = 0; - cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev, - ring_allocation_size, &dma_ptr); + cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev, + ring_allocation_size, &dma_ptr, GFP_KERNEL); if (!cpu_ptr) { ret = -ENOMEM; goto cleanup; @@ -1798,8 +1796,9 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx) } tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr; dma_ptr = 0; - cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev, - sizeof(*tx->head_cpu_ptr), &dma_ptr); + cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev, + sizeof(*tx->head_cpu_ptr), &dma_ptr, + GFP_KERNEL); if (!cpu_ptr) { ret = -ENOMEM; goto cleanup; @@ -2281,10 +2280,9 @@ static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx) } if (rx->head_cpu_ptr) { - pci_free_consistent(rx->adapter->pdev, - sizeof(*rx->head_cpu_ptr), - rx->head_cpu_ptr, - rx->head_dma_ptr); + dma_free_coherent(&rx->adapter->pdev->dev, + sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr, + rx->head_dma_ptr); rx->head_cpu_ptr = NULL; rx->head_dma_ptr = 0; } @@ -2293,10 +2291,9 @@ static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx) rx->buffer_info = NULL; if (rx->ring_cpu_ptr) { - pci_free_consistent(rx->adapter->pdev, - rx->ring_allocation_size, - rx->ring_cpu_ptr, - rx->ring_dma_ptr); + dma_free_coherent(&rx->adapter->pdev->dev, + rx->ring_allocation_size, rx->ring_cpu_ptr, + rx->ring_dma_ptr); rx->ring_allocation_size = 0; rx->ring_cpu_ptr = NULL; rx->ring_dma_ptr = 0; @@ -2327,8 +2324,8 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx) sizeof(struct lan743x_rx_descriptor), PAGE_SIZE); dma_ptr = 0; - cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev, - ring_allocation_size, &dma_ptr); + cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev, + ring_allocation_size, &dma_ptr, GFP_KERNEL); if (!cpu_ptr) { ret = -ENOMEM; goto cleanup; @@ -2345,8 +2342,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx) } rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr; dma_ptr = 0; - cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev, - sizeof(*rx->head_cpu_ptr), &dma_ptr); + cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev, + sizeof(*rx->head_cpu_ptr), &dma_ptr, + GFP_KERNEL); if (!cpu_ptr) { ret = -ENOMEM; goto cleanup; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 67e62603fe3b..a0980f81591e 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -640,11 +640,11 @@ static int init_shared_mem(struct s2io_nic *nic) int k = 0; dma_addr_t tmp_p; void *tmp_v; - tmp_v = pci_alloc_consistent(nic->pdev, - PAGE_SIZE, &tmp_p); + tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE, + &tmp_p, GFP_KERNEL); if (!tmp_v) { DBG_PRINT(INFO_DBG, - "pci_alloc_consistent failed for TxDL\n"); + "dma_alloc_coherent failed for TxDL\n"); return -ENOMEM; } /* If we got a zero DMA address(can happen on @@ -658,11 +658,12 @@ static int init_shared_mem(struct s2io_nic *nic) "%s: Zero DMA address for TxDL. 
" "Virtual address %p\n", dev->name, tmp_v); - tmp_v = pci_alloc_consistent(nic->pdev, - PAGE_SIZE, &tmp_p); + tmp_v = dma_alloc_coherent(&nic->pdev->dev, + PAGE_SIZE, &tmp_p, + GFP_KERNEL); if (!tmp_v) { DBG_PRINT(INFO_DBG, - "pci_alloc_consistent failed for TxDL\n"); + "dma_alloc_coherent failed for TxDL\n"); return -ENOMEM; } mem_allocated += PAGE_SIZE; @@ -734,8 +735,8 @@ static int init_shared_mem(struct s2io_nic *nic) rx_blocks = &ring->rx_blocks[j]; size = SIZE_OF_BLOCK; /* size is always page size */ - tmp_v_addr = pci_alloc_consistent(nic->pdev, size, - &tmp_p_addr); + tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size, + &tmp_p_addr, GFP_KERNEL); if (tmp_v_addr == NULL) { /* * In case of failure, free_shared_mem() @@ -835,8 +836,8 @@ static int init_shared_mem(struct s2io_nic *nic) /* Allocation and initialization of Statistics block */ size = sizeof(struct stat_block); mac_control->stats_mem = - pci_alloc_consistent(nic->pdev, size, - &mac_control->stats_mem_phy); + dma_alloc_coherent(&nic->pdev->dev, size, + &mac_control->stats_mem_phy, GFP_KERNEL); if (!mac_control->stats_mem) { /* @@ -906,18 +907,18 @@ static void free_shared_mem(struct s2io_nic *nic) fli = &fifo->list_info[mem_blks]; if (!fli->list_virt_addr) break; - pci_free_consistent(nic->pdev, PAGE_SIZE, - fli->list_virt_addr, - fli->list_phy_addr); + dma_free_coherent(&nic->pdev->dev, PAGE_SIZE, + fli->list_virt_addr, + fli->list_phy_addr); swstats->mem_freed += PAGE_SIZE; } /* If we got a zero DMA address during allocation, * free the page now */ if (mac_control->zerodma_virt_addr) { - pci_free_consistent(nic->pdev, PAGE_SIZE, - mac_control->zerodma_virt_addr, - (dma_addr_t)0); + dma_free_coherent(&nic->pdev->dev, PAGE_SIZE, + mac_control->zerodma_virt_addr, + (dma_addr_t)0); DBG_PRINT(INIT_DBG, "%s: Freeing TxDL with zero DMA address. 
" "Virtual address %p\n", @@ -939,8 +940,8 @@ static void free_shared_mem(struct s2io_nic *nic) tmp_p_addr = ring->rx_blocks[j].block_dma_addr; if (tmp_v_addr == NULL) break; - pci_free_consistent(nic->pdev, size, - tmp_v_addr, tmp_p_addr); + dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr, + tmp_p_addr); swstats->mem_freed += size; kfree(ring->rx_blocks[j].rxds); swstats->mem_freed += sizeof(struct rxd_info) * @@ -993,10 +994,9 @@ static void free_shared_mem(struct s2io_nic *nic) if (mac_control->stats_mem) { swstats->mem_freed += mac_control->stats_mem_sz; - pci_free_consistent(nic->pdev, - mac_control->stats_mem_sz, - mac_control->stats_mem, - mac_control->stats_mem_phy); + dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz, + mac_control->stats_mem, + mac_control->stats_mem_phy); } } @@ -2316,8 +2316,9 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, txds = txdlp; if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) { - pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer, - sizeof(u64), PCI_DMA_TODEVICE); + dma_unmap_single(&nic->pdev->dev, + (dma_addr_t)txds->Buffer_Pointer, + sizeof(u64), DMA_TO_DEVICE); txds++; } @@ -2326,8 +2327,8 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); return NULL; } - pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer, - skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer, + skb_headlen(skb), DMA_TO_DEVICE); frg_cnt = skb_shinfo(skb)->nr_frags; if (frg_cnt) { txds++; @@ -2335,9 +2336,9 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, const skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; if (!txds->Buffer_Pointer) break; - pci_unmap_page(nic->pdev, + dma_unmap_page(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer, - skb_frag_size(frag), PCI_DMA_TODEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); } } memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); @@ -2521,11 +2522,10 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, memset(rxdp, 0, sizeof(struct RxD1)); skb_reserve(skb, NET_IP_ALIGN); rxdp1->Buffer0_ptr = - pci_map_single(ring->pdev, skb->data, + dma_map_single(&ring->pdev->dev, skb->data, size - NET_IP_ALIGN, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(nic->pdev, - rxdp1->Buffer0_ptr)) + DMA_FROM_DEVICE); + if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr)) goto pci_map_failed; rxdp->Control_2 = @@ -2557,17 +2557,16 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, if (from_card_up) { rxdp3->Buffer0_ptr = - pci_map_single(ring->pdev, ba->ba_0, - BUF0_LEN, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(nic->pdev, - rxdp3->Buffer0_ptr)) + dma_map_single(&ring->pdev->dev, + ba->ba_0, BUF0_LEN, + DMA_FROM_DEVICE); + if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr)) goto pci_map_failed; } else - pci_dma_sync_single_for_device(ring->pdev, - (dma_addr_t)rxdp3->Buffer0_ptr, - BUF0_LEN, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&ring->pdev->dev, + (dma_addr_t)rxdp3->Buffer0_ptr, + BUF0_LEN, + DMA_FROM_DEVICE); rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); if (ring->rxd_mode == RXD_MODE_3B) { @@ -2577,29 +2576,28 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, * Buffer2 will have L3/L4 header plus * L4 payload */ - rxdp3->Buffer2_ptr = pci_map_single(ring->pdev, + rxdp3->Buffer2_ptr = 
dma_map_single(&ring->pdev->dev, skb->data, ring->mtu + 4, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); - if (pci_dma_mapping_error(nic->pdev, - rxdp3->Buffer2_ptr)) + if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr)) goto pci_map_failed; if (from_card_up) { rxdp3->Buffer1_ptr = - pci_map_single(ring->pdev, + dma_map_single(&ring->pdev->dev, ba->ba_1, BUF1_LEN, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); - if (pci_dma_mapping_error(nic->pdev, - rxdp3->Buffer1_ptr)) { - pci_unmap_single(ring->pdev, + if (dma_mapping_error(&nic->pdev->dev, + rxdp3->Buffer1_ptr)) { + dma_unmap_single(&ring->pdev->dev, (dma_addr_t)(unsigned long) skb->data, ring->mtu + 4, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); goto pci_map_failed; } } @@ -2668,27 +2666,24 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) continue; if (sp->rxd_mode == RXD_MODE_1) { rxdp1 = (struct RxD1 *)rxdp; - pci_unmap_single(sp->pdev, + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp1->Buffer0_ptr, dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + HEADER_802_2_SIZE + HEADER_SNAP_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); memset(rxdp, 0, sizeof(struct RxD1)); } else if (sp->rxd_mode == RXD_MODE_3B) { rxdp3 = (struct RxD3 *)rxdp; - pci_unmap_single(sp->pdev, + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp3->Buffer0_ptr, - BUF0_LEN, - PCI_DMA_FROMDEVICE); - pci_unmap_single(sp->pdev, + BUF0_LEN, DMA_FROM_DEVICE); + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp3->Buffer1_ptr, - BUF1_LEN, - PCI_DMA_FROMDEVICE); - pci_unmap_single(sp->pdev, + BUF1_LEN, DMA_FROM_DEVICE); + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp3->Buffer2_ptr, - dev->mtu + 4, - PCI_DMA_FROMDEVICE); + dev->mtu + 4, DMA_FROM_DEVICE); memset(rxdp, 0, sizeof(struct RxD3)); } swstats->mem_freed += skb->truesize; @@ -2919,23 +2914,21 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget) } if (ring_data->rxd_mode == RXD_MODE_1) { rxdp1 = (struct RxD1 *)rxdp; - pci_unmap_single(ring_data->pdev, (dma_addr_t) - rxdp1->Buffer0_ptr, + dma_unmap_single(&ring_data->pdev->dev, + (dma_addr_t)rxdp1->Buffer0_ptr, ring_data->mtu + HEADER_ETHERNET_II_802_3_SIZE + HEADER_802_2_SIZE + HEADER_SNAP_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); } else if (ring_data->rxd_mode == RXD_MODE_3B) { rxdp3 = (struct RxD3 *)rxdp; - pci_dma_sync_single_for_cpu(ring_data->pdev, - (dma_addr_t)rxdp3->Buffer0_ptr, - BUF0_LEN, - PCI_DMA_FROMDEVICE); - pci_unmap_single(ring_data->pdev, + dma_sync_single_for_cpu(&ring_data->pdev->dev, + (dma_addr_t)rxdp3->Buffer0_ptr, + BUF0_LEN, DMA_FROM_DEVICE); + dma_unmap_single(&ring_data->pdev->dev, (dma_addr_t)rxdp3->Buffer2_ptr, - ring_data->mtu + 4, - PCI_DMA_FROMDEVICE); + ring_data->mtu + 4, DMA_FROM_DEVICE); } prefetch(skb->data); rx_osm_handler(ring_data, rxdp); @@ -4117,9 +4110,9 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) } frg_len = skb_headlen(skb); - txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data, - frg_len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) + txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data, + frg_len, DMA_TO_DEVICE); + if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer)) goto pci_map_failed; txdp->Host_Control = (unsigned long)skb; @@ -6772,10 +6765,10 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, * Host Control is NULL */ rxdp1->Buffer0_ptr = *temp0 = - pci_map_single(sp->pdev, (*skb)->data, + dma_map_single(&sp->pdev->dev, (*skb)->data, size - 
NET_IP_ALIGN, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr)) + DMA_FROM_DEVICE); + if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr)) goto memalloc_failed; rxdp->Host_Control = (unsigned long) (*skb); } @@ -6798,37 +6791,34 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, } stats->mem_allocated += (*skb)->truesize; rxdp3->Buffer2_ptr = *temp2 = - pci_map_single(sp->pdev, (*skb)->data, - dev->mtu + 4, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr)) + dma_map_single(&sp->pdev->dev, (*skb)->data, + dev->mtu + 4, DMA_FROM_DEVICE); + if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr)) goto memalloc_failed; rxdp3->Buffer0_ptr = *temp0 = - pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(sp->pdev, - rxdp3->Buffer0_ptr)) { - pci_unmap_single(sp->pdev, + dma_map_single(&sp->pdev->dev, ba->ba_0, + BUF0_LEN, DMA_FROM_DEVICE); + if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) { + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp3->Buffer2_ptr, dev->mtu + 4, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); goto memalloc_failed; } rxdp->Host_Control = (unsigned long) (*skb); /* Buffer-1 will be dummy buffer not used */ rxdp3->Buffer1_ptr = *temp1 = - pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(sp->pdev, - rxdp3->Buffer1_ptr)) { - pci_unmap_single(sp->pdev, + dma_map_single(&sp->pdev->dev, ba->ba_1, + BUF1_LEN, DMA_FROM_DEVICE); + if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) { + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp3->Buffer0_ptr, - BUF0_LEN, PCI_DMA_FROMDEVICE); - pci_unmap_single(sp->pdev, + BUF0_LEN, DMA_FROM_DEVICE); + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp3->Buffer2_ptr, dev->mtu + 4, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); goto memalloc_failed; } } @@ -7675,17 +7665,16 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) return ret; } - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__); dma_flag = true; - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { DBG_PRINT(ERR_DBG, - "Unable to obtain 64bit DMA " - "for consistent allocations\n"); + "Unable to obtain 64bit DMA for coherent allocations\n"); pci_disable_device(pdev); return -ENOMEM; } - } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__); } else { pci_disable_device(pdev); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 51cd57ab3d95..4f1f90f5e178 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -1102,10 +1102,10 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) hldev = blockpool->hldev; list_for_each_safe(p, n, &blockpool->free_block_list) { - pci_unmap_single(hldev->pdev, - ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, - ((struct __vxge_hw_blockpool_entry *)p)->length, - PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&hldev->pdev->dev, + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, + ((struct __vxge_hw_blockpool_entry *)p)->length, + DMA_BIDIRECTIONAL); vxge_os_dma_free(hldev->pdev, ((struct 
__vxge_hw_blockpool_entry *)p)->memblock, @@ -1178,10 +1178,10 @@ __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, goto blockpool_create_exit; } - dma_addr = pci_map_single(hldev->pdev, memblock, - VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(hldev->pdev, - dma_addr))) { + dma_addr = dma_map_single(&hldev->pdev->dev, memblock, + VXGE_HW_BLOCK_SIZE, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) { vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); __vxge_hw_blockpool_destroy(blockpool); status = VXGE_HW_ERR_OUT_OF_MEMORY; @@ -2264,10 +2264,10 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, goto exit; } - dma_addr = pci_map_single(devh->pdev, block_addr, length, - PCI_DMA_BIDIRECTIONAL); + dma_addr = dma_map_single(&devh->pdev->dev, block_addr, length, + DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { + if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_addr))) { vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); blockpool->req_out--; goto exit; @@ -2359,11 +2359,10 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, if (!memblock) goto exit; - dma_object->addr = pci_map_single(devh->pdev, memblock, size, - PCI_DMA_BIDIRECTIONAL); + dma_object->addr = dma_map_single(&devh->pdev->dev, memblock, + size, DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(devh->pdev, - dma_object->addr))) { + if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_object->addr))) { vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); memblock = NULL; @@ -2410,11 +2409,10 @@ __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) if (blockpool->pool_size < blockpool->pool_max) break; - pci_unmap_single( - (blockpool->hldev)->pdev, - ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, - ((struct __vxge_hw_blockpool_entry *)p)->length, - PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&(blockpool->hldev)->pdev->dev, + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, + ((struct __vxge_hw_blockpool_entry *)p)->length, + DMA_BIDIRECTIONAL); vxge_os_dma_free( (blockpool->hldev)->pdev, @@ -2445,8 +2443,8 @@ static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh, blockpool = &devh->block_pool; if (size != blockpool->block_size) { - pci_unmap_single(devh->pdev, dma_object->addr, size, - PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&devh->pdev->dev, dma_object->addr, size, + DMA_BIDIRECTIONAL); vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); } else { diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 5de85b9e9e35..b0faa737b817 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -241,10 +241,10 @@ static int vxge_rx_map(void *dtrh, struct vxge_ring *ring) rx_priv = vxge_hw_ring_rxd_private_get(dtrh); rx_priv->skb_data = rx_priv->skb->data; - dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data, - rx_priv->data_size, PCI_DMA_FROMDEVICE); + dma_addr = dma_map_single(&ring->pdev->dev, rx_priv->skb_data, + rx_priv->data_size, DMA_FROM_DEVICE); - if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) { + if (unlikely(dma_mapping_error(&ring->pdev->dev, dma_addr))) { ring->stats.pci_map_fail++; return -EIO; } @@ -323,8 +323,8 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan, static inline void vxge_re_pre_post(void *dtr, struct 
vxge_ring *ring, struct vxge_rx_priv *rx_priv) { - pci_dma_sync_single_for_device(ring->pdev, - rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&ring->pdev->dev, rx_priv->data_dma, + rx_priv->data_size, DMA_FROM_DEVICE); vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size); vxge_hw_ring_rxd_pre_post(ring->handle, dtr); @@ -425,8 +425,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, if (!vxge_rx_map(dtr, ring)) { skb_put(skb, pkt_length); - pci_unmap_single(ring->pdev, data_dma, - data_size, PCI_DMA_FROMDEVICE); + dma_unmap_single(&ring->pdev->dev, + data_dma, data_size, + DMA_FROM_DEVICE); vxge_hw_ring_rxd_pre_post(ringh, dtr); vxge_post(&dtr_cnt, &first_dtr, dtr, @@ -458,9 +459,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, skb_reserve(skb_up, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); - pci_dma_sync_single_for_cpu(ring->pdev, - data_dma, data_size, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&ring->pdev->dev, + data_dma, data_size, + DMA_FROM_DEVICE); vxge_debug_mem(VXGE_TRACE, "%s: %s:%d skb_up = %p", @@ -585,13 +586,13 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, } /* for unfragmented skb */ - pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++], - skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++], + skb_headlen(skb), DMA_TO_DEVICE); for (j = 0; j < frg_cnt; j++) { - pci_unmap_page(fifo->pdev, - txd_priv->dma_buffers[i++], - skb_frag_size(frag), PCI_DMA_TODEVICE); + dma_unmap_page(&fifo->pdev->dev, + txd_priv->dma_buffers[i++], + skb_frag_size(frag), DMA_TO_DEVICE); frag += 1; } @@ -897,10 +898,10 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) first_frg_len = skb_headlen(skb); - dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len, - PCI_DMA_TODEVICE); + dma_pointer = dma_map_single(&fifo->pdev->dev, skb->data, + first_frg_len, DMA_TO_DEVICE); - if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) { + if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) { vxge_hw_fifo_txdl_free(fifo_hw, dtr); fifo->stats.pci_map_fail++; goto _exit0; @@ -977,12 +978,12 @@ _exit1: j = 0; frag = &skb_shinfo(skb)->frags[0]; - pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++], - skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&fifo->pdev->dev, txdl_priv->dma_buffers[j++], + skb_headlen(skb), DMA_TO_DEVICE); for (; j < i; j++) { - pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j], - skb_frag_size(frag), PCI_DMA_TODEVICE); + dma_unmap_page(&fifo->pdev->dev, txdl_priv->dma_buffers[j], + skb_frag_size(frag), DMA_TO_DEVICE); frag += 1; } @@ -1012,8 +1013,8 @@ vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata) if (state != VXGE_HW_RXD_STATE_POSTED) return; - pci_unmap_single(ring->pdev, rx_priv->data_dma, - rx_priv->data_size, PCI_DMA_FROMDEVICE); + dma_unmap_single(&ring->pdev->dev, rx_priv->data_dma, + rx_priv->data_size, DMA_FROM_DEVICE); dev_kfree_skb(rx_priv->skb); rx_priv->skb_data = NULL; @@ -1048,12 +1049,12 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata) frag = &skb_shinfo(skb)->frags[0]; /* for unfragmented skb */ - pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++], - skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++], + skb_headlen(skb), DMA_TO_DEVICE); for (j = 0; j < frg_cnt; j++) { - pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++], - skb_frag_size(frag), PCI_DMA_TODEVICE); + 
dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++], + skb_frag_size(frag), DMA_TO_DEVICE); frag += 1; } @@ -4387,21 +4388,20 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) goto _exit0; } - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { vxge_debug_ll_config(VXGE_TRACE, "%s : using 64bit DMA", __func__); high_dma = 1; - if (pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(64))) { + if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { vxge_debug_init(VXGE_ERR, "%s : unable to obtain 64bit DMA for " "consistent allocations", __func__); ret = -ENOMEM; goto _exit1; } - } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { vxge_debug_ll_config(VXGE_TRACE, "%s : using 32bit DMA", __func__); } else { diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index 422a8e5a8d3b..7be86ef5a584 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -65,6 +65,7 @@ enum mac_version { RTL_GIGA_MAC_VER_52, RTL_GIGA_MAC_VER_60, RTL_GIGA_MAC_VER_61, + RTL_GIGA_MAC_VER_63, RTL_GIGA_MAC_NONE }; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 745e1739e9c8..975f5c10ba47 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -56,6 +56,7 @@ #define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" #define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" #define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" +#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ @@ -146,6 +147,8 @@ static const struct { [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3}, [RTL_GIGA_MAC_VER_60] = {"RTL8125A" }, [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, + /* reserve 62 for CFG_METHOD_4 in the vendor driver */ + [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, }; static const struct pci_device_id rtl8169_pci_tbl[] = { @@ -335,6 +338,7 @@ enum rtl8125_registers { IntrStatus_8125 = 0x3c, TxPoll_8125 = 0x90, MAC0_BKP = 0x19e0, + EEE_TXIDLE_TIMER_8125 = 0x6048, }; #define RX_VLAN_INNER_8125 BIT(22) @@ -655,6 +659,7 @@ MODULE_FIRMWARE(FIRMWARE_8168FP_3); MODULE_FIRMWARE(FIRMWARE_8107E_1); MODULE_FIRMWARE(FIRMWARE_8107E_2); MODULE_FIRMWARE(FIRMWARE_8125A_3); +MODULE_FIRMWARE(FIRMWARE_8125B_2); static inline struct device *tp_to_dev(struct rtl8169_private *tp) { @@ -966,7 +971,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val) case RTL_GIGA_MAC_VER_31: r8168dp_2_mdio_write(tp, location, val); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: r8168g_mdio_write(tp, location, val); break; default: @@ -983,7 +988,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location) case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_2_mdio_read(tp, location); - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: return r8168g_mdio_read(tp, location); default: return r8169_mdio_read(tp, location); @@ -1389,7 +1394,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) break; case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39 ... 
RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: options = RTL_R8(tp, Config2) & ~PME_SIGNAL; if (wolopts) options |= PME_SIGNAL; @@ -1935,7 +1940,10 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) u16 val; enum mac_version ver; } mac_info[] = { - /* 8125 family. */ + /* 8125B family. */ + { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, + + /* 8125A family. */ { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, @@ -2073,6 +2081,17 @@ static void rtl8125a_config_eee_mac(struct rtl8169_private *tp) r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); } +static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp) +{ + RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20); +} + +static void rtl8125b_config_eee_mac(struct rtl8169_private *tp) +{ + rtl8125_set_eee_txidle_timer(tp); + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); +} + static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr) { const u16 w[] = { @@ -2174,7 +2193,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_63: RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; @@ -2208,11 +2227,7 @@ static void rtl_pll_power_down(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - case RTL_GIGA_MAC_VER_52: - case RTL_GIGA_MAC_VER_60: - case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); break; case RTL_GIGA_MAC_VER_40: @@ -2244,11 +2259,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - case RTL_GIGA_MAC_VER_52: - case RTL_GIGA_MAC_VER_60: - case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); break; case RTL_GIGA_MAC_VER_40: @@ -2279,7 +2290,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; - case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); break; default: @@ -2442,6 +2453,12 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond) return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; } +DECLARE_RTL_COND(rtl_rxtx_empty_cond_2) +{ + /* IntrMitigate has new functionality on RTL8125 */ + return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103; +} + static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) { switch (tp->mac_version) { @@ -2452,6 +2469,11 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_60 ... 
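For scale: rtl8125_set_eee_txidle_timer() above derives the EEE TX idle timer from the current MTU, so with the default MTU of 1500 the value written to EEE_TXIDLE_TIMER_8125 is 1500 + 14 (ETH_HLEN) + 0x20 = 1546. This is also why the rtl8169_change_mtu() hunk further down refreshes the timer on RTL8125A-2 and RTL8125B whenever the MTU changes.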
RTL_GIGA_MAC_VER_61: rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); break; + case RTL_GIGA_MAC_VER_63: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); + break; default: break; } @@ -3523,18 +3545,27 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) /* disable new tx descriptor format */ r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); - r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); - r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020); + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); + else + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000); + else + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020); + r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c); r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033); r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040); r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); - r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0067); + r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00); r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); - r8168_mac_ocp_modify(tp, 0xe84c, 0x0000, 0x00c0); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001); udelay(1); @@ -3545,7 +3576,10 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); - rtl8125a_config_eee_mac(tp); + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + rtl8125b_config_eee_mac(tp); + else + rtl8125a_config_eee_mac(tp); RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); udelay(10); @@ -3617,6 +3651,26 @@ static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) rtl_hw_start_8125_common(tp); } +static void rtl_hw_start_8125b(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125b[] = { + { 0x0b, 0xffff, 0xa908 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x4b, 0xffff, 0xa908 }, + { 0x5e, 0xffff, 0x20eb }, + { 0x22, 0x0030, 0x0020 }, + { 0x62, 0x0030, 0x0020 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + rtl_hw_aspm_clkreq_enable(tp, false); + + rtl_ephy_init(tp, e_info_8125b); + rtl_hw_start_8125_common(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + static void rtl_hw_config(struct rtl8169_private *tp) { static const rtl_generic_fct hw_configs[] = { @@ -3667,6 +3721,7 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117, [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1, [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, + [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, }; if (hw_configs[tp->mac_version]) @@ -3753,6 +3808,15 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) netdev_update_features(dev); rtl_jumbo_config(tp); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + rtl8125_set_eee_txidle_timer(tp); + break; + default: + break; + } + return 0; } @@ -3899,7 +3963,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); break; - case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: rtl_enable_rxdvgate(tp); fsleep(2000); break; @@ -5075,7 +5139,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; - case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: rtl_hw_init_8125(tp); break; default: diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index bc8bf48bdbb0..913d030d73eb 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -97,6 +97,14 @@ static void rtl8125a_config_eee_phy(struct phy_device *phydev) phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); } +static void rtl8125b_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000); +} + static void rtl8169s_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev) { @@ -1147,6 +1155,11 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp, rtl_writephy_batch(phydev, phy_reg_init); } +static void rtl8125_legacy_force_mode(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0); +} + static void rtl8125a_1_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev) { @@ -1250,6 +1263,45 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, rtl8125a_config_eee_phy(phydev); } +static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800); + phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090); + phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80f5); + phy_write(phydev, 0x17, 0x760e); + phy_write(phydev, 0x16, 0x8107); + phy_write(phydev, 0x17, 0x360e); + phy_write(phydev, 0x16, 0x8551); + phy_modify(phydev, 0x17, 0xff00, 0x0800); + phy_write(phydev, 0x1f, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000); + phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300); + + r8168g_phy_param(phydev, 0x8044, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x804a, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8050, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8056, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x805c, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8062, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8068, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x806e, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8074, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x807a, 0xffff, 0x2417); + + phy_modify_paged(phydev, 0xa4c, 0x15, 0x0000, 0x0040); + phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000); + + rtl8125_legacy_force_mode(phydev); + rtl8125b_config_eee_phy(phydev); +} + void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, enum mac_version ver) { @@ -1308,6 +1360,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config, [RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config, [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, + [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, }; if (phy_configs[ver]) diff --git 
a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index c7229d022a27..95dbe5e8e1d8 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -638,6 +638,18 @@ static struct phy_driver realtek_drvs[] = { .read_mmd = rtl8125_read_mmd, .write_mmd = rtl8125_write_mmd, }, { + PHY_ID_MATCH_EXACT(0x001cc840), + .name = "RTL8125B 2.5Gbps internal", + .get_features = rtl8125_get_features, + .config_aneg = rtl8125_config_aneg, + .read_status = rtl8125_read_status, + .suspend = genphy_suspend, + .resume = rtlgen_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, + .read_mmd = rtl8125_read_mmd, + .write_mmd = rtl8125_write_mmd, + }, { PHY_ID_MATCH_EXACT(0x001cc961), .name = "RTL8366RB Gigabit Ethernet", .config_init = &rtl8366rb_config_init, diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 51ea56b73a97..3d54c8bbfa86 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -721,7 +721,6 @@ struct qeth_card_options { struct qeth_vnicc_info vnicc; /* VNICC options */ enum qeth_discipline_id layer; enum qeth_ipa_isolation_modes isolation; - enum qeth_ipa_isolation_modes prev_isolation; int sniffer; enum qeth_cq cq; char hsuid[9]; @@ -804,14 +803,13 @@ struct qeth_card { struct workqueue_struct *event_wq; struct workqueue_struct *cmd_wq; wait_queue_head_t wait_q; - DECLARE_HASHTABLE(mac_htable, 4); DECLARE_HASHTABLE(ip_htable, 4); DECLARE_HASHTABLE(local_addrs4, 4); DECLARE_HASHTABLE(local_addrs6, 4); spinlock_t local_addrs4_lock; spinlock_t local_addrs6_lock; struct mutex ip_lock; - DECLARE_HASHTABLE(ip_mc_htable, 4); + DECLARE_HASHTABLE(rx_mode_addrs, 4); struct work_struct rx_mode_work; struct work_struct kernel_thread_starter; spinlock_t thread_mask_lock; @@ -1071,6 +1069,9 @@ int qeth_query_switch_attributes(struct qeth_card *card, struct qeth_switch_info *sw_info); int qeth_query_card_info(struct qeth_card *card, struct carrier_info *carrier_info); +int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, + enum qeth_ipa_isolation_modes mode); + unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset); int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, @@ -1078,7 +1079,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, int elements_needed); int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); -int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); int qeth_configure_cq(struct qeth_card *, enum qeth_cq); int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); void qeth_trace_features(struct qeth_card *); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 88e998de2d03..8a76022fceda 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -969,7 +969,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) spin_lock_irqsave(&card->lock, flags); list_for_each_entry(iob, &card->cmd_waiter_list, list) - qeth_notify_cmd(iob, -EIO); + qeth_notify_cmd(iob, -ECANCELED); spin_unlock_irqrestore(&card->lock, flags); } EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); @@ -1647,6 +1647,7 @@ static void qeth_setup_card(struct qeth_card *card) qeth_init_qdio_info(card); INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); INIT_WORK(&card->close_dev_work, qeth_close_dev_handler); + 
hash_init(card->rx_mode_addrs); hash_init(card->local_addrs4); hash_init(card->local_addrs6); spin_lock_init(&card->local_addrs4_lock); @@ -2025,7 +2026,7 @@ static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob, } static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card, - void *data, + const void *data, unsigned int data_length) { struct qeth_cmd_buffer *iob; @@ -2436,6 +2437,17 @@ static int qeth_cm_setup(struct qeth_card *card) return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL); } +static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type) +{ + if (link_type == QETH_LINK_TYPE_LANE_TR || + link_type == QETH_LINK_TYPE_HSTR) { + dev_err(&card->gdev->dev, "Unsupported Token Ring device\n"); + return false; + } + + return true; +} + static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) { struct net_device *dev = card->dev; @@ -2495,8 +2507,8 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, { __u16 mtu, framesize; __u16 len; - __u8 link_type; struct qeth_cmd_buffer *iob; + u8 link_type = 0; QETH_CARD_TEXT(card, 2, "ulpenacb"); @@ -2516,9 +2528,11 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { memcpy(&link_type, QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); - card->info.link_type = link_type; - } else - card->info.link_type = 0; + if (!qeth_is_supported_link_type(card, link_type)) + return -EPROTONOSUPPORT; + } + + card->info.link_type = link_type; QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type); return 0; } @@ -3100,7 +3114,6 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, enum qeth_prot_versions prot, unsigned int data_length) { - enum qeth_link_types link_type = card->info.link_type; struct qeth_cmd_buffer *iob; struct qeth_ipacmd_hdr *hdr; @@ -3116,7 +3129,7 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, hdr->command = cmd_code; hdr->initiator = IPA_CMD_INITIATOR_HOST; /* hdr->seqno is set by qeth_send_control_data() */ - hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1; + hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH; hdr->rel_adapter_no = (u8) card->dev->dev_port; hdr->prim_version_no = IS_LAYER2(card) ? 
2 : 1; hdr->param_count = 1; @@ -3199,18 +3212,22 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_query_cmds_supp *query_cmd; QETH_CARD_TEXT(card, 3, "quyadpcb"); if (qeth_setadpparms_inspect_rc(cmd)) return -EIO; - if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { - card->info.link_type = - cmd->data.setadapterparms.data.query_cmds_supp.lan_type; + query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp; + if (query_cmd->lan_type & 0x7f) { + if (!qeth_is_supported_link_type(card, query_cmd->lan_type)) + return -EPROTONOSUPPORT; + + card->info.link_type = query_cmd->lan_type; QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type); } - card->options.adp.supported = - cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; + + card->options.adp.supported = query_cmd->supported_cmds; return 0; } @@ -4541,7 +4558,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_set_access_ctrl *access_ctrl_req; - int fallback = *(int *)reply->param; QETH_CARD_TEXT(card, 4, "setaccb"); @@ -4555,70 +4571,54 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, cmd->data.setadapterparms.hdr.return_code); switch (qeth_setadpparms_inspect_rc(cmd)) { case SET_ACCESS_CTRL_RC_SUCCESS: - if (card->options.isolation == ISOLATION_MODE_NONE) { + if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE) dev_info(&card->gdev->dev, "QDIO data connection isolation is deactivated\n"); - } else { + else dev_info(&card->gdev->dev, "QDIO data connection isolation is activated\n"); - } - break; + return 0; case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n", CARD_DEVID(card)); - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return 0; case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n", CARD_DEVID(card)); - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return 0; case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: dev_err(&card->gdev->dev, "Adapter does not " "support QDIO data connection isolation\n"); - break; + return -EOPNOTSUPP; case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: dev_err(&card->gdev->dev, "Adapter is dedicated. 
" "QDIO data connection isolation not supported\n"); - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return -EOPNOTSUPP; case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: dev_err(&card->gdev->dev, "TSO does not permit QDIO data connection isolation\n"); - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return -EPERM; case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: dev_err(&card->gdev->dev, "The adjacent switch port does not " "support reflective relay mode\n"); - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return -EOPNOTSUPP; case SET_ACCESS_CTRL_RC_REFLREL_FAILED: dev_err(&card->gdev->dev, "The reflective relay mode cannot be " "enabled at the adjacent switch port"); - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return -EREMOTEIO; case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: dev_warn(&card->gdev->dev, "Turning off reflective relay mode " "at the adjacent switch failed\n"); - break; + /* benign error while disabling ISOLATION_MODE_FWD */ + return 0; default: - /* this should never happen */ - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return -EIO; } - return (cmd->hdr.return_code) ? -EIO : 0; } -static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, - enum qeth_ipa_isolation_modes isolation, int fallback) +int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, + enum qeth_ipa_isolation_modes mode) { int rc; struct qeth_cmd_buffer *iob; @@ -4627,42 +4627,28 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "setacctl"); + if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { + dev_err(&card->gdev->dev, + "Adapter does not support QDIO data connection isolation\n"); + return -EOPNOTSUPP; + } + iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, SETADP_DATA_SIZEOF(set_access_ctrl)); if (!iob) return -ENOMEM; cmd = __ipa_cmd(iob); access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; - access_ctrl_req->subcmd_code = isolation; + access_ctrl_req->subcmd_code = mode; rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, - &fallback); - QETH_CARD_TEXT_(card, 2, "rc=%d", rc); - return rc; -} - -int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) -{ - int rc = 0; - - QETH_CARD_TEXT(card, 4, "setactlo"); - - if ((IS_OSD(card) || IS_OSX(card)) && - qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { - rc = qeth_setadpparms_set_access_ctrl(card, - card->options.isolation, fallback); - if (rc) { - QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", - rc, CARD_DEVID(card)); - rc = -EOPNOTSUPP; - } - } else if (card->options.isolation != ISOLATION_MODE_NONE) { - card->options.isolation = ISOLATION_MODE_NONE; - - dev_err(&card->gdev->dev, "Adapter does not " - "support QDIO data connection isolation\n"); - rc = -EOPNOTSUPP; + NULL); + if (rc) { + QETH_CARD_TEXT_(card, 2, "rc=%d", rc); + QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", + rc, CARD_DEVID(card)); } + return rc; } @@ -4850,26 +4836,24 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) } static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, - struct qeth_reply *reply, unsigned long data) + struct qeth_reply *reply, + unsigned long data) { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; - struct qeth_qoat_priv *priv; - char *resdata; + 
struct qeth_qoat_priv *priv = reply->param; int resdatalen; QETH_CARD_TEXT(card, 3, "qoatcb"); if (qeth_setadpparms_inspect_rc(cmd)) return -EIO; - priv = (struct qeth_qoat_priv *)reply->param; resdatalen = cmd->data.setadapterparms.hdr.cmdlength; - resdata = (char *)data + 28; if (resdatalen > (priv->buffer_len - priv->response_len)) return -ENOSPC; - memcpy((priv->buffer + priv->response_len), resdata, - resdatalen); + memcpy(priv->buffer + priv->response_len, + &cmd->data.setadapterparms.hdr, resdatalen); priv->response_len += resdatalen; if (cmd->data.setadapterparms.hdr.seq_no < @@ -4890,24 +4874,17 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) QETH_CARD_TEXT(card, 3, "qoatcmd"); - if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { - rc = -EOPNOTSUPP; - goto out; - } + if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) + return -EOPNOTSUPP; - if (copy_from_user(&oat_data, udata, - sizeof(struct qeth_query_oat_data))) { - rc = -EFAULT; - goto out; - } + if (copy_from_user(&oat_data, udata, sizeof(oat_data))) + return -EFAULT; priv.buffer_len = oat_data.buffer_len; priv.response_len = 0; priv.buffer = vzalloc(oat_data.buffer_len); - if (!priv.buffer) { - rc = -ENOMEM; - goto out; - } + if (!priv.buffer) + return -ENOMEM; iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, SETADP_DATA_SIZEOF(query_oat)); @@ -4919,30 +4896,19 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) oat_req = &cmd->data.setadapterparms.data.query_oat; oat_req->subcmd_code = oat_data.command; - rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, - &priv); + rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); if (!rc) { - if (is_compat_task()) - tmp = compat_ptr(oat_data.ptr); - else - tmp = (void __user *)(unsigned long)oat_data.ptr; - - if (copy_to_user(tmp, priv.buffer, - priv.response_len)) { - rc = -EFAULT; - goto out_free; - } - + tmp = is_compat_task() ? 
compat_ptr(oat_data.ptr) : + u64_to_user_ptr(oat_data.ptr); oat_data.response_len = priv.response_len; - if (copy_to_user(udata, &oat_data, - sizeof(struct qeth_query_oat_data))) + if (copy_to_user(tmp, priv.buffer, priv.response_len) || + copy_to_user(udata, &oat_data, sizeof(oat_data))) rc = -EFAULT; } out_free: vfree(priv.buffer); -out: return rc; } @@ -5331,9 +5297,12 @@ retriable: (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))) card->info.hwtrap = 0; - rc = qeth_set_access_ctrl_online(card, 0); - if (rc) - goto out; + if (card->options.isolation != ISOLATION_MODE_NONE) { + rc = qeth_setadpparms_set_access_ctrl(card, + card->options.isolation); + if (rc) + goto out; + } rc = qeth_init_qdio_queues(card); if (rc) { @@ -6841,7 +6810,7 @@ netdev_features_t qeth_features_check(struct sk_buff *skb, /* Traffic with local next-hop is not eligible for some offloads: */ if (skb->ip_summed == CHECKSUM_PARTIAL && - card->options.isolation != ISOLATION_MODE_FWD) { + READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) { netdev_features_t restricted = 0; if (skb_is_gso(skb) && !netif_needs_gso(skb, features)) diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c index e3f4866c158e..68c2588b9dcc 100644 --- a/drivers/s390/net/qeth_core_mpc.c +++ b/drivers/s390/net/qeth_core_mpc.c @@ -10,7 +10,7 @@ #include <asm/cio.h> #include "qeth_core_mpc.h" -unsigned char IDX_ACTIVATE_READ[] = { +const unsigned char IDX_ACTIVATE_READ[] = { 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1, @@ -18,7 +18,7 @@ unsigned char IDX_ACTIVATE_READ[] = { 0x00, 0x00 }; -unsigned char IDX_ACTIVATE_WRITE[] = { +const unsigned char IDX_ACTIVATE_WRITE[] = { 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1, @@ -26,7 +26,7 @@ unsigned char IDX_ACTIVATE_WRITE[] = { 0x00, 0x00 }; -unsigned char CM_ENABLE[] = { +const unsigned char CM_ENABLE[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x63, 0x10, 0x00, 0x00, 0x01, @@ -45,7 +45,7 @@ unsigned char CM_ENABLE[] = { 0xff, 0xff, 0xff }; -unsigned char CM_SETUP[] = { +const unsigned char CM_SETUP[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64, 0x10, 0x00, 0x00, 0x01, @@ -65,7 +65,7 @@ unsigned char CM_SETUP[] = { 0x04, 0x06, 0xc8, 0x00 }; -unsigned char ULP_ENABLE[] = { +const unsigned char ULP_ENABLE[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6b, 0x10, 0x00, 0x00, 0x01, @@ -85,7 +85,7 @@ unsigned char ULP_ENABLE[] = { 0xf1, 0x00, 0x00 }; -unsigned char ULP_SETUP[] = { +const unsigned char ULP_SETUP[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6c, 0x10, 0x00, 0x00, 0x01, @@ -107,7 +107,7 @@ unsigned char ULP_SETUP[] = { 0x00, 0x00, 0x00, 0x00 }; -unsigned char DM_ACT[] = { +const unsigned char DM_ACT[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x55, 0x10, 0x00, 0x00, 0x01, @@ -123,7 +123,7 @@ unsigned char DM_ACT[] = { 0x05, 0x40, 0x01, 0x01, 0x00 }; -unsigned char IPA_PDU_HEADER[] = { +const unsigned char IPA_PDU_HEADER[] = { 0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 0x77, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, diff --git 
a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 9d6f39d8f9ab..b459def0fb26 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -13,13 +13,13 @@ #include <uapi/linux/if_ether.h> #include <uapi/linux/in6.h> +extern const unsigned char IPA_PDU_HEADER[]; #define IPA_PDU_HEADER_SIZE 0x40 #define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e) #define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer + 0x26) #define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer + 0x29) #define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer + 0x3a) -extern unsigned char IPA_PDU_HEADER[]; #define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c) #define QETH_SEQ_NO_LENGTH 4 @@ -858,7 +858,7 @@ extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); /* END OF IP Assist related definitions */ /*****************************************************************************/ -extern unsigned char CM_ENABLE[]; +extern const unsigned char CM_ENABLE[]; #define CM_ENABLE_SIZE 0x63 #define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c) #define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53) @@ -868,7 +868,7 @@ extern unsigned char CM_ENABLE[]; (PDU_ENCAPSULATION(buffer) + 0x13) -extern unsigned char CM_SETUP[]; +extern const unsigned char CM_SETUP[]; #define CM_SETUP_SIZE 0x64 #define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer + 0x2c) #define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51) @@ -877,7 +877,7 @@ extern unsigned char CM_SETUP[]; #define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \ (PDU_ENCAPSULATION(buffer) + 0x1a) -extern unsigned char ULP_ENABLE[]; +extern const unsigned char ULP_ENABLE[]; #define ULP_ENABLE_SIZE 0x6b #define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer + 0x61) #define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer + 0x2c) @@ -898,7 +898,7 @@ extern unsigned char ULP_ENABLE[]; #define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50) #define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19) -extern unsigned char ULP_SETUP[]; +extern const unsigned char ULP_SETUP[]; #define ULP_SETUP_SIZE 0x6c #define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer + 0x2c) #define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51) @@ -910,7 +910,7 @@ extern unsigned char ULP_SETUP[]; (PDU_ENCAPSULATION(buffer) + 0x1a) -extern unsigned char DM_ACT[]; +extern const unsigned char DM_ACT[]; #define DM_ACT_SIZE 0x55 #define QETH_DM_ACT_DEST_ADDR(buffer) (buffer + 0x2c) #define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer + 0x51) @@ -921,9 +921,8 @@ extern unsigned char DM_ACT[]; #define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer + 0x1c) #define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer + 0x20) -extern unsigned char IDX_ACTIVATE_READ[]; -extern unsigned char IDX_ACTIVATE_WRITE[]; - +extern const unsigned char IDX_ACTIVATE_READ[]; +extern const unsigned char IDX_ACTIVATE_WRITE[]; #define IDX_ACTIVATE_SIZE 0x22 #define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b) #define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c) diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index c901c942fed7..8def82336f53 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -448,19 +448,17 @@ static ssize_t qeth_dev_isolation_store(struct device *dev, rc = -EINVAL; goto out; } - rc = count; - - /* defer IP assist if device is offline (until discipline->set_online)*/ - card->options.prev_isolation = card->options.isolation; - card->options.isolation = isolation; - if (qeth_card_hw_is_reachable(card)) { - int ipa_rc = 
qeth_set_access_ctrl_online(card, 1); - if (ipa_rc != 0) - rc = ipa_rc; - } + + if (qeth_card_hw_is_reachable(card)) + rc = qeth_setadpparms_set_access_ctrl(card, isolation); + + if (!rc) + WRITE_ONCE(card->options.isolation, isolation); + out: mutex_unlock(&card->conf_mutex); - return rc; + + return rc ? rc : count; } static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show, diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 2d3bca3c0141..ef7a2db7a724 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -156,7 +156,7 @@ static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card) struct hlist_node *tmp; int i; - hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { + hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) { hash_del(&mac->hnode); kfree(mac); } @@ -438,7 +438,7 @@ static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha) u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2])); struct qeth_mac *mac; - hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) { + hash_for_each_possible(card->rx_mode_addrs, mac, hnode, mac_hash) { if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) { mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING; return; @@ -452,7 +452,7 @@ static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha) ether_addr_copy(mac->mac_addr, ha->addr); mac->disp_flag = QETH_DISP_ADDR_ADD; - hash_add(card->mac_htable, &mac->hnode, mac_hash); + hash_add(card->rx_mode_addrs, &mac->hnode, mac_hash); } static void qeth_l2_rx_mode_work(struct work_struct *work) @@ -475,7 +475,7 @@ static void qeth_l2_rx_mode_work(struct work_struct *work) qeth_l2_add_mac(card, ha); netif_addr_unlock_bh(dev); - hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { + hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) { switch (mac->disp_flag) { case QETH_DISP_ADDR_DELETE: qeth_l2_remove_mac(card, mac->mac_addr); @@ -601,7 +601,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) return rc; } - hash_init(card->mac_htable); INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work); return 0; } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 1e50aa0297a3..15a12487ff7a 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -58,7 +58,7 @@ static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, struct qeth_ipaddr *addr; if (query->is_multicast) { - hash_for_each_possible(card->ip_mc_htable, addr, hnode, key) + hash_for_each_possible(card->rx_mode_addrs, addr, hnode, key) if (qeth_l3_addr_match_ip(addr, query)) return addr; } else { @@ -239,7 +239,7 @@ static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card) struct hlist_node *tmp; int i; - hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { + hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) { hash_del(&addr->hnode); kfree(addr); } @@ -1093,7 +1093,7 @@ static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg) if (!ipm) continue; - hash_add(card->ip_mc_htable, &ipm->hnode, + hash_add(card->rx_mode_addrs, &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); } @@ -1124,8 +1124,8 @@ walk_ipv6: if (!ipm) continue; - hash_add(card->ip_mc_htable, - &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); + hash_add(card->rx_mode_addrs, &ipm->hnode, + qeth_l3_ipaddr_hash(ipm)); } read_unlock_bh(&in6_dev->lock); @@ -1219,7 +1219,7 @@ static void qeth_l3_rx_mode_work(struct work_struct *work) 
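qeth_dev_isolation_store() above now commits the new mode only after the adapter has accepted it, and qeth_features_check() reads the mode locklessly on the transmit path; hence the WRITE_ONCE()/READ_ONCE() pairing. A minimal sketch of the pattern (illustrative; restrict_local_offloads() is a made-up stand-in for the feature-clearing logic):

	/* writer, serialized by card->conf_mutex: */
	WRITE_ONCE(card->options.isolation, isolation);

	/* lockless reader on the xmit path: */
	if (READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD)
		features = restrict_local_offloads(skb, features);	/* hypothetical helper */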
vlan_for_each(card->dev, qeth_l3_add_mcast_rtnl, card); rtnl_unlock(); - hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { + hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) { switch (addr->disp_flag) { case QETH_DISP_ADDR_DELETE: rc = qeth_l3_deregister_addr_entry(card, addr); @@ -1919,12 +1919,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) return rc; if (IS_OSD(card) || IS_OSX(card)) { - if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || - (card->info.link_type == QETH_LINK_TYPE_HSTR)) { - pr_info("qeth_l3: ignoring TR device\n"); - return -ENODEV; - } - card->dev->netdev_ops = &qeth_l3_osa_netdev_ops; /*IPv6 address autoconfiguration stuff*/ @@ -2004,7 +1998,6 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev) } } - hash_init(card->ip_mc_htable); INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work); return 0; } diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index b3a8d3054af0..6479a38e52fa 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -49,6 +49,7 @@ struct br_ip_list { #define BR_ISOLATED BIT(16) #define BR_MRP_AWARE BIT(17) #define BR_MRP_LOST_CONT BIT(18) +#define BR_MRP_LOST_IN_CONT BIT(19) #define BR_DEFAULT_AGEING_TIME (300 * HZ) diff --git a/include/net/switchdev.h b/include/net/switchdev.h index b8c059b4e06d..ff2246914301 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -76,6 +76,10 @@ enum switchdev_obj_id { SWITCHDEV_OBJ_ID_RING_TEST_MRP, SWITCHDEV_OBJ_ID_RING_ROLE_MRP, SWITCHDEV_OBJ_ID_RING_STATE_MRP, + SWITCHDEV_OBJ_ID_IN_TEST_MRP, + SWITCHDEV_OBJ_ID_IN_ROLE_MRP, + SWITCHDEV_OBJ_ID_IN_STATE_MRP, + #endif }; @@ -155,6 +159,40 @@ struct switchdev_obj_ring_state_mrp { #define SWITCHDEV_OBJ_RING_STATE_MRP(OBJ) \ container_of((OBJ), struct switchdev_obj_ring_state_mrp, obj) +/* SWITCHDEV_OBJ_ID_IN_TEST_MRP */ +struct switchdev_obj_in_test_mrp { + struct switchdev_obj obj; + /* The value is in us and a value of 0 means stop */ + u32 interval; + u32 in_id; + u32 period; + u8 max_miss; +}; + +#define SWITCHDEV_OBJ_IN_TEST_MRP(OBJ) \ + container_of((OBJ), struct switchdev_obj_in_test_mrp, obj) + +/* SWITCHDEV_OBJ_ID_IN_ROLE_MRP */ +struct switchdev_obj_in_role_mrp { + struct switchdev_obj obj; + struct net_device *i_port; + u32 ring_id; + u16 in_id; + u8 in_role; +}; + +#define SWITCHDEV_OBJ_IN_ROLE_MRP(OBJ) \ + container_of((OBJ), struct switchdev_obj_in_role_mrp, obj) + +struct switchdev_obj_in_state_mrp { + struct switchdev_obj obj; + u32 in_id; + u8 in_state; +}; + +#define SWITCHDEV_OBJ_IN_STATE_MRP(OBJ) \ + container_of((OBJ), struct switchdev_obj_in_state_mrp, obj) + #endif typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj); diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index c114c1c2bd53..c1227aecd38f 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -167,6 +167,9 @@ enum { IFLA_BRIDGE_MRP_RING_ROLE, IFLA_BRIDGE_MRP_START_TEST, IFLA_BRIDGE_MRP_INFO, + IFLA_BRIDGE_MRP_IN_ROLE, + IFLA_BRIDGE_MRP_IN_STATE, + IFLA_BRIDGE_MRP_START_IN_TEST, __IFLA_BRIDGE_MRP_MAX, }; @@ -240,11 +243,47 @@ enum { IFLA_BRIDGE_MRP_INFO_TEST_INTERVAL, IFLA_BRIDGE_MRP_INFO_TEST_MAX_MISS, IFLA_BRIDGE_MRP_INFO_TEST_MONITOR, + IFLA_BRIDGE_MRP_INFO_I_IFINDEX, + IFLA_BRIDGE_MRP_INFO_IN_STATE, + IFLA_BRIDGE_MRP_INFO_IN_ROLE, + IFLA_BRIDGE_MRP_INFO_IN_TEST_INTERVAL, + IFLA_BRIDGE_MRP_INFO_IN_TEST_MAX_MISS, __IFLA_BRIDGE_MRP_INFO_MAX, }; #define IFLA_BRIDGE_MRP_INFO_MAX (__IFLA_BRIDGE_MRP_INFO_MAX - 1)
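To see how the new uapi pieces fit together: userspace nests IFLA_BRIDGE_MRP_IN_ROLE_* attributes under IFLA_BRIDGE_MRP_IN_ROLE, and br_mrp_in_role_parse() (further down in this patch) collects them into struct br_mrp_in_role. A hypothetical example of the resulting request, attaching a MIM interconnect role to an existing ring (all values are illustrative):

	struct br_mrp_in_role role = {
		.ring_id   = 1,				/* existing MRP ring instance */
		.in_id     = 2,				/* ID carried in MRP_InTest frames */
		.i_ifindex = 7,				/* ifindex of the interconnect port */
		.in_role   = BR_MRP_IN_ROLE_MIM,	/* manager end of the interconnect */
	};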
+enum { + IFLA_BRIDGE_MRP_IN_STATE_UNSPEC, + IFLA_BRIDGE_MRP_IN_STATE_IN_ID, + IFLA_BRIDGE_MRP_IN_STATE_STATE, + __IFLA_BRIDGE_MRP_IN_STATE_MAX, +}; + +#define IFLA_BRIDGE_MRP_IN_STATE_MAX (__IFLA_BRIDGE_MRP_IN_STATE_MAX - 1) + +enum { + IFLA_BRIDGE_MRP_IN_ROLE_UNSPEC, + IFLA_BRIDGE_MRP_IN_ROLE_RING_ID, + IFLA_BRIDGE_MRP_IN_ROLE_IN_ID, + IFLA_BRIDGE_MRP_IN_ROLE_ROLE, + IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX, + __IFLA_BRIDGE_MRP_IN_ROLE_MAX, +}; + +#define IFLA_BRIDGE_MRP_IN_ROLE_MAX (__IFLA_BRIDGE_MRP_IN_ROLE_MAX - 1) + +enum { + IFLA_BRIDGE_MRP_START_IN_TEST_UNSPEC, + IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID, + IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL, + IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS, + IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD, + __IFLA_BRIDGE_MRP_START_IN_TEST_MAX, +}; + +#define IFLA_BRIDGE_MRP_START_IN_TEST_MAX (__IFLA_BRIDGE_MRP_START_IN_TEST_MAX - 1) + struct br_mrp_instance { __u32 ring_id; __u32 p_ifindex; @@ -270,6 +309,25 @@ struct br_mrp_start_test { __u32 monitor; }; +struct br_mrp_in_state { + __u32 in_state; + __u16 in_id; +}; + +struct br_mrp_in_role { + __u32 ring_id; + __u32 in_role; + __u32 i_ifindex; + __u16 in_id; +}; + +struct br_mrp_start_in_test { + __u32 interval; + __u32 max_miss; + __u32 period; + __u16 in_id; +}; + struct bridge_stp_xstats { __u64 transition_blk; __u64 transition_fwd; diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index cc185a007ade..26842ffd0501 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -344,6 +344,7 @@ enum { IFLA_BRPORT_ISOLATED, IFLA_BRPORT_BACKUP_PORT, IFLA_BRPORT_MRP_RING_OPEN, + IFLA_BRPORT_MRP_IN_OPEN, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/include/uapi/linux/mrp_bridge.h b/include/uapi/linux/mrp_bridge.h index bee366540212..6aeb13ef0b1e 100644 --- a/include/uapi/linux/mrp_bridge.h +++ b/include/uapi/linux/mrp_bridge.h @@ -21,11 +21,22 @@ enum br_mrp_ring_role_type { BR_MRP_RING_ROLE_MRA, }; +enum br_mrp_in_role_type { + BR_MRP_IN_ROLE_DISABLED, + BR_MRP_IN_ROLE_MIC, + BR_MRP_IN_ROLE_MIM, +}; + enum br_mrp_ring_state_type { BR_MRP_RING_STATE_OPEN, BR_MRP_RING_STATE_CLOSED, }; +enum br_mrp_in_state_type { + BR_MRP_IN_STATE_OPEN, + BR_MRP_IN_STATE_CLOSED, +}; + enum br_mrp_port_state_type { BR_MRP_PORT_STATE_DISABLED, BR_MRP_PORT_STATE_BLOCKED, @@ -36,6 +47,7 @@ enum br_mrp_port_state_type { enum br_mrp_port_role_type { BR_MRP_PORT_ROLE_PRIMARY, BR_MRP_PORT_ROLE_SECONDARY, + BR_MRP_PORT_ROLE_INTER, }; enum br_mrp_tlv_header_type { @@ -45,6 +57,10 @@ enum br_mrp_tlv_header_type { BR_MRP_TLV_HEADER_RING_TOPO = 0x3, BR_MRP_TLV_HEADER_RING_LINK_DOWN = 0x4, BR_MRP_TLV_HEADER_RING_LINK_UP = 0x5, + BR_MRP_TLV_HEADER_IN_TEST = 0x6, + BR_MRP_TLV_HEADER_IN_TOPO = 0x7, + BR_MRP_TLV_HEADER_IN_LINK_DOWN = 0x8, + BR_MRP_TLV_HEADER_IN_LINK_UP = 0x9, BR_MRP_TLV_HEADER_OPTION = 0x7f, }; @@ -118,4 +134,26 @@ struct br_mrp_oui_hdr { __u8 oui[MRP_OUI_LENGTH]; }; +struct br_mrp_in_test_hdr { + __be16 id; + __u8 sa[ETH_ALEN]; + __be16 port_role; + __be16 state; + __be16 transitions; + __be32 timestamp; +}; + +struct br_mrp_in_topo_hdr { + __u8 sa[ETH_ALEN]; + __be16 id; + __be16 interval; +}; + +struct br_mrp_in_link_hdr { + __u8 sa[ETH_ALEN]; + __be16 port_role; + __be16 id; + __be16 interval; +}; + #endif diff --git a/net/bridge/br_mrp.c b/net/bridge/br_mrp.c index 90592af9db61..b36689e6e7cb 100644 --- a/net/bridge/br_mrp.c +++ b/net/bridge/br_mrp.c @@ -4,6 +4,27 @@ #include "br_private_mrp.h" static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 
0x0, 0x1 }; +static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 }; + +static bool br_mrp_is_ring_port(struct net_bridge_port *p_port, + struct net_bridge_port *s_port, + struct net_bridge_port *port) +{ + if (port == p_port || + port == s_port) + return true; + + return false; +} + +static bool br_mrp_is_in_port(struct net_bridge_port *i_port, + struct net_bridge_port *port) +{ + if (port == i_port) + return true; + + return false; +} static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br, u32 ifindex) @@ -37,6 +58,22 @@ static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id) return res; } +static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id) +{ + struct br_mrp *res = NULL; + struct br_mrp *mrp; + + list_for_each_entry_rcu(mrp, &br->mrp_list, list, + lockdep_rtnl_is_held()) { + if (mrp->in_id == in_id) { + res = mrp; + break; + } + } + + return res; +} + static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex) { struct br_mrp *mrp; @@ -52,6 +89,10 @@ static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex) p = rtnl_dereference(mrp->s_port); if (p && p->dev->ifindex == ifindex) return false; + + p = rtnl_dereference(mrp->i_port); + if (p && p->dev->ifindex == ifindex) + return false; } return true; @@ -66,7 +107,8 @@ static struct br_mrp *br_mrp_find_port(struct net_bridge *br, list_for_each_entry_rcu(mrp, &br->mrp_list, list, lockdep_rtnl_is_held()) { if (rcu_access_pointer(mrp->p_port) == p || - rcu_access_pointer(mrp->s_port) == p) { + rcu_access_pointer(mrp->s_port) == p || + rcu_access_pointer(mrp->i_port) == p) { res = mrp; break; } @@ -160,6 +202,36 @@ static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp, return skb; } +static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp, + struct net_bridge_port *p, + enum br_mrp_port_role_type port_role) +{ + struct br_mrp_in_test_hdr *hdr = NULL; + struct sk_buff *skb = NULL; + + if (!p) + return NULL; + + skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac); + if (!skb) + return NULL; + + br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr)); + hdr = skb_put(skb, sizeof(*hdr)); + + hdr->id = cpu_to_be16(mrp->in_id); + ether_addr_copy(hdr->sa, p->br->dev->dev_addr); + hdr->port_role = cpu_to_be16(port_role); + hdr->state = cpu_to_be16(mrp->in_state); + hdr->transitions = cpu_to_be16(mrp->in_transitions); + hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies)); + + br_mrp_skb_common(skb, mrp); + br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0); + + return skb; +} + /* This function is continuously called in the following cases: * - when node role is MRM, in this case test_monitor is always set to false * because it needs to notify the userspace that the ring is open and needs to @@ -213,7 +285,7 @@ static void br_mrp_test_work_expired(struct work_struct *work) } if (notify_open && !mrp->ring_role_offloaded) - br_mrp_port_open(p->dev, true); + br_mrp_ring_port_open(p->dev, true); } p = rcu_dereference(mrp->s_port); @@ -229,7 +301,7 @@ static void br_mrp_test_work_expired(struct work_struct *work) } if (notify_open && !mrp->ring_role_offloaded) - br_mrp_port_open(p->dev, true); + br_mrp_ring_port_open(p->dev, true); } out: @@ -239,6 +311,83 @@ out: usecs_to_jiffies(mrp->test_interval)); } +/* This function is continuously called when the node has the interconnect role + * MIM. It would generate interconnect test frames and will send them on all 3 + * ports. 
But it will also check if it stops receiving interconnect test frames. + */ +static void br_mrp_in_test_work_expired(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work); + struct net_bridge_port *p; + bool notify_open = false; + struct sk_buff *skb; + + if (time_before_eq(mrp->in_test_end, jiffies)) + return; + + if (mrp->in_test_count_miss < mrp->in_test_max_miss) { + mrp->in_test_count_miss++; + } else { + /* Notify that the interconnect ring is open only if the + * interconnect ring state is closed, otherwise it would + * continue to notify at every interval. + */ + if (mrp->in_state == BR_MRP_IN_STATE_CLOSED) + notify_open = true; + } + + rcu_read_lock(); + + p = rcu_dereference(mrp->p_port); + if (p) { + skb = br_mrp_alloc_in_test_skb(mrp, p, + BR_MRP_PORT_ROLE_PRIMARY); + if (!skb) + goto out; + + skb_reset_network_header(skb); + dev_queue_xmit(skb); + + if (notify_open && !mrp->in_role_offloaded) + br_mrp_in_port_open(p->dev, true); + } + + p = rcu_dereference(mrp->s_port); + if (p) { + skb = br_mrp_alloc_in_test_skb(mrp, p, + BR_MRP_PORT_ROLE_SECONDARY); + if (!skb) + goto out; + + skb_reset_network_header(skb); + dev_queue_xmit(skb); + + if (notify_open && !mrp->in_role_offloaded) + br_mrp_in_port_open(p->dev, true); + } + + p = rcu_dereference(mrp->i_port); + if (p) { + skb = br_mrp_alloc_in_test_skb(mrp, p, + BR_MRP_PORT_ROLE_INTER); + if (!skb) + goto out; + + skb_reset_network_header(skb); + dev_queue_xmit(skb); + + if (notify_open && !mrp->in_role_offloaded) + br_mrp_in_port_open(p->dev, true); + } + +out: + rcu_read_unlock(); + + queue_delayed_work(system_wq, &mrp->in_test_work, + usecs_to_jiffies(mrp->in_test_interval)); +} + /* Deletes the MRP instance. * note: called under rtnl_lock */ @@ -251,6 +400,10 @@ static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp) cancel_delayed_work_sync(&mrp->test_work); br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0); + /* Stop sending MRP_InTest frames if it has an interconnect role */ + cancel_delayed_work_sync(&mrp->in_test_work); + br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0); + br_mrp_switchdev_del(br, mrp); /* Reset the ports */ @@ -278,6 +431,18 @@ static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp) rcu_assign_pointer(mrp->s_port, NULL); } + p = rtnl_dereference(mrp->i_port); + if (p) { + spin_lock_bh(&br->lock); + state = netif_running(br->dev) ? 
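A worked example of the timing in br_mrp_in_test_work_expired() above: the work item reschedules itself every in_test_interval microseconds, and br_mrp_mim_process() (further down) resets in_test_count_miss on every InTest frame that comes back, so with interval = 1000 us and max_miss = 3 a MIM reports the interconnect ring open roughly (3 + 1) * 1000 us = 4 ms after its frames stop returning. in_test_end, set from the configured period, bounds how long the test generator runs at all.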
+ BR_STATE_FORWARDING : BR_STATE_DISABLED; + p->state = state; + p->flags &= ~BR_MRP_AWARE; + spin_unlock_bh(&br->lock); + br_mrp_port_switchdev_set_state(p, state); + rcu_assign_pointer(mrp->i_port, NULL); + } + list_del_rcu(&mrp->list); kfree_rcu(mrp, rcu); } @@ -329,6 +494,7 @@ int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance) rcu_assign_pointer(mrp->s_port, p); INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired); + INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired); list_add_tail_rcu(&mrp->list, &br->mrp_list); err = br_mrp_switchdev_add(br, mrp); @@ -511,6 +677,180 @@ int br_mrp_start_test(struct net_bridge *br, return 0; } +/* Set in state, in state can be only Open or Closed + * note: already called with rtnl_lock + */ +int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state) +{ + struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id); + + if (!mrp) + return -EINVAL; + + if (mrp->in_state == BR_MRP_IN_STATE_CLOSED && + state->in_state != BR_MRP_IN_STATE_CLOSED) + mrp->in_transitions++; + + mrp->in_state = state->in_state; + + br_mrp_switchdev_set_in_state(br, mrp, state->in_state); + + return 0; +} + +/* Set in role, in role can be only MIM (Media Interconnection Manager) or + * MIC (Media Interconnection Client). + * note: already called with rtnl_lock + */ +int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role) +{ + struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id); + struct net_bridge_port *p; + int err; + + if (!mrp) + return -EINVAL; + + if (!br_mrp_get_port(br, role->i_ifindex)) + return -EINVAL; + + if (role->in_role == BR_MRP_IN_ROLE_DISABLED) { + u8 state; + + /* It is not allowed to disable a port that doesn't exist */ + p = rtnl_dereference(mrp->i_port); + if (!p) + return -EINVAL; + + /* Stop generating MRP_InTest frames */ + cancel_delayed_work_sync(&mrp->in_test_work); + br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0); + + /* Remove the port */ + spin_lock_bh(&br->lock); + state = netif_running(br->dev) ? + BR_STATE_FORWARDING : BR_STATE_DISABLED; + p->state = state; + p->flags &= ~BR_MRP_AWARE; + spin_unlock_bh(&br->lock); + br_mrp_port_switchdev_set_state(p, state); + rcu_assign_pointer(mrp->i_port, NULL); + + mrp->in_role = role->in_role; + mrp->in_id = 0; + + return 0; + } + + /* It is not possible for the same port to be part of multiple rings */ + if (!br_mrp_unique_ifindex(br, role->i_ifindex)) + return -EINVAL; + + /* It is not allowed to set a different interconnect port if the mrp + * instance already has one. First it needs to be disabled, and only + * after that can the new port be set + */ + if (rcu_access_pointer(mrp->i_port)) + return -EINVAL; + + p = br_mrp_get_port(br, role->i_ifindex); + spin_lock_bh(&br->lock); + p->state = BR_STATE_FORWARDING; + p->flags |= BR_MRP_AWARE; + spin_unlock_bh(&br->lock); + rcu_assign_pointer(mrp->i_port, p); + + mrp->in_role = role->in_role; + mrp->in_id = role->in_id; + + /* If there is an error just bail out */ + err = br_mrp_switchdev_set_in_role(br, mrp, role->in_id, + role->ring_id, role->in_role); + if (err && err != -EOPNOTSUPP) + return err; + + /* Now detect if the HW actually applied the role or not. If the HW + * applied the role it means that the SW will not need to do those + * operations anymore. For example if the role is MIM then the HW will + * notify the SW when the interconnect ring is open, but if the role is + * not pushed to the HW the SW will need to detect when the interconnect + * ring is open. 
+ */ + mrp->in_role_offloaded = err == -EOPNOTSUPP ? 0 : 1; + + return 0; +} + +/* Start generating MRP_InTest frames; the frames are generated by the + * HW and if that fails, they are generated by the SW. + * note: already called with rtnl_lock + */ +int br_mrp_start_in_test(struct net_bridge *br, + struct br_mrp_start_in_test *in_test) +{ + struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id); + + if (!mrp) + return -EINVAL; + + if (mrp->in_role != BR_MRP_IN_ROLE_MIM) + return -EINVAL; + + /* Try to push it to the HW and if it fails then continue with SW + * implementation and if that also fails then return error. + */ + if (!br_mrp_switchdev_send_in_test(br, mrp, in_test->interval, + in_test->max_miss, in_test->period)) + return 0; + + mrp->in_test_interval = in_test->interval; + mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period); + mrp->in_test_max_miss = in_test->max_miss; + mrp->in_test_count_miss = 0; + queue_delayed_work(system_wq, &mrp->in_test_work, + usecs_to_jiffies(in_test->interval)); + + return 0; +} + +/* Determine if the frame type is a ring frame */ +static bool br_mrp_ring_frame(struct sk_buff *skb) +{ + const struct br_mrp_tlv_hdr *hdr; + struct br_mrp_tlv_hdr _hdr; + + hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); + if (!hdr) + return false; + + if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST || + hdr->type == BR_MRP_TLV_HEADER_RING_TOPO || + hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN || + hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP || + hdr->type == BR_MRP_TLV_HEADER_OPTION) + return true; + + return false; +} + +/* Determine if the frame type is an interconnect frame */ +static bool br_mrp_in_frame(struct sk_buff *skb) +{ + const struct br_mrp_tlv_hdr *hdr; + struct br_mrp_tlv_hdr _hdr; + + hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); + if (!hdr) + return false; + + if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST || + hdr->type == BR_MRP_TLV_HEADER_IN_TOPO || + hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN || + hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP) + return true; + + return false; +} + /* Process only MRP Test frame. All the other MRP frames are processed by * userspace application * note: already called with rcu_read_lock @@ -537,7 +877,7 @@ static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port, * not closed */ if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED) - br_mrp_port_open(port->dev, false); + br_mrp_ring_port_open(port->dev, false); } /* Determine if the test hdr has a better priority than the node */ @@ -591,17 +931,92 @@ static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br, mrp->test_count_miss = 0; } -/* This will just forward the frame to the other mrp ring port(MRC role) or will - * not do anything. +/* Process only MRP InTest frame. All the other MRP frames are processed by + * userspace application + * note: already called with rcu_read_lock + */ +static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port, + struct sk_buff *skb) +{ + const struct br_mrp_in_test_hdr *in_hdr; + struct br_mrp_in_test_hdr _in_hdr; + const struct br_mrp_tlv_hdr *hdr; + struct br_mrp_tlv_hdr _hdr; + + /* Each MRP header starts with a version field which is 16 bits. + * Therefore skip the version and get directly the TLV header. 
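For reference, the layout the parsers above and below assume for an MRP PDU after the Ethernet header (a sketch from the uapi definitions; struct br_mrp_tlv_hdr is two bytes, type then length):

	offset 0:  __be16 version
	offset 2:  struct br_mrp_tlv_hdr	/* .type drives the dispatch */
	offset 4:  TLV payload, e.g. struct br_mrp_in_test_hdr for IN_TEST

which is why every lookup starts at skb_header_pointer(skb, sizeof(uint16_t), ...), and why br_mrp_mim_process() reads the InTest header at sizeof(uint16_t) + sizeof(_hdr).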
+ */ + hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); + if (!hdr) + return false; + + /* The check for InTest frame type was already done */ + in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr), + sizeof(_in_hdr), &_in_hdr); + if (!in_hdr) + return false; + + /* It needs to process only its own InTest frames. */ + if (mrp->in_id != ntohs(in_hdr->id)) + return false; + + mrp->in_test_count_miss = 0; + + /* Notify the userspace that the ring is closed only when the ring is + * not closed + */ + if (mrp->in_state != BR_MRP_IN_STATE_CLOSED) + br_mrp_in_port_open(port->dev, false); + + return true; +} + +/* Get the MRP frame type + * note: already called with rcu_read_lock + */ +static u8 br_mrp_get_frame_type(struct sk_buff *skb) +{ + const struct br_mrp_tlv_hdr *hdr; + struct br_mrp_tlv_hdr _hdr; + + /* Each MRP header starts with a version field which is 16 bits. + * Therefore skip the version and get directly the TLV header. + */ + hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); + if (!hdr) + return 0xff; + + return hdr->type; +} + +static bool br_mrp_mrm_behaviour(struct br_mrp *mrp) +{ + if (mrp->ring_role == BR_MRP_RING_ROLE_MRM || + (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor)) + return true; + + return false; +} + +static bool br_mrp_mrc_behaviour(struct br_mrp *mrp) +{ + if (mrp->ring_role == BR_MRP_RING_ROLE_MRC || + (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor)) + return true; + + return false; +} + +/* This will just forward the frame to the other mrp ring ports, depending on + * the frame type, ring role and interconnect role * note: already called with rcu_read_lock */ static int br_mrp_rcv(struct net_bridge_port *p, struct sk_buff *skb, struct net_device *dev) { - struct net_device *s_dev, *p_dev, *d_dev; - struct net_bridge_port *p_port, *s_port; + struct net_bridge_port *p_port, *s_port, *i_port = NULL; + struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL; struct net_bridge *br; - struct sk_buff *nskb; struct br_mrp *mrp; /* If port is disabled don't accept any frames */ @@ -616,46 +1031,139 @@ static int br_mrp_rcv(struct net_bridge_port *p, p_port = rcu_dereference(mrp->p_port); if (!p_port) return 0; + p_dst = p_port; s_port = rcu_dereference(mrp->s_port); if (!s_port) return 0; + s_dst = s_port; - /* If the role is MRM then don't forward the frames */ - if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) { - br_mrp_mrm_process(mrp, p, skb); - return 1; - } - - /* If the role is MRA then don't forward the frames if it behaves as - * MRM node + /* If the frame is a ring frame then it is not required to check the + * interconnect role and ports to process or forward the frame */ - if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) { - if (!mrp->test_monitor) { + if (br_mrp_ring_frame(skb)) { + /* If the role is MRM then don't forward the frames */ + if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) { br_mrp_mrm_process(mrp, p, skb); - return 1; + goto no_forward; } - br_mrp_mra_process(mrp, br, p, skb); + /* If the role is MRA then don't forward the frames if it + * behaves as MRM node + */ + if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) { + if (!mrp->test_monitor) { + br_mrp_mrm_process(mrp, p, skb); + goto no_forward; + } + + br_mrp_mra_process(mrp, br, p, skb); + } + + goto forward; } - /* Clone the frame and forward it on the other MRP port */ - nskb = skb_clone(skb, GFP_ATOMIC); - if (!nskb) - return 0; + if (br_mrp_in_frame(skb)) { + u8 in_type = br_mrp_get_frame_type(skb); - p_dev = p_port->dev; - 
+
+/* This will just forward the frame to the other MRP ring ports, depending on
+ * the frame type, the ring role and the interconnect role
  * note: already called with rcu_read_lock
  */
 static int br_mrp_rcv(struct net_bridge_port *p,
 		      struct sk_buff *skb, struct net_device *dev)
 {
-	struct net_device *s_dev, *p_dev, *d_dev;
-	struct net_bridge_port *p_port, *s_port;
+	struct net_bridge_port *p_port, *s_port, *i_port = NULL;
+	struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL;
 	struct net_bridge *br;
-	struct sk_buff *nskb;
 	struct br_mrp *mrp;
 
 	/* If port is disabled don't accept any frames */
@@ -616,46 +1031,139 @@
 	p_port = rcu_dereference(mrp->p_port);
 	if (!p_port)
 		return 0;
+	p_dst = p_port;
 
 	s_port = rcu_dereference(mrp->s_port);
 	if (!s_port)
 		return 0;
+	s_dst = s_port;
 
-	/* If the role is MRM then don't forward the frames */
-	if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
-		br_mrp_mrm_process(mrp, p, skb);
-		return 1;
-	}
-
-	/* If the role is MRA then don't forward the frames if it behaves as
-	 * MRM node
+	/* If the frame is a ring frame then it is not required to check the
+	 * interconnect role and ports to process or forward the frame
 	 */
-	if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
-		if (!mrp->test_monitor) {
+	if (br_mrp_ring_frame(skb)) {
+		/* If the role is MRM then don't forward the frames */
+		if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
 			br_mrp_mrm_process(mrp, p, skb);
-			return 1;
+			goto no_forward;
 		}
 
-		br_mrp_mra_process(mrp, br, p, skb);
+		/* If the role is MRA then don't forward the frames if it
+		 * behaves as an MRM node
+		 */
+		if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
+			if (!mrp->test_monitor) {
+				br_mrp_mrm_process(mrp, p, skb);
+				goto no_forward;
+			}
+
+			br_mrp_mra_process(mrp, br, p, skb);
+		}
+
+		goto forward;
 	}
 
-	/* Clone the frame and forward it on the other MRP port */
-	nskb = skb_clone(skb, GFP_ATOMIC);
-	if (!nskb)
-		return 0;
+	if (br_mrp_in_frame(skb)) {
+		u8 in_type = br_mrp_get_frame_type(skb);
 
-	p_dev = p_port->dev;
-	s_dev = s_port->dev;
+		i_port = rcu_dereference(mrp->i_port);
+		i_dst = i_port;
 
-	if (p_dev == dev)
-		d_dev = s_dev;
-	else
-		d_dev = p_dev;
+		/* If the ring port is in the blocking state it should not
+		 * forward In_Test frames
+		 */
+		if (br_mrp_is_ring_port(p_port, s_port, p) &&
+		    p->state == BR_STATE_BLOCKING &&
+		    in_type == BR_MRP_TLV_HEADER_IN_TEST)
+			goto no_forward;
+
+		/* A node that behaves as MRM needs to stop forwarding the
+		 * frames when the ring is closed, otherwise a loop forms.
+		 * In this case the frame is not forwarded between the ring
+		 * ports.
+		 */
+		if (br_mrp_mrm_behaviour(mrp) &&
+		    br_mrp_is_ring_port(p_port, s_port, p) &&
+		    (s_port->state != BR_STATE_FORWARDING ||
+		     p_port->state != BR_STATE_FORWARDING)) {
+			p_dst = NULL;
+			s_dst = NULL;
+		}
+
+		/* A node that behaves as MRC and doesn't have an interconnect
+		 * role should forward all frames between the ring ports,
+		 * because it doesn't have an interconnect port
+		 */
+		if (br_mrp_mrc_behaviour(mrp) &&
+		    mrp->in_role == BR_MRP_IN_ROLE_DISABLED)
+			goto forward;
+
+		if (mrp->in_role == BR_MRP_IN_ROLE_MIM) {
+			if (in_type == BR_MRP_TLV_HEADER_IN_TEST) {
+				/* MIM should not forward its own InTest
+				 * frames
+				 */
+				if (br_mrp_mim_process(mrp, p, skb)) {
+					goto no_forward;
+				} else {
+					if (br_mrp_is_ring_port(p_port, s_port,
+								p))
+						i_dst = NULL;
+
+					if (br_mrp_is_in_port(i_port, p))
+						goto no_forward;
+				}
+			} else {
+				/* MIM should forward IntLinkChange and
+				 * IntTopoChange frames between the ring
+				 * ports, but not if the frame was received
+				 * on the interconnect port
+				 */
+				if (br_mrp_is_ring_port(p_port, s_port, p))
+					i_dst = NULL;
+
+				if (br_mrp_is_in_port(i_port, p))
+					goto no_forward;
+			}
+		}
+
+		if (mrp->in_role == BR_MRP_IN_ROLE_MIC) {
+			/* MIC should forward InTest frames on all ports
+			 * regardless of the receiving port
+			 */
+			if (in_type == BR_MRP_TLV_HEADER_IN_TEST)
+				goto forward;
+
+			/* MIC should forward IntLinkChange frames to all
+			 * ports, but only if they were received on a ring
+			 * port
+			 */
+			if (br_mrp_is_ring_port(p_port, s_port, p) &&
+			    (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
+			     in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
+				goto forward;
+
+			/* Forward the InTopo frames only between the ring
+			 * ports
+			 */
+			if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) {
+				i_dst = NULL;
+				goto forward;
+			}
+
+			/* In all the other cases don't forward the frames */
+			goto no_forward;
+		}
+	}
 
-	nskb->dev = d_dev;
-	skb_push(nskb, ETH_HLEN);
-	dev_queue_xmit(nskb);
+forward:
+	if (p_dst)
+		br_forward(p_dst, skb, true, false);
+	if (s_dst)
+		br_forward(s_dst, skb, true, false);
+	if (i_dst)
+		br_forward(i_dst, skb, true, false);
+no_forward:
 	return 1;
 }
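The MIC branch of br_mrp_rcv() is easiest to audit as a pure decision on the pair (frame type, receiving port). A hedged restatement, for illustration only: the enum and the function are invented, the BR_MRP_TLV_HEADER_* constants are the ones the patch tests, and MIC_FWD_RING_ONLY corresponds to clearing i_dst before jumping to the forward label. MIM differs in that it terminates its own In_Test frames via br_mrp_mim_process() and never re-forwards interconnect frames that arrived on the interconnect port.

enum mic_fwd { MIC_FWD_ALL, MIC_FWD_RING_ONLY, MIC_FWD_NONE };

static enum mic_fwd mic_forward_decision(u8 in_type, bool rx_on_ring_port)
{
	switch (in_type) {
	case BR_MRP_TLV_HEADER_IN_TEST:
		return MIC_FWD_ALL;	/* flood, whatever the RX port */
	case BR_MRP_TLV_HEADER_IN_LINK_UP:
	case BR_MRP_TLV_HEADER_IN_LINK_DOWN:
		/* IntLinkChange only propagates when it came off the ring */
		return rx_on_ring_port ? MIC_FWD_ALL : MIC_FWD_NONE;
	case BR_MRP_TLV_HEADER_IN_TOPO:
		return MIC_FWD_RING_ONLY;	/* i_dst = NULL in the patch */
	default:
		return MIC_FWD_NONE;
	}
}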
diff --git a/net/bridge/br_mrp_netlink.c b/net/bridge/br_mrp_netlink.c
index c4f5c356811f..2a2fdf3500c5 100644
--- a/net/bridge/br_mrp_netlink.c
+++ b/net/bridge/br_mrp_netlink.c
@@ -14,6 +14,9 @@ static const struct nla_policy br_mrp_policy[IFLA_BRIDGE_MRP_MAX + 1] = {
 	[IFLA_BRIDGE_MRP_RING_STATE]	= { .type = NLA_NESTED },
 	[IFLA_BRIDGE_MRP_RING_ROLE]	= { .type = NLA_NESTED },
 	[IFLA_BRIDGE_MRP_START_TEST]	= { .type = NLA_NESTED },
+	[IFLA_BRIDGE_MRP_IN_ROLE]	= { .type = NLA_NESTED },
+	[IFLA_BRIDGE_MRP_IN_STATE]	= { .type = NLA_NESTED },
+	[IFLA_BRIDGE_MRP_START_IN_TEST]	= { .type = NLA_NESTED },
 };
 
 static const struct nla_policy
@@ -235,6 +238,121 @@ static int br_mrp_start_test_parse(struct net_bridge *br, struct nlattr *attr,
 	return br_mrp_start_test(br, &test);
 }
 
+static const struct nla_policy
+br_mrp_in_state_policy[IFLA_BRIDGE_MRP_IN_STATE_MAX + 1] = {
+	[IFLA_BRIDGE_MRP_IN_STATE_UNSPEC]	= { .type = NLA_REJECT },
+	[IFLA_BRIDGE_MRP_IN_STATE_IN_ID]	= { .type = NLA_U32 },
+	[IFLA_BRIDGE_MRP_IN_STATE_STATE]	= { .type = NLA_U32 },
+};
+
+static int br_mrp_in_state_parse(struct net_bridge *br, struct nlattr *attr,
+				 struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[IFLA_BRIDGE_MRP_IN_STATE_MAX + 1];
+	struct br_mrp_in_state state;
+	int err;
+
+	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_IN_STATE_MAX, attr,
+			       br_mrp_in_state_policy, extack);
+	if (err)
+		return err;
+
+	if (!tb[IFLA_BRIDGE_MRP_IN_STATE_IN_ID] ||
+	    !tb[IFLA_BRIDGE_MRP_IN_STATE_STATE]) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Missing attribute: IN_ID or STATE");
+		return -EINVAL;
+	}
+
+	memset(&state, 0x0, sizeof(state));
+
+	state.in_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_STATE_IN_ID]);
+	state.in_state = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_STATE_STATE]);
+
+	return br_mrp_set_in_state(br, &state);
+}
+
+static const struct nla_policy
+br_mrp_in_role_policy[IFLA_BRIDGE_MRP_IN_ROLE_MAX + 1] = {
+	[IFLA_BRIDGE_MRP_IN_ROLE_UNSPEC]	= { .type = NLA_REJECT },
+	[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID]	= { .type = NLA_U32 },
+	[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID]		= { .type = NLA_U16 },
+	[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]		= { .type = NLA_U32 },
+	[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX]	= { .type = NLA_U32 },
+};
+
+static int br_mrp_in_role_parse(struct net_bridge *br, struct nlattr *attr,
+				struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[IFLA_BRIDGE_MRP_IN_ROLE_MAX + 1];
+	struct br_mrp_in_role role;
+	int err;
+
+	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_IN_ROLE_MAX, attr,
+			       br_mrp_in_role_policy, extack);
+	if (err)
+		return err;
+
+	if (!tb[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID] ||
+	    !tb[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID] ||
+	    !tb[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX] ||
+	    !tb[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Missing attribute: RING_ID or ROLE or IN_ID or I_IFINDEX");
+		return -EINVAL;
+	}
+
+	memset(&role, 0x0, sizeof(role));
+
+	role.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID]);
+	role.in_id = nla_get_u16(tb[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID]);
+	role.i_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX]);
+	role.in_role = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]);
+
+	return br_mrp_set_in_role(br, &role);
+}
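Both parsers above follow the same shape: nla_parse_nested() against a reject-by-default policy, an explicit presence check for every attribute, then a copy into the request struct. From userspace the mirror image is three levels of attribute nesting. A hedged libmnl sketch for the IN_STATE case; put_mrp_in_state() is an invented helper name, and the RTM_SETLINK header and ifindex boilerplate are elided:

#include <stdint.h>
#include <libmnl/libmnl.h>
#include <linux/if_bridge.h>	/* IFLA_BRIDGE_MRP* values added above */
#include <linux/rtnetlink.h>

static void put_mrp_in_state(struct nlmsghdr *nlh, uint32_t in_id,
			     uint32_t state)
{
	struct nlattr *af, *mrp, *st;

	/* IFLA_AF_SPEC -> IFLA_BRIDGE_MRP -> IFLA_BRIDGE_MRP_IN_STATE */
	af = mnl_attr_nest_start(nlh, IFLA_AF_SPEC);
	mrp = mnl_attr_nest_start(nlh, IFLA_BRIDGE_MRP);
	st = mnl_attr_nest_start(nlh, IFLA_BRIDGE_MRP_IN_STATE);
	mnl_attr_put_u32(nlh, IFLA_BRIDGE_MRP_IN_STATE_IN_ID, in_id);
	mnl_attr_put_u32(nlh, IFLA_BRIDGE_MRP_IN_STATE_STATE, state);
	mnl_attr_nest_end(nlh, st);
	mnl_attr_nest_end(nlh, mrp);
	mnl_attr_nest_end(nlh, af);
}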
+
+static const struct nla_policy
+br_mrp_start_in_test_policy[IFLA_BRIDGE_MRP_START_IN_TEST_MAX + 1] = {
+	[IFLA_BRIDGE_MRP_START_IN_TEST_UNSPEC]		= { .type = NLA_REJECT },
+	[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID]		= { .type = NLA_U32 },
+	[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL]	= { .type = NLA_U32 },
+	[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS]	= { .type = NLA_U32 },
+	[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]		= { .type = NLA_U32 },
+};
+
+static int br_mrp_start_in_test_parse(struct net_bridge *br,
+				      struct nlattr *attr,
+				      struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX + 1];
+	struct br_mrp_start_in_test test;
+	int err;
+
+	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_START_IN_TEST_MAX, attr,
+			       br_mrp_start_in_test_policy, extack);
+	if (err)
+		return err;
+
+	if (!tb[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID] ||
+	    !tb[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL] ||
+	    !tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS] ||
+	    !tb[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Missing attribute: IN_ID or INTERVAL or MAX_MISS or PERIOD");
+		return -EINVAL;
+	}
+
+	memset(&test, 0x0, sizeof(test));
+
+	test.in_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID]);
+	test.interval = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL]);
+	test.max_miss = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS]);
+	test.period = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]);
+
+	return br_mrp_start_in_test(br, &test);
+}
+
 int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p,
 		 struct nlattr *attr, int cmd, struct netlink_ext_ack *extack)
 {
@@ -301,6 +419,28 @@ int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p,
 			return err;
 	}
 
+	if (tb[IFLA_BRIDGE_MRP_IN_STATE]) {
+		err = br_mrp_in_state_parse(br, tb[IFLA_BRIDGE_MRP_IN_STATE],
+					    extack);
+		if (err)
+			return err;
+	}
+
+	if (tb[IFLA_BRIDGE_MRP_IN_ROLE]) {
+		err = br_mrp_in_role_parse(br, tb[IFLA_BRIDGE_MRP_IN_ROLE],
+					   extack);
+		if (err)
+			return err;
+	}
+
+	if (tb[IFLA_BRIDGE_MRP_START_IN_TEST]) {
+		err = br_mrp_start_in_test_parse(br,
+						 tb[IFLA_BRIDGE_MRP_START_IN_TEST],
+						 extack);
+		if (err)
+			return err;
+	}
+
 	return 0;
 }
 
@@ -334,6 +474,11 @@
 				     p->dev->ifindex))
 			goto nla_put_failure;
 
+		p = rcu_dereference(mrp->i_port);
+		if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_I_IFINDEX,
+				     p->dev->ifindex))
+			goto nla_put_failure;
+
 		if (nla_put_u16(skb, IFLA_BRIDGE_MRP_INFO_PRIO,
 				mrp->prio))
 			goto nla_put_failure;
@@ -353,6 +498,19 @@
 				mrp->test_monitor))
 			goto nla_put_failure;
 
+		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_STATE,
+				mrp->in_state))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_ROLE,
+				mrp->in_role))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_TEST_INTERVAL,
+				mrp->in_test_interval))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_TEST_MAX_MISS,
+				mrp->in_test_max_miss))
+			goto nla_put_failure;
+
 		nla_nest_end(skb, tb);
 	}
 	nla_nest_end(skb, mrp_tb);
@@ -368,7 +526,7 @@ nla_info_failure:
 	return -EMSGSIZE;
 }
 
-int br_mrp_port_open(struct net_device *dev, u8 loc)
+int br_mrp_ring_port_open(struct net_device *dev, u8 loc)
 {
 	struct net_bridge_port *p;
 	int err = 0;
@@ -389,3 +547,25 @@ int br_mrp_port_open(struct net_device *dev, u8 loc)
 out:
 	return err;
 }
+
+int br_mrp_in_port_open(struct net_device *dev, u8 loc)
+{
+	struct net_bridge_port *p;
+	int err = 0;
+
+	p = br_port_get_rcu(dev);
+	if (!p) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (loc)
+		p->flags |= BR_MRP_LOST_IN_CONT;
+	else
+		p->flags &= ~BR_MRP_LOST_IN_CONT;
+
+	br_ifinfo_notify(RTM_NEWLINK, NULL, p);
+
+out:
+	return err;
+}
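br_mrp_ring_port_open() and br_mrp_in_port_open() above are identical apart from the flag they toggle (BR_MRP_LOST_CONT vs BR_MRP_LOST_IN_CONT). A hedged refactoring sketch that both could call; this helper does not exist in the patch:

static int br_mrp_port_flag_set(struct net_device *dev, unsigned long flag,
				u8 loc)
{
	struct net_bridge_port *p = br_port_get_rcu(dev);

	if (!p)
		return -EINVAL;

	if (loc)	/* non-zero "loss of continuity" marks the ring open */
		p->flags |= flag;
	else
		p->flags &= ~flag;

	/* Push the updated port flags to listeners as an RTM_NEWLINK event */
	br_ifinfo_notify(RTM_NEWLINK, NULL, p);
	return 0;
}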
diff --git a/net/bridge/br_mrp_switchdev.c b/net/bridge/br_mrp_switchdev.c
index 0da68a0da4b5..ed547e03ace1 100644
--- a/net/bridge/br_mrp_switchdev.c
+++ b/net/bridge/br_mrp_switchdev.c
@@ -107,6 +107,68 @@ int br_mrp_switchdev_set_ring_state(struct net_bridge *br,
 	return 0;
 }
 
+int br_mrp_switchdev_set_in_role(struct net_bridge *br, struct br_mrp *mrp,
+				 u16 in_id, u32 ring_id,
+				 enum br_mrp_in_role_type role)
+{
+	struct switchdev_obj_in_role_mrp mrp_role = {
+		.obj.orig_dev = br->dev,
+		.obj.id = SWITCHDEV_OBJ_ID_IN_ROLE_MRP,
+		.in_role = role,
+		.in_id = mrp->in_id,
+		.ring_id = mrp->ring_id,
+		.i_port = rtnl_dereference(mrp->i_port)->dev,
+	};
+	int err;
+
+	if (role == BR_MRP_IN_ROLE_DISABLED)
+		err = switchdev_port_obj_del(br->dev, &mrp_role.obj);
+	else
+		err = switchdev_port_obj_add(br->dev, &mrp_role.obj, NULL);
+
+	return err;
+}
+
+int br_mrp_switchdev_set_in_state(struct net_bridge *br, struct br_mrp *mrp,
+				  enum br_mrp_in_state_type state)
+{
+	struct switchdev_obj_in_state_mrp mrp_state = {
+		.obj.orig_dev = br->dev,
+		.obj.id = SWITCHDEV_OBJ_ID_IN_STATE_MRP,
+		.in_state = state,
+		.in_id = mrp->in_id,
+	};
+	int err;
+
+	err = switchdev_port_obj_add(br->dev, &mrp_state.obj, NULL);
+
+	if (err && err != -EOPNOTSUPP)
+		return err;
+
+	return 0;
+}
+
+int br_mrp_switchdev_send_in_test(struct net_bridge *br, struct br_mrp *mrp,
+				  u32 interval, u8 max_miss, u32 period)
+{
+	struct switchdev_obj_in_test_mrp test = {
+		.obj.orig_dev = br->dev,
+		.obj.id = SWITCHDEV_OBJ_ID_IN_TEST_MRP,
+		.interval = interval,
+		.max_miss = max_miss,
+		.in_id = mrp->in_id,
+		.period = period,
+	};
+	int err;
+
+	if (interval == 0)
+		err = switchdev_port_obj_del(br->dev, &test.obj);
+	else
+		err = switchdev_port_obj_add(br->dev, &test.obj, NULL);
+
+	return err;
+}
+
 int br_mrp_port_switchdev_set_state(struct net_bridge_port *p,
 				    enum br_mrp_port_state_type state)
 {
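On the driver side, the three new objects arrive through the regular switchdev port-object add/del path. A hedged dispatch sketch, assuming SWITCHDEV_OBJ_IN_STATE_MRP()/SWITCHDEV_OBJ_IN_TEST_MRP() container_of helpers that follow the existing *_MRP macro pattern in include/net/switchdev.h; foo_port_obj_add() and the foo_mrp_* callees are placeholder driver hooks:

static int foo_port_obj_add(struct net_device *dev,
			    const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_IN_STATE_MRP: {
		const struct switchdev_obj_in_state_mrp *st =
			SWITCHDEV_OBJ_IN_STATE_MRP(obj);

		return foo_mrp_set_in_state(dev, st->in_id, st->in_state);
	}
	case SWITCHDEV_OBJ_ID_IN_TEST_MRP: {
		const struct switchdev_obj_in_test_mrp *test =
			SWITCHDEV_OBJ_IN_TEST_MRP(obj);

		/* interval == 0 never lands here: the bridge turns it into
		 * an object del, see br_mrp_switchdev_send_in_test() above
		 */
		return foo_mrp_start_in_test(dev, test->in_id, test->interval,
					     test->max_miss, test->period);
	}
	default:
		return -EOPNOTSUPP;
	}
}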
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index c532fa65c983..147d52596e17 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -152,6 +152,7 @@ static inline size_t br_port_info_size(void)
 #endif
 		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_GROUP_FWD_MASK */
 		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MRP_RING_OPEN */
+		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MRP_IN_OPEN */
 		+ 0;
 }
 
@@ -216,6 +217,8 @@ static int br_port_fill_attrs(struct sk_buff *skb,
 		       !!(p->flags & BR_NEIGH_SUPPRESS)) ||
 	    nla_put_u8(skb, IFLA_BRPORT_MRP_RING_OPEN,
 		       !!(p->flags & BR_MRP_LOST_CONT)) ||
+	    nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN,
+		       !!(p->flags & BR_MRP_LOST_IN_CONT)) ||
 	    nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)))
 		return -EMSGSIZE;
 
diff --git a/net/bridge/br_private_mrp.h b/net/bridge/br_private_mrp.h
index 315eb37d89f0..af0e9eff6549 100644
--- a/net/bridge/br_private_mrp.h
+++ b/net/bridge/br_private_mrp.h
@@ -12,8 +12,10 @@ struct br_mrp {
 
 	struct net_bridge_port __rcu	*p_port;
 	struct net_bridge_port __rcu	*s_port;
+	struct net_bridge_port __rcu	*i_port;
 
 	u32				ring_id;
+	u16				in_id;
 	u16				prio;
 
 	enum br_mrp_ring_role_type	ring_role;
@@ -21,6 +23,11 @@ struct br_mrp {
 	enum br_mrp_ring_state_type	ring_state;
 	u32				ring_transitions;
 
+	enum br_mrp_in_role_type	in_role;
+	u8				in_role_offloaded;
+	enum br_mrp_in_state_type	in_state;
+	u32				in_transitions;
+
 	struct delayed_work		test_work;
 	u32				test_interval;
 	unsigned long			test_end;
@@ -28,6 +35,12 @@ struct br_mrp {
 	u32				test_max_miss;
 	bool				test_monitor;
 
+	struct delayed_work		in_test_work;
+	u32				in_test_interval;
+	unsigned long			in_test_end;
+	u32				in_test_count_miss;
+	u32				in_test_max_miss;
+
 	u32				seq_id;
 
 	struct rcu_head			rcu;
@@ -44,6 +57,10 @@ int br_mrp_set_ring_state(struct net_bridge *br,
 			  struct br_mrp_ring_state *state);
 int br_mrp_set_ring_role(struct net_bridge *br, struct br_mrp_ring_role *role);
 int br_mrp_start_test(struct net_bridge *br, struct br_mrp_start_test *test);
+int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state);
+int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role);
+int br_mrp_start_in_test(struct net_bridge *br,
+			 struct br_mrp_start_in_test *test);
 
 /* br_mrp_switchdev.c */
 int br_mrp_switchdev_add(struct net_bridge *br, struct br_mrp *mrp);
@@ -59,8 +76,16 @@ int br_mrp_port_switchdev_set_state(struct net_bridge_port *p,
 				    enum br_mrp_port_state_type state);
 int br_mrp_port_switchdev_set_role(struct net_bridge_port *p,
 				   enum br_mrp_port_role_type role);
+int br_mrp_switchdev_set_in_role(struct net_bridge *br, struct br_mrp *mrp,
+				 u16 in_id, u32 ring_id,
+				 enum br_mrp_in_role_type role);
+int br_mrp_switchdev_set_in_state(struct net_bridge *br, struct br_mrp *mrp,
+				  enum br_mrp_in_state_type state);
+int br_mrp_switchdev_send_in_test(struct net_bridge *br, struct br_mrp *mrp,
+				  u32 interval, u8 max_miss, u32 period);
 
 /* br_mrp_netlink.c */
-int br_mrp_port_open(struct net_device *dev, u8 loc);
+int br_mrp_ring_port_open(struct net_device *dev, u8 loc);
+int br_mrp_in_port_open(struct net_device *dev, u8 loc);
 
 #endif /* _BR_PRIVATE_MRP_H */
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index cafedbbfefbe..781e482dc499 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -344,6 +344,7 @@ enum {
 	IFLA_BRPORT_ISOLATED,
 	IFLA_BRPORT_BACKUP_PORT,
 	IFLA_BRPORT_MRP_RING_OPEN,
+	IFLA_BRPORT_MRP_IN_OPEN,
 	__IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
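With IFLA_BRPORT_MRP_IN_OPEN exported, a monitoring daemon can pick the interconnect-open state out of the RTM_NEWLINK notifications that br_mrp_in_port_open() triggers. A hedged libmnl read-side sketch; the tb[] table is assumed to have been filled from the port's IFLA_PROTINFO nest:

#include <stdio.h>
#include <libmnl/libmnl.h>
#include <linux/if_link.h>

/* tb[] is indexed by IFLA_BRPORT_* and filled by the callback handed to
 * mnl_attr_parse_nested() on the IFLA_PROTINFO attribute.
 */
static void report_mrp_in_open(struct nlattr **tb)
{
	if (tb[IFLA_BRPORT_MRP_IN_OPEN] &&
	    mnl_attr_get_u8(tb[IFLA_BRPORT_MRP_IN_OPEN]))
		printf("MRP interconnect ring open\n");
}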