 drivers/net/ethernet/mellanox/mlx5/core/dev.c                      |   4
 drivers/net/ethernet/mellanox/mlx5/core/devlink.c                  |   3
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c                   |   3
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c                    |   2
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c                  | 107
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h                  |  17
 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c         | 143
 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c  |  38
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c                  |  59
 drivers/net/ethernet/mellanox/mlx5/core/main.c                     |  24
 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h                |   4
 drivers/net/ethernet/mellanox/mlx5/core/sriov.c                    |   3
 net/core/devlink.c                                                 |   3
 13 files changed, 263 insertions(+), 147 deletions(-)
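
For orientation before the per-file hunks: this change replaces the old mlx5_eswitch_enable(esw, mode) / mlx5_eswitch_update_num_of_vfs() pair with mlx5_eswitch_enable(esw, num_vfs) plus *_locked variants, all serialized by a new esw->mode_lock mutex. The following is a minimal caller sketch, not part of the patch; the example_* function names are hypothetical, while the mlx5 symbols are the ones introduced below. It assumes the driver-internal eswitch.h declarations are in scope.

	/*
	 * Sketch only: how callers use the reworked API after this change.
	 * The SRIOV path lets mlx5_eswitch_enable() take mode_lock itself;
	 * paths that already hold mode_lock (the devlink eswitch handlers)
	 * call the *_locked variants directly.
	 */
	static int example_sriov_enable(struct mlx5_core_dev *dev, int num_vfs)
	{
		/* Legacy mode; num_vfs > 0 also sets up the VF vports. */
		return mlx5_eswitch_enable(dev->priv.eswitch, num_vfs);
	}

	static int example_switch_to_offloads(struct mlx5_eswitch *esw)
	{
		int err;

		mutex_lock(&esw->mode_lock);
		mlx5_eswitch_disable_locked(esw, false);
		/*
		 * Re-enable in offloads mode with the current SRIOV VF count;
		 * MLX5_ESWITCH_IGNORE_NUM_VFS would leave the VF count untouched.
		 */
		err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
						 esw->dev->priv.sriov.num_vfs);
		mutex_unlock(&esw->mode_lock);
		return err;
	}
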
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 50862275544e..1972ddd12704 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -193,7 +193,7 @@ bool mlx5_device_registered(struct mlx5_core_dev *dev) return found; } -int mlx5_register_device(struct mlx5_core_dev *dev) +void mlx5_register_device(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; struct mlx5_interface *intf; @@ -203,8 +203,6 @@ int mlx5_register_device(struct mlx5_core_dev *dev) list_for_each_entry(intf, &intf_list, list) mlx5_add_device(intf, priv); mutex_unlock(&mlx5_intf_mutex); - - return 0; } void mlx5_unregister_device(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index b7bb81b8c49b..bdeb291f6b67 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -90,7 +90,8 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, { struct mlx5_core_dev *dev = devlink_priv(devlink); - return mlx5_unload_one(dev, false); + mlx5_unload_one(dev, false); + return 0; } static int mlx5_devlink_reload_up(struct devlink *devlink, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index a33d15156ed5..d7fa89276ea3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1246,8 +1246,7 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data, case TC_SETUP_CLSFLOWER: memcpy(&tmp, f, sizeof(*f)); - if (!mlx5_esw_chains_prios_supported(esw) || - tmp.common.chain_index) + if (!mlx5_esw_chains_prios_supported(esw)) return -EOPNOTSUPP; /* Re-use tc offload path by moving the ft flow to the diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 1b7d7f6509a0..901b5fa5568f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3058,7 +3058,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv, */ NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct"); - return -EOPNOTSUPP; + return false; } } else { actions = flow->nic_attr->action; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 8fc351240f4c..7f618a443bfd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -2067,12 +2067,54 @@ static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw) } } -int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) +static void +mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs) +{ + const u32 *out; + + WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE); + + if (num_vfs < 0) + return; + + if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) { + esw->esw_funcs.num_vfs = num_vfs; + return; + } + + out = mlx5_esw_query_functions(esw->dev); + if (IS_ERR(out)) + return; + + esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out, + host_params_context.host_num_of_vfs); + kvfree(out); +} + +/** + * mlx5_eswitch_enable_locked - Enable eswitch + * @esw: Pointer to eswitch + * @mode: Eswitch mode to enable + * @num_vfs: Enable eswitch for given number of VFs. This is optional. 
+ * Valid value are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS. + * Caller should pass num_vfs > 0 when enabling eswitch for + * vf vports. Caller should pass num_vfs = 0, when eswitch + * is enabled without sriov VFs or when caller + * is unaware of the sriov state of the host PF on ECPF based + * eswitch. Caller should pass < 0 when num_vfs should be + * completely ignored. This is typically the case when eswitch + * is enabled without sriov regardless of PF/ECPF system. + * mlx5_eswitch_enable_locked() Enables eswitch in either legacy or offloads + * mode. If num_vfs >=0 is provided, it setup VF related eswitch vports. + * It returns 0 on success or error code on failure. + */ +int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) { int err; - if (!ESW_ALLOWED(esw) || - !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { + lockdep_assert_held(&esw->mode_lock); + + if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { esw_warn(esw->dev, "FDB is not supported, aborting ...\n"); return -EOPNOTSUPP; } @@ -2085,6 +2127,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) mlx5_eswitch_get_devlink_param(esw); + mlx5_eswitch_update_num_of_vfs(esw, num_vfs); + esw_create_tsar(esw); esw->mode = mode; @@ -2121,11 +2165,34 @@ abort: return err; } -void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) +/** + * mlx5_eswitch_enable - Enable eswitch + * @esw: Pointer to eswitch + * @num_vfs: Enable eswitch swich for given number of VFs. + * Caller must pass num_vfs > 0 when enabling eswitch for + * vf vports. + * mlx5_eswitch_enable() returns 0 on success or error code on failure. + */ +int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) +{ + int ret; + + if (!ESW_ALLOWED(esw)) + return 0; + + mutex_lock(&esw->mode_lock); + ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); + mutex_unlock(&esw->mode_lock); + return ret; +} + +void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) { int old_mode; - if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE) + lockdep_assert_held_write(&esw->mode_lock); + + if (esw->mode == MLX5_ESWITCH_NONE) return; esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n", @@ -2154,6 +2221,16 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) mlx5_eswitch_clear_vf_vports_info(esw); } +void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) +{ + if (!ESW_ALLOWED(esw)) + return; + + mutex_lock(&esw->mode_lock); + mlx5_eswitch_disable_locked(esw, clear_vf); + mutex_unlock(&esw->mode_lock); +} + int mlx5_eswitch_init(struct mlx5_core_dev *dev) { struct mlx5_eswitch *esw; @@ -2205,6 +2282,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) hash_init(esw->offloads.mod_hdr.hlist); atomic64_set(&esw->offloads.num_flows, 0); mutex_init(&esw->state_lock); + mutex_init(&esw->mode_lock); mlx5_esw_for_all_vports(esw, i, vport) { vport->vport = mlx5_eswitch_index_to_vport_num(esw, i); @@ -2239,6 +2317,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); esw_offloads_cleanup_reps(esw); + mutex_destroy(&esw->mode_lock); mutex_destroy(&esw->state_lock); mutex_destroy(&esw->offloads.mod_hdr.lock); mutex_destroy(&esw->offloads.encap_tbl_lock); @@ -2811,22 +2890,4 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS); } -void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) -{ - const u32 *out; - 
WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE); - - if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) { - esw->esw_funcs.num_vfs = num_vfs; - return; - } - - out = mlx5_esw_query_functions(esw->dev); - if (IS_ERR(out)) - return; - - esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out, - host_params_context.host_num_of_vfs); - kvfree(out); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 95532b258c2b..39f42f985fbd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -258,6 +258,11 @@ struct mlx5_eswitch { */ struct mutex state_lock; + /* Protects eswitch mode change that occurs via one or more + * user commands, i.e. sriov state change, devlink commands. + */ + struct mutex mode_lock; + struct { bool enabled; u32 root_tsar_id; @@ -296,7 +301,11 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); -int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode); + +#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1) +int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs); +int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs); +void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf); void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf); int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, u16 vport, u8 mac[ETH_ALEN]); @@ -635,7 +644,6 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num); bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num); -void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs); int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data); int @@ -673,7 +681,7 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs); /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} -static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; } +static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; } static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {} static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; } static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } @@ -682,14 +690,11 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) return ERR_PTR(-EOPNOTSUPP); } -static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {} - static inline struct mlx5_flow_handle * esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) { return ERR_PTR(-EOPNOTSUPP); } - #endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 0b4b43ebae9a..612bc7d1cdcd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1069,6 +1069,9 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) struct mlx5_flow_spec *spec; void *misc; + if 
(!mlx5_eswitch_reg_c1_loopback_supported(esw)) + return ERR_PTR(-EOPNOTSUPP); + spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return ERR_PTR(-ENOMEM); @@ -1477,6 +1480,9 @@ static void esw_destroy_restore_table(struct mlx5_eswitch *esw) { struct mlx5_esw_offload *offloads = &esw->offloads; + if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) + return; + mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id); mlx5_destroy_flow_group(offloads->restore_group); mlx5_destroy_flow_table(offloads->ft_offloads_restore); @@ -1496,6 +1502,9 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw) u32 *flow_group_in; int err = 0; + if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) + return 0; + ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); if (!ns) { esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); @@ -1557,6 +1566,8 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw) esw->offloads.restore_group = g; esw->offloads.restore_copy_hdr_id = mod_hdr; + kvfree(flow_group_in); + return 0; err_mod_hdr: @@ -1581,13 +1592,14 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, return -EINVAL; } - mlx5_eswitch_disable(esw, false); - mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs); - err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS); + mlx5_eswitch_disable_locked(esw, false); + err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS, + esw->dev->priv.sriov.num_vfs); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to offloads"); - err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY); + err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, + MLX5_ESWITCH_IGNORE_NUM_VFS); if (err1) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch back to legacy"); @@ -2342,14 +2354,15 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) mutex_init(&esw->offloads.termtbl_mutex); mlx5_rdma_enable_roce(esw->dev); - err = esw_offloads_steering_init(esw); - if (err) - goto err_steering_init; err = esw_set_passing_vport_metadata(esw, true); if (err) goto err_vport_metadata; + err = esw_offloads_steering_init(esw); + if (err) + goto err_steering_init; + /* Representor will control the vport link state */ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; @@ -2371,9 +2384,9 @@ err_vports: esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); err_uplink: esw_set_passing_vport_metadata(esw, false); -err_vport_metadata: - esw_offloads_steering_cleanup(esw); err_steering_init: + esw_offloads_steering_cleanup(esw); +err_vport_metadata: mlx5_rdma_disable_roce(esw->dev); mutex_destroy(&esw->offloads.termtbl_mutex); return err; @@ -2384,11 +2397,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, { int err, err1; - mlx5_eswitch_disable(esw, false); - err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY); + mlx5_eswitch_disable_locked(esw, false); + err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, + MLX5_ESWITCH_IGNORE_NUM_VFS); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); - err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS); + err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS, + MLX5_ESWITCH_IGNORE_NUM_VFS); if (err1) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch back to offloads"); @@ -2494,17 +2509,23 @@ static int mlx5_eswitch_check(const struct mlx5_core_dev *dev) if(!MLX5_ESWITCH_MANAGER(dev)) return -EPERM; - if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE && - 
!mlx5_core_is_ecpf_esw_manager(dev)) - return -EOPNOTSUPP; - return 0; } +static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw) +{ + /* devlink commands in NONE eswitch mode are currently supported only + * on ECPF. + */ + return (esw->mode == MLX5_ESWITCH_NONE && + !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0; +} + int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_eswitch *esw = dev->priv.eswitch; u16 cur_mlx5_mode, mlx5_mode = 0; int err; @@ -2512,32 +2533,50 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (err) return err; - cur_mlx5_mode = dev->priv.eswitch->mode; - if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL; + mutex_lock(&esw->mode_lock); + err = eswitch_devlink_esw_mode_check(esw); + if (err) + goto unlock; + + cur_mlx5_mode = esw->mode; + if (cur_mlx5_mode == mlx5_mode) - return 0; + goto unlock; if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) - return esw_offloads_start(dev->priv.eswitch, extack); + err = esw_offloads_start(esw, extack); else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) - return esw_offloads_stop(dev->priv.eswitch, extack); + err = esw_offloads_stop(esw, extack); else - return -EINVAL; + err = -EINVAL; + +unlock: + mutex_unlock(&esw->mode_lock); + return err; } int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) { struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_eswitch *esw = dev->priv.eswitch; int err; err = mlx5_eswitch_check(dev); if (err) return err; - return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); + mutex_lock(&esw->mode_lock); + err = eswitch_devlink_esw_mode_check(dev->priv.eswitch); + if (err) + goto unlock; + + err = esw_mode_to_devlink(esw->mode, mode); +unlock: + mutex_unlock(&esw->mode_lock); + return err; } int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, @@ -2552,14 +2591,20 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, if (err) return err; + mutex_lock(&esw->mode_lock); + err = eswitch_devlink_esw_mode_check(esw); + if (err) + goto out; + switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) - return 0; + goto out; /* fall through */ case MLX5_CAP_INLINE_MODE_L2: NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto out; case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: break; } @@ -2567,7 +2612,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, if (atomic64_read(&esw->offloads.num_flows) > 0) { NL_SET_ERR_MSG_MOD(extack, "Can't set inline mode when flows are configured"); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto out; } err = esw_inline_mode_from_devlink(mode, &mlx5_mode); @@ -2584,6 +2630,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, } esw->offloads.inline_mode = mlx5_mode; + mutex_unlock(&esw->mode_lock); return 0; revert_inline_mode: @@ -2593,6 +2640,7 @@ revert_inline_mode: vport, esw->offloads.inline_mode); out: + mutex_unlock(&esw->mode_lock); return err; } @@ -2606,7 +2654,15 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) if (err) return err; - return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); + mutex_lock(&esw->mode_lock); + err = eswitch_devlink_esw_mode_check(esw); + if (err) + goto unlock; + + err = 
esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); +unlock: + mutex_unlock(&esw->mode_lock); + return err; } int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, @@ -2621,26 +2677,36 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, if (err) return err; + mutex_lock(&esw->mode_lock); + err = eswitch_devlink_esw_mode_check(esw); + if (err) + goto unlock; + if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) || - !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) - return -EOPNOTSUPP; + !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) { + err = -EOPNOTSUPP; + goto unlock; + } - if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) - return -EOPNOTSUPP; + if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) { + err = -EOPNOTSUPP; + goto unlock; + } if (esw->mode == MLX5_ESWITCH_LEGACY) { esw->offloads.encap = encap; - return 0; + goto unlock; } if (esw->offloads.encap == encap) - return 0; + goto unlock; if (atomic64_read(&esw->offloads.num_flows) > 0) { NL_SET_ERR_MSG_MOD(extack, "Can't set encapsulation when flows are configured"); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto unlock; } esw_destroy_offloads_fdb_tables(esw); @@ -2656,6 +2722,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, (void)esw_create_offloads_fdb_tables(esw, esw->nvports); } +unlock: + mutex_unlock(&esw->mode_lock); return err; } @@ -2670,7 +2738,14 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, if (err) return err; + mutex_lock(&esw->mode_lock); + err = eswitch_devlink_esw_mode_check(esw); + if (err) + goto unlock; + *encap = esw->offloads.encap; +unlock: + mutex_unlock(&esw->mode_lock); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c index 1e275a8441de..184cea62254f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c @@ -280,7 +280,8 @@ create_fdb_chain_restore(struct fdb_chain *fdb_chain) u32 index; int err; - if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw)) + if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw) || + !mlx5_esw_chains_prios_supported(esw)) return 0; err = mapping_add(esw_chains_mapping(esw), &fdb_chain->chain, &index); @@ -335,6 +336,18 @@ err_mod_hdr: return err; } +static void destroy_fdb_chain_restore(struct fdb_chain *fdb_chain) +{ + struct mlx5_eswitch *esw = fdb_chain->esw; + + if (!fdb_chain->miss_modify_hdr) + return; + + mlx5_del_flow_rules(fdb_chain->restore_rule); + mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr); + mapping_remove(esw_chains_mapping(esw), fdb_chain->id); +} + static struct fdb_chain * mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain) { @@ -361,11 +374,7 @@ mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain) return fdb_chain; err_insert: - if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) { - mlx5_del_flow_rules(fdb_chain->restore_rule); - mlx5_modify_header_dealloc(esw->dev, - fdb_chain->miss_modify_hdr); - } + destroy_fdb_chain_restore(fdb_chain); err_restore: kvfree(fdb_chain); return ERR_PTR(err); @@ -379,14 +388,7 @@ mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain) rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node, chain_params); - if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) { - mlx5_del_flow_rules(fdb_chain->restore_rule); - 
mlx5_modify_header_dealloc(esw->dev, - fdb_chain->miss_modify_hdr); - - mapping_remove(esw_chains_mapping(esw), fdb_chain->id); - } - + destroy_fdb_chain_restore(fdb_chain); kvfree(fdb_chain); } @@ -423,7 +425,7 @@ mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain, dest.ft = next_fdb; if (next_fdb == tc_end_fdb(esw) && - fdb_modify_header_fwd_to_table_supported(esw)) { + mlx5_esw_chains_prios_supported(esw)) { act.modify_hdr = fdb_chain->miss_modify_hdr; act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; } @@ -728,7 +730,8 @@ mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw) struct mlx5_flow_table * mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw) { - int chain, prio, level, err; + u32 chain, prio, level; + int err; if (!fdb_ignore_flow_level_supported(esw)) { err = -EOPNOTSUPP; @@ -783,6 +786,9 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw) esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) { esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n"); + } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) { + esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; + esw_warn(dev, "Tc chains and priorities offload aren't supported\n"); } else if (!fdb_modify_header_fwd_to_table_supported(esw)) { /* Disabled when ttl workaround is needed, e.g * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index c93bd55fab06..62ce2b9417ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1322,7 +1322,7 @@ add_rule_fte(struct fs_fte *fte, fte->node.active = true; fte->status |= FS_FTE_STATUS_EXISTING; - atomic_inc(&fte->node.version); + atomic_inc(&fg->node.version); out: return handle; @@ -1577,28 +1577,19 @@ struct match_list { struct mlx5_flow_group *g; }; -struct match_list_head { - struct list_head list; - struct match_list first; -}; - -static void free_match_list(struct match_list_head *head, bool ft_locked) +static void free_match_list(struct match_list *head, bool ft_locked) { - if (!list_empty(&head->list)) { - struct match_list *iter, *match_tmp; + struct match_list *iter, *match_tmp; - list_del(&head->first.list); - tree_put_node(&head->first.g->node, ft_locked); - list_for_each_entry_safe(iter, match_tmp, &head->list, - list) { - tree_put_node(&iter->g->node, ft_locked); - list_del(&iter->list); - kfree(iter); - } + list_for_each_entry_safe(iter, match_tmp, &head->list, + list) { + tree_put_node(&iter->g->node, ft_locked); + list_del(&iter->list); + kfree(iter); } } -static int build_match_list(struct match_list_head *match_head, +static int build_match_list(struct match_list *match_head, struct mlx5_flow_table *ft, const struct mlx5_flow_spec *spec, bool ft_locked) @@ -1615,14 +1606,8 @@ static int build_match_list(struct match_list_head *match_head, rhl_for_each_entry_rcu(g, tmp, list, hash) { struct match_list *curr_match; - if (likely(list_empty(&match_head->list))) { - if (!tree_get_node(&g->node)) - continue; - match_head->first.g = g; - list_add_tail(&match_head->first.list, - &match_head->list); + if (unlikely(!tree_get_node(&g->node))) continue; - } curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC); if (!curr_match) { @@ -1630,10 +1615,6 @@ static int build_match_list(struct match_list_head *match_head, err = -ENOMEM; goto out; } - if 
(!tree_get_node(&g->node)) { - kfree(curr_match); - continue; - } curr_match->g = g; list_add_tail(&curr_match->list, &match_head->list); } @@ -1699,7 +1680,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, struct match_list *iter; bool take_write = false; struct fs_fte *fte; - u64 version; + u64 version = 0; int err; fte = alloc_fte(ft, spec, flow_act); @@ -1707,10 +1688,12 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, return ERR_PTR(-ENOMEM); search_again_locked: - version = matched_fgs_get_version(match_head); if (flow_act->flags & FLOW_ACT_NO_APPEND) goto skip_search; - /* Try to find a fg that already contains a matching fte */ + version = matched_fgs_get_version(match_head); + /* Try to find an fte with identical match value and attempt update its + * action. + */ list_for_each_entry(iter, match_head, list) { struct fs_fte *fte_tmp; @@ -1738,10 +1721,12 @@ skip_search: goto out; } - /* Check the fgs version, for case the new FTE with the - * same values was added while the fgs weren't locked + /* Check the fgs version. If version have changed it could be that an + * FTE with the same match value was added while the fgs weren't + * locked. */ - if (version != matched_fgs_get_version(match_head)) { + if (!(flow_act->flags & FLOW_ACT_NO_APPEND) && + version != matched_fgs_get_version(match_head)) { take_write = true; goto search_again_locked; } @@ -1785,9 +1770,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, { struct mlx5_flow_steering *steering = get_steering(&ft->node); - struct mlx5_flow_group *g; struct mlx5_flow_handle *rule; - struct match_list_head match_head; + struct match_list match_head; + struct mlx5_flow_group *g; bool take_write = false; struct fs_fte *fte; int version; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 204a26bf0a5f..4a9d9cae8628 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1211,15 +1211,10 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) goto err_devlink_reg; } - if (mlx5_device_registered(dev)) { + if (mlx5_device_registered(dev)) mlx5_attach_device(dev); - } else { - err = mlx5_register_device(dev); - if (err) { - mlx5_core_err(dev, "register device failed %d\n", err); - goto err_reg_dev; - } - } + else + mlx5_register_device(dev); set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); out: @@ -1227,9 +1222,6 @@ out: return err; -err_reg_dev: - if (boot) - mlx5_devlink_unregister(priv_to_devlink(dev)); err_devlink_reg: mlx5_unload(dev); err_load: @@ -1243,7 +1235,7 @@ function_teardown: return err; } -int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) +void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) { if (cleanup) { mlx5_unregister_device(dev); @@ -1272,7 +1264,6 @@ int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) mlx5_function_teardown(dev, cleanup); out: mutex_unlock(&dev->intf_state_mutex); - return 0; } static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) @@ -1393,12 +1384,7 @@ static void remove_one(struct pci_dev *pdev) mlx5_crdump_disable(dev); mlx5_devlink_unregister(devlink); - if (mlx5_unload_one(dev, true)) { - mlx5_core_err(dev, "mlx5_unload_one failed\n"); - mlx5_health_flush(dev); - return; - } - + mlx5_unload_one(dev, true); mlx5_pci_close(dev); mlx5_mdev_uninit(dev); mlx5_devlink_free(devlink); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 
da67b28d6e23..a8fb43a85d1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -182,7 +182,7 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv); void mlx5_attach_device(struct mlx5_core_dev *dev); void mlx5_detach_device(struct mlx5_core_dev *dev); bool mlx5_device_registered(struct mlx5_core_dev *dev); -int mlx5_register_device(struct mlx5_core_dev *dev); +void mlx5_register_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev); void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol); void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol); @@ -244,6 +244,6 @@ enum { u8 mlx5_get_nic_state(struct mlx5_core_dev *dev); void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state); -int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup); +void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup); int mlx5_load_one(struct mlx5_core_dev *dev, bool boot); #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 03f037811f1d..3094d20297a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -77,8 +77,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) if (!MLX5_ESWITCH_MANAGER(dev)) goto enable_vfs_hca; - mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs); - err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY); + err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs); if (err) { mlx5_core_warn(dev, "failed to enable eswitch SRIOV (%d)\n", err); diff --git a/net/core/devlink.c b/net/core/devlink.c index 73bb8fbe3393..a9036af7e002 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -6187,7 +6187,8 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_eswitch_get_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + DEVLINK_NL_FLAG_NO_LOCK, }, { .cmd = DEVLINK_CMD_ESWITCH_SET, |
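
A companion sketch, again not part of the patch, modeled on the new mlx5_devlink_eswitch_mode_get() in eswitch_offloads.c: every devlink eswitch callback now takes esw->mode_lock, rejects NONE mode on non-ECPF devices via eswitch_devlink_esw_mode_check(), does its work, and unlocks. This driver-side serialization is also why DEVLINK_CMD_ESWITCH_GET gains DEVLINK_NL_FLAG_NO_LOCK in net/core/devlink.c: the handler no longer relies on the devlink instance lock for these paths. The example_* name below is hypothetical.

	/* Sketch of the devlink handler pattern introduced by this change. */
	static int example_eswitch_mode_get(struct devlink *devlink, u16 *mode)
	{
		struct mlx5_core_dev *dev = devlink_priv(devlink);
		struct mlx5_eswitch *esw = dev->priv.eswitch;
		int err;

		mutex_lock(&esw->mode_lock);
		err = eswitch_devlink_esw_mode_check(esw);
		if (err)
			goto unlock;

		err = esw_mode_to_devlink(esw->mode, mode);
	unlock:
		mutex_unlock(&esw->mode_lock);
		return err;
	}
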