Diffstat (limited to 'drivers/infiniband/hw/mlx5/main.c')
-rw-r--r-- | drivers/infiniband/hw/mlx5/main.c | 644
1 file changed, 388 insertions(+), 256 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index e9c428071df3..94fe253d4956 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -60,6 +60,7 @@ #include "mlx5_ib.h" #include "ib_rep.h" #include "cmd.h" +#include "srq.h" #include <linux/mlx5/fs_helpers.h> #include <linux/mlx5/accel.h> #include <rdma/uverbs_std_types.h> @@ -82,10 +83,13 @@ static char mlx5_version[] = struct mlx5_ib_event_work { struct work_struct work; - struct mlx5_core_dev *dev; - void *context; - enum mlx5_dev_event event; - unsigned long param; + union { + struct mlx5_ib_dev *dev; + struct mlx5_ib_multiport_info *mpi; + }; + bool is_slave; + unsigned int event; + void *param; }; enum { @@ -146,7 +150,7 @@ static int get_port_state(struct ib_device *ibdev, int ret; memset(&attr, 0, sizeof(attr)); - ret = ibdev->query_port(ibdev, port_num, &attr); + ret = ibdev->ops.query_port(ibdev, port_num, &attr); if (!ret) *state = attr.state; return ret; @@ -168,7 +172,6 @@ static int mlx5_netdev_event(struct notifier_block *this, switch (event) { case NETDEV_REGISTER: - case NETDEV_UNREGISTER: write_lock(&roce->netdev_lock); if (ibdev->rep) { struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch; @@ -177,15 +180,20 @@ static int mlx5_netdev_event(struct notifier_block *this, rep_ndev = mlx5_ib_get_rep_netdev(esw, ibdev->rep->vport); if (rep_ndev == ndev) - roce->netdev = (event == NETDEV_UNREGISTER) ? - NULL : ndev; + roce->netdev = ndev; } else if (ndev->dev.parent == &mdev->pdev->dev) { - roce->netdev = (event == NETDEV_UNREGISTER) ? - NULL : ndev; + roce->netdev = ndev; } write_unlock(&roce->netdev_lock); break; + case NETDEV_UNREGISTER: + write_lock(&roce->netdev_lock); + if (roce->netdev == ndev) + roce->netdev = NULL; + write_unlock(&roce->netdev_lock); + break; + case NETDEV_CHANGE: case NETDEV_UP: case NETDEV_DOWN: { @@ -441,7 +449,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, if (!ndev) goto out; - if (mlx5_lag_is_active(dev->mdev)) { + if (dev->lag_active) { rcu_read_lock(); upper = netdev_master_upper_dev_get_rcu(ndev); if (upper) { @@ -1014,6 +1022,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, if (MLX5_CAP_GEN(mdev, cqe_128_always)) resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD; + if (MLX5_CAP_GEN(mdev, qp_packet_based)) + resp.flags |= + MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE; } if (field_avail(typeof(resp), sw_parsing_caps, @@ -1094,31 +1105,28 @@ enum mlx5_ib_width { MLX5_IB_WIDTH_12X = 1 << 4 }; -static int translate_active_width(struct ib_device *ibdev, u8 active_width, +static void translate_active_width(struct ib_device *ibdev, u8 active_width, u8 *ib_width) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - int err = 0; - if (active_width & MLX5_IB_WIDTH_1X) { + if (active_width & MLX5_IB_WIDTH_1X) *ib_width = IB_WIDTH_1X; - } else if (active_width & MLX5_IB_WIDTH_2X) { - mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n", - (int)active_width); - err = -EINVAL; - } else if (active_width & MLX5_IB_WIDTH_4X) { + else if (active_width & MLX5_IB_WIDTH_2X) + *ib_width = IB_WIDTH_2X; + else if (active_width & MLX5_IB_WIDTH_4X) *ib_width = IB_WIDTH_4X; - } else if (active_width & MLX5_IB_WIDTH_8X) { + else if (active_width & MLX5_IB_WIDTH_8X) *ib_width = IB_WIDTH_8X; - } else if (active_width & MLX5_IB_WIDTH_12X) { + else if (active_width & MLX5_IB_WIDTH_12X) *ib_width = IB_WIDTH_12X; - } else { - mlx5_ib_dbg(dev, "Invalid active_width %d\n", + else { + mlx5_ib_dbg(dev, 
"Invalid active_width %d, setting width to default value: 4x\n", (int)active_width); - err = -EINVAL; + *ib_width = IB_WIDTH_4X; } - return err; + return; } static int mlx5_mtu_to_ib_mtu(int mtu) @@ -1221,14 +1229,15 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, props->subnet_timeout = rep->subnet_timeout; props->init_type_reply = rep->init_type_reply; + if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP) + props->port_cap_flags2 = rep->cap_mask2; + err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port); if (err) goto out; - err = translate_active_width(ibdev, ib_link_width_oper, - &props->active_width); - if (err) - goto out; + translate_active_width(ibdev, ib_link_width_oper, &props->active_width); + err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port); if (err) goto out; @@ -1759,7 +1768,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, #endif if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) { - err = mlx5_ib_devx_create(dev); + err = mlx5_ib_devx_create(dev, true); if (err < 0) goto out_uars; context->devx_uid = err; @@ -1851,7 +1860,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, context->lib_caps = req.lib_caps; print_lib_caps(dev, context->lib_caps); - if (mlx5_lag_is_active(dev->mdev)) { + if (dev->lag_active) { u8 port = mlx5_core_native_port_num(dev->mdev); atomic_set(&context->tx_port_affinity, @@ -2676,11 +2685,11 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c, ntohs(ib_spec->gre.val.protocol)); memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c, - gre_key_h), + gre_key.nvgre.hi), &ib_spec->gre.mask.key, sizeof(ib_spec->gre.mask.key)); memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v, - gre_key_h), + gre_key.nvgre.hi), &ib_spec->gre.val.key, sizeof(ib_spec->gre.val.key)); break; @@ -3713,7 +3722,8 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev, struct mlx5_flow_destination *dst, struct mlx5_ib_flow_matcher *fs_matcher, struct mlx5_flow_act *flow_act, - void *cmd_in, int inlen) + void *cmd_in, int inlen, + int dst_num) { struct mlx5_ib_flow_handler *handler; struct mlx5_flow_spec *spec; @@ -3735,7 +3745,7 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev, spec->match_criteria_enable = fs_matcher->match_criteria_enable; handler->rule = mlx5_add_flow_rules(ft, spec, - flow_act, dst, 1); + flow_act, dst, dst_num); if (IS_ERR(handler->rule)) { err = PTR_ERR(handler->rule); @@ -3798,12 +3808,14 @@ struct mlx5_ib_flow_handler * mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher, struct mlx5_flow_act *flow_act, + u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type) { struct mlx5_flow_destination *dst; struct mlx5_ib_flow_prio *ft_prio; struct mlx5_ib_flow_handler *handler; + int dst_num = 0; bool mcast; int err; @@ -3813,7 +3825,7 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev, if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO) return ERR_PTR(-ENOMEM); - dst = kzalloc(sizeof(*dst), GFP_KERNEL); + dst = kzalloc(sizeof(*dst) * 2, GFP_KERNEL); if (!dst) return ERR_PTR(-ENOMEM); @@ -3827,20 +3839,28 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev, } if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) { - dst->type = dest_type; - dst->tir_num = dest_id; + dst[dst_num].type = dest_type; + dst[dst_num].tir_num = dest_id; flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) { - dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM; - 
dst->ft_num = dest_id; + dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM; + dst[dst_num].ft_num = dest_id; flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; } else { - dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT; + dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT; flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; } + dst_num++; + + if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dst[dst_num].counter_id = counter_id; + dst_num++; + } + handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act, - cmd_in, inlen); + cmd_in, inlen, dst_num); if (IS_ERR(handler)) { err = PTR_ERR(handler); @@ -4233,6 +4253,63 @@ static void delay_drop_handler(struct work_struct *work) mutex_unlock(&delay_drop->lock); } +static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, + struct ib_event *ibev) +{ + switch (eqe->sub_type) { + case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT: + schedule_work(&ibdev->delay_drop.delay_drop_work); + break; + default: /* do nothing */ + return; + } +} + +static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, + struct ib_event *ibev) +{ + u8 port = (eqe->data.port.port >> 4) & 0xf; + + ibev->element.port_num = port; + + switch (eqe->sub_type) { + case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: + case MLX5_PORT_CHANGE_SUBTYPE_DOWN: + case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: + /* In RoCE, port up/down events are handled in + * mlx5_netdev_event(). + */ + if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == + IB_LINK_LAYER_ETHERNET) + return -EINVAL; + + ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ? + IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; + break; + + case MLX5_PORT_CHANGE_SUBTYPE_LID: + ibev->event = IB_EVENT_LID_CHANGE; + break; + + case MLX5_PORT_CHANGE_SUBTYPE_PKEY: + ibev->event = IB_EVENT_PKEY_CHANGE; + schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); + break; + + case MLX5_PORT_CHANGE_SUBTYPE_GUID: + ibev->event = IB_EVENT_GID_CHANGE; + break; + + case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: + ibev->event = IB_EVENT_CLIENT_REREGISTER; + break; + default: + return -EINVAL; + } + + return 0; +} + static void mlx5_ib_handle_event(struct work_struct *_work) { struct mlx5_ib_event_work *work = @@ -4240,65 +4317,37 @@ static void mlx5_ib_handle_event(struct work_struct *_work) struct mlx5_ib_dev *ibdev; struct ib_event ibev; bool fatal = false; - u8 port = (u8)work->param; - if (mlx5_core_is_mp_slave(work->dev)) { - ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); + if (work->is_slave) { + ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi); if (!ibdev) goto out; } else { - ibdev = work->context; + ibdev = work->dev; } switch (work->event) { case MLX5_DEV_EVENT_SYS_ERROR: ibev.event = IB_EVENT_DEVICE_FATAL; mlx5_ib_handle_internal_error(ibdev); + ibev.element.port_num = (u8)(unsigned long)work->param; fatal = true; break; - - case MLX5_DEV_EVENT_PORT_UP: - case MLX5_DEV_EVENT_PORT_DOWN: - case MLX5_DEV_EVENT_PORT_INITIALIZED: - /* In RoCE, port up/down events are handled in - * mlx5_netdev_event(). - */ - if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == - IB_LINK_LAYER_ETHERNET) + case MLX5_EVENT_TYPE_PORT_CHANGE: + if (handle_port_change(ibdev, work->param, &ibev)) goto out; - - ibev.event = (work->event == MLX5_DEV_EVENT_PORT_UP) ? 
- IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; - break; - - case MLX5_DEV_EVENT_LID_CHANGE: - ibev.event = IB_EVENT_LID_CHANGE; - break; - - case MLX5_DEV_EVENT_PKEY_CHANGE: - ibev.event = IB_EVENT_PKEY_CHANGE; - schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); - break; - - case MLX5_DEV_EVENT_GUID_CHANGE: - ibev.event = IB_EVENT_GID_CHANGE; break; - - case MLX5_DEV_EVENT_CLIENT_REREG: - ibev.event = IB_EVENT_CLIENT_REREGISTER; - break; - case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT: - schedule_work(&ibdev->delay_drop.delay_drop_work); - goto out; + case MLX5_EVENT_TYPE_GENERAL_EVENT: + handle_general_event(ibdev, work->param, &ibev); + /* fall through */ default: goto out; } - ibev.device = &ibdev->ib_dev; - ibev.element.port_num = port; + ibev.device = &ibdev->ib_dev; - if (!rdma_is_port_valid(&ibdev->ib_dev, port)) { - mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); + if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) { + mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num); goto out; } @@ -4311,22 +4360,43 @@ out: kfree(work); } -static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, - enum mlx5_dev_event event, unsigned long param) +static int mlx5_ib_event(struct notifier_block *nb, + unsigned long event, void *param) { struct mlx5_ib_event_work *work; work = kmalloc(sizeof(*work), GFP_ATOMIC); if (!work) - return; + return NOTIFY_DONE; INIT_WORK(&work->work, mlx5_ib_handle_event); - work->dev = dev; + work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events); + work->is_slave = false; work->param = param; - work->context = context; work->event = event; queue_work(mlx5_ib_event_wq, &work->work); + + return NOTIFY_OK; +} + +static int mlx5_ib_event_slave_port(struct notifier_block *nb, + unsigned long event, void *param) +{ + struct mlx5_ib_event_work *work; + + work = kmalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return NOTIFY_DONE; + + INIT_WORK(&work->work, mlx5_ib_handle_event); + work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events); + work->is_slave = true; + work->param = param; + work->event = event; + queue_work(mlx5_ib_event_wq, &work->work); + + return NOTIFY_OK; } static int set_has_smi_cap(struct mlx5_ib_dev *dev) @@ -4794,7 +4864,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev) struct mlx5_flow_table *ft; int err; - if (!ns || !mlx5_lag_is_active(mdev)) + if (!ns || !mlx5_lag_is_roce(mdev)) return 0; err = mlx5_cmd_create_vport_lag(mdev); @@ -4808,6 +4878,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev) } dev->flow_db->lag_demux_ft = ft; + dev->lag_active = true; return 0; err_destroy_vport_lag: @@ -4819,7 +4890,9 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; - if (dev->flow_db->lag_demux_ft) { + if (dev->lag_active) { + dev->lag_active = false; + mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft); dev->flow_db->lag_demux_ft = NULL; @@ -5045,6 +5118,9 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) { int err = 0; int i; + bool is_shared; + + is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0; for (i = 0; i < dev->num_ports; i++) { err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts); @@ -5054,8 +5130,10 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) mlx5_ib_fill_counters(dev, dev->port[i].cnts.names, dev->port[i].cnts.offsets); - err = mlx5_core_alloc_q_counter(dev->mdev, - &dev->port[i].cnts.set_id); + err = mlx5_cmd_alloc_q_counter(dev->mdev, + 
&dev->port[i].cnts.set_id, + is_shared ? + MLX5_SHARED_RESOURCE_UID : 0); if (err) { mlx5_ib_warn(dev, "couldn't allocate queue counter for port %d, err %d\n", @@ -5332,14 +5410,6 @@ static void init_delay_drop(struct mlx5_ib_dev *dev) mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n"); } -static const struct cpumask * -mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector) -{ - struct mlx5_ib_dev *dev = to_mdev(ibdev); - - return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector); -} - /* The mlx5_ib_multiport_mutex should be held when calling this function */ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, struct mlx5_ib_multiport_info *mpi) @@ -5357,6 +5427,11 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, spin_unlock(&port->mp.mpi_lock); return; } + + if (mpi->mdev_events.notifier_call) + mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); + mpi->mdev_events.notifier_call = NULL; + mpi->ibdev = NULL; spin_unlock(&port->mp.mpi_lock); @@ -5412,6 +5487,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, ibdev->port[port_num].mp.mpi = mpi; mpi->ibdev = ibdev; + mpi->mdev_events.notifier_call = NULL; spin_unlock(&ibdev->port[port_num].mp.mpi_lock); err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev); @@ -5429,6 +5505,9 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, goto unbind; } + mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port; + mlx5_notifier_register(mpi->mdev, &mpi->mdev_events); + err = mlx5_ib_init_cong_debugfs(ibdev, port_num); if (err) goto unbind; @@ -5558,30 +5637,17 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE( UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, enum mlx5_ib_uapi_flow_action_flags)); -static int populate_specs_root(struct mlx5_ib_dev *dev) -{ - const struct uverbs_object_tree_def **trees = dev->driver_trees; - size_t num_trees = 0; - - if (mlx5_accel_ipsec_device_caps(dev->mdev) & - MLX5_ACCEL_IPSEC_CAP_DEVICE) - trees[num_trees++] = &mlx5_ib_flow_action; - - if (MLX5_CAP_DEV_MEM(dev->mdev, memic)) - trees[num_trees++] = &mlx5_ib_dm; - - if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) & - MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) - trees[num_trees++] = mlx5_ib_get_devx_tree(); - - num_trees += mlx5_ib_get_flow_trees(trees + num_trees); - - WARN_ON(num_trees >= ARRAY_SIZE(dev->driver_trees)); - trees[num_trees] = NULL; - dev->ib_dev.driver_specs = trees; +static const struct uapi_definition mlx5_ib_defs[] = { +#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) + UAPI_DEF_CHAIN(mlx5_ib_devx_defs), + UAPI_DEF_CHAIN(mlx5_ib_flow_defs), +#endif - return 0; -} + UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, + &mlx5_ib_flow_action), + UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm), + {} +}; static int mlx5_ib_read_counters(struct ib_counters *counters, struct ib_counters_read_attr *read_attr, @@ -5658,6 +5724,8 @@ void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) mlx5_ib_cleanup_multiport_master(dev); #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING cleanup_srcu_struct(&dev->mr_srcu); + drain_workqueue(dev->advise_mr_wq); + destroy_workqueue(dev->advise_mr_wq); #endif kfree(dev->port); } @@ -5701,8 +5769,7 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) dev->ib_dev.node_type = RDMA_NODE_IB_CA; dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; dev->ib_dev.phys_port_cnt = dev->num_ports; - dev->ib_dev.num_comp_vectors = - dev->mdev->priv.eq_table.num_comp_vectors; + dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev); 
dev->ib_dev.dev.parent = &mdev->pdev->dev; mutex_init(&dev->cap_mask_mutex); @@ -5713,9 +5780,17 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) dev->memic.dev = mdev; #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + dev->advise_mr_wq = alloc_ordered_workqueue("mlx5_ib_advise_mr_wq", 0); + if (!dev->advise_mr_wq) { + err = -ENOMEM; + goto err_mp; + } + err = init_srcu_struct(&dev->mr_srcu); - if (err) - goto err_free_port; + if (err) { + destroy_workqueue(dev->advise_mr_wq); + goto err_mp; + } #endif return 0; @@ -5759,6 +5834,94 @@ static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev) kfree(dev->flow_db); } +static const struct ib_device_ops mlx5_ib_dev_ops = { + .add_gid = mlx5_ib_add_gid, + .alloc_mr = mlx5_ib_alloc_mr, + .alloc_pd = mlx5_ib_alloc_pd, + .alloc_ucontext = mlx5_ib_alloc_ucontext, + .attach_mcast = mlx5_ib_mcg_attach, + .check_mr_status = mlx5_ib_check_mr_status, + .create_ah = mlx5_ib_create_ah, + .create_counters = mlx5_ib_create_counters, + .create_cq = mlx5_ib_create_cq, + .create_flow = mlx5_ib_create_flow, + .create_qp = mlx5_ib_create_qp, + .create_srq = mlx5_ib_create_srq, + .dealloc_pd = mlx5_ib_dealloc_pd, + .dealloc_ucontext = mlx5_ib_dealloc_ucontext, + .del_gid = mlx5_ib_del_gid, + .dereg_mr = mlx5_ib_dereg_mr, + .destroy_ah = mlx5_ib_destroy_ah, + .destroy_counters = mlx5_ib_destroy_counters, + .destroy_cq = mlx5_ib_destroy_cq, + .destroy_flow = mlx5_ib_destroy_flow, + .destroy_flow_action = mlx5_ib_destroy_flow_action, + .destroy_qp = mlx5_ib_destroy_qp, + .destroy_srq = mlx5_ib_destroy_srq, + .detach_mcast = mlx5_ib_mcg_detach, + .disassociate_ucontext = mlx5_ib_disassociate_ucontext, + .drain_rq = mlx5_ib_drain_rq, + .drain_sq = mlx5_ib_drain_sq, + .get_dev_fw_str = get_dev_fw_str, + .get_dma_mr = mlx5_ib_get_dma_mr, + .get_link_layer = mlx5_ib_port_link_layer, + .map_mr_sg = mlx5_ib_map_mr_sg, + .mmap = mlx5_ib_mmap, + .modify_cq = mlx5_ib_modify_cq, + .modify_device = mlx5_ib_modify_device, + .modify_port = mlx5_ib_modify_port, + .modify_qp = mlx5_ib_modify_qp, + .modify_srq = mlx5_ib_modify_srq, + .poll_cq = mlx5_ib_poll_cq, + .post_recv = mlx5_ib_post_recv, + .post_send = mlx5_ib_post_send, + .post_srq_recv = mlx5_ib_post_srq_recv, + .process_mad = mlx5_ib_process_mad, + .query_ah = mlx5_ib_query_ah, + .query_device = mlx5_ib_query_device, + .query_gid = mlx5_ib_query_gid, + .query_pkey = mlx5_ib_query_pkey, + .query_qp = mlx5_ib_query_qp, + .query_srq = mlx5_ib_query_srq, + .read_counters = mlx5_ib_read_counters, + .reg_user_mr = mlx5_ib_reg_user_mr, + .req_notify_cq = mlx5_ib_arm_cq, + .rereg_user_mr = mlx5_ib_rereg_user_mr, + .resize_cq = mlx5_ib_resize_cq, +}; + +static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = { + .create_flow_action_esp = mlx5_ib_create_flow_action_esp, + .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp, +}; + +static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = { + .rdma_netdev_get_params = mlx5_ib_rn_get_params, +}; + +static const struct ib_device_ops mlx5_ib_dev_sriov_ops = { + .get_vf_config = mlx5_ib_get_vf_config, + .get_vf_stats = mlx5_ib_get_vf_stats, + .set_vf_guid = mlx5_ib_set_vf_guid, + .set_vf_link_state = mlx5_ib_set_vf_link_state, +}; + +static const struct ib_device_ops mlx5_ib_dev_mw_ops = { + .alloc_mw = mlx5_ib_alloc_mw, + .dealloc_mw = mlx5_ib_dealloc_mw, +}; + +static const struct ib_device_ops mlx5_ib_dev_xrc_ops = { + .alloc_xrcd = mlx5_ib_alloc_xrcd, + .dealloc_xrcd = mlx5_ib_dealloc_xrcd, +}; + +static const struct ib_device_ops mlx5_ib_dev_dm_ops = { 
+ .alloc_dm = mlx5_ib_alloc_dm, + .dealloc_dm = mlx5_ib_dealloc_dm, + .reg_dm_mr = mlx5_ib_reg_dm_mr, +}; + int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; @@ -5797,104 +5960,45 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) | (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) | (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) | - (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ); - - dev->ib_dev.query_device = mlx5_ib_query_device; - dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer; - dev->ib_dev.query_gid = mlx5_ib_query_gid; - dev->ib_dev.add_gid = mlx5_ib_add_gid; - dev->ib_dev.del_gid = mlx5_ib_del_gid; - dev->ib_dev.query_pkey = mlx5_ib_query_pkey; - dev->ib_dev.modify_device = mlx5_ib_modify_device; - dev->ib_dev.modify_port = mlx5_ib_modify_port; - dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext; - dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext; - dev->ib_dev.mmap = mlx5_ib_mmap; - dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd; - dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd; - dev->ib_dev.create_ah = mlx5_ib_create_ah; - dev->ib_dev.query_ah = mlx5_ib_query_ah; - dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah; - dev->ib_dev.create_srq = mlx5_ib_create_srq; - dev->ib_dev.modify_srq = mlx5_ib_modify_srq; - dev->ib_dev.query_srq = mlx5_ib_query_srq; - dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq; - dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv; - dev->ib_dev.create_qp = mlx5_ib_create_qp; - dev->ib_dev.modify_qp = mlx5_ib_modify_qp; - dev->ib_dev.query_qp = mlx5_ib_query_qp; - dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp; - dev->ib_dev.drain_sq = mlx5_ib_drain_sq; - dev->ib_dev.drain_rq = mlx5_ib_drain_rq; - dev->ib_dev.post_send = mlx5_ib_post_send; - dev->ib_dev.post_recv = mlx5_ib_post_recv; - dev->ib_dev.create_cq = mlx5_ib_create_cq; - dev->ib_dev.modify_cq = mlx5_ib_modify_cq; - dev->ib_dev.resize_cq = mlx5_ib_resize_cq; - dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq; - dev->ib_dev.poll_cq = mlx5_ib_poll_cq; - dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq; - dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr; - dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr; - dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr; - dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr; - dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; - dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; - dev->ib_dev.process_mad = mlx5_ib_process_mad; - dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr; - dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg; - dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; - dev->ib_dev.get_dev_fw_str = get_dev_fw_str; - dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity; + (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) | + (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | + (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); + if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) && IS_ENABLED(CONFIG_MLX5_CORE_IPOIB)) - dev->ib_dev.rdma_netdev_get_params = mlx5_ib_rn_get_params; - - if (mlx5_core_is_pf(mdev)) { - dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; - dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; - dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats; - dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid; - } + ib_set_device_ops(&dev->ib_dev, + &mlx5_ib_dev_ipoib_enhanced_ops); - dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext; + if (mlx5_core_is_pf(mdev)) + ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops); dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence)); if (MLX5_CAP_GEN(mdev, imaicl)) { - 
dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; - dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; dev->ib_dev.uverbs_cmd_mask |= (1ull << IB_USER_VERBS_CMD_ALLOC_MW) | (1ull << IB_USER_VERBS_CMD_DEALLOC_MW); + ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops); } if (MLX5_CAP_GEN(mdev, xrc)) { - dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; - dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd; dev->ib_dev.uverbs_cmd_mask |= (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) | (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); + ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops); } - if (MLX5_CAP_DEV_MEM(mdev, memic)) { - dev->ib_dev.alloc_dm = mlx5_ib_alloc_dm; - dev->ib_dev.dealloc_dm = mlx5_ib_dealloc_dm; - dev->ib_dev.reg_dm_mr = mlx5_ib_reg_dm_mr; - } + if (MLX5_CAP_DEV_MEM(mdev, memic)) + ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops); - dev->ib_dev.create_flow = mlx5_ib_create_flow; - dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow; - dev->ib_dev.uverbs_ex_cmd_mask |= - (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | - (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); - dev->ib_dev.create_flow_action_esp = mlx5_ib_create_flow_action_esp; - dev->ib_dev.destroy_flow_action = mlx5_ib_destroy_flow_action; - dev->ib_dev.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp; + if (mlx5_accel_ipsec_device_caps(dev->mdev) & + MLX5_ACCEL_IPSEC_CAP_DEVICE) + ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops); dev->ib_dev.driver_id = RDMA_DRIVER_MLX5; - dev->ib_dev.create_counters = mlx5_ib_create_counters; - dev->ib_dev.destroy_counters = mlx5_ib_destroy_counters; - dev->ib_dev.read_counters = mlx5_ib_read_counters; + ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops); + + if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)) + dev->ib_dev.driver_def = mlx5_ib_defs; err = init_node_data(dev); if (err) @@ -5908,22 +6012,37 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) return 0; } +static const struct ib_device_ops mlx5_ib_dev_port_ops = { + .get_port_immutable = mlx5_port_immutable, + .query_port = mlx5_ib_query_port, +}; + static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev) { - dev->ib_dev.get_port_immutable = mlx5_port_immutable; - dev->ib_dev.query_port = mlx5_ib_query_port; - + ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops); return 0; } +static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = { + .get_port_immutable = mlx5_port_rep_immutable, + .query_port = mlx5_ib_rep_query_port, +}; + int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev) { - dev->ib_dev.get_port_immutable = mlx5_port_rep_immutable; - dev->ib_dev.query_port = mlx5_ib_rep_query_port; - + ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops); return 0; } +static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = { + .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table, + .create_wq = mlx5_ib_create_wq, + .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table, + .destroy_wq = mlx5_ib_destroy_wq, + .get_netdev = mlx5_ib_get_netdev, + .modify_wq = mlx5_ib_modify_wq, +}; + static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev) { u8 port_num; @@ -5935,19 +6054,13 @@ static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev) dev->roce[i].last_port_state = IB_PORT_DOWN; } - dev->ib_dev.get_netdev = mlx5_ib_get_netdev; - dev->ib_dev.create_wq = mlx5_ib_create_wq; - dev->ib_dev.modify_wq = mlx5_ib_modify_wq; - dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq; - dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table; - dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table; - 
dev->ib_dev.uverbs_ex_cmd_mask |= (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) | (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) | (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) | (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) | (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL); + ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops); port_num = mlx5_core_native_port_num(dev->mdev) - 1; @@ -6041,11 +6154,20 @@ static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev) return mlx5_ib_odp_init_one(dev); } +void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev) +{ + mlx5_ib_odp_cleanup_one(dev); +} + +static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = { + .alloc_hw_stats = mlx5_ib_alloc_hw_stats, + .get_hw_stats = mlx5_ib_get_hw_stats, +}; + int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev) { if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) { - dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats; - dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats; + ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops); return mlx5_ib_alloc_counters(dev); } @@ -6103,17 +6225,12 @@ void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev) mlx5_free_bfreg(dev->mdev, &dev->bfreg); } -static int mlx5_ib_stage_populate_specs(struct mlx5_ib_dev *dev) -{ - return populate_specs_root(dev); -} - int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) { const char *name; rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group); - if (!mlx5_lag_is_active(dev->mdev)) + if (!mlx5_lag_is_roce(dev->mdev)) name = "mlx5_%d"; else name = "mlx5_bond_%d"; @@ -6147,16 +6264,32 @@ static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev) cancel_delay_drop(dev); } -static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev) +static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev) { - mlx5_ib_register_vport_reps(dev); - + dev->mdev_events.notifier_call = mlx5_ib_event; + mlx5_notifier_register(dev->mdev, &dev->mdev_events); return 0; } -static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev) +static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev) +{ + mlx5_notifier_unregister(dev->mdev, &dev->mdev_events); +} + +static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev) +{ + int uid; + + uid = mlx5_ib_devx_create(dev, false); + if (uid > 0) + dev->devx_whitelist_uid = uid; + + return 0; +} +static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev) { - mlx5_ib_unregister_vport_reps(dev); + if (dev->devx_whitelist_uid) + mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid); } void __mlx5_ib_remove(struct mlx5_ib_dev *dev, @@ -6169,10 +6302,6 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev, if (profile->stage[stage].cleanup) profile->stage[stage].cleanup(dev); } - - if (dev->devx_whitelist_uid) - mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid); - ib_dealloc_device((struct ib_device *)dev); } void *__mlx5_ib_add(struct mlx5_ib_dev *dev, @@ -6180,7 +6309,6 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev, { int err; int i; - int uid; for (i = 0; i < MLX5_IB_STAGE_MAX; i++) { if (profile->stage[i].init) { @@ -6190,10 +6318,6 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev, } } - uid = mlx5_ib_devx_create(dev); - if (uid > 0) - dev->devx_whitelist_uid = uid; - dev->profile = profile; dev->ib_active = true; @@ -6221,12 +6345,18 @@ static const struct mlx5_ib_profile pf_profile = { STAGE_CREATE(MLX5_IB_STAGE_ROCE, mlx5_ib_stage_roce_init, mlx5_ib_stage_roce_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_SRQ, + mlx5_init_srq_table, + mlx5_cleanup_srq_table), 
STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, mlx5_ib_stage_dev_res_init, mlx5_ib_stage_dev_res_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, + mlx5_ib_stage_dev_notifier_init, + mlx5_ib_stage_dev_notifier_cleanup), STAGE_CREATE(MLX5_IB_STAGE_ODP, mlx5_ib_stage_odp_init, - NULL), + mlx5_ib_stage_odp_cleanup), STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, mlx5_ib_stage_counters_init, mlx5_ib_stage_counters_cleanup), @@ -6242,9 +6372,9 @@ static const struct mlx5_ib_profile pf_profile = { STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, NULL, mlx5_ib_stage_pre_ib_reg_umr_cleanup), - STAGE_CREATE(MLX5_IB_STAGE_SPECS, - mlx5_ib_stage_populate_specs, - NULL), + STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID, + mlx5_ib_stage_devx_init, + mlx5_ib_stage_devx_cleanup), STAGE_CREATE(MLX5_IB_STAGE_IB_REG, mlx5_ib_stage_ib_reg_init, mlx5_ib_stage_ib_reg_cleanup), @@ -6272,9 +6402,15 @@ static const struct mlx5_ib_profile nic_rep_profile = { STAGE_CREATE(MLX5_IB_STAGE_ROCE, mlx5_ib_stage_rep_roce_init, mlx5_ib_stage_rep_roce_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_SRQ, + mlx5_init_srq_table, + mlx5_cleanup_srq_table), STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, mlx5_ib_stage_dev_res_init, mlx5_ib_stage_dev_res_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, + mlx5_ib_stage_dev_notifier_init, + mlx5_ib_stage_dev_notifier_cleanup), STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, mlx5_ib_stage_counters_init, mlx5_ib_stage_counters_cleanup), @@ -6287,18 +6423,12 @@ static const struct mlx5_ib_profile nic_rep_profile = { STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, NULL, mlx5_ib_stage_pre_ib_reg_umr_cleanup), - STAGE_CREATE(MLX5_IB_STAGE_SPECS, - mlx5_ib_stage_populate_specs, - NULL), STAGE_CREATE(MLX5_IB_STAGE_IB_REG, mlx5_ib_stage_ib_reg_init, mlx5_ib_stage_ib_reg_cleanup), STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, mlx5_ib_stage_post_ib_reg_umr_init, NULL), - STAGE_CREATE(MLX5_IB_STAGE_REP_REG, - mlx5_ib_stage_rep_reg_init, - mlx5_ib_stage_rep_reg_cleanup), }; static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev) @@ -6366,8 +6496,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) if (MLX5_ESWITCH_MANAGER(mdev) && mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0); - - return __mlx5_ib_add(dev, &nic_rep_profile); + dev->profile = &nic_rep_profile; + mlx5_ib_register_vport_reps(dev); + return dev; } return __mlx5_ib_add(dev, &pf_profile); @@ -6389,16 +6520,17 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) } dev = context; - __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); + if (dev->profile == &nic_rep_profile) + mlx5_ib_unregister_vport_reps(dev); + else + __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); + + ib_dealloc_device((struct ib_device *)dev); } static struct mlx5_interface mlx5_ib_interface = { .add = mlx5_ib_add, .remove = mlx5_ib_remove, - .event = mlx5_ib_event, -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - .pfault = mlx5_ib_pfault, -#endif .protocol = MLX5_INTERFACE_PROTOCOL_IB, }; |
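
Note on the event-handling hunks: the patch drops the old ->event() callback from struct mlx5_interface and instead registers a notifier_block in the new MLX5_IB_STAGE_DEVICE_NOTIFIER stage. The following is a minimal sketch of that registration pattern, assuming the mlx5_notifier_register()/mlx5_notifier_unregister() helpers and the MLX5_EVENT_TYPE_* values this series switches to; the my_* identifiers are hypothetical and only illustrate the container_of() dispatch the driver now relies on.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

struct my_ib_dev {
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;	/* embedded, like mlx5_ib_dev */
	unsigned long last_event;
};

/* Runs in the mlx5 core's notifier chain; must not block for long. */
static int my_event_notifier(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	struct my_ib_dev *dev = container_of(nb, struct my_ib_dev,
					     mdev_events);

	dev->last_event = event;	/* the real driver queues a work item */

	switch (event) {
	case MLX5_EVENT_TYPE_PORT_CHANGE:
	case MLX5_EVENT_TYPE_GENERAL_EVENT:
		/* 'data' points at the raw struct mlx5_eqe for the event. */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;	/* not interested in this event */
	}
}

static void my_events_register(struct my_ib_dev *dev)
{
	dev->mdev_events.notifier_call = my_event_notifier;
	mlx5_notifier_register(dev->mdev, &dev->mdev_events);
}

static void my_events_unregister(struct my_ib_dev *dev)
{
	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
}

Because the notifier runs in the EQ completion path, the driver itself only captures the event and defers the real handling to mlx5_ib_event_wq, as the reworked mlx5_ib_event()/mlx5_ib_handle_event() hunks above show.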
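Note on the verb-assignment hunks: the long block of dev->ib_dev.<verb> = ... assignments is replaced by const struct ib_device_ops tables applied with ib_set_device_ops(). A small sketch of that style follows, assuming the ib_set_device_ops() helper added to the RDMA core in the same cycle and the driver's own callback declarations from mlx5_ib.h; the my_* table names and my_set_ops() wrapper are placeholders, not code from this patch.

#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

/* Verbs every device instance provides. */
static const struct ib_device_ops my_base_ops = {
	.query_device = mlx5_ib_query_device,
	.query_port   = mlx5_ib_query_port,
};

/* Optional verbs, grouped per hardware capability. */
static const struct ib_device_ops my_xrc_ops = {
	.alloc_xrcd   = mlx5_ib_alloc_xrcd,
	.dealloc_xrcd = mlx5_ib_dealloc_xrcd,
};

static void my_set_ops(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &my_base_ops);

	/* Each feature contributes its own small table only when present. */
	if (MLX5_CAP_GEN(dev->mdev, xrc))
		ib_set_device_ops(&dev->ib_dev, &my_xrc_ops);
}

ib_set_device_ops() copies only the non-NULL members of each table into the device, which is why the patch can apply several small capability-specific tables on top of the common mlx5_ib_dev_ops.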
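Note on the mlx5_ib_raw_fs_rule_add() hunks: the destination array is now sized for two entries so a flow counter can ride along with the forward destination. The sketch below shows how such a destination list is built before mlx5_add_flow_rules() is called, assuming the mlx5 flow-steering structures already used in this file; my_add_counted_rule(), tirn and counter_id are illustrative placeholders.

#include <linux/mlx5/fs.h>

static struct mlx5_flow_handle *
my_add_counted_rule(struct mlx5_flow_table *ft, struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act, u32 tirn, u32 counter_id)
{
	struct mlx5_flow_destination dst[2] = {};
	int dst_num = 0;

	/* First destination: forward matching packets to a TIR. */
	dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dst[dst_num].tir_num = tirn;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dst_num++;

	/* Optional second destination: count the same packets. */
	if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dst[dst_num].counter_id = counter_id;
		dst_num++;
	}

	return mlx5_add_flow_rules(ft, spec, flow_act, dst, dst_num);
}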