Diffstat (limited to 'drivers/infiniband/hw/mlx5')
 drivers/infiniband/hw/mlx5/cq.c        |   6
 drivers/infiniband/hw/mlx5/devx.c      |  10
 drivers/infiniband/hw/mlx5/doorbell.c  |   3
 drivers/infiniband/hw/mlx5/gsi.c       |  51
 drivers/infiniband/hw/mlx5/ib_rep.c    |  77
 drivers/infiniband/hw/mlx5/main.c      | 192
 drivers/infiniband/hw/mlx5/mlx5_ib.h   |   7
 drivers/infiniband/hw/mlx5/mr.c        |   7
 drivers/infiniband/hw/mlx5/qp.c        | 232
 drivers/infiniband/hw/mlx5/std_types.c |  10

10 files changed, 374 insertions(+), 221 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 7abeb576b3c5..a190fb581591 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -945,7 +945,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	u32 *cqb = NULL;
 	void *cqc;
 	int cqe_size;
-	unsigned int irqn;
 	int eqn;
 	int err;
 
@@ -984,7 +983,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
 	}
 
-	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
+	err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
 	if (err)
 		goto err_cqb;
 
@@ -997,7 +996,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 			MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
 	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
 	MLX5_SET(cqc, cqc, uar_page, index);
-	MLX5_SET(cqc, cqc, c_eqn, eqn);
+	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
 	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
 	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
 		MLX5_SET(cqc, cqc, oi, 1);
@@ -1007,7 +1006,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		goto err_cqb;
 
 	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
-	cq->mcq.irqn = irqn;
 	if (udata)
 		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
 	else
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index eb9b0a2707f8..e95967aefe78 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -975,7 +975,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
 	struct mlx5_ib_dev *dev;
 	int user_vector;
 	int dev_eqn;
-	unsigned int irqn;
 	int err;
 
 	if (uverbs_copy_from(&user_vector, attrs,
@@ -987,7 +986,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
 		return PTR_ERR(c);
 	dev = to_mdev(c->ibucontext.device);
 
-	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
+	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
 	if (err < 0)
 		return err;
 
@@ -1437,11 +1436,10 @@ out:
 	rcu_read_unlock();
 }
 
-static bool is_apu_thread_cq(struct mlx5_ib_dev *dev, const void *in)
+static bool is_apu_cq(struct mlx5_ib_dev *dev, const void *in)
 {
 	if (!MLX5_CAP_GEN(dev->mdev, apu) ||
-	    !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
-		      apu_thread_cq))
+	    !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq))
 		return false;
 
 	return true;
@@ -1501,7 +1499,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
 		err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
 					   cmd_in_len, cmd_out, cmd_out_len);
 	} else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
-		   !is_apu_thread_cq(dev, cmd_in)) {
+		   !is_apu_cq(dev, cmd_in)) {
 		obj->flags |= DEVX_OBJ_FLAGS_CQ;
 		obj->core_cq.comp = devx_cq_comp;
 		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 9ca2e61807ec..6398e2f48579 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -78,7 +78,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
 	list_add(&page->list, &context->db_page_list);
 
 found:
-	db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
+	db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+		  (virt & ~PAGE_MASK);
 	db->u.user_page = page;
 	++page->refcnt;
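The cq.c and devx.c hunks above track the same mlx5_core API change: mlx5_vector2eqn() no longer reports the backing IRQ, and the CQ context field is now named c_eqn_or_apu_element. A minimal sketch of the new calling convention, assuming only the post-change signature int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn); the helper name and surrounding code are hypothetical:

#include <linux/mlx5/driver.h>

/* Hypothetical caller: map a completion vector to its EQ number.
 * After this change only the EQN comes back; the IRQ that services
 * the EQ stays private to mlx5_core, so there is no irqn to cache.
 */
static int example_vector_to_eqn(struct mlx5_core_dev *mdev, int vector,
				 int *eqn)
{
	int err;

	err = mlx5_vector2eqn(mdev, vector, eqn);
	if (err)
		return err;

	/* *eqn is what gets programmed into the CQ context, e.g.
	 * MLX5_SET(cqc, cqc, c_eqn_or_apu_element, *eqn);
	 */
	return 0;
}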
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 7fcad9135276..3ad8f637c589 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -116,8 +116,6 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
 		goto err_free_tx;
 	}
 
-	mutex_lock(&dev->devr.mutex);
-
 	if (dev->devr.ports[port_num - 1].gsi) {
 		mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
 			     port_num);
@@ -147,35 +145,20 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
 		hw_init_attr.cap.max_inline_data = 0;
 	}
 
-	gsi->rx_qp = mlx5_ib_create_qp(pd, &hw_init_attr, NULL);
+	gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
 	if (IS_ERR(gsi->rx_qp)) {
 		mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
 			     PTR_ERR(gsi->rx_qp));
 		ret = PTR_ERR(gsi->rx_qp);
 		goto err_destroy_cq;
 	}
-	gsi->rx_qp->device = pd->device;
-	gsi->rx_qp->pd = pd;
-	gsi->rx_qp->real_qp = gsi->rx_qp;
-
-	gsi->rx_qp->qp_type = hw_init_attr.qp_type;
-	gsi->rx_qp->send_cq = hw_init_attr.send_cq;
-	gsi->rx_qp->recv_cq = hw_init_attr.recv_cq;
-	gsi->rx_qp->event_handler = hw_init_attr.event_handler;
-	spin_lock_init(&gsi->rx_qp->mr_lock);
-	INIT_LIST_HEAD(&gsi->rx_qp->rdma_mrs);
-	INIT_LIST_HEAD(&gsi->rx_qp->sig_mrs);
 
 	dev->devr.ports[attr->port_num - 1].gsi = gsi;
-
-	mutex_unlock(&dev->devr.mutex);
-
 	return 0;
 
 err_destroy_cq:
 	ib_free_cq(gsi->cq);
 err_free_wrs:
-	mutex_unlock(&dev->devr.mutex);
 	kfree(gsi->outstanding_wrs);
 err_free_tx:
 	kfree(gsi->tx_qps);
@@ -190,16 +173,13 @@ int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp)
 	int qp_index;
 	int ret;
 
-	mutex_lock(&dev->devr.mutex);
-	ret = mlx5_ib_destroy_qp(gsi->rx_qp, NULL);
+	ret = ib_destroy_qp(gsi->rx_qp);
 	if (ret) {
 		mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n",
 			     ret);
-		mutex_unlock(&dev->devr.mutex);
 		return ret;
 	}
 	dev->devr.ports[port_num - 1].gsi = NULL;
-	mutex_unlock(&dev->devr.mutex);
 	gsi->rx_qp = NULL;
 
 	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {
@@ -213,8 +193,6 @@ int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp)
 	kfree(gsi->outstanding_wrs);
 	kfree(gsi->tx_qps);
-	kfree(mqp);
-
 	return 0;
 }
@@ -339,23 +317,13 @@ err_destroy_qp:
 	WARN_ON_ONCE(qp);
 }
 
-static void setup_qps(struct mlx5_ib_gsi_qp *gsi)
-{
-	struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
-	u16 qp_index;
-
-	mutex_lock(&dev->devr.mutex);
-	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
-		setup_qp(gsi, qp_index);
-	mutex_unlock(&dev->devr.mutex);
-}
-
 int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
 			  int attr_mask)
 {
 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
 	struct mlx5_ib_qp *mqp = to_mqp(qp);
 	struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+	u16 qp_index;
 	int ret;
 
 	mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state);
@@ -366,8 +334,11 @@ int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
 		return ret;
 	}
 
-	if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS)
-		setup_qps(gsi);
+	if (to_mqp(gsi->rx_qp)->state != IB_QPS_RTS)
+		return 0;
+
+	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
+		setup_qp(gsi, qp_index);
 
 	return 0;
 }
@@ -511,8 +482,8 @@ int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
 
 void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi)
 {
-	if (!gsi)
-		return;
+	u16 qp_index;
 
-	setup_qps(gsi);
+	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
+		setup_qp(gsi, qp_index);
 }
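The gsi.c rewrite above replaces direct calls into the driver's own create/destroy entry points with the core ib_create_qp()/ib_destroy_qp() pair, which is why all the hand-rolled initialization of gsi->rx_qp fields and the devr.mutex serialization can go. A rough sketch of the pattern, with illustrative attribute values (the real code builds hw_init_attr from the caller's attributes):

#include <rdma/ib_verbs.h>

/* Illustrative only: let the IB core create an internal QP. The core
 * fills in qp->device, qp->pd, qp->send_cq, qp->recv_cq, qp->qp_type
 * and the MR lists -- exactly the fields the deleted lines set by hand.
 */
static struct ib_qp *example_create_internal_qp(struct ib_pd *pd,
						struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_UD,	/* placeholder type */
		.cap = {
			.max_send_wr = 1,
			.max_recv_wr = 1,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};

	return ib_create_qp(pd, &init_attr);	/* ERR_PTR() on failure */
}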
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index b25e0b33a11a..52821485371a 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -8,13 +8,15 @@
 #include "srq.h"
 
 static int
-mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev,
+		      struct mlx5_eswitch_rep *rep,
+		      int vport_index)
 {
 	struct mlx5_ib_dev *ibdev;
-	int vport_index;
 
 	ibdev = mlx5_eswitch_uplink_get_proto_dev(dev->priv.eswitch, REP_IB);
-	vport_index = rep->vport_index;
+	if (!ibdev)
+		return -EINVAL;
 
 	ibdev->port[vport_index].rep = rep;
 	rep->rep_data[REP_IB].priv = ibdev;
@@ -26,19 +28,39 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	return 0;
 }
 
+static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev);
+
 static int
 mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
 	u32 num_ports = mlx5_eswitch_get_total_vports(dev);
 	const struct mlx5_ib_profile *profile;
+	struct mlx5_core_dev *peer_dev;
 	struct mlx5_ib_dev *ibdev;
+	u32 peer_num_ports;
 	int vport_index;
 	int ret;
 
+	vport_index = rep->vport_index;
+
+	if (mlx5_lag_is_shared_fdb(dev)) {
+		peer_dev = mlx5_lag_get_peer_mdev(dev);
+		peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);
+		if (mlx5_lag_is_master(dev)) {
+			/* Only 1 ib port is the representor for both uplinks */
+			num_ports += peer_num_ports - 1;
+		} else {
+			if (rep->vport == MLX5_VPORT_UPLINK)
+				return 0;
+			vport_index += peer_num_ports;
+			dev = peer_dev;
+		}
+	}
+
 	if (rep->vport == MLX5_VPORT_UPLINK)
 		profile = &raw_eth_profile;
 	else
-		return mlx5_ib_set_vport_rep(dev, rep);
+		return mlx5_ib_set_vport_rep(dev, rep, vport_index);
 
 	ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
 	if (!ibdev)
@@ -64,6 +86,8 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 		goto fail_add;
 
 	rep->rep_data[REP_IB].priv = ibdev;
+	if (mlx5_lag_is_shared_fdb(dev))
+		mlx5_ib_register_peer_vport_reps(dev);
 
 	return 0;
 
@@ -82,18 +106,45 @@ static void *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
 static void
 mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
+	struct mlx5_core_dev *mdev = mlx5_eswitch_get_core_dev(rep->esw);
 	struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
+	int vport_index = rep->vport_index;
 	struct mlx5_ib_port *port;
 
-	port = &dev->port[rep->vport_index];
+	if (WARN_ON(!mdev))
+		return;
+
+	if (mlx5_lag_is_shared_fdb(mdev) &&
+	    !mlx5_lag_is_master(mdev)) {
+		struct mlx5_core_dev *peer_mdev;
+
+		if (rep->vport == MLX5_VPORT_UPLINK)
+			return;
+		peer_mdev = mlx5_lag_get_peer_mdev(mdev);
+		vport_index += mlx5_eswitch_get_total_vports(peer_mdev);
+	}
+
+	if (!dev)
+		return;
+
+	port = &dev->port[vport_index];
 	write_lock(&port->roce.netdev_lock);
 	port->roce.netdev = NULL;
 	write_unlock(&port->roce.netdev_lock);
 	rep->rep_data[REP_IB].priv = NULL;
 	port->rep = NULL;
 
-	if (rep->vport == MLX5_VPORT_UPLINK)
+	if (rep->vport == MLX5_VPORT_UPLINK) {
+		struct mlx5_core_dev *peer_mdev;
+		struct mlx5_eswitch *esw;
+
+		if (mlx5_lag_is_shared_fdb(mdev)) {
+			peer_mdev = mlx5_lag_get_peer_mdev(mdev);
+			esw = peer_mdev->priv.eswitch;
+			mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
+		}
 		__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+	}
 }
 
 static const struct mlx5_eswitch_rep_ops rep_ops = {
@@ -102,6 +153,18 @@ static const struct mlx5_eswitch_rep_ops rep_ops = {
 	.get_proto_dev = mlx5_ib_rep_to_dev,
 };
 
+static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev)
+{
+	struct mlx5_core_dev *peer_mdev = mlx5_lag_get_peer_mdev(mdev);
+	struct mlx5_eswitch *esw;
+
+	if (!peer_mdev)
+		return;
+
+	esw = peer_mdev->priv.eswitch;
+	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
+}
+
 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
 					  u16 vport_num)
 {
@@ -123,7 +186,7 @@ struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
 
 	rep = dev->port[port - 1].rep;
 
-	return mlx5_eswitch_add_send_to_vport_rule(esw, rep, sq->base.mqp.qpn);
+	return mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep, sq->base.mqp.qpn);
 }
 
 static int mlx5r_rep_probe(struct auxiliary_device *adev,
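The vport_index arithmetic in the load/unload paths above decides where each representor lands in the single IB device's port array once two eswitches share one FDB: the master device grows by peer_num_ports - 1 slots (one uplink representor covers both uplinks) and every slave-eswitch vport is shifted by peer_num_ports. A self-contained model of that indexing, with made-up port counts:

#include <stdio.h>

/* Toy model of the shared-FDB port layout read off the hunks above;
 * the counts are illustrative, not taken from real hardware.
 */
int main(void)
{
	unsigned int num_ports = 4;		/* master eswitch vports */
	unsigned int peer_num_ports = 4;	/* slave eswitch vports */
	unsigned int total = num_ports + peer_num_ports - 1;
	unsigned int vport_index = 2;

	printf("master IB device exposes %u ports\n", total);
	printf("master vport %u -> slot %u\n", vport_index, vport_index);
	printf("slave  vport %u -> slot %u\n", vport_index,
	       vport_index + peer_num_ports);
	return 0;
}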
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 094c976b1eed..8664bcf6d3f5 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -126,6 +126,7 @@ static int get_port_state(struct ib_device *ibdev,
 
 static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
 					   struct net_device *ndev,
+					   struct net_device *upper,
 					   u32 *port_num)
 {
 	struct net_device *rep_ndev;
@@ -137,6 +138,14 @@ static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
 		if (!port->rep)
 			continue;
 
+		if (upper == ndev && port->rep->vport == MLX5_VPORT_UPLINK) {
+			*port_num = i + 1;
+			return &port->roce;
+		}
+
+		if (upper && port->rep->vport == MLX5_VPORT_UPLINK)
+			continue;
+
 		read_lock(&port->roce.netdev_lock);
 		rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw,
 						  port->rep->vport);
@@ -196,11 +205,12 @@ static int mlx5_netdev_event(struct notifier_block *this,
 		}
 
 		if (ibdev->is_rep)
-			roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
+			roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
 		if (!roce)
 			return NOTIFY_DONE;
-		if ((upper == ndev || (!upper && ndev == roce->netdev))
-		    && ibdev->ib_active) {
+		if ((upper == ndev ||
+		     ((!upper || ibdev->is_rep) && ndev == roce->netdev)) &&
+		    ibdev->ib_active) {
 			struct ib_event ibev = { };
 			enum ib_port_state port_state;
@@ -1174,6 +1184,16 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 			MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
 	}
 
+	if (offsetofend(typeof(resp), dci_streams_caps) <= uhw_outlen) {
+		resp.response_length += sizeof(resp.dci_streams_caps);
+
+		resp.dci_streams_caps.max_log_num_concurent =
+			MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);
+
+		resp.dci_streams_caps.max_log_num_errored =
+			MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
+	}
+
 	if (uhw_outlen) {
 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
@@ -2491,6 +2511,13 @@ static void pkey_change_handler(struct work_struct *work)
 		container_of(work, struct mlx5_ib_port_resources,
 			     pkey_change_work);
 
+	if (!ports->gsi)
+		/*
+		 * We got this event before device was fully configured
+		 * and MAD registration code wasn't called/finished yet.
+		 */
+		return;
+
 	mlx5_ib_gsi_pkey_change(ports->gsi);
 }
@@ -2785,33 +2812,16 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
 	if (!MLX5_CAP_GEN(dev->mdev, xrc))
 		return -EOPNOTSUPP;
 
-	mutex_init(&devr->mutex);
+	devr->p0 = ib_alloc_pd(ibdev, 0);
+	if (IS_ERR(devr->p0))
+		return PTR_ERR(devr->p0);
 
-	devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
-	if (!devr->p0)
-		return -ENOMEM;
-
-	devr->p0->device = ibdev;
-	devr->p0->uobject = NULL;
-	atomic_set(&devr->p0->usecnt, 0);
-
-	ret = mlx5_ib_alloc_pd(devr->p0, NULL);
-	if (ret)
-		goto error0;
-
-	devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq);
-	if (!devr->c0) {
-		ret = -ENOMEM;
+	devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
+	if (IS_ERR(devr->c0)) {
+		ret = PTR_ERR(devr->c0);
 		goto error1;
 	}
 
-	devr->c0->device = &dev->ib_dev;
-	atomic_set(&devr->c0->usecnt, 0);
-
-	ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL);
-	if (ret)
-		goto err_create_cq;
-
 	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
 	if (ret)
 		goto error2;
@@ -2826,45 +2836,22 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
 	attr.srq_type = IB_SRQT_XRC;
 	attr.ext.cq = devr->c0;
 
-	devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
-	if (!devr->s0) {
-		ret = -ENOMEM;
-		goto error4;
-	}
-
-	devr->s0->device = &dev->ib_dev;
-	devr->s0->pd = devr->p0;
-	devr->s0->srq_type = IB_SRQT_XRC;
-	devr->s0->ext.cq = devr->c0;
-	ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
-	if (ret)
+	devr->s0 = ib_create_srq(devr->p0, &attr);
+	if (IS_ERR(devr->s0)) {
+		ret = PTR_ERR(devr->s0);
 		goto err_create;
-
-	atomic_inc(&devr->s0->ext.cq->usecnt);
-	atomic_inc(&devr->p0->usecnt);
-	atomic_set(&devr->s0->usecnt, 0);
+	}
 
 	memset(&attr, 0, sizeof(attr));
 	attr.attr.max_sge = 1;
 	attr.attr.max_wr = 1;
 	attr.srq_type = IB_SRQT_BASIC;
-	devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
-	if (!devr->s1) {
-		ret = -ENOMEM;
-		goto error5;
-	}
-
-	devr->s1->device = &dev->ib_dev;
-	devr->s1->pd = devr->p0;
-	devr->s1->srq_type = IB_SRQT_BASIC;
-	devr->s1->ext.cq = devr->c0;
-	ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
-	if (ret)
+	devr->s1 = ib_create_srq(devr->p0, &attr);
+	if (IS_ERR(devr->s1)) {
+		ret = PTR_ERR(devr->s1);
 		goto error6;
-
-	atomic_inc(&devr->p0->usecnt);
-	atomic_set(&devr->s1->usecnt, 0);
+	}
 
 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
 		INIT_WORK(&devr->ports[port].pkey_change_work,
@@ -2873,23 +2860,15 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
 	return 0;
 
 error6:
-	kfree(devr->s1);
-error5:
-	mlx5_ib_destroy_srq(devr->s0, NULL);
+	ib_destroy_srq(devr->s0);
 err_create:
-	kfree(devr->s0);
-error4:
 	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
 error3:
 	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
 error2:
-	mlx5_ib_destroy_cq(devr->c0, NULL);
-err_create_cq:
-	kfree(devr->c0);
+	ib_destroy_cq(devr->c0);
 error1:
-	mlx5_ib_dealloc_pd(devr->p0, NULL);
-error0:
-	kfree(devr->p0);
+	ib_dealloc_pd(devr->p0);
 
 	return ret;
 }
@@ -2898,20 +2877,21 @@ static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
 	struct mlx5_ib_resources *devr = &dev->devr;
 	int port;
 
-	mlx5_ib_destroy_srq(devr->s1, NULL);
-	kfree(devr->s1);
-	mlx5_ib_destroy_srq(devr->s0, NULL);
-	kfree(devr->s0);
-	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
-	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
-	mlx5_ib_destroy_cq(devr->c0, NULL);
-	kfree(devr->c0);
-	mlx5_ib_dealloc_pd(devr->p0, NULL);
-	kfree(devr->p0);
-
-	/* Make sure no change P_Key work items are still executing */
+	/*
+	 * Make sure no change P_Key work items are still executing.
+	 *
+	 * At this stage, the mlx5_ib_event should be unregistered
+	 * and it ensures that no new works are added.
+	 */
 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
 		cancel_work_sync(&devr->ports[port].pkey_change_work);
+
+	ib_destroy_srq(devr->s1);
+	ib_destroy_srq(devr->s0);
+	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
+	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
+	ib_destroy_cq(devr->c0);
+	ib_dealloc_pd(devr->p0);
 }
@@ -3012,7 +2992,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
 	struct mlx5_flow_table *ft;
 	int err;
 
-	if (!ns || !mlx5_lag_is_roce(mdev))
+	if (!ns || !mlx5_lag_is_active(mdev))
 		return 0;
 
 	err = mlx5_cmd_create_vport_lag(mdev);
@@ -3074,9 +3054,11 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
 {
 	int err;
 
-	err = mlx5_nic_vport_enable_roce(dev->mdev);
-	if (err)
-		return err;
+	if (!dev->is_rep && dev->profile != &raw_eth_profile) {
+		err = mlx5_nic_vport_enable_roce(dev->mdev);
+		if (err)
+			return err;
+	}
 
 	err = mlx5_eth_lag_init(dev);
 	if (err)
@@ -3085,7 +3067,8 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
 	return 0;
 
 err_disable_roce:
-	mlx5_nic_vport_disable_roce(dev->mdev);
+	if (!dev->is_rep && dev->profile != &raw_eth_profile)
+		mlx5_nic_vport_disable_roce(dev->mdev);
 
 	return err;
 }
@@ -3093,7 +3076,8 @@ err_disable_roce:
 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
 {
 	mlx5_eth_lag_cleanup(dev);
-	mlx5_nic_vport_disable_roce(dev->mdev);
+	if (!dev->is_rep && dev->profile != &raw_eth_profile)
+		mlx5_nic_vport_disable_roce(dev->mdev);
 }
 
 static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
@@ -3785,6 +3769,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
 	INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
 	INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
 };
@@ -3950,12 +3935,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
 
 		/* Register only for native ports */
 		err = mlx5_add_netdev_notifier(dev, port_num);
-		if (err || dev->is_rep || !mlx5_is_roce_init_enabled(mdev))
-			/*
-			 * We don't enable ETH interface for
-			 * 1. IB representors
-			 * 2. User disabled ROCE through devlink interface
-			 */
+		if (err)
 			return err;
 
 		err = mlx5_enable_eth(dev);
@@ -3980,8 +3960,7 @@ static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
 
 	if (ll == IB_LINK_LAYER_ETHERNET) {
-		if (!dev->is_rep)
-			mlx5_disable_eth(dev);
+		mlx5_disable_eth(dev);
 
 		port_num = mlx5_core_native_port_num(dev->mdev) - 1;
 		mlx5_remove_netdev_notifier(dev, port_num);
@@ -4037,7 +4016,7 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
 {
 	const char *name;
 
-	if (!mlx5_lag_is_roce(dev->mdev))
+	if (!mlx5_lag_is_active(dev->mdev))
 		name = "mlx5_%d";
 	else
 		name = "mlx5_bond_%d";
@@ -4053,7 +4032,7 @@ static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
 
 	if (dev->umrc.qp)
-		mlx5_ib_destroy_qp(dev->umrc.qp, NULL);
+		ib_destroy_qp(dev->umrc.qp);
 	if (dev->umrc.cq)
 		ib_free_cq(dev->umrc.cq);
 	if (dev->umrc.pd)
@@ -4106,23 +4085,17 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 	init_attr->cap.max_send_sge = 1;
 	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
 	init_attr->port_num = 1;
-	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
+	qp = ib_create_qp(pd, init_attr);
 	if (IS_ERR(qp)) {
 		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
 		ret = PTR_ERR(qp);
 		goto error_3;
 	}
-	qp->device = &dev->ib_dev;
-	qp->real_qp = qp;
-	qp->uobject = NULL;
-	qp->qp_type = MLX5_IB_QPT_REG_UMR;
-	qp->send_cq = init_attr->send_cq;
-	qp->recv_cq = init_attr->recv_cq;
 
 	attr->qp_state = IB_QPS_INIT;
 	attr->port_num = 1;
-	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
-				IB_QP_PORT, NULL);
+	ret = ib_modify_qp(qp, attr,
+			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT);
 	if (ret) {
 		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
 		goto error_4;
@@ -4132,7 +4105,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 
 	attr->qp_state = IB_QPS_RTR;
 	attr->path_mtu = IB_MTU_256;
-	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
+	ret = ib_modify_qp(qp, attr, IB_QP_STATE);
 	if (ret) {
 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
 		goto error_4;
@@ -4140,7 +4113,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 
 	memset(attr, 0, sizeof(*attr));
 	attr->qp_state = IB_QPS_RTS;
-	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
+	ret = ib_modify_qp(qp, attr, IB_QP_STATE);
 	if (ret) {
 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
 		goto error_4;
@@ -4163,7 +4136,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 	return 0;
 
 error_4:
-	mlx5_ib_destroy_qp(qp, NULL);
+	ib_destroy_qp(qp);
 	dev->umrc.qp = NULL;
 
 error_3:
@@ -4454,7 +4427,8 @@ static void mlx5r_mp_remove(struct auxiliary_device *adev)
 	mutex_lock(&mlx5_ib_multiport_mutex);
 	if (mpi->ibdev)
 		mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
-	list_del(&mpi->list);
+	else
+		list_del(&mpi->list);
 	mutex_unlock(&mlx5_ib_multiport_mutex);
 	kfree(mpi);
 }
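Two main.c changes are worth annotating. The dci_streams_caps block in mlx5_ib_query_device() extends the uverbs response with the usual offsetofend() gate: a new field is filled and counted into response_length only when the caller's output buffer is large enough to hold it, so older userspace keeps working. A self-contained demonstration of the gate (toy struct, not the real mlx5_ib_query_device_resp layout):

#include <stddef.h>
#include <stdio.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Stand-in for an extensible uverbs response struct. */
struct toy_resp {
	unsigned int response_length;
	unsigned int legacy_field;
	struct {
		unsigned char max_log_num_concurent;
		unsigned char max_log_num_errored;
	} dci_streams_caps;
};

int main(void)
{
	struct toy_resp resp = {
		.response_length = offsetofend(struct toy_resp, legacy_field),
	};
	size_t uhw_outlen = sizeof(resp);	/* caller's buffer size */

	/* Fill the new field only if the caller can actually receive it. */
	if (offsetofend(struct toy_resp, dci_streams_caps) <= uhw_outlen) {
		resp.dci_streams_caps.max_log_num_concurent = 5;
		resp.dci_streams_caps.max_log_num_errored = 3;
		resp.response_length += sizeof(resp.dci_streams_caps);
	}
	printf("response_length = %u\n", resp.response_length);
	return 0;
}

The second point is ordering: mlx5_ib_dev_res_cleanup() now cancels the pkey_change_work items before destroying s0/s1/c0/p0, since a still-running work item may touch those resources; the comment added in the hunk spells out why this is safe (the event handler is already unregistered, so no new work can be queued).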
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 585fb00bdce8..bf20a388eabe 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -795,8 +795,6 @@ struct mlx5_ib_resources {
 	struct ib_srq *s0;
 	struct ib_srq *s1;
 	struct mlx5_ib_port_resources ports[2];
-	/* Protects changes to the port resources */
-	struct mutex mutex;
 };
 
 struct mlx5_ib_counters {
@@ -1221,9 +1219,8 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 			  const struct ib_recv_wr **bad_wr);
 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
-struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
-				struct ib_qp_init_attr *init_attr,
-				struct ib_udata *udata);
+int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+		      struct ib_udata *udata);
 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 		      struct ib_udata *udata);
 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3263851ea574..a520ac8ab68c 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -531,8 +531,8 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 		 */
 		spin_unlock_irq(&ent->lock);
 		need_delay = need_resched() || someone_adding(cache) ||
-			     time_after(jiffies,
-					READ_ONCE(cache->last_add) + 300 * HZ);
+			     !time_after(jiffies,
+					 READ_ONCE(cache->last_add) + 300 * HZ);
 		spin_lock_irq(&ent->lock);
 		if (ent->disabled)
 			goto out;
@@ -1226,7 +1226,8 @@ int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
 	orig_sg_length = sg.length;
 
 	cur_mtt = mtt;
-	rdma_for_each_block (mr->umem->sg_head.sgl, &biter, mr->umem->nmap,
+	rdma_for_each_block (mr->umem->sgt_append.sgt.sgl, &biter,
+			     mr->umem->sgt_append.sgt.nents,
 			     BIT(mr->page_shift)) {
 		if (cur_mtt == (void *)mtt + sg.length) {
 			dma_sync_single_for_device(ddev, sg.addr, sg.length,
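The __cache_work_func() hunk in mr.c is a logic fix rather than a cleanup: time_after(jiffies, last_add + 300 * HZ) is true once *more* than 300 seconds have passed since the last MR was added, which is the opposite of a reason to delay shrinking the cache; negating it makes need_delay mean "an MR was added within the last 300 seconds". A small userspace model of the corrected test, with time_after() re-declared locally (in the kernel it comes from <linux/jiffies.h>):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long jiffies_t;
#define HZ 250UL
/* Wrap-safe "a is after b" comparison, as in <linux/jiffies.h>. */
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	jiffies_t jiffies = 1000000;
	jiffies_t last_add;
	bool need_delay;

	last_add = jiffies - 10 * HZ;	/* last MR added 10s ago */
	need_delay = !time_after(jiffies, last_add + 300 * HZ);
	printf("added  10s ago -> need_delay=%d\n", need_delay);

	last_add = jiffies - 400 * HZ;	/* last MR added 400s ago */
	need_delay = !time_after(jiffies, last_add + 300 * HZ);
	printf("added 400s ago -> need_delay=%d\n", need_delay);
	return 0;
}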
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index a77db29f8391..b2fca110346c 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1906,7 +1906,6 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
 static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			     struct mlx5_create_qp_params *params)
 {
-	struct mlx5_ib_create_qp *ucmd = params->ucmd;
 	struct ib_qp_init_attr *attr = params->attr;
 	u32 uidx = params->uidx;
 	struct mlx5_ib_resources *devr = &dev->devr;
@@ -1926,8 +1925,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	if (!in)
 		return -ENOMEM;
 
-	if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
-		MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
@@ -1982,6 +1979,167 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	return 0;
 }
 
+static int create_dci(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+		      struct mlx5_ib_qp *qp,
+		      struct mlx5_create_qp_params *params)
+{
+	struct ib_qp_init_attr *init_attr = params->attr;
+	struct mlx5_ib_create_qp *ucmd = params->ucmd;
+	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+	struct ib_udata *udata = params->udata;
+	u32 uidx = params->uidx;
+	struct mlx5_ib_resources *devr = &dev->devr;
+	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+	struct mlx5_core_dev *mdev = dev->mdev;
+	struct mlx5_ib_cq *send_cq;
+	struct mlx5_ib_cq *recv_cq;
+	unsigned long flags;
+	struct mlx5_ib_qp_base *base;
+	int ts_format;
+	int mlx5_st;
+	void *qpc;
+	u32 *in;
+	int err;
+
+	spin_lock_init(&qp->sq.lock);
+	spin_lock_init(&qp->rq.lock);
+
+	mlx5_st = to_mlx5_st(qp->type);
+	if (mlx5_st < 0)
+		return -EINVAL;
+
+	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+	base = &qp->trans_qp.base;
+
+	qp->has_rq = qp_has_rq(init_attr);
+	err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
+	if (err) {
+		mlx5_ib_dbg(dev, "err %d\n", err);
+		return err;
+	}
+
+	if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
+	    ucmd->rq_wqe_count != qp->rq.wqe_cnt)
+		return -EINVAL;
+
+	if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
+		return -EINVAL;
+
+	ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq),
+				     to_mcq(init_attr->recv_cq));
+
+	if (ts_format < 0)
+		return ts_format;
+
+	err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
+			      &inlen, base, ucmd);
+	if (err)
+		return err;
+
+	if (MLX5_CAP_GEN(mdev, ece_support))
+		MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
+	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+	MLX5_SET(qpc, qpc, st, mlx5_st);
+	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+	MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
+
+	if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
+		MLX5_SET(qpc, qpc, wq_signature, 1);
+
+	if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+		MLX5_SET(qpc, qpc, cd_master, 1);
+	if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
+		MLX5_SET(qpc, qpc, cd_slave_send, 1);
+	if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE)
+		configure_requester_scat_cqe(dev, qp, init_attr, qpc);
+
+	if (qp->rq.wqe_cnt) {
+		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
+	}
+
+	if (qp->flags_en & MLX5_QP_FLAG_DCI_STREAM) {
+		MLX5_SET(qpc, qpc, log_num_dci_stream_channels,
+			 ucmd->dci_streams.log_num_concurent);
+		MLX5_SET(qpc, qpc, log_num_dci_errored_streams,
+			 ucmd->dci_streams.log_num_errored);
+	}
+
+	MLX5_SET(qpc, qpc, ts_format, ts_format);
+	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
+
+	MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
+
+	/* Set default resources */
+	if (init_attr->srq) {
+		MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
+		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+			 to_msrq(init_attr->srq)->msrq.srqn);
+	} else {
+		MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
+		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+			 to_msrq(devr->s1)->msrq.srqn);
+	}
+
+	if (init_attr->send_cq)
+		MLX5_SET(qpc, qpc, cqn_snd,
+			 to_mcq(init_attr->send_cq)->mcq.cqn);
+
+	if (init_attr->recv_cq)
+		MLX5_SET(qpc, qpc, cqn_rcv,
+			 to_mcq(init_attr->recv_cq)->mcq.cqn);
+
+	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+	/* 0xffffff means we ask to work with cqe version 0 */
+	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+		MLX5_SET(qpc, qpc, user_index, uidx);
+
+	if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+		MLX5_SET(qpc, qpc, end_padding_mode,
+			 MLX5_WQ_END_PAD_MODE_ALIGN);
+		/* Special case to clean flag */
+		qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
+	}
+
+	err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+
+	kvfree(in);
+	if (err)
+		goto err_create;
+
+	base->container_mibqp = qp;
+	base->mqp.event = mlx5_ib_qp_event;
+	if (MLX5_CAP_GEN(mdev, ece_support))
+		params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
+
+	get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
+		&send_cq, &recv_cq);
+	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+	mlx5_ib_lock_cqs(send_cq, recv_cq);
+	/* Maintain device to QPs access, needed for further handling via reset
+	 * flow
+	 */
+	list_add_tail(&qp->qps_list, &dev->qp_list);
+	/* Maintain CQ to QPs access, needed for further handling via reset flow
+	 */
+	if (send_cq)
+		list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+	if (recv_cq)
+		list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+	mlx5_ib_unlock_cqs(send_cq, recv_cq);
+	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+	return 0;
+
+err_create:
+	destroy_qp(dev, qp, base, udata);
+	return err;
+}
+
 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			  struct mlx5_ib_qp *qp,
 			  struct mlx5_create_qp_params *params)
@@ -2512,7 +2670,6 @@ static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	}
 
 	qp->state = IB_QPS_RESET;
-	rdma_restrack_no_track(&qp->ibqp.res);
 	return 0;
 }
@@ -2653,6 +2810,9 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);
 
+	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_DCI_STREAM,
+			    MLX5_CAP_GEN(mdev, log_max_dci_stream_channels),
+			    qp);
 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
@@ -2847,6 +3007,10 @@ static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	switch (qp->type) {
 	case MLX5_IB_QPT_DCT:
 		err = create_dct(dev, pd, qp, params);
+		rdma_restrack_no_track(&qp->ibqp.res);
+		break;
+	case MLX5_IB_QPT_DCI:
+		err = create_dci(dev, pd, qp, params);
 		break;
 	case IB_QPT_XRC_TGT:
 		err = create_xrc_tgt_qp(dev, qp, params);
@@ -2854,6 +3018,10 @@ static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	case IB_QPT_GSI:
 		err = mlx5_ib_create_gsi(pd, qp, params->attr);
 		break;
+	case MLX5_IB_QPT_HW_GSI:
+	case MLX5_IB_QPT_REG_UMR:
+		rdma_restrack_no_track(&qp->ibqp.res);
+		fallthrough;
 	default:
 		if (params->udata)
 			err = create_user_qp(dev, pd, qp, params);
@@ -2942,7 +3110,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
 	}
 
 	kfree(mqp->dct.in);
-	kfree(mqp);
 	return 0;
 }
@@ -2980,25 +3147,23 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
 	return ret ? 0 : -EINVAL;
 }
 
-struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
-				struct ib_udata *udata)
+int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+		      struct ib_udata *udata)
 {
 	struct mlx5_create_qp_params params = {};
-	struct mlx5_ib_dev *dev;
-	struct mlx5_ib_qp *qp;
+	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+	struct mlx5_ib_qp *qp = to_mqp(ibqp);
+	struct ib_pd *pd = ibqp->pd;
 	enum ib_qp_type type;
 	int err;
 
-	dev = pd ? to_mdev(pd->device) :
-		   to_mdev(to_mxrcd(attr->xrcd)->ibxrcd.device);
-
 	err = check_qp_type(dev, attr, &type);
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
 	err = check_valid_flow(dev, pd, attr, udata);
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
 	params.udata = udata;
 	params.uidx = MLX5_IB_DEFAULT_UIDX;
@@ -3008,49 +3173,43 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
 	if (udata) {
 		err = process_udata_size(dev, &params);
 		if (err)
-			return ERR_PTR(err);
+			return err;
 
 		err = check_ucmd_data(dev, &params);
 		if (err)
-			return ERR_PTR(err);
+			return err;
 
 		params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
 		if (!params.ucmd)
-			return ERR_PTR(-ENOMEM);
+			return -ENOMEM;
 
 		err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
 		if (err)
 			goto free_ucmd;
 	}
 
-	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-	if (!qp) {
-		err = -ENOMEM;
-		goto free_ucmd;
-	}
-
 	mutex_init(&qp->mutex);
 	qp->type = type;
 	if (udata) {
 		err = process_vendor_flags(dev, qp, params.ucmd, attr);
 		if (err)
-			goto free_qp;
+			goto free_ucmd;
 
 		err = get_qp_uidx(qp, &params);
 		if (err)
-			goto free_qp;
+			goto free_ucmd;
 	}
 	err = process_create_flags(dev, qp, attr);
 	if (err)
-		goto free_qp;
+		goto free_ucmd;
 
 	err = check_qp_attr(dev, qp, attr);
 	if (err)
-		goto free_qp;
+		goto free_ucmd;
 
 	err = create_qp(dev, pd, qp, &params);
 	if (err)
-		goto free_qp;
+		goto free_ucmd;
 
 	kfree(params.ucmd);
 	params.ucmd = NULL;
@@ -3065,7 +3224,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
 	if (err)
 		goto destroy_qp;
 
-	return &qp->ibqp;
+	return 0;
 
 destroy_qp:
 	switch (qp->type) {
@@ -3076,22 +3235,12 @@ destroy_qp:
 		mlx5_ib_destroy_gsi(qp);
 		break;
 	default:
-		/*
-		 * These lines below are temp solution till QP allocation
-		 * will be moved to be under IB/core responsiblity.
-		 */
-		qp->ibqp.send_cq = attr->send_cq;
-		qp->ibqp.recv_cq = attr->recv_cq;
-		qp->ibqp.pd = pd;
 		destroy_qp_common(dev, qp, udata);
 	}
 
-	qp = NULL;
-free_qp:
-	kfree(qp);
 free_ucmd:
 	kfree(params.ucmd);
-	return ERR_PTR(err);
+	return err;
 }
 
 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
@@ -3106,9 +3255,6 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 		return mlx5_ib_destroy_dct(mqp);
 
 	destroy_qp_common(dev, mqp, udata);
-
-	kfree(mqp);
-
 	return 0;
 }
diff --git a/drivers/infiniband/hw/mlx5/std_types.c b/drivers/infiniband/hw/mlx5/std_types.c
index c0ddf7b3c6e2..bbfcce3bdc84 100644
--- a/drivers/infiniband/hw/mlx5/std_types.c
+++ b/drivers/infiniband/hw/mlx5/std_types.c
@@ -114,14 +114,18 @@ out:
 static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num,
 			       struct mlx5_ib_uapi_query_port *info)
 {
-	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_eswitch_rep *rep;
+	struct mlx5_core_dev *mdev;
 	int err;
 
 	rep = dev->port[port_num - 1].rep;
 	if (!rep)
 		return -EOPNOTSUPP;
 
+	mdev = mlx5_eswitch_get_core_dev(rep->esw);
+	if (!mdev)
+		return -EINVAL;
+
 	info->vport = rep->vport;
 	info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT;
 
@@ -138,9 +142,9 @@ static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num,
 	if (err)
 		return err;
 
-	if (mlx5_eswitch_vport_match_metadata_enabled(mdev->priv.eswitch)) {
+	if (mlx5_eswitch_vport_match_metadata_enabled(rep->esw)) {
 		info->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(
-			mdev->priv.eswitch, rep->vport);
+			rep->esw, rep->vport);
 		info->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
 		info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0;
 	}
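Closing note on the qp.c conversion: together with INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp) in main.c and the new mlx5_ib.h prototype, struct mlx5_ib_qp is now allocated and freed by the IB core, so mlx5_ib_create_qp() takes a pre-allocated struct ib_qp and returns int, and every kzalloc()/kfree() of the QP plus the ERR_PTR() plumbing disappears from the driver. A self-contained model of that container pattern (toy structs, not the real ib_qp layout):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-ins: the "core" struct is embedded in the driver struct,
 * mirroring what INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp) declares.
 */
struct toy_ib_qp { int qp_num; };
struct toy_drv_qp {
	int drv_state;
	struct toy_ib_qp ibqp;
};

/* Driver create callback: receives core-allocated memory, returns int. */
static int toy_create_qp(struct toy_ib_qp *ibqp)
{
	struct toy_drv_qp *qp = container_of(ibqp, struct toy_drv_qp, ibqp);

	qp->drv_state = 1;	/* fill only the private part */
	return 0;		/* on error, the core frees the container */
}

int main(void)
{
	/* The "core" allocates the whole container at the declared size. */
	struct toy_drv_qp *qp = calloc(1, sizeof(*qp));

	if (!qp || toy_create_qp(&qp->ibqp)) {
		free(qp);
		return 1;
	}
	printf("drv_state=%d\n", qp->drv_state);
	free(qp);
	return 0;
}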