Diffstat (limited to 'drivers/infiniband/hw/mlx5')
 drivers/infiniband/hw/mlx5/counters.c |  42
 drivers/infiniband/hw/mlx5/cq.c       |  23
 drivers/infiniband/hw/mlx5/devx.c     |   9
 drivers/infiniband/hw/mlx5/dm.c       |   3
 drivers/infiniband/hw/mlx5/doorbell.c |  10
 drivers/infiniband/hw/mlx5/fs.c       |  20
 drivers/infiniband/hw/mlx5/main.c     |  20
 drivers/infiniband/hw/mlx5/mlx5_ib.h  |  12
 drivers/infiniband/hw/mlx5/mr.c       |  24
 drivers/infiniband/hw/mlx5/odp.c      |  13
 drivers/infiniband/hw/mlx5/qp.c       | 177
 drivers/infiniband/hw/mlx5/qpc.c      |   6
 drivers/infiniband/hw/mlx5/srq.c      |   2
 drivers/infiniband/hw/mlx5/wr.c       |  14
 14 files changed, 228 insertions(+), 147 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index e365341057cb..224ba36f2946 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -161,22 +161,29 @@ u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num)
 	return cnts->set_id;
 }
 
-static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
-						    u32 port_num)
+static struct rdma_hw_stats *
+mlx5_ib_alloc_hw_device_stats(struct ib_device *ibdev)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	const struct mlx5_ib_counters *cnts;
-	bool is_switchdev = is_mdev_switchdev_mode(dev->mdev);
+	const struct mlx5_ib_counters *cnts = &dev->port[0].cnts;
 
-	if ((is_switchdev && port_num) || (!is_switchdev && !port_num))
-		return NULL;
+	return rdma_alloc_hw_stats_struct(cnts->names,
+					  cnts->num_q_counters +
+					  cnts->num_cong_counters +
+					  cnts->num_ext_ppcnt_counters,
+					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
 
-	cnts = get_counters(dev, port_num - 1);
+static struct rdma_hw_stats *
+mlx5_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	const struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts;
 
 	return rdma_alloc_hw_stats_struct(cnts->names,
 					  cnts->num_q_counters +
-					  cnts->num_cong_counters +
-					  cnts->num_ext_ppcnt_counters,
+					  cnts->num_cong_counters +
+					  cnts->num_ext_ppcnt_counters,
 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 }
 
@@ -666,7 +673,17 @@ void mlx5_ib_counters_clear_description(struct ib_counters *counters)
 }
 
 static const struct ib_device_ops hw_stats_ops = {
-	.alloc_hw_stats = mlx5_ib_alloc_hw_stats,
+	.alloc_hw_port_stats = mlx5_ib_alloc_hw_port_stats,
+	.get_hw_stats = mlx5_ib_get_hw_stats,
+	.counter_bind_qp = mlx5_ib_counter_bind_qp,
+	.counter_unbind_qp = mlx5_ib_counter_unbind_qp,
+	.counter_dealloc = mlx5_ib_counter_dealloc,
+	.counter_alloc_stats = mlx5_ib_counter_alloc_stats,
+	.counter_update_stats = mlx5_ib_counter_update_stats,
+};
+
+static const struct ib_device_ops hw_switchdev_stats_ops = {
+	.alloc_hw_device_stats = mlx5_ib_alloc_hw_device_stats,
 	.get_hw_stats = mlx5_ib_get_hw_stats,
 	.counter_bind_qp = mlx5_ib_counter_bind_qp,
 	.counter_unbind_qp = mlx5_ib_counter_unbind_qp,
@@ -690,7 +707,10 @@ int mlx5_ib_counters_init(struct mlx5_ib_dev *dev)
 	if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
 		return 0;
 
-	ib_set_device_ops(&dev->ib_dev, &hw_stats_ops);
+	if (is_mdev_switchdev_mode(dev->mdev))
+		ib_set_device_ops(&dev->ib_dev, &hw_switchdev_stats_ops);
+	else
+		ib_set_device_ops(&dev->ib_dev, &hw_stats_ops);
 
 	return mlx5_ib_alloc_counters(dev);
 }
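
The hunks above replace a single alloc_hw_stats callback, which encoded the switchdev/non-switchdev split through a port_num sentinel (returning NULL for the flavor it did not serve), with two explicit ops tables chosen once at registration time. A standalone model of that dispatch, with illustrative names rather than driver code:

    #include <stdio.h>
    #include <stdbool.h>

    struct stats_ops {
            const char *(*alloc_device)(void);           /* device-wide stats */
            const char *(*alloc_port)(unsigned int port); /* per-port stats */
    };

    static const char *dev_alloc(void) { return "device-wide stats"; }
    static const char *port_alloc(unsigned int port) { (void)port; return "per-port stats"; }

    static const struct stats_ops switchdev_ops = { .alloc_device = dev_alloc };
    static const struct stats_ops port_ops = { .alloc_port = port_alloc };

    /* Registration-time choice; no NULL-returning sentinel left at runtime. */
    static const struct stats_ops *pick_ops(bool switchdev)
    {
            return switchdev ? &switchdev_ops : &port_ops;
    }

    int main(void)
    {
            printf("%s\n", pick_ops(true)->alloc_device());
            printf("%s\n", pick_ops(false)->alloc_port(1));
            return 0;
    }
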
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index eb92cefffd77..b8e5e371bb19 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -227,7 +227,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 	wc->dlid_path_bits = cqe->ml_path;
 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
 	wc->wc_flags |= g ? IB_WC_GRH : 0;
-	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
+	if (is_qp1(qp->type)) {
 		u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;
 
 		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
@@ -725,7 +725,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 		return -EFAULT;
 
 	if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
-			    MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX)))
+			    MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX |
+			    MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)))
 		return -EINVAL;
 
 	if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
@@ -750,7 +751,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 		goto err_umem;
 	}
 
-	err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db);
+	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db);
 	if (err)
 		goto err_umem;
 
@@ -826,6 +827,9 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
 	}
 
+	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)
+		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS;
+
 	MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
 	return 0;
 
@@ -849,15 +853,14 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
 	ib_umem_release(cq->buf.umem);
 }
 
-static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
-			     struct mlx5_ib_cq_buf *buf)
+static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
 {
 	int i;
 	void *cqe;
 	struct mlx5_cqe64 *cqe64;
 
 	for (i = 0; i < buf->nent; i++) {
-		cqe = get_cqe(cq, i);
+		cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
 		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
 		cqe64->op_own = MLX5_CQE_INVALID << 4;
 	}
@@ -883,7 +886,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	if (err)
 		goto err_db;
 
-	init_cq_frag_buf(cq, &cq->buf);
+	init_cq_frag_buf(&cq->buf);
 
 	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
 		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
@@ -942,7 +945,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	u32 *cqb = NULL;
 	void *cqc;
 	int cqe_size;
-	unsigned int irqn;
 	int eqn;
 	int err;
 
@@ -981,7 +983,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
 	}
 
-	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
+	err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
 	if (err)
 		goto err_cqb;
 
@@ -1004,7 +1006,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		goto err_cqb;
 
 	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
-	cq->mcq.irqn = irqn;
 	if (udata)
 		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
 	else
@@ -1184,7 +1185,7 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	if (err)
 		goto ex;
 
-	init_cq_frag_buf(cq, cq->resize_buf);
+	init_cq_frag_buf(cq->resize_buf);
 
 	return 0;
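
The extended create-CQ flags check above keeps the usual reject-unknown-bits idiom: anything outside the supported mask fails with -EINVAL, so an old kernel refuses a new userspace flag instead of silently ignoring it. A minimal standalone illustration (bit values are made up, not the uapi ones):

    #include <stdio.h>

    #define FLAG_CQE_128B_PAD   (1u << 0)   /* illustrative values */
    #define FLAG_UAR_PAGE_INDEX (1u << 1)
    #define FLAG_REAL_TIME_TS   (1u << 2)
    #define SUPPORTED (FLAG_CQE_128B_PAD | FLAG_UAR_PAGE_INDEX | FLAG_REAL_TIME_TS)

    static int check_flags(unsigned int flags)
    {
            if (flags & ~SUPPORTED)     /* any bit we don't know about? */
                    return -1;          /* -EINVAL in the driver */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check_flags(FLAG_REAL_TIME_TS)); /* 0: newly accepted */
            printf("%d\n", check_flags(1u << 7));           /* -1: unknown bit */
            return 0;
    }
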
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index a0b677accd96..c869b2a91a28 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -630,9 +630,8 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
 	case UVERBS_OBJECT_QP:
 	{
 		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
-		enum ib_qp_type qp_type = qp->ibqp.qp_type;
 
-		if (qp_type == IB_QPT_RAW_PACKET ||
+		if (qp->type == IB_QPT_RAW_PACKET ||
 		    (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
 			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
 				&qp->raw_packet_qp;
@@ -649,10 +648,9 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
 					       sq->tisn) == obj_id);
 		}
 
-		if (qp_type == MLX5_IB_QPT_DCT)
+		if (qp->type == MLX5_IB_QPT_DCT)
 			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
 					      qp->dct.mdct.mqp.qpn) == obj_id;
-
 		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 				      qp->ibqp.qp_num) == obj_id;
 	}
@@ -977,7 +975,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
 	struct mlx5_ib_dev *dev;
 	int user_vector;
 	int dev_eqn;
-	unsigned int irqn;
 	int err;
 
 	if (uverbs_copy_from(&user_vector, attrs,
@@ -989,7 +986,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
 		return PTR_ERR(c);
 	dev = to_mdev(c->ibucontext.device);
 
-	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
+	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
 	if (err < 0)
 		return err;
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
index 094bf85589db..001d766cf291 100644
--- a/drivers/infiniband/hw/mlx5/dm.c
+++ b/drivers/infiniband/hw/mlx5/dm.c
@@ -217,6 +217,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
 	if (err)
 		return err;
 
+	if (op >= BITS_PER_TYPE(u32))
+		return -EOPNOTSUPP;
+
 	if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
 		return -EOPNOTSUPP;
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 61475b571531..9ca2e61807ec 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -41,10 +41,10 @@ struct mlx5_ib_user_db_page {
 	struct ib_umem *umem;
 	unsigned long user_virt;
 	int refcnt;
+	struct mm_struct *mm;
 };
 
-int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
-			struct ib_udata *udata, unsigned long virt,
+int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
 			struct mlx5_db *db)
 {
 	struct mlx5_ib_user_db_page *page;
@@ -53,7 +53,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
 	mutex_lock(&context->db_page_mutex);
 
 	list_for_each_entry(page, &context->db_page_list, list)
-		if (page->user_virt == (virt & PAGE_MASK))
+		if ((current->mm == page->mm) &&
+		    (page->user_virt == (virt & PAGE_MASK)))
 			goto found;
 
 	page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -71,6 +72,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
 		kfree(page);
 		goto out;
 	}
+	mmgrab(current->mm);
+	page->mm = current->mm;
 
 	list_add(&page->list, &context->db_page_list);
 
@@ -91,6 +94,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
 	if (!--db->u.user_page->refcnt) {
 		list_del(&db->u.user_page->list);
+		mmdrop(db->u.user_page->mm);
 		ib_umem_release(db->u.user_page->umem);
 		kfree(db->u.user_page);
 	}
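
The doorbell change above keys the cached-page lookup on the (mm, virt) pair and pins the mm with mmgrab(), so a child that inherited the ucontext over fork() cannot alias the parent's doorbell page just because the virtual address matches. A small model of the lookup, with plain pointers standing in for mm_struct:

    #include <stdio.h>

    struct db_page { const void *mm; unsigned long virt; };

    /* Hit only when both the address space and the VA match. */
    static int find_page(const struct db_page *cache, int n,
                         const void *mm, unsigned long virt)
    {
            for (int i = 0; i < n; i++)
                    if (cache[i].mm == mm && cache[i].virt == virt)
                            return i;
            return -1;
    }

    int main(void)
    {
            int parent_mm, child_mm;
            struct db_page cache[] = { { &parent_mm, 0x1000 } };

            printf("%d\n", find_page(cache, 1, &parent_mm, 0x1000)); /* 0: hit */
            printf("%d\n", find_page(cache, 1, &child_mm, 0x1000));  /* -1: child gets its own page */
            return 0;
    }
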
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 2fc6a60c4e77..5fbc0a8454b9 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -1194,9 +1194,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 		goto free_ucmd;
 	}
 
-	if (flow_attr->port > dev->num_ports ||
-	    (flow_attr->flags &
-	     ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) {
+	if (flow_attr->flags &
+	    ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) {
 		err = -EINVAL;
 		goto free_ucmd;
 	}
@@ -2134,6 +2133,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
 	if (err)
 		goto end;
 
+	if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+	    mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) {
+		err = -EINVAL;
+		goto end;
+	}
+
 	uobj->object = obj;
 	obj->mdev = dev->mdev;
 	atomic_set(&obj->usecnt, 0);
@@ -2280,6 +2285,7 @@ static int mlx5_ib_flow_action_create_packet_reformat_ctx(
 	u8 ft_type, u8 dv_prt,
 	void *in, size_t len)
 {
+	struct mlx5_pkt_reformat_params reformat_params;
 	enum mlx5_flow_namespace_type namespace;
 	u8 prm_prt;
 	int ret;
@@ -2292,9 +2298,13 @@ static int mlx5_ib_flow_action_create_packet_reformat_ctx(
 	if (ret)
 		return ret;
 
+	memset(&reformat_params, 0, sizeof(reformat_params));
+	reformat_params.type = prm_prt;
+	reformat_params.size = len;
+	reformat_params.data = in;
 	maction->flow_action_raw.pkt_reformat =
-		mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
-					   in, namespace);
+		mlx5_packet_reformat_alloc(dev->mdev, &reformat_params,
+					   namespace);
 	if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
 		ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
 		return ret;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 6d1dd09a4388..094c976b1eed 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1816,7 +1816,17 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
 	if (MLX5_CAP_GEN(dev->mdev, ece_support))
 		resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
 
+	if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) &&
+	    rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) &&
+	    rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format)))
+		resp->comp_mask |=
+			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS;
+
 	resp->num_dyn_bfregs = bfregi->num_dyn_bfregs;
+
+	if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
+		resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS;
+
 	return 0;
 }
 
@@ -3178,8 +3188,6 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
 
 	port->mp.mpi = NULL;
 
-	list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
-
 	spin_unlock(&port->mp.mpi_lock);
 
 	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
@@ -3327,7 +3335,10 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
 			} else {
 				mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
 					    i + 1);
-				mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
+				list_add_tail(&dev->port[i].mp.mpi->list,
+					      &mlx5_ib_unaffiliated_port_list);
+				mlx5_ib_unbind_slave_port(dev,
+							  dev->port[i].mp.mpi);
 			}
 		}
 	}
@@ -3738,6 +3749,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
 	.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
 	.drain_rq = mlx5_ib_drain_rq,
 	.drain_sq = mlx5_ib_drain_sq,
+	.device_group = &mlx5_attr_group,
 	.enable_driver = mlx5_ib_enable_driver,
 	.get_dev_fw_str = get_dev_fw_str,
 	.get_dma_mr = mlx5_ib_get_dma_mr,
@@ -4025,7 +4037,6 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
 {
 	const char *name;
 
-	rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
 	if (!mlx5_lag_is_roce(dev->mdev))
 		name = "mlx5_%d";
 	else
@@ -4419,6 +4430,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
 
 		if (bound) {
 			rdma_roce_rescan_device(&dev->ib_dev);
+			mpi->ibdev->ib_active = true;
 			break;
 		}
 	}
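
set_ucontext_resp() above follows the usual comp_mask negotiation pattern: a response bit is advertised only when every capability backing the feature is present, and userspace tests the bit rather than guessing from the kernel version. A compact model of that rule, with illustrative bit values:

    #include <stdio.h>
    #include <stdbool.h>

    #define RESP_MASK_REAL_TIME_TS (1u << 0)   /* illustrative bits */
    #define RESP_MASK_SQD2RTS      (1u << 1)

    static unsigned int build_comp_mask(bool sq_rt, bool rq_rt, bool qp_rt,
                                        bool drain_sigerr)
    {
            unsigned int mask = 0;

            if (sq_rt && rq_rt && qp_rt)    /* all three caps required */
                    mask |= RESP_MASK_REAL_TIME_TS;
            if (drain_sigerr)
                    mask |= RESP_MASK_SQD2RTS;
            return mask;
    }

    int main(void)
    {
            /* qp_ts_format lacks real-time support: only SQD2RTS is advertised */
            printf("%#x\n", build_comp_mask(true, true, false, true)); /* 0x2 */
            return 0;
    }
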
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e9a3f34a30b8..585fb00bdce8 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -512,7 +512,6 @@ struct mlx5_ib_qp {
 	/*
 	 * IB/core doesn't store low-level QP types, so
 	 * store both MLX and IBTA types in the field below.
-	 * IB_QPT_DRIVER will be break to DCI/DCT subtypes.
 	 */
 	enum ib_qp_type type;
 	/* A flag to indicate if there's a new counter is configured
@@ -550,6 +549,7 @@ static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
 
 enum mlx5_ib_cq_pr_flags {
 	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
+	MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
 };
 
 struct mlx5_ib_cq {
@@ -1198,8 +1198,7 @@ to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
 		struct mlx5_user_mmap_entry, rdma_entry);
 }
 
-int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
-			struct ib_udata *udata, unsigned long virt,
+int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
 			struct mlx5_db *db);
 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
@@ -1265,7 +1264,6 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 		       int page_shift, int flags);
 int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
 struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
-					     struct ib_udata *udata,
 					     int access_flags);
 void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
 void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
@@ -1614,4 +1612,10 @@ static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
 	       (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
 		MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
 }
+
+static inline bool rt_supported(int ts_cap)
+{
+	return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
+	       ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+}
 #endif /* MLX5_IB_H */
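
rt_supported() added above, together with the fr_supported() helper introduced in qp.c later in this patch, forms a pair of predicates over the same timestamp capability field. A standalone truth table (the enum values are illustrative, not taken from mlx5_ifc.h):

    #include <stdio.h>
    #include <stdbool.h>

    enum ts_cap { CAP_FREE_RUNNING, CAP_REAL_TIME, CAP_FREE_RUNNING_AND_REAL_TIME };

    static bool rt_supported(int c)
    {
            return c == CAP_REAL_TIME || c == CAP_FREE_RUNNING_AND_REAL_TIME;
    }

    static bool fr_supported(int c)
    {
            return c == CAP_FREE_RUNNING || c == CAP_FREE_RUNNING_AND_REAL_TIME;
    }

    int main(void)
    {
            for (int c = CAP_FREE_RUNNING; c <= CAP_FREE_RUNNING_AND_REAL_TIME; c++)
                    printf("cap=%d fr=%d rt=%d\n", c, fr_supported(c), rt_supported(c));
            return 0;   /* combined cap satisfies both predicates */
    }
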
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 4388afeff251..3f1c5a4f158b 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -68,6 +68,7 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
 					  struct ib_pd *pd)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	bool ro_pci_enabled = pcie_relaxed_ordering_enabled(dev->mdev->pdev);
 
 	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
 	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
@@ -77,10 +78,10 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
 
 	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
 		MLX5_SET(mkc, mkc, relaxed_ordering_write,
-			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
+			 (acc & IB_ACCESS_RELAXED_ORDERING) && ro_pci_enabled);
 	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
 		MLX5_SET(mkc, mkc, relaxed_ordering_read,
-			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
+			 (acc & IB_ACCESS_RELAXED_ORDERING) && ro_pci_enabled);
 
 	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
@@ -530,8 +531,8 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 		 */
 		spin_unlock_irq(&ent->lock);
 		need_delay = need_resched() || someone_adding(cache) ||
-			     time_after(jiffies,
-					READ_ONCE(cache->last_add) + 300 * HZ);
+			     !time_after(jiffies,
+					 READ_ONCE(cache->last_add) + 300 * HZ);
 		spin_lock_irq(&ent->lock);
 		if (ent->disabled)
 			goto out;
@@ -743,10 +744,10 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
 			   MLX5_IB_UMR_OCTOWORD;
 		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
-		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
+		if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
 		    !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
 		    mlx5_ib_can_load_pas_with_umr(dev, 0))
-			ent->limit = dev->mdev->profile->mr_cache[i].limit;
+			ent->limit = dev->mdev->profile.mr_cache[i].limit;
 		else
 			ent->limit = 0;
 		spin_lock_irq(&ent->lock);
@@ -811,7 +812,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
 
 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
 	MLX5_SET(mkc, mkc, length64, 1);
-	set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);
+	set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0,
+				      pd);
 
 	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
 	if (err)
@@ -1510,7 +1512,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
 		if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
 			return ERR_PTR(-EINVAL);
 
-		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
+		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
 		if (IS_ERR(mr))
 			return ERR_CAST(mr);
 		return &mr->ibmr;
@@ -1940,8 +1942,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 		mlx5r_deref_wait_odp_mkey(&mr->mmkey);
 
 	if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
-		xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), ibmr,
-			   NULL, GFP_KERNEL);
+		xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+			   mr->sig, NULL, GFP_KERNEL);
 
 		if (mr->mtt_mr) {
 			rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
@@ -2010,7 +2012,7 @@ static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 
 	/* This is only used from the kernel, so setting the PD is OK. */
-	set_mkc_access_pd_addr_fields(mkc, 0, 0, pd);
+	set_mkc_access_pd_addr_fields(mkc, IB_ACCESS_RELAXED_ORDERING, 0, pd);
 	MLX5_SET(mkc, mkc, free, 1);
 	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
 	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 782b2af8f211..d0d98e584ebc 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -418,7 +418,7 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
 	if (IS_ERR(odp))
 		return ERR_CAST(odp);
 
-	ret = mr = mlx5_mr_cache_alloc(
+	mr = mlx5_mr_cache_alloc(
 		mr_to_mdev(imr), MLX5_IMR_MTT_CACHE_ENTRY, imr->access_flags);
 	if (IS_ERR(mr)) {
 		ib_umem_odp_release(odp);
@@ -478,7 +478,6 @@ out_mr:
 }
 
 struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
-					     struct ib_udata *udata,
 					     int access_flags)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
@@ -1096,7 +1095,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 	opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
 		 MLX5_WQE_CTRL_OPCODE_MASK;
 
-	if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
+	if (qp->type == IB_QPT_XRC_INI)
 		*wqe += sizeof(struct mlx5_wqe_xrc_seg);
 
 	if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
@@ -1559,12 +1558,16 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 	}
 
 	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
-	param = (struct mlx5_eq_param){
-		.irq_index = 0,
+	param = (struct mlx5_eq_param) {
 		.nent = MLX5_IB_NUM_PF_EQE,
 	};
 	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
+	if (!zalloc_cpumask_var(&param.affinity, GFP_KERNEL)) {
+		err = -ENOMEM;
+		goto err_wq;
+	}
 	eq->core = mlx5_eq_create_generic(dev->mdev, &param);
+	free_cpumask_var(param.affinity);
 	if (IS_ERR(eq->core)) {
 		err = PTR_ERR(eq->core);
 		goto err_wq;
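
The EQ change above allocates param.affinity only for the duration of mlx5_eq_create_generic() and frees it on every path, on the understanding that the callee is finished with the mask by the time it returns. The same shape in a generic sketch (nothing here is mlx5 API):

    #include <stdlib.h>

    struct eq { unsigned long affinity_copy; };

    /* The constructor consumes the mask before returning. */
    static struct eq *eq_create(const unsigned long *affinity)
    {
            struct eq *eq = malloc(sizeof(*eq));
            if (eq)
                    eq->affinity_copy = *affinity;
            return eq;
    }

    static struct eq *eq_create_with_affinity(void)
    {
            unsigned long *mask = calloc(1, sizeof(*mask));
            struct eq *eq;

            if (!mask)
                    return NULL;    /* -ENOMEM in the driver */
            eq = eq_create(mask);
            free(mask);             /* freed whether or not create succeeded */
            return eq;
    }

    int main(void)
    {
            struct eq *eq = eq_create_with_affinity();
            free(eq);
            return eq ? 0 : 1;
    }
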
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 9282eb10bfae..a77db29f8391 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -835,7 +835,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		    ib_umem_num_pages(rwq->umem), page_size, rwq->rq_num_pas,
 		    offset);
 
-	err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
+	err = mlx5_ib_db_map_user(ucontext, ucmd->db_addr, &rwq->db);
 	if (err) {
 		mlx5_ib_dbg(dev, "map failed\n");
 		goto err_umem;
@@ -961,7 +961,7 @@ static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		resp->bfreg_index = MLX5_IB_INVALID_BFREG;
 	qp->bfregn = bfregn;
 
-	err = mlx5_ib_db_map_user(context, udata, ucmd->db_addr, &qp->db);
+	err = mlx5_ib_db_map_user(context, ucmd->db_addr, &qp->db);
 	if (err) {
 		mlx5_ib_dbg(dev, "map failed\n");
 		goto err_free;
@@ -1173,69 +1173,79 @@ static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
 	sq->flow_rule = NULL;
 }
 
-static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
+static bool fr_supported(int ts_cap)
 {
-	bool fr_supported =
-		MLX5_CAP_GEN(dev->mdev, rq_ts_format) ==
-			MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
-		MLX5_CAP_GEN(dev->mdev, rq_ts_format) ==
-			MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+	return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
+	       ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+}
 
-	if (send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
-		if (!fr_supported) {
-			mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
+static int get_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+			 bool fr_sup, bool rt_sup)
+{
+	if (cq->private_flags & MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS) {
+		if (!rt_sup) {
+			mlx5_ib_dbg(dev,
+				    "Real time TS format is not supported\n");
 			return -EOPNOTSUPP;
 		}
-		return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
+		return MLX5_TIMESTAMP_FORMAT_REAL_TIME;
 	}
-	return fr_supported ? MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING :
-			      MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
+	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
+		if (!fr_sup) {
+			mlx5_ib_dbg(dev,
+				    "Free running TS format is not supported\n");
+			return -EOPNOTSUPP;
+		}
+		return MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+	}
+	return fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
+			MLX5_TIMESTAMP_FORMAT_DEFAULT;
+}
+
+static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *recv_cq)
+{
+	u8 ts_cap = MLX5_CAP_GEN(dev->mdev, rq_ts_format);
+
+	return get_ts_format(dev, recv_cq, fr_supported(ts_cap),
+			     rt_supported(ts_cap));
 }
 
 static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
 {
-	bool fr_supported =
-		MLX5_CAP_GEN(dev->mdev, sq_ts_format) ==
-			MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
-		MLX5_CAP_GEN(dev->mdev, sq_ts_format) ==
-			MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+	u8 ts_cap = MLX5_CAP_GEN(dev->mdev, sq_ts_format);
 
-	if (send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
-		if (!fr_supported) {
-			mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
-			return -EOPNOTSUPP;
-		}
-		return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
-	}
-	return fr_supported ? MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING :
-			      MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
+	return get_ts_format(dev, send_cq, fr_supported(ts_cap),
+			     rt_supported(ts_cap));
 }
 
 static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
 			    struct mlx5_ib_cq *recv_cq)
 {
-	bool fr_supported =
-		MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
-			MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
-		MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
-			MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
-	int ts_format = fr_supported ? MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING :
-				       MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
-
-	if (recv_cq &&
-	    recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
-		ts_format = MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING;
-
-	if (send_cq &&
-	    send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
-		ts_format = MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING;
-
-	if (ts_format == MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING &&
-	    !fr_supported) {
-		mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
+	u8 ts_cap = MLX5_CAP_ROCE(dev->mdev, qp_ts_format);
+	bool fr_sup = fr_supported(ts_cap);
+	bool rt_sup = rt_supported(ts_cap);
+	u8 default_ts = fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
+				 MLX5_TIMESTAMP_FORMAT_DEFAULT;
+	int send_ts_format =
+		send_cq ? get_ts_format(dev, send_cq, fr_sup, rt_sup) :
+			  default_ts;
+	int recv_ts_format =
+		recv_cq ? get_ts_format(dev, recv_cq, fr_sup, rt_sup) :
+			  default_ts;
+
+	if (send_ts_format < 0 || recv_ts_format < 0)
+		return -EOPNOTSUPP;
+
+	if (send_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT &&
+	    recv_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT &&
+	    send_ts_format != recv_ts_format) {
+		mlx5_ib_dbg(
+			dev,
+			"The send ts_format does not match the receive ts_format\n");
 		return -EOPNOTSUPP;
 	}
-	return ts_format;
+
+	return send_ts_format == default_ts ? recv_ts_format : send_ts_format;
 }
 
 static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
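
The consolidated get_ts_format()/get_qp_ts_format() logic above reduces to a small decision procedure: each CQ independently resolves a format from its flags and the device capabilities, and the QP then requires the send and receive formats to agree unless one side stayed at the default. A standalone model of that rule (constants are illustrative, not the mlx5_ifc values):

    #include <stdio.h>

    enum { TS_DEFAULT, TS_FREE_RUNNING, TS_REAL_TIME, TS_ERR = -95 /* -EOPNOTSUPP */ };

    /* One CQ's resolution, mirroring get_ts_format() above. */
    static int cq_ts(int wants_rt, int wants_fr, int fr_sup, int rt_sup)
    {
            if (wants_rt)
                    return rt_sup ? TS_REAL_TIME : TS_ERR;
            if (wants_fr)
                    return fr_sup ? TS_FREE_RUNNING : TS_ERR;
            return fr_sup ? TS_FREE_RUNNING : TS_DEFAULT;
    }

    /* QP-level agreement rule, mirroring get_qp_ts_format() above. */
    static int qp_ts(int send, int recv, int fr_sup)
    {
            int def = fr_sup ? TS_FREE_RUNNING : TS_DEFAULT;

            if (send < 0 || recv < 0)
                    return TS_ERR;
            if (send != TS_DEFAULT && recv != TS_DEFAULT && send != recv)
                    return TS_ERR;  /* both sides insisted on different formats */
            return send == def ? recv : send;
    }

    int main(void)
    {
            int fr = 1, rt = 1;

            printf("%d\n", qp_ts(cq_ts(1, 0, fr, rt), cq_ts(1, 0, fr, rt), fr)); /* 2: both real-time */
            printf("%d\n", qp_ts(cq_ts(1, 0, fr, rt), cq_ts(0, 1, fr, rt), fr)); /* -95: mismatch */
            return 0;
    }
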
@@ -3089,7 +3099,7 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
 	struct mlx5_ib_qp *mqp = to_mqp(qp);
 
-	if (unlikely(qp->qp_type == IB_QPT_GSI))
+	if (mqp->type == IB_QPT_GSI)
 		return mlx5_ib_destroy_gsi(mqp);
 
 	if (mqp->type == MLX5_IB_QPT_DCT)
@@ -3128,7 +3138,7 @@ static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp,
 	if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
 		int atomic_mode;
 
-		atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
+		atomic_mode = get_atomic_mode(dev, qp->type);
 		if (atomic_mode < 0)
 			return -EOPNOTSUPP;
 
@@ -3300,10 +3310,10 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 		ether_addr_copy(MLX5_ADDR_OF(ads, path, rmac_47_32),
 				ah->roce.dmac);
 
-		if ((qp->ibqp.qp_type == IB_QPT_RC ||
-		     qp->ibqp.qp_type == IB_QPT_UC ||
-		     qp->ibqp.qp_type == IB_QPT_XRC_INI ||
-		     qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
+		if ((qp->type == IB_QPT_RC ||
+		     qp->type == IB_QPT_UC ||
+		     qp->type == IB_QPT_XRC_INI ||
+		     qp->type == IB_QPT_XRC_TGT) &&
 		    (grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
 		    (attr_mask & IB_QP_DEST_QPN))
 			mlx5_set_path_udp_sport(path, ah,
@@ -3342,7 +3352,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	MLX5_SET(ads, path, ack_timeout,
 		 alt ? attr->alt_timeout : attr->timeout);
 
-	if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
+	if ((qp->type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
 		return modify_raw_packet_eth_prio(dev->mdev,
 						  &qp->raw_packet_qp.sq,
 						  sl & 0xf, qp->ibqp.pd);
@@ -3453,6 +3463,17 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
 			  MLX5_QP_OPTPAR_RRE,
 		},
 	},
+	[MLX5_QP_STATE_SQD] = {
+		[MLX5_QP_STATE_RTS] = {
+			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
+			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
+			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+					  MLX5_QP_OPTPAR_RWE |
+					  MLX5_QP_OPTPAR_RAE |
+					  MLX5_QP_OPTPAR_RRE,
+		},
+	},
 };
 
 static int ib_nr_to_mlx5_nr(int ib_mask)
@@ -3848,6 +3869,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		[MLX5_QP_STATE_SQD] = {
 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
+			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQD_RTS_QP,
 		},
 		[MLX5_QP_STATE_SQER] = {
 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
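
The two hunks above wire up the SQD to RTS arc: the optpar table lists which attributes may change on that transition, and the state table maps it to MLX5_CMD_OP_SQD_RTS_QP (allocated in qpc.c below). Not part of this patch, but a hedged sketch of how a kernel consumer could drive the new arc through the standard verbs call:

    /* Sketch only: assumes a QP currently in RTS on a device that
     * advertises the SQD2RTS capability. */
    #include <rdma/ib_verbs.h>

    static int drain_then_resume(struct ib_qp *qp)
    {
            struct ib_qp_attr attr = {};
            int ret;

            attr.qp_state = IB_QPS_SQD;     /* quiesce the send queue */
            ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
            if (ret)
                    return ret;

            attr.qp_state = IB_QPS_RTS;     /* resume without a full reset cycle */
            return ib_modify_qp(qp, &attr, IB_QP_STATE);
    }
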
@@ -3910,12 +3932,12 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	    MLX5_CAP_GEN(dev->mdev, init2_lag_tx_port_affinity))
 		optpar |= MLX5_QP_OPTPAR_LAG_TX_AFF;
 
-	if (is_sqp(ibqp->qp_type)) {
+	if (is_sqp(qp->type)) {
 		MLX5_SET(qpc, qpc, mtu, IB_MTU_256);
 		MLX5_SET(qpc, qpc, log_msg_max, 8);
-	} else if ((ibqp->qp_type == IB_QPT_UD &&
+	} else if ((qp->type == IB_QPT_UD &&
 		    !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) ||
-		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
+		   qp->type == MLX5_IB_QPT_REG_UMR) {
 		MLX5_SET(qpc, qpc, mtu, IB_MTU_4096);
 		MLX5_SET(qpc, qpc, log_msg_max, 12);
 	} else if (attr_mask & IB_QP_PATH_MTU) {
@@ -3941,7 +3963,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 
 	/* todo implement counter_index functionality */
 
-	if (is_sqp(ibqp->qp_type))
+	if (is_sqp(qp->type))
 		MLX5_SET(ads, pri_path, vhca_port_num, qp->port);
 
 	if (attr_mask & IB_QP_PORT)
@@ -3969,7 +3991,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 			goto out;
 	}
 
-	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
+	get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
 		&send_cq, &recv_cq);
 
 	MLX5_SET(qpc, qpc, pd, pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
@@ -4048,7 +4070,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	optpar |= ib_mask_to_mlx5_opt(attr_mask);
 	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
 
-	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+	if (qp->type == IB_QPT_RAW_PACKET ||
 	    qp->flags & IB_QP_CREATE_SOURCE_QPN) {
 		struct mlx5_modify_raw_qp_param raw_qp_param = {};
 
@@ -4121,7 +4143,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	 * entries and reinitialize the QP.
 	 */
 	if (new_state == IB_QPS_RESET &&
-	    !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
+	    !ibqp->uobject && qp->type != IB_QPT_XRC_TGT) {
 		mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
 				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
 		if (send_cq != recv_cq)
@@ -4314,13 +4336,12 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 }
 
 static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
-				      struct mlx5_ib_qp *qp,
-				      enum ib_qp_type qp_type)
+				      struct mlx5_ib_qp *qp)
 {
 	if (dev->profile != &raw_eth_profile)
 		return true;
 
-	if (qp_type == IB_QPT_RAW_PACKET || qp_type == MLX5_IB_QPT_REG_UMR)
+	if (qp->type == IB_QPT_RAW_PACKET || qp->type == MLX5_IB_QPT_REG_UMR)
 		return true;
 
 	/* Internal QP used for wc testing, with NOPs in wq */
@@ -4341,7 +4362,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	enum ib_qp_state cur_state, new_state;
 	int err = -EINVAL;
 
-	if (!mlx5_ib_modify_qp_allowed(dev, qp, ibqp->qp_type))
+	if (!mlx5_ib_modify_qp_allowed(dev, qp))
 		return -EOPNOTSUPP;
 
 	if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
@@ -4370,11 +4391,10 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	}
 
-	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+	if (qp->type == IB_QPT_GSI)
 		return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
 
-	qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ? IB_QPT_GSI :
-								    qp->type;
+	qp_type = (qp->type == MLX5_IB_QPT_HW_GSI) ? IB_QPT_GSI : qp->type;
 
 	if (qp_type == MLX5_IB_QPT_DCT)
 		return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
@@ -4395,7 +4415,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	    !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask)) {
 		mlx5_ib_dbg(dev,
 			    "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
-			    cur_state, new_state, ibqp->qp_type, attr_mask);
+			    cur_state, new_state, qp->type, attr_mask);
 		goto out;
 	} else if (qp_type == MLX5_IB_QPT_DCI &&
 		   !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
@@ -4668,9 +4688,8 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
 	alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
 
-	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC ||
-	    qp->ibqp.qp_type == IB_QPT_XRC_INI ||
-	    qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
+	if (qp->type == IB_QPT_RC || qp->type == IB_QPT_UC ||
+	    qp->type == IB_QPT_XRC_INI || qp->type == IB_QPT_XRC_TGT) {
 		to_rdma_ah_attr(dev, &qp_attr->ah_attr, pri_path);
 		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, alt_path);
 		qp_attr->alt_pkey_index = MLX5_GET(ads, alt_path, pkey_index);
@@ -4763,7 +4782,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	if (ibqp->rwq_ind_tbl)
 		return -ENOSYS;
 
-	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+	if (qp->type == IB_QPT_GSI)
 		return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
 					    qp_init_attr);
 
@@ -4777,7 +4796,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 
 	mutex_lock(&qp->mutex);
 
-	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+	if (qp->type == IB_QPT_RAW_PACKET ||
 	    qp->flags & IB_QP_CREATE_SOURCE_QPN) {
 		err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
 		if (err)
@@ -4804,7 +4823,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 		qp_attr->cap.max_send_sge = 0;
 	}
 
-	qp_init_attr->qp_type = ibqp->qp_type;
+	qp_init_attr->qp_type = qp->type;
 	qp_init_attr->recv_cq = ibqp->recv_cq;
 	qp_init_attr->send_cq = ibqp->send_cq;
 	qp_init_attr->srq = ibqp->srq;
@@ -5309,10 +5328,8 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 
 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
 
-	curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
-		wq_attr->curr_wq_state : wq->state;
-	wq_state = (wq_attr_mask & IB_WQ_STATE) ?
-		wq_attr->wq_state : curr_wq_state;
+	curr_wq_state = wq_attr->curr_wq_state;
+	wq_state = wq_attr->wq_state;
 	if (curr_wq_state == IB_WQS_ERR)
 		curr_wq_state = MLX5_RQC_STATE_ERR;
 	if (wq_state == IB_WQS_ERR)
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index c683d7000168..8844eacf2380 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -441,6 +441,12 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
 		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
 				  opt_param_mask, qpc, uid);
 		break;
+	case MLX5_CMD_OP_SQD_RTS_QP:
+		if (MBOX_ALLOC(mbox, sqd2rts_qp))
+			return -ENOMEM;
+		MOD_QP_IN_SET_QPC(sqd2rts_qp, mbox->in, opcode, qpn,
+				  opt_param_mask, qpc, uid);
+		break;
 	case MLX5_CMD_OP_INIT2INIT_QP:
 		if (MBOX_ALLOC(mbox, init2init_qp))
 			return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index fab6736e4d6a..191c4ee7db62 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -84,7 +84,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 	}
 	in->umem = srq->umem;
 
-	err = mlx5_ib_db_map_user(ucontext, udata, ucmd.db_addr, &srq->db);
+	err = mlx5_ib_db_map_user(ucontext, ucmd.db_addr, &srq->db);
 	if (err) {
 		mlx5_ib_dbg(dev, "map doorbell failed\n");
 		goto err_umem;
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index cf2852cba45c..8841620af82f 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -866,7 +866,10 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 	bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
 	u8 flags = 0;
 
-	/* Matches access in mlx5_set_umr_free_mkey() */
+	/* Matches access in mlx5_set_umr_free_mkey().
+	 * Relaxed Ordering is set implicitly in mlx5_set_umr_free_mkey() and
+	 * kernel ULPs are not aware of it, so we don't set it here.
+	 */
 	if (!mlx5_ib_can_reconfig_with_umr(dev, 0, wr->access)) {
 		mlx5_ib_warn(
 			to_mdev(qp->ibqp.device),
@@ -1278,7 +1281,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx5_core_dev *mdev = dev->mdev;
-	struct mlx5_ib_qp *qp;
+	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	struct mlx5_wqe_xrc_seg *xrc;
 	struct mlx5_bf *bf;
 	void *cur_edge;
@@ -1299,10 +1302,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 		return -EIO;
 	}
 
-	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+	if (qp->type == IB_QPT_GSI)
 		return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
 
-	qp = to_mqp(ibqp);
 	bf = &qp->bf;
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1347,7 +1349,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 			}
 		}
 
-		switch (ibqp->qp_type) {
+		switch (qp->type) {
 		case IB_QPT_XRC_INI:
 			xrc = seg;
 			seg += sizeof(*xrc);
@@ -1476,7 +1478,7 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 		return -EIO;
 	}
 
-	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+	if (qp->type == IB_QPT_GSI)
 		return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
 
 	spin_lock_irqsave(&qp->rq.lock, flags);
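
The wr.c comment above pairs with the mr.c change earlier in this patch: relaxed ordering is turned on inside mlx5_set_umr_free_mkey() only when the caller requested it and pcie_relaxed_ordering_enabled() says the platform allows it, and UMR work requests deliberately leave the bit alone. The gating condition as a standalone check (the access bit value is made up):

    #include <stdbool.h>
    #include <stdio.h>

    #define ACCESS_RELAXED_ORDERING (1u << 5)   /* illustrative bit */

    /* Mirrors the mkc relaxed_ordering_read/write gating above. */
    static bool mkey_ro(unsigned int acc, bool pcie_ro_enabled)
    {
            return (acc & ACCESS_RELAXED_ORDERING) && pcie_ro_enabled;
    }

    int main(void)
    {
            printf("%d\n", mkey_ro(ACCESS_RELAXED_ORDERING, true));  /* 1 */
            printf("%d\n", mkey_ro(ACCESS_RELAXED_ORDERING, false)); /* 0: platform forbids RO */
            printf("%d\n", mkey_ro(0, true));                        /* 0: caller didn't opt in */
            return 0;
    }
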