Diffstat (limited to 'drivers/infiniband/hw/bnxt_re/ib_verbs.c')
-rw-r--r-- | drivers/infiniband/hw/bnxt_re/ib_verbs.c | 255
1 file changed, 181 insertions, 74 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 03cc45a5458d..0848c2c2ffcf 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -284,7 +284,7 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
 		      int index, union ib_gid *gid)
 {
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
-	int rc = 0;
+	int rc;
 
 	/* Ignore port_num */
 	memset(gid, 0, sizeof(*gid));
@@ -565,6 +565,8 @@ bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
 		break;
 	case BNXT_RE_MMAP_UC_DB:
 	case BNXT_RE_MMAP_WC_DB:
+	case BNXT_RE_MMAP_DBR_BAR:
+	case BNXT_RE_MMAP_DBR_PAGE:
 		ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
 						  &entry->rdma_entry, PAGE_SIZE);
 		break;
@@ -600,7 +602,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
 		if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
 					   &rdev->qplib_res.pd_tbl,
 					   &pd->qplib_pd))
-			atomic_dec(&rdev->pd_count);
+			atomic_dec(&rdev->stats.res.pd_count);
 	}
 	return 0;
 }
@@ -613,10 +615,11 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 		udata, struct bnxt_re_ucontext, ib_uctx);
 	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_user_mmap_entry *entry = NULL;
+	u32 active_pds;
 	int rc = 0;
 
 	pd->rdev = rdev;
-	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
+	if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
 		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
 		rc = -ENOMEM;
 		goto fail;
@@ -663,7 +666,9 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	if (bnxt_re_create_fence_mr(pd))
 		ibdev_warn(&rdev->ibdev, "Failed to create Fence-MR\n");
-	atomic_inc(&rdev->pd_count);
+	active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
+	if (active_pds > rdev->stats.res.pd_watermark)
+		rdev->stats.res.pd_watermark = active_pds;
 	return 0;
 dbfail:
@@ -679,7 +684,7 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
 	struct bnxt_re_dev *rdev = ah->rdev;
 	bool block = true;
-	int rc = 0;
+	int rc;
 
 	block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
 	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
@@ -689,7 +694,7 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
 		else
 			goto fail;
 	}
-	atomic_dec(&rdev->ah_count);
+	atomic_dec(&rdev->stats.res.ah_count);
 fail:
 	return rc;
 }
@@ -723,6 +728,7 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
 	const struct ib_gid_attr *sgid_attr;
 	struct bnxt_re_gid_ctx *ctx;
 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
+	u32 active_ahs;
 	u8 nw_type;
 	int rc;
 
@@ -775,7 +781,9 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
 		wmb(); /* make sure cache is updated. */
 		spin_unlock_irqrestore(&uctx->sh_lock, flag);
 	}
-	atomic_inc(&rdev->ah_count);
+	active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
+	if (active_ahs > rdev->stats.res.ah_watermark)
+		rdev->stats.res.ah_watermark = active_ahs;
 
 	return 0;
 }
@@ -826,7 +834,7 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
 	struct bnxt_re_qp *gsi_sqp;
 	struct bnxt_re_ah *gsi_sah;
 	struct bnxt_re_dev *rdev;
-	int rc = 0;
+	int rc;
 
 	rdev = qp->rdev;
 	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
@@ -836,7 +844,7 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
 
 	bnxt_qplib_destroy_ah(&rdev->qplib_res, &gsi_sah->qplib_ah,
 			      true);
-	atomic_dec(&rdev->ah_count);
+	atomic_dec(&rdev->stats.res.ah_count);
 	bnxt_qplib_clean_qp(&qp->qplib_qp);
 
 	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
@@ -851,7 +859,7 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
 	mutex_lock(&rdev->qp_lock);
 	list_del(&gsi_sqp->list);
 	mutex_unlock(&rdev->qp_lock);
-	atomic_dec(&rdev->qp_count);
+	atomic_dec(&rdev->stats.res.qp_count);
 
 	kfree(rdev->gsi_ctx.sqp_tbl);
 	kfree(gsi_sah);
@@ -901,7 +909,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 	mutex_lock(&rdev->qp_lock);
 	list_del(&qp->list);
 	mutex_unlock(&rdev->qp_lock);
-	atomic_dec(&rdev->qp_count);
+	atomic_dec(&rdev->stats.res.qp_count);
 
 	ib_umem_release(qp->rumem);
 	ib_umem_release(qp->sumem);
@@ -1095,7 +1103,7 @@ static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
 			  "Failed to allocate HW AH for Shadow QP");
 		goto fail;
 	}
-	atomic_inc(&rdev->ah_count);
+	atomic_inc(&rdev->stats.res.ah_count);
 
 	return ah;
 
@@ -1163,7 +1171,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
 	INIT_LIST_HEAD(&qp->list);
 	mutex_lock(&rdev->qp_lock);
 	list_add_tail(&qp->list, &rdev->qp_list);
-	atomic_inc(&rdev->qp_count);
+	atomic_inc(&rdev->stats.res.qp_count);
 	mutex_unlock(&rdev->qp_lock);
 	return qp;
 fail:
@@ -1340,8 +1348,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
 	qplqp->pd = &pd->qplib_pd;
 	qplqp->qp_handle = (u64)qplqp;
 	qplqp->max_inline_data = init_attr->cap.max_inline_data;
-	qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
-			   true : false);
+	qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
 	qptype = bnxt_re_init_qp_type(rdev, init_attr);
 	if (qptype < 0) {
 		rc = qptype;
@@ -1446,7 +1453,7 @@ static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
 {
 	struct bnxt_re_dev *rdev;
 	struct bnxt_qplib_qp *qplqp;
-	int rc = 0;
+	int rc;
 
 	rdev = qp->rdev;
 	qplqp = &qp->qplib_qp;
@@ -1497,6 +1504,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
 	struct bnxt_re_dev *rdev = pd->rdev;
 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+	u32 active_qps;
 	int rc;
 
 	rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
@@ -1545,7 +1553,18 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
 	mutex_lock(&rdev->qp_lock);
 	list_add_tail(&qp->list, &rdev->qp_list);
 	mutex_unlock(&rdev->qp_lock);
-	atomic_inc(&rdev->qp_count);
+	active_qps = atomic_inc_return(&rdev->stats.res.qp_count);
+	if (active_qps > rdev->stats.res.qp_watermark)
+		rdev->stats.res.qp_watermark = active_qps;
+	if (qp_init_attr->qp_type == IB_QPT_RC) {
+		active_qps = atomic_inc_return(&rdev->stats.res.rc_qp_count);
+		if (active_qps > rdev->stats.res.rc_qp_watermark)
+			rdev->stats.res.rc_qp_watermark = active_qps;
+	} else if (qp_init_attr->qp_type == IB_QPT_UD) {
+		active_qps = atomic_inc_return(&rdev->stats.res.ud_qp_count);
+		if (active_qps > rdev->stats.res.ud_qp_watermark)
+			rdev->stats.res.ud_qp_watermark = active_qps;
+	}
 
 	return 0;
 
 qp_destroy:
@@ -1648,7 +1667,7 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
 	nq = qplib_srq->cq->nq;
 	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
 	ib_umem_release(srq->umem);
-	atomic_dec(&rdev->srq_count);
+	atomic_dec(&rdev->stats.res.srq_count);
 	if (nq)
 		nq->budget--;
 	return 0;
@@ -1696,6 +1715,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
 	struct bnxt_re_srq *srq;
 	struct bnxt_re_pd *pd;
 	struct ib_pd *ib_pd;
+	u32 active_srqs;
 	int rc, entries;
 
 	ib_pd = ib_srq->pd;
@@ -1760,7 +1780,9 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
 	}
 	if (nq)
 		nq->budget++;
-	atomic_inc(&rdev->srq_count);
+	active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
+	if (active_srqs > rdev->stats.res.srq_watermark)
+		rdev->stats.res.srq_watermark = active_srqs;
 	spin_lock_init(&srq->lock);
 
 	return 0;
@@ -1862,7 +1884,7 @@ static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
 				    int qp_attr_mask)
 {
 	struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
-	int rc = 0;
+	int rc;
 
 	if (qp_attr_mask & IB_QP_STATE) {
 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
@@ -2212,7 +2234,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
 	u8 ip_version = 0;
 	u16 vlan_id = 0xFFFF;
 	void *buf;
-	int i, rc = 0;
+	int i, rc;
 
 	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
 
@@ -2250,7 +2272,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
 	}
 	is_eth = true;
 
-	is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
+	is_vlan = vlan_id && (vlan_id < 0x1000);
 
 	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
 			  ip_version, is_udp, 0, &qp->qp1_hdr);
@@ -2787,7 +2809,6 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
 	struct bnxt_qplib_swqe wqe;
 	int rc = 0;
 
-	memset(&wqe, 0, sizeof(wqe));
 	while (wr) {
 		/* House keeping */
 		memset(&wqe, 0, sizeof(wqe));
@@ -2886,7 +2907,7 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 	bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
 	ib_umem_release(cq->umem);
 
-	atomic_dec(&rdev->cq_count);
+	atomic_dec(&rdev->stats.res.cq_count);
 	nq->budget--;
 	kfree(cq->cql);
 	return 0;
@@ -2902,6 +2923,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	int cqe = attr->cqe;
 	struct bnxt_qplib_nq *nq = NULL;
 	unsigned int nq_alloc_cnt;
+	u32 active_cqs;
 
 	if (attr->flags)
 		return -EOPNOTSUPP;
@@ -2970,7 +2992,9 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	cq->cq_period = cq->qplib_cq.period;
 	nq->budget++;
 
-	atomic_inc(&rdev->cq_count);
+	active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
+	if (active_cqs > rdev->stats.res.cq_watermark)
+		rdev->stats.res.cq_watermark = active_cqs;
 	spin_lock_init(&cq->cq_lock);
 
 	if (udata) {
@@ -3083,6 +3107,7 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 	}
 
 	cq->ib_cq.cqe = cq->resize_cqe;
+	atomic_inc(&rdev->stats.res.resize_count);
 
 	return 0;
 
@@ -3319,26 +3344,21 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
 	struct bnxt_re_dev *rdev = gsi_qp->rdev;
 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
 	struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+	dma_addr_t shrq_hdr_buf_map;
+	struct ib_sge s_sge[2] = {};
+	struct ib_sge r_sge[2] = {};
 	struct bnxt_re_ah *gsi_sah;
+	struct ib_recv_wr rwr = {};
+	dma_addr_t rq_hdr_buf_map;
+	struct ib_ud_wr udwr = {};
 	struct ib_send_wr *swr;
-	struct ib_ud_wr udwr;
-	struct ib_recv_wr rwr;
+	u32 skip_bytes = 0;
 	int pkt_type = 0;
-	u32 tbl_idx;
 	void *rq_hdr_buf;
-	dma_addr_t rq_hdr_buf_map;
-	dma_addr_t shrq_hdr_buf_map;
 	u32 offset = 0;
-	u32 skip_bytes = 0;
-	struct ib_sge s_sge[2];
-	struct ib_sge r_sge[2];
+	u32 tbl_idx;
 	int rc;
 
-	memset(&udwr, 0, sizeof(udwr));
-	memset(&rwr, 0, sizeof(rwr));
-	memset(&s_sge, 0, sizeof(s_sge));
-	memset(&r_sge, 0, sizeof(r_sge));
-
 	swr = &udwr.wr;
 	tbl_idx = cqe->wr_id;
@@ -3578,7 +3598,7 @@ static int send_phantom_wqe(struct bnxt_re_qp *qp)
 {
 	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
 	unsigned long flags;
-	int rc = 0;
+	int rc;
 
 	spin_lock_irqsave(&qp->sq_lock, flags);
 
@@ -3768,6 +3788,7 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
 	struct bnxt_re_mr *mr;
+	u32 active_mrs;
 	int rc;
 
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
@@ -3795,7 +3816,9 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
 	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
 			       IB_ACCESS_REMOTE_ATOMIC))
 		mr->ib_mr.rkey = mr->ib_mr.lkey;
-	atomic_inc(&rdev->mr_count);
+	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
+	if (active_mrs > rdev->stats.res.mr_watermark)
+		rdev->stats.res.mr_watermark = active_mrs;
 
 	return &mr->ib_mr;
 
@@ -3828,7 +3851,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 	ib_umem_release(mr->ib_umem);
 
 	kfree(mr);
-	atomic_dec(&rdev->mr_count);
+	atomic_dec(&rdev->stats.res.mr_count);
 	return rc;
 }
 
@@ -3858,6 +3881,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
 	struct bnxt_re_mr *mr = NULL;
+	u32 active_mrs;
 	int rc;
 
 	if (type != IB_MR_TYPE_MEM_REG) {
@@ -3896,7 +3920,9 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
 		goto fail_mr;
 	}
 
-	atomic_inc(&rdev->mr_count);
+	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
+	if (active_mrs > rdev->stats.res.mr_watermark)
+		rdev->stats.res.mr_watermark = active_mrs;
 	return &mr->ib_mr;
 
 fail_mr:
@@ -3914,6 +3940,7 @@ struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
 	struct bnxt_re_mw *mw;
+	u32 active_mws;
 	int rc;
 
 	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
@@ -3932,7 +3959,9 @@ struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
 	}
 	mw->ib_mw.rkey = mw->qplib_mw.rkey;
 
-	atomic_inc(&rdev->mw_count);
+	active_mws = atomic_inc_return(&rdev->stats.res.mw_count);
+	if (active_mws > rdev->stats.res.mw_watermark)
+		rdev->stats.res.mw_watermark = active_mws;
 	return &mw->ib_mw;
 
 fail:
@@ -3953,21 +3982,19 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
 	}
 
 	kfree(mw);
-	atomic_dec(&rdev->mw_count);
+	atomic_dec(&rdev->stats.res.mw_count);
 	return rc;
 }
 
-/* uverbs */
-struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
-				  u64 virt_addr, int mr_access_flags,
-				  struct ib_udata *udata)
+static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
+					   int mr_access_flags, struct ib_umem *umem)
 {
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
-	struct bnxt_re_mr *mr;
-	struct ib_umem *umem;
 	unsigned long page_size;
+	struct bnxt_re_mr *mr;
 	int umem_pgs, rc;
+	u32 active_mrs;
 
 	if (length > BNXT_RE_MAX_MR_SIZE) {
 		ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
@@ -3975,6 +4002,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 		return ERR_PTR(-ENOMEM);
 	}
 
+	page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
+	if (!page_size) {
+		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
+		return ERR_PTR(-EINVAL);
+	}
+
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
@@ -3986,45 +4019,33 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 
 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
 	if (rc) {
-		ibdev_err(&rdev->ibdev, "Failed to allocate MR");
+		ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
+		rc = -EIO;
 		goto free_mr;
 	}
 	/* The fixed portion of the rkey is the same as the lkey */
 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
-
-	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
-	if (IS_ERR(umem)) {
-		ibdev_err(&rdev->ibdev, "Failed to get umem");
-		rc = -EFAULT;
-		goto free_mrw;
-	}
 	mr->ib_umem = umem;
-
 	mr->qplib_mr.va = virt_addr;
-	page_size = ib_umem_find_best_pgsz(
-		umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
-	if (!page_size) {
-		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
-		rc = -EFAULT;
-		goto free_umem;
-	}
 	mr->qplib_mr.total_size = length;
 
 	umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
 			       umem_pgs, page_size);
 	if (rc) {
-		ibdev_err(&rdev->ibdev, "Failed to register user MR");
-		goto free_umem;
+		ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
+		rc = -EIO;
+		goto free_mrw;
 	}
 
 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
 	mr->ib_mr.rkey = mr->qplib_mr.lkey;
-	atomic_inc(&rdev->mr_count);
+	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
+	if (active_mrs > rdev->stats.res.mr_watermark)
+		rdev->stats.res.mr_watermark = active_mrs;
 
 	return &mr->ib_mr;
 
-free_umem:
-	ib_umem_release(umem);
+
 free_mrw:
 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
 free_mr:
@@ -4032,6 +4053,48 @@ free_mr:
 	return ERR_PTR(rc);
 }
 
+struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
+				  u64 virt_addr, int mr_access_flags,
+				  struct ib_udata *udata)
+{
+	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+	struct bnxt_re_dev *rdev = pd->rdev;
+	struct ib_umem *umem;
+	struct ib_mr *ib_mr;
+
+	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
+	if (IS_ERR(umem))
+		return ERR_CAST(umem);
+
+	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+	if (IS_ERR(ib_mr))
+		ib_umem_release(umem);
+	return ib_mr;
+}
+
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+					 u64 length, u64 virt_addr, int fd,
+					 int mr_access_flags, struct ib_udata *udata)
+{
+	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+	struct bnxt_re_dev *rdev = pd->rdev;
+	struct ib_umem_dmabuf *umem_dmabuf;
+	struct ib_umem *umem;
+	struct ib_mr *ib_mr;
+
+	umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
+						fd, mr_access_flags);
+	if (IS_ERR(umem_dmabuf))
+		return ERR_CAST(umem_dmabuf);
+
+	umem = &umem_dmabuf->umem;
+
+	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+	if (IS_ERR(ib_mr))
+		ib_umem_release(umem);
+	return ib_mr;
+}
+
 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 {
 	struct ib_device *ibdev = ctx->device;
@@ -4087,6 +4150,8 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 		goto cfail;
 	}
 	uctx->shpage_mmap = &entry->rdma_entry;
+	if (rdev->pacing.dbr_pacing)
+		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
 
 	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
 	if (rc) {
@@ -4159,6 +4224,19 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
 	case BNXT_RE_MMAP_SH_PAGE:
 		ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
 		break;
+	case BNXT_RE_MMAP_DBR_BAR:
+		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
+		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
+					pgprot_noncached(vma->vm_page_prot),
+					rdma_entry);
+		break;
+	case BNXT_RE_MMAP_DBR_PAGE:
+		/* Driver doesn't expect write access for user space */
+		if (vma->vm_flags & VM_WRITE)
+			return -EFAULT;
+		ret = vm_insert_page(vma, vma->vm_start,
+				     virt_to_page((void *)bnxt_entry->mem_offset));
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -4178,6 +4256,15 @@ void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
 	kfree(bnxt_entry);
 }
 
+static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
+{
+	struct bnxt_re_ucontext *uctx;
+
+	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
+	bnxt_re_pacing_alert(uctx->rdev);
+	return 0;
+}
+
 static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
 {
 	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
@@ -4190,7 +4277,7 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
 	u64 mmap_offset;
 	u32 length;
 	u32 dpi;
-	u64 dbr;
+	u64 addr;
 	int err;
 
 	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
@@ -4212,19 +4299,30 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
 				return -ENOMEM;
 			length = PAGE_SIZE;
 			dpi = uctx->wcdpi.dpi;
-			dbr = (u64)uctx->wcdpi.umdbr;
+			addr = (u64)uctx->wcdpi.umdbr;
 			mmap_flag = BNXT_RE_MMAP_WC_DB;
 		} else {
 			return -EINVAL;
 		}
 
 		break;
+	case BNXT_RE_ALLOC_DBR_BAR_PAGE:
+		length = PAGE_SIZE;
+		addr = (u64)rdev->pacing.dbr_bar_addr;
+		mmap_flag = BNXT_RE_MMAP_DBR_BAR;
+		break;
+
+	case BNXT_RE_ALLOC_DBR_PAGE:
+		length = PAGE_SIZE;
+		addr = (u64)rdev->pacing.dbr_page;
+		mmap_flag = BNXT_RE_MMAP_DBR_PAGE;
+		break;
 	default:
 		return -EOPNOTSUPP;
 	}
 
-	entry = bnxt_re_mmap_entry_insert(uctx, dbr, mmap_flag, &mmap_offset);
+	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
 	if (!entry)
 		return -ENOMEM;
 
@@ -4264,6 +4362,9 @@ static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
 			uctx->wcdpi.dbr = NULL;
 		}
 		break;
+	case BNXT_RE_MMAP_DBR_BAR:
+	case BNXT_RE_MMAP_DBR_PAGE:
+		break;
 	default:
 		goto exit;
 	}
@@ -4301,7 +4402,13 @@ DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
 			    &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
 			    &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
 
+DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);
+
+DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
+			      &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));
+
 const struct uapi_definition bnxt_re_uapi_defs[] = {
 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
+	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
 	{}
 };
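
Nearly every counter hunk above applies the same two-step pattern: bump the per-resource count with atomic_inc_return() and, if the new value exceeds the recorded high-water mark, store it in the matching *_watermark field under rdev->stats.res. Below is a minimal userspace C sketch of that pattern, not driver code: the kernel uses atomic_t and atomic_inc_return(), C11 stdatomic is substituted here, and the res_stats/track_alloc/track_free names are invented for illustration.

#include <stdatomic.h>
#include <stdio.h>

struct res_stats {
	atomic_uint count;      /* live objects of one resource type */
	unsigned int watermark; /* highest value count has reached */
};

static void track_alloc(struct res_stats *st)
{
	/* atomic_fetch_add() returns the old value; +1 yields the new
	 * count, mirroring the kernel's atomic_inc_return(). */
	unsigned int active = atomic_fetch_add(&st->count, 1) + 1;

	/* Plain read-modify-write, exactly as in the patch: the
	 * watermark update may race with another allocator, which is
	 * tolerable for a value that only feeds debug statistics. */
	if (active > st->watermark)
		st->watermark = active;
}

static void track_free(struct res_stats *st)
{
	atomic_fetch_sub(&st->count, 1);
}

int main(void)
{
	struct res_stats qp_stats = { 0 };

	track_alloc(&qp_stats);
	track_alloc(&qp_stats);
	track_free(&qp_stats);
	track_alloc(&qp_stats);

	/* Prints: active=2 watermark=2 */
	printf("active=%u watermark=%u\n",
	       atomic_load(&qp_stats.count), qp_stats.watermark);
	return 0;
}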
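
The new bnxt_re_reg_user_mr_dmabuf() entry point is the driver half of the generic verbs dmabuf registration path. A sketch of how it is reached from userspace through libibverbs, assuming rdma-core built with dmabuf support and a dmabuf_fd obtained from some exporter (udmabuf, a GPU driver, etc.); error handling is abbreviated and register_dmabuf_mr is an invented helper name:

#include <infiniband/verbs.h>
#include <stdint.h>
#include <stdio.h>

static int register_dmabuf_mr(struct ibv_pd *pd, int dmabuf_fd,
			      size_t length, uint64_t iova)
{
	struct ibv_mr *mr;

	/* Offset 0 into the dmabuf; in the kernel, bnxt_re pins the
	 * buffer via ib_umem_dmabuf_get_pinned() and then registers it
	 * through the same __bnxt_re_user_reg_mr() helper as a normal
	 * user MR. */
	mr = ibv_reg_dmabuf_mr(pd, 0, length, iova, dmabuf_fd,
			       IBV_ACCESS_LOCAL_WRITE |
			       IBV_ACCESS_REMOTE_READ |
			       IBV_ACCESS_REMOTE_WRITE);
	if (!mr) {
		perror("ibv_reg_dmabuf_mr");
		return -1;
	}
	printf("lkey=0x%x rkey=0x%x\n", mr->lkey, mr->rkey);
	return ibv_dereg_mr(mr);
}

Note the ownership rule the refactor establishes: both wrappers acquire the umem and release it themselves only when the shared helper fails; on success the umem is stored in mr->ib_umem and released later by bnxt_re_dereg_mr().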
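
The BNXT_RE_MMAP_DBR_PAGE branch maps a kernel page that userspace may only read (the doorbell pacing state), hence the VM_WRITE check before vm_insert_page(). A generic kernel-style sketch of that read-only page-mapping pattern, with invented names (map_page_read_only, kpage); it is not the driver's actual handler:

#include <linux/mm.h>

static int map_page_read_only(struct vm_area_struct *vma, void *kpage)
{
	/* Exactly one page, and the mapping must not be writable. */
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if (vma->vm_flags & VM_WRITE)
		return -EFAULT;

	/* vm_insert_page() requires an order-0 kernel page, e.g. one
	 * obtained from __get_free_page(); it takes its own reference
	 * on the page for the lifetime of the mapping. */
	return vm_insert_page(vma, vma->vm_start, virt_to_page(kpage));
}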