Diffstat (limited to 'drivers/infiniband/hw'): 32 files changed, 216 insertions, 362 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 29cc0d14399a..3224f18a66e5 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -262,13 +262,12 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str) int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num, u16 index, u16 *pkey) { - struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + if (index > 0) + return -EINVAL; - /* Ignore port_num */ + *pkey = IB_DEFAULT_PKEY_FULL; - memset(pkey, 0, sizeof(*pkey)); - return bnxt_qplib_get_pkey(&rdev->qplib_res, - &rdev->qplib_res.pkey_tbl, index, pkey); + return 0; } int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num, diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index b44944fb9b24..3d6834d3d4fb 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -893,7 +893,6 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq, struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq, qplib_srq); struct ib_event ib_event; - int rc = 0; ib_event.device = &srq->rdev->ibdev; ib_event.element.srq = &srq->ib_srq; @@ -907,7 +906,7 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq, (*srq->ib_srq.event_handler)(&ib_event, srq->ib_srq.srq_context); } - return rc; + return 0; } static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index ca88849559bf..96e581ced50e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -46,6 +46,7 @@ #include <linux/delay.h> #include <linux/prefetch.h> #include <linux/if_ether.h> +#include <rdma/ib_mad.h> #include "roce_hsi.h" @@ -1232,7 +1233,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_modify_qp req; struct creq_modify_qp_resp resp; - u16 cmd_flags = 0, pkey; + u16 cmd_flags = 0; u32 temp32[4]; u32 bmask; int rc; @@ -1255,11 +1256,9 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS) req.access = qp->access; - if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) { - if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl, - qp->pkey_index, &pkey)) - req.pkey = cpu_to_le16(pkey); - } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) + req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL); + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY) req.qkey = cpu_to_le32(qp->qkey); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 3de854727460..061b2895dd9b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -555,7 +555,7 @@ skip_ctx_setup: void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) { - kfree(rcfw->cmdq.cmdq_bitmap); + bitmap_free(rcfw->cmdq.cmdq_bitmap); kfree(rcfw->qp_tbl); kfree(rcfw->crsqe_tbl); bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq); @@ -572,7 +572,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, struct bnxt_qplib_sg_info sginfo = {}; struct bnxt_qplib_cmdq_ctx *cmdq; struct bnxt_qplib_creq_ctx *creq; - u32 bmap_size = 0; rcfw->pdev = res->pdev; cmdq = &rcfw->cmdq; @@ -613,13 +612,10 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, if (!rcfw->crsqe_tbl) goto fail; - bmap_size = 
BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long); - cmdq->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL); + cmdq->cmdq_bitmap = bitmap_zalloc(rcfw->cmdq_depth, GFP_KERNEL); if (!cmdq->cmdq_bitmap) goto fail; - cmdq->bmap_size = bmap_size; - /* Allocate one extra to hold the QP1 entries */ rcfw->qp_tbl_size = qp_tbl_sz + 1; rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node), @@ -667,8 +663,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) iounmap(cmdq->cmdq_mbox.reg.bar_reg); iounmap(creq->creq_db.reg.bar_reg); - indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size); - if (indx != cmdq->bmap_size) + indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth); + if (indx != rcfw->cmdq_depth) dev_err(&rcfw->pdev->dev, "disabling RCFW with pending cmd-bit %lx\n", indx); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 82faa4e4cda8..0a3d8e7da3d4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -152,7 +152,6 @@ struct bnxt_qplib_cmdq_ctx { wait_queue_head_t waitq; unsigned long flags; unsigned long *cmdq_bitmap; - u32 bmap_size; u32 seq_num; }; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index bc1ba4b51ba4..126d4f26f75a 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -649,31 +649,6 @@ static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl, memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); } -static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl) -{ - if (!pkey_tbl->tbl) - dev_dbg(&res->pdev->dev, "PKEY tbl not present\n"); - else - kfree(pkey_tbl->tbl); - - pkey_tbl->tbl = NULL; - pkey_tbl->max = 0; - pkey_tbl->active = 0; -} - -static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, - u16 max) -{ - pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL); - if (!pkey_tbl->tbl) - return -ENOMEM; - - pkey_tbl->max = max; - return 0; -}; - /* PDs */ int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd) { @@ -843,24 +818,6 @@ unmap_io: return -ENOMEM; } -/* PKEYs */ -static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl) -{ - memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max); - pkey_tbl->active = 0; -} - -static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl) -{ - u16 pkey = 0xFFFF; - - memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max); - - /* pkey default = 0xFFFF */ - bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false); -} - /* Stats */ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev, struct bnxt_qplib_stats *stats) @@ -891,21 +848,18 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev, void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res) { - bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl); bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl); } int bnxt_qplib_init_res(struct bnxt_qplib_res *res) { bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev); - bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl); return 0; } void bnxt_qplib_free_res(struct bnxt_qplib_res *res) { - bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl); bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl); bnxt_qplib_free_pd_tbl(&res->pd_tbl); bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl); @@ -924,10 +878,6 @@ int 
bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev, if (rc) goto fail; - rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey); - if (rc) - goto fail; - rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd); if (rc) goto fail; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index e1411a2352a7..982e2c96dac2 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -185,12 +185,6 @@ struct bnxt_qplib_sgid_tbl { u8 *vlan; }; -struct bnxt_qplib_pkey_tbl { - u16 *tbl; - u16 max; - u16 active; -}; - struct bnxt_qplib_dpi { u32 dpi; void __iomem *dbr; @@ -258,7 +252,6 @@ struct bnxt_qplib_res { struct bnxt_qplib_rcfw *rcfw; struct bnxt_qplib_pd_tbl pd_tbl; struct bnxt_qplib_sgid_tbl sgid_tbl; - struct bnxt_qplib_pkey_tbl pkey_tbl; struct bnxt_qplib_dpi_tbl dpi_tbl; bool prio; bool is_vf; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 379e715ebd30..b802981b7171 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -146,17 +146,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_srq = le16_to_cpu(sb->max_srq); attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1; attr->max_srq_sges = sb->max_srq_sge; - attr->max_pkey = le32_to_cpu(sb->max_pkeys); - /* - * Some versions of FW reports more than 0xFFFF. - * Restrict it for now to 0xFFFF to avoid - * reporting trucated value - */ - if (attr->max_pkey > 0xFFFF) { - /* ib_port_attr::pkey_tbl_len is u16 */ - attr->max_pkey = 0xFFFF; - } - + attr->max_pkey = 1; attr->max_inline_data = le32_to_cpu(sb->max_inline_data); attr->l2_db_size = (sb->l2_db_space_size + 1) * (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); @@ -414,93 +404,6 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, return rc; } -/* pkeys */ -int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index, - u16 *pkey) -{ - if (index == 0xFFFF) { - *pkey = 0xFFFF; - return 0; - } - if (index >= pkey_tbl->max) { - dev_err(&res->pdev->dev, - "Index %d exceeded PKEY table max (%d)\n", - index, pkey_tbl->max); - return -EINVAL; - } - memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey)); - return 0; -} - -int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update) -{ - int i, rc = 0; - - if (!pkey_tbl) { - dev_err(&res->pdev->dev, "PKEY table not allocated\n"); - return -EINVAL; - } - - /* Do we need a pkey_lock here? */ - if (!pkey_tbl->active) { - dev_err(&res->pdev->dev, "PKEY table has no active entries\n"); - return -ENOMEM; - } - for (i = 0; i < pkey_tbl->max; i++) { - if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey))) - break; - } - if (i == pkey_tbl->max) { - dev_err(&res->pdev->dev, - "PKEY 0x%04x not found in the pkey table\n", *pkey); - return -ENOMEM; - } - memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey)); - pkey_tbl->active--; - - /* unlock */ - return rc; -} - -int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update) -{ - int i, free_idx, rc = 0; - - if (!pkey_tbl) { - dev_err(&res->pdev->dev, "PKEY table not allocated\n"); - return -EINVAL; - } - - /* Do we need a pkey_lock here? 
*/ - if (pkey_tbl->active == pkey_tbl->max) { - dev_err(&res->pdev->dev, "PKEY table is full\n"); - return -ENOMEM; - } - free_idx = pkey_tbl->max; - for (i = 0; i < pkey_tbl->max; i++) { - if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey))) - return -EALREADY; - else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max) - free_idx = i; - } - if (free_idx == pkey_tbl->max) { - dev_err(&res->pdev->dev, - "PKEY table is FULL but count is not MAX??\n"); - return -ENOMEM; - } - /* Add PKEY to the pkey_tbl */ - memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey)); - pkey_tbl->active++; - - /* unlock */ - return rc; -} - /* AH */ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, bool block) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index a18f568cb23e..5939e8fc8353 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -255,15 +255,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct bnxt_qplib_gid *gid, u16 gid_idx, const u8 *smac); -int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index, - u16 *pkey); -int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update); -int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update); int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_dev_attr *attr, bool vf); int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res, diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c index 724d23297b35..f64e7e02b129 100644 --- a/drivers/infiniband/hw/cxgb4/id_table.c +++ b/drivers/infiniband/hw/cxgb4/id_table.c @@ -59,7 +59,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc) alloc->last = obj + 1; if (alloc->last >= alloc->max) alloc->last = 0; - set_bit(obj, alloc->table); + __set_bit(obj, alloc->table); obj += alloc->start; } else obj = -1; @@ -75,37 +75,32 @@ void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj) obj -= alloc->start; spin_lock_irqsave(&alloc->lock, flags); - clear_bit(obj, alloc->table); + __clear_bit(obj, alloc->table); spin_unlock_irqrestore(&alloc->lock, flags); } int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num, u32 reserved, u32 flags) { - int i; - alloc->start = start; alloc->flags = flags; if (flags & C4IW_ID_TABLE_F_RANDOM) alloc->last = prandom_u32() % RANDOM_SKIP; else alloc->last = 0; - alloc->max = num; + alloc->max = num; spin_lock_init(&alloc->lock); - alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long), - GFP_KERNEL); + alloc->table = bitmap_zalloc(num, GFP_KERNEL); if (!alloc->table) return -ENOMEM; - bitmap_zero(alloc->table, num); if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY)) - for (i = 0; i < reserved; ++i) - set_bit(i, alloc->table); + bitmap_set(alloc->table, 0, reserved); return 0; } void c4iw_id_table_free(struct c4iw_id_table *alloc) { - kfree(alloc->table); + bitmap_free(alloc->table); } diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 0c8fd5a85fcb..89f36a3a9af0 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -41,6 +41,7 @@ #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/inetdevice.h> +#include <net/addrconf.h> #include 
<linux/io.h> #include <asm/irq.h> @@ -264,7 +265,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro return -EINVAL; dev = to_c4iw_dev(ibdev); - memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); + addrconf_addr_eui48((u8 *)&props->sys_image_guid, + dev->rdev.lldi.ports[0]->dev_addr); props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type); props->fw_ver = dev->rdev.lldi.fw_vers; props->device_cap_flags = dev->device_cap_flags; @@ -525,8 +527,8 @@ void c4iw_register_device(struct work_struct *work) struct c4iw_dev *dev = ctx->dev; pr_debug("c4iw_dev %p\n", dev); - memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); - memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); + addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid, + dev->rdev.lldi.ports[0]->dev_addr); dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW; if (fastreg_support) dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 5b11c8282744..a71c5a36ceba 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -161,9 +161,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, if (!pq->reqs) goto pq_reqs_nomem; - pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size), - sizeof(*pq->req_in_use), - GFP_KERNEL); + pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL); if (!pq->req_in_use) goto pq_reqs_no_in_use; @@ -210,7 +208,7 @@ cq_comps_nomem: cq_nomem: kmem_cache_destroy(pq->txreq_cache); pq_txreq_nomem: - kfree(pq->req_in_use); + bitmap_free(pq->req_in_use); pq_reqs_no_in_use: kfree(pq->reqs); pq_reqs_nomem: @@ -257,7 +255,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, pq->wait, !atomic_read(&pq->n_reqs)); kfree(pq->reqs); - kfree(pq->req_in_use); + bitmap_free(pq->req_in_use); kmem_cache_destroy(pq->txreq_cache); flush_pq_iowait(pq); kfree(pq); diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c index 84f3f2b5f097..3f7fb7508585 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.c +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c @@ -61,7 +61,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, CMD_POLL_TOKEN, 0); if (ret) { dev_err_ratelimited(hr_dev->dev, - "failed to post mailbox %x in poll mode, ret = %d.\n", + "failed to post mailbox 0x%x in poll mode, ret = %d.\n", op, ret); return ret; } @@ -91,7 +91,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status, if (unlikely(token != context->token)) { dev_err_ratelimited(hr_dev->dev, - "[cmd] invalid ae token %x,context token is %x!\n", + "[cmd] invalid ae token 0x%x, context token is 0x%x.\n", token, context->token); return; } @@ -130,14 +130,14 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, context->token, 1); if (ret) { dev_err_ratelimited(dev, - "failed to post mailbox %x in event mode, ret = %d.\n", + "failed to post mailbox 0x%x in event mode, ret = %d.\n", op, ret); goto out; } if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { - dev_err_ratelimited(dev, "[cmd] token %x mailbox %x timeout.\n", + dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n", context->token, op); ret = -EBUSY; goto out; @@ -145,7 +145,7 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, ret = 
context->result; if (ret) - dev_err_ratelimited(dev, "[cmd] token %x mailbox %x error %d\n", + dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n", context->token, op, ret); out: diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 43e17d61cb63..bc7112a205a7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -182,6 +182,7 @@ enum { HNS_ROCE_CAP_FLAG_FRMR = BIT(8), HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9), HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10), + HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12), HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14), HNS_ROCE_CAP_FLAG_STASH = BIT(17), }; @@ -228,6 +229,7 @@ struct hns_roce_uar { enum hns_roce_mmap_type { HNS_ROCE_MMAP_TYPE_DB = 1, HNS_ROCE_MMAP_TYPE_TPTR, + HNS_ROCE_MMAP_TYPE_DWQE, }; struct hns_user_mmap_entry { @@ -354,10 +356,10 @@ struct hns_roce_mr { u64 size; /* Address range of MR */ u32 key; /* Key of MR */ u32 pd; /* PD num of MR */ - u32 access; /* Access permission of MR */ + u32 access; /* Access permission of MR */ int enabled; /* MR's active status */ - int type; /* MR's register type */ - u32 pbl_hop_num; /* multi-hop number */ + int type; /* MR's register type */ + u32 pbl_hop_num; /* multi-hop number */ struct hns_roce_mtr pbl_mtr; u32 npages; dma_addr_t *page_list; @@ -374,17 +376,17 @@ struct hns_roce_wq { u32 wqe_cnt; /* WQE num */ u32 max_gs; u32 rsv_sge; - int offset; - int wqe_shift; /* WQE size */ + u32 offset; + u32 wqe_shift; /* WQE size */ u32 head; u32 tail; void __iomem *db_reg; }; struct hns_roce_sge { - unsigned int sge_cnt; /* SGE num */ - int offset; - int sge_shift; /* SGE size */ + unsigned int sge_cnt; /* SGE num */ + u32 offset; + u32 sge_shift; /* SGE size */ }; struct hns_roce_buf_list { @@ -468,7 +470,7 @@ struct hns_roce_cq { struct hns_roce_idx_que { struct hns_roce_mtr mtr; - int entry_shift; + u32 entry_shift; unsigned long *bitmap; u32 head; u32 tail; @@ -480,7 +482,7 @@ struct hns_roce_srq { u32 wqe_cnt; int max_gs; u32 rsv_sge; - int wqe_shift; + u32 wqe_shift; u32 cqn; u32 xrcdn; void __iomem *db_reg; @@ -627,10 +629,6 @@ struct hns_roce_work { u32 queue_num; }; -enum { - HNS_ROCE_QP_CAP_DIRECT_WQE = BIT(5), -}; - struct hns_roce_qp { struct ib_qp ibqp; struct hns_roce_wq rq; @@ -672,9 +670,10 @@ struct hns_roce_qp { unsigned long flush_flag; struct hns_roce_work flush_work; struct hns_roce_rinl_buf rq_inl_buf; - struct list_head node; /* all qps are on a list */ - struct list_head rq_node; /* all recv qps are on a list */ - struct list_head sq_node; /* all send qps are on a list */ + struct list_head node; /* all qps are on a list */ + struct list_head rq_node; /* all recv qps are on a list */ + struct list_head sq_node; /* all send qps are on a list */ + struct hns_user_mmap_entry *dwqe_mmap_entry; }; struct hns_roce_ib_iboe { @@ -767,7 +766,7 @@ struct hns_roce_caps { u32 reserved_qps; int num_qpc_timer; int num_cqc_timer; - int num_srqs; + u32 num_srqs; u32 max_wqes; u32 max_srq_wrs; u32 max_srq_sges; @@ -781,7 +780,7 @@ struct hns_roce_caps { u32 min_cqes; u32 min_wqes; u32 reserved_cqs; - int reserved_srqs; + u32 reserved_srqs; int num_aeq_vectors; int num_comp_vectors; int num_other_vectors; @@ -855,7 +854,7 @@ struct hns_roce_caps { u32 cqc_timer_ba_pg_sz; u32 cqc_timer_buf_pg_sz; u32 cqc_timer_hop_num; - u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ + u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ u32 cqe_buf_pg_sz; u32 cqe_hop_num; u32 srqwqe_ba_pg_sz; @@ -874,7 
+873,7 @@ struct hns_roce_caps { u32 gmv_hop_num; u32 sl_num; u32 llm_buf_pg_sz; - u32 chunk_sz; /* chunk size in non multihop mode */ + u32 chunk_sz; /* chunk size in non multihop mode */ u64 flags; u16 default_ceq_max_cnt; u16 default_ceq_period; @@ -1001,8 +1000,8 @@ struct hns_roce_dev { int loop_idc; u32 sdb_offset; u32 odb_offset; - dma_addr_t tptr_dma_addr; /* only for hw v1 */ - u32 tptr_size; /* only for hw v1 */ + dma_addr_t tptr_dma_addr; /* only for hw v1 */ + u32 tptr_size; /* only for hw v1 */ const struct hns_roce_hw *hw; void *priv; struct workqueue_struct *irq_workq; @@ -1010,6 +1009,7 @@ struct hns_roce_dev { u32 func_num; u32 is_vf; u32 cong_algo_tmpl_id; + u64 dwqe_page; }; static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev) @@ -1158,7 +1158,7 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev); /* hns roce hw need current block and next block addr from mtt */ #define MTT_MIN_COUNT 2 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); + u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, struct hns_roce_buf_attr *buf_attr, unsigned int page_shift, struct ib_udata *udata, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index bbfa1332dedc..e681c2dc23e8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -678,6 +678,7 @@ static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val, static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, void *wqe) { +#define HNS_ROCE_SL_SHIFT 2 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe; /* All kinds of DirectWQE have the same header field layout */ @@ -685,7 +686,8 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl); roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M, - V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2); + V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, + qp->sl >> HNS_ROCE_SL_SHIFT); roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head); @@ -1305,14 +1307,14 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, continue; dev_err_ratelimited(hr_dev->dev, - "Cmdq IO error, opcode = %x, return = %x\n", + "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n", desc->opcode, desc_ret); ret = -EIO; } } else { /* FW/HW reset or incorrect number of desc */ tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG); - dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n", + dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n", csq->head, tail); csq->head = tail; @@ -1997,7 +1999,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev) caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM; if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { - caps->flags |= HNS_ROCE_CAP_FLAG_STASH; + caps->flags |= HNS_ROCE_CAP_FLAG_STASH | + HNS_ROCE_CAP_FLAG_DIRECT_WQE; caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE; } else { caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE; @@ -4733,7 +4736,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) { ibdev_err(ibdev, - "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n", + 
"failed to fill QPC, sl (%u) shouldn't be larger than %d.\n", hr_qp->sl, MAX_SERVICE_LEVEL); return -EINVAL; } @@ -4762,7 +4765,8 @@ static bool check_qp_state(enum ib_qp_state cur_state, [IB_QPS_ERR] = true }, [IB_QPS_SQD] = {}, [IB_QPS_SQE] = {}, - [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true } + [IB_QPS_ERR] = { [IB_QPS_RESET] = true, + [IB_QPS_ERR] = true } }; return sm[cur_state][new_state]; @@ -5827,7 +5831,7 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev, roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag); } -static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) +static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn) { struct device *dev = hr_dev->dev; int ret; @@ -5841,7 +5845,7 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) 0, HNS_ROCE_CMD_DESTROY_AEQC, HNS_ROCE_CMD_TIMEOUT_MSECS); if (ret) - dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn); + dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn); } static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 4d904d5e82be..fddb9bc3c14c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -35,26 +35,15 @@ #include <linux/bitops.h> -#define HNS_ROCE_VF_QPC_BT_NUM 256 -#define HNS_ROCE_VF_SCCC_BT_NUM 64 -#define HNS_ROCE_VF_SRQC_BT_NUM 64 -#define HNS_ROCE_VF_CQC_BT_NUM 64 -#define HNS_ROCE_VF_MPT_BT_NUM 64 -#define HNS_ROCE_VF_SMAC_NUM 32 -#define HNS_ROCE_VF_SL_NUM 8 -#define HNS_ROCE_VF_GMV_BT_NUM 256 - #define HNS_ROCE_V2_MAX_QP_NUM 0x1000 #define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200 #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000 -#define HNS_ROCE_V2_MAX_SRQ 0x100000 #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000 #define HNS_ROCE_V2_MAX_SRQ_SGE 64 #define HNS_ROCE_V2_MAX_CQ_NUM 0x100000 #define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100 #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000 #define HNS_ROCE_V2_MAX_CQE_NUM 0x400000 -#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000 #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64 #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64 #define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000 @@ -63,13 +52,10 @@ #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32 #define HNS_ROCE_V2_UAR_NUM 256 #define HNS_ROCE_V2_PHY_UAR_NUM 1 -#define HNS_ROCE_V2_MAX_IRQ_NUM 65 -#define HNS_ROCE_V2_COMP_VEC_NUM 63 #define HNS_ROCE_V2_AEQE_VEC_NUM 1 #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1 #define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000 #define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000 -#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_PD_NUM 0x1000000 @@ -81,7 +67,6 @@ #define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16 #define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64 #define HNS_ROCE_V2_IRRL_ENTRY_SZ 64 -#define HNS_ROCE_V2_TRRL_ENTRY_SZ 48 #define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100 #define HNS_ROCE_V2_CQC_ENTRY_SZ 64 #define HNS_ROCE_V2_SRQC_ENTRY_SZ 64 @@ -103,7 +88,6 @@ #define HNS_ROCE_INVALID_LKEY 0x0 #define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 -#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 #define HNS_ROCE_V2_RSV_QPS 8 #define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 @@ -1441,7 +1425,7 @@ struct hns_roce_v2_priv { struct hns_roce_dip { u8 dgid[GID_LEN_V2]; u32 dip_idx; - struct list_head node; /* all dips are on a list */ + struct list_head node; /* all dips are on a list */ }; 
#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0 diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 4194b626f3c6..d0b976a86cd5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -270,6 +270,9 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device, static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index, u16 *pkey) { + if (index > 0) + return -EINVAL; + *pkey = PKEY_ID; return 0; @@ -307,9 +310,25 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, entry->address = address; entry->mmap_type = mmap_type; - ret = rdma_user_mmap_entry_insert_exact( - ucontext, &entry->rdma_entry, length, - mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1); + switch (mmap_type) { + case HNS_ROCE_MMAP_TYPE_DB: + ret = rdma_user_mmap_entry_insert_exact( + ucontext, &entry->rdma_entry, length, 0); + break; + case HNS_ROCE_MMAP_TYPE_TPTR: + ret = rdma_user_mmap_entry_insert_exact( + ucontext, &entry->rdma_entry, length, 1); + break; + case HNS_ROCE_MMAP_TYPE_DWQE: + ret = rdma_user_mmap_entry_insert_range( + ucontext, &entry->rdma_entry, length, 2, + U32_MAX); + break; + default: + ret = -EINVAL; + break; + } + if (ret) { kfree(entry); return NULL; @@ -436,10 +455,18 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma) entry = to_hns_mmap(rdma_entry); pfn = entry->address >> PAGE_SHIFT; - prot = vma->vm_page_prot; - if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR) - prot = pgprot_noncached(prot); + switch (entry->mmap_type) { + case HNS_ROCE_MMAP_TYPE_DB: + case HNS_ROCE_MMAP_TYPE_DWQE: + prot = pgprot_device(vma->vm_page_prot); + break; + case HNS_ROCE_MMAP_TYPE_TPTR: + prot = vma->vm_page_prot; + break; + default: + return -EINVAL; + } ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE, prot, rdma_entry); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 7089ac780291..8de899372567 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -81,7 +81,7 @@ static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) return -ENOMEM; } - mr->key = hw_index_to_key(id); /* MR key */ + mr->key = hw_index_to_key(id); /* MR key */ err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, (unsigned long)id); @@ -824,11 +824,11 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, } int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) + u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) { struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg; int mtt_count, left; - int start_index; + u32 start_index; int total = 0; __le64 *mtts; u32 npage; @@ -884,10 +884,10 @@ done: static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev, struct hns_roce_buf_attr *attr, struct hns_roce_hem_cfg *cfg, - unsigned int *buf_page_shift, int unalinged_size) + unsigned int *buf_page_shift, u64 unalinged_size) { struct hns_roce_buf_region *r; - int first_region_padding; + u64 first_region_padding; int page_cnt, region_cnt; unsigned int page_shift; size_t buf_size; diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 81ffad77ae42..03c349f7ebbe 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -115,6 +115,9 @@ int 
hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) } else { uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT); + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) + hr_dev->dwqe_page = + pci_resource_start(hr_dev->pci_dev, 4); } return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 9af4509894e6..c84e1c23722c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -379,6 +379,11 @@ err_out: return ret; } +static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp) +{ + rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry); +} + void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { struct xarray *xa = &hr_dev->qp_table_xa; @@ -780,7 +785,11 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, goto err_inline; } + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) + hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE; + return 0; + err_inline: free_rq_inline_buf(hr_qp); @@ -822,6 +831,35 @@ static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev, hns_roce_qp_has_rq(init_attr)); } +static int qp_mmap_entry(struct hns_roce_qp *hr_qp, + struct hns_roce_dev *hr_dev, + struct ib_udata *udata, + struct hns_roce_ib_create_qp_resp *resp) +{ + struct hns_roce_ucontext *uctx = + rdma_udata_to_drv_context(udata, + struct hns_roce_ucontext, ibucontext); + struct rdma_user_mmap_entry *rdma_entry; + u64 address; + + address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE; + + hr_qp->dwqe_mmap_entry = + hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address, + HNS_ROCE_DWQE_SIZE, + HNS_ROCE_MMAP_TYPE_DWQE); + + if (!hr_qp->dwqe_mmap_entry) { + ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n"); + return -ENOMEM; + } + + rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry; + resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry); + + return 0; +} + static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_qp_init_attr *init_attr, @@ -909,10 +947,16 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB; if (udata) { + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) { + ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp); + if (ret) + return ret; + } + ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd, resp); if (ret) - return ret; + goto err_remove_qp; } else { ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr); if (ret) @@ -920,6 +964,12 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, } return 0; + +err_remove_qp: + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) + qp_user_mmap_entry_remove(hr_qp); + + return ret; } static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, @@ -933,6 +983,8 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, hns_roce_db_unmap_user(uctx, &hr_qp->rdb); if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) hns_roce_db_unmap_user(uctx, &hr_qp->sdb); + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) + qp_user_mmap_entry_remove(hr_qp); } else { if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) hns_roce_free_db(hr_dev, &hr_qp->rdb); @@ -1391,7 +1443,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, } } -static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset) +static inline void *get_wqe(struct hns_roce_qp 
*hr_qp, u32 offset) { return hns_roce_buf_offset(hr_qp->mtr.kmem, offset); } diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h index aa20827dcc9d..d0d4f2b77d34 100644 --- a/drivers/infiniband/hw/irdma/pble.h +++ b/drivers/infiniband/hw/irdma/pble.h @@ -69,7 +69,7 @@ struct irdma_add_page_info { struct irdma_chunk { struct list_head list; struct irdma_dma_info dmainfo; - void *bitmapbuf; + unsigned long *bitmapbuf; u32 sizeofbitmap; u64 size; diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 8cd5f9261692..456fed94b145 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -21,7 +21,8 @@ static int irdma_query_device(struct ib_device *ibdev, return -EINVAL; memset(props, 0, sizeof(*props)); - ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr); + addrconf_addr_eui48((u8 *)&props->sys_image_guid, + iwdev->netdev->dev_addr); props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 | irdma_fw_minor_ver(&rf->sc_dev); props->device_cap_flags = iwdev->device_cap_flags; @@ -4321,24 +4322,6 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev, return IB_LINK_LAYER_ETHERNET; } -static __be64 irdma_mac_to_guid(struct net_device *ndev) -{ - const unsigned char *mac = ndev->dev_addr; - __be64 guid; - unsigned char *dst = (unsigned char *)&guid; - - dst[0] = mac[0] ^ 2; - dst[1] = mac[1]; - dst[2] = mac[2]; - dst[3] = 0xff; - dst[4] = 0xfe; - dst[5] = mac[3]; - dst[6] = mac[4]; - dst[7] = mac[5]; - - return guid; -} - static const struct ib_device_ops irdma_roce_dev_ops = { .attach_mcast = irdma_attach_mcast, .create_ah = irdma_create_ah, @@ -4408,7 +4391,8 @@ static const struct ib_device_ops irdma_dev_ops = { static void irdma_init_roce_device(struct irdma_device *iwdev) { iwdev->ibdev.node_type = RDMA_NODE_IB_CA; - iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev); + addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, + iwdev->netdev->dev_addr); ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops); } @@ -4421,7 +4405,8 @@ static int irdma_init_iw_device(struct irdma_device *iwdev) struct net_device *netdev = iwdev->netdev; iwdev->ibdev.node_type = RDMA_NODE_RNIC; - ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr); + addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, + netdev->dev_addr); iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref; iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref; iwdev->ibdev.ops.iw_get_qp = irdma_get_qp; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 0d2fa3338784..d66ce7694bbe 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2784,10 +2784,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) if (err) goto err_counter; - ibdev->ib_uc_qpns_bitmap = - kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count), - sizeof(long), - GFP_KERNEL); + ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count, + GFP_KERNEL); if (!ibdev->ib_uc_qpns_bitmap) goto err_steer_qp_release; @@ -2875,7 +2873,7 @@ err_diag_counters: mlx4_ib_diag_cleanup(ibdev); err_steer_free_bitmap: - kfree(ibdev->ib_uc_qpns_bitmap); + bitmap_free(ibdev->ib_uc_qpns_bitmap); err_steer_qp_release: mlx4_qp_release_range(dev, ibdev->steer_qpn_base, @@ -2988,7 +2986,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) mlx4_qp_release_range(dev, ibdev->steer_qpn_base, ibdev->steer_qpn_count); - kfree(ibdev->ib_uc_qpns_bitmap); + 
bitmap_free(ibdev->ib_uc_qpns_bitmap); iounmap(ibdev->uar_map); for (p = 0; p < ibdev->num_ports; ++p) diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c index aef1d274a14e..9f0f79d02d3c 100644 --- a/drivers/infiniband/hw/mthca/mthca_allocator.c +++ b/drivers/infiniband/hw/mthca/mthca_allocator.c @@ -51,7 +51,7 @@ u32 mthca_alloc(struct mthca_alloc *alloc) } if (obj < alloc->max) { - set_bit(obj, alloc->table); + __set_bit(obj, alloc->table); obj |= alloc->top; } else obj = -1; @@ -69,7 +69,7 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj) spin_lock_irqsave(&alloc->lock, flags); - clear_bit(obj, alloc->table); + __clear_bit(obj, alloc->table); alloc->last = min(alloc->last, obj); alloc->top = (alloc->top + alloc->max) & alloc->mask; @@ -79,8 +79,6 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj) int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, u32 reserved) { - int i; - /* num must be a power of 2 */ if (num != 1 << (ffs(num) - 1)) return -EINVAL; @@ -90,21 +88,18 @@ int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, alloc->max = num; alloc->mask = mask; spin_lock_init(&alloc->lock); - alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long), - GFP_KERNEL); + alloc->table = bitmap_zalloc(num, GFP_KERNEL); if (!alloc->table) return -ENOMEM; - bitmap_zero(alloc->table, num); - for (i = 0; i < reserved; ++i) - set_bit(i, alloc->table); + bitmap_set(alloc->table, 0, reserved); return 0; } void mthca_alloc_cleanup(struct mthca_alloc *alloc) { - kfree(alloc->table); + bitmap_free(alloc->table); } /* diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index ce0e0867e488..a59100c496b4 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -101,13 +101,13 @@ static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order) return -1; found: - clear_bit(seg, buddy->bits[o]); + __clear_bit(seg, buddy->bits[o]); --buddy->num_free[o]; while (o > order) { --o; seg <<= 1; - set_bit(seg ^ 1, buddy->bits[o]); + __set_bit(seg ^ 1, buddy->bits[o]); ++buddy->num_free[o]; } @@ -125,13 +125,13 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) spin_lock(&buddy->lock); while (test_bit(seg ^ 1, buddy->bits[order])) { - clear_bit(seg ^ 1, buddy->bits[order]); + __clear_bit(seg ^ 1, buddy->bits[order]); --buddy->num_free[order]; seg >>= 1; ++order; } - set_bit(seg, buddy->bits[order]); + __set_bit(seg, buddy->bits[order]); ++buddy->num_free[order]; spin_unlock(&buddy->lock); @@ -139,7 +139,7 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) { - int i, s; + int i; buddy->max_order = max_order; spin_lock_init(&buddy->lock); @@ -152,22 +152,20 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { - s = BITS_TO_LONGS(1 << (buddy->max_order - i)); - buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL); + buddy->bits[i] = bitmap_zalloc(1 << (buddy->max_order - i), + GFP_KERNEL); if (!buddy->bits[i]) goto err_out_free; - bitmap_zero(buddy->bits[i], - 1 << (buddy->max_order - i)); } - set_bit(0, buddy->bits[buddy->max_order]); + __set_bit(0, buddy->bits[buddy->max_order]); buddy->num_free[buddy->max_order] = 1; return 0; err_out_free: for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + 
bitmap_free(buddy->bits[i]); err_out: kfree(buddy->bits); @@ -181,7 +179,7 @@ static void mthca_buddy_cleanup(struct mthca_buddy *buddy) int i; for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + bitmap_free(buddy->bits[i]); kfree(buddy->bits); kfree(buddy->num_free); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index c51c3f40700e..265a581133dc 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -1506,7 +1506,6 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) { int status = -ENOMEM; - size_t pd_bitmap_size; struct ocrdma_alloc_pd_range *cmd; struct ocrdma_alloc_pd_range_rsp *rsp; @@ -1528,10 +1527,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; dev->pd_mgr->max_dpp_pd = rsp->pd_count; - pd_bitmap_size = - BITS_TO_LONGS(rsp->pd_count) * sizeof(long); - dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size, - GFP_KERNEL); + dev->pd_mgr->pd_dpp_bitmap = bitmap_zalloc(rsp->pd_count, + GFP_KERNEL); } kfree(cmd); } @@ -1547,9 +1544,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; dev->pd_mgr->max_normal_pd = rsp->pd_count; - pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); - dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size, - GFP_KERNEL); + dev->pd_mgr->pd_norm_bitmap = bitmap_zalloc(rsp->pd_count, + GFP_KERNEL); } kfree(cmd); @@ -1611,8 +1607,8 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev) static void ocrdma_free_pd_pool(struct ocrdma_dev *dev) { ocrdma_mbx_dealloc_pd_range(dev); - kfree(dev->pd_mgr->pd_norm_bitmap); - kfree(dev->pd_mgr->pd_dpp_bitmap); + bitmap_free(dev->pd_mgr->pd_norm_bitmap); + bitmap_free(dev->pd_mgr->pd_dpp_bitmap); kfree(dev->pd_mgr); } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 7abf6cf1e937..5d4b3bc16493 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -62,20 +62,6 @@ MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); MODULE_AUTHOR("Emulex Corporation"); MODULE_LICENSE("Dual BSD/GPL"); -void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid) -{ - u8 mac_addr[6]; - - memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN); - guid[0] = mac_addr[0] ^ 2; - guid[1] = mac_addr[1]; - guid[2] = mac_addr[2]; - guid[3] = 0xff; - guid[4] = 0xfe; - guid[5] = mac_addr[3]; - guid[6] = mac_addr[4]; - guid[7] = mac_addr[5]; -} static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, u32 port_num) { @@ -203,7 +189,8 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) { int ret; - ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid); + addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid, + dev->nic_info.mac_addr); BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX); memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC, sizeof(OCRDMA_NODE_DESC)); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 735123d0e9ec..bfa7aad92ead 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -41,6 +41,7 @@ */ #include <linux/dma-mapping.h> +#include <net/addrconf.h> #include <rdma/ib_verbs.h> 
#include <rdma/ib_user_verbs.h> #include <rdma/iw_cm.h> @@ -74,7 +75,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, memset(attr, 0, sizeof *attr); memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); - ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); + addrconf_addr_eui48((u8 *)&attr->sys_image_guid, + dev->nic_info.mac_addr); attr->max_mr_size = dev->attr.max_mr_size; attr->page_size_cap = 0xffff000; attr->vendor_id = dev->nic_info.pdev->vendor; @@ -245,13 +247,13 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool) { u16 pd_bitmap_idx = 0; - const unsigned long *pd_bitmap; + unsigned long *pd_bitmap; if (dpp_pool) { pd_bitmap = dev->pd_mgr->pd_dpp_bitmap; pd_bitmap_idx = find_first_zero_bit(pd_bitmap, dev->pd_mgr->max_dpp_pd); - __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap); + __set_bit(pd_bitmap_idx, pd_bitmap); dev->pd_mgr->pd_dpp_count++; if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh) dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count; @@ -259,7 +261,7 @@ static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool) pd_bitmap = dev->pd_mgr->pd_norm_bitmap; pd_bitmap_idx = find_first_zero_bit(pd_bitmap, dev->pd_mgr->max_normal_pd); - __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap); + __set_bit(pd_bitmap_idx, pd_bitmap); dev->pd_mgr->pd_norm_count++; if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh) dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index b73d742a520c..f860b7fcef33 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -59,7 +59,6 @@ int ocrdma_query_port(struct ib_device *ibdev, u32 port, enum rdma_protocol_type ocrdma_query_protocol(struct ib_device *device, u32 port_num); -void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid); int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey); int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 9100009f0a23..a53476653b0d 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1931,6 +1931,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev, /* db offset was calculated in copy_qp_uresp, now set in the user q */ if (qedr_qp_has_sq(qp)) { qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset; + qp->sq.max_wr = attrs->cap.max_send_wr; rc = qedr_db_recovery_add(dev, qp->usq.db_addr, &qp->usq.db_rec_data->db_data, DB_REC_WIDTH_32B, @@ -1941,6 +1942,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev, if (qedr_qp_has_rq(qp)) { qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset; + qp->rq.max_wr = attrs->cap.max_recv_wr; rc = qedr_db_recovery_add(dev, qp->urq.db_addr, &qp->urq.db_rec_data->db_data, DB_REC_WIDTH_32B, diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 756a83bcff58..5a0e26cd648e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -442,12 +442,10 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index, int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct usnic_ib_pd *pd = to_upd(ibpd); - void 
*umem_pd; - umem_pd = pd->umem_pd = usnic_uiom_alloc_pd(); - if (IS_ERR_OR_NULL(umem_pd)) { - return umem_pd ? PTR_ERR(umem_pd) : -ENOMEM; - } + pd->umem_pd = usnic_uiom_alloc_pd(); + if (IS_ERR(pd->umem_pd)) + return PTR_ERR(pd->umem_pd); return 0; } diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c index bf51357ea3aa..9a4de962e947 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c @@ -63,12 +63,12 @@ int pvrdma_uar_table_init(struct pvrdma_dev *dev) tbl->max = num; tbl->mask = mask; spin_lock_init(&tbl->lock); - tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL); + tbl->table = bitmap_zalloc(num, GFP_KERNEL); if (!tbl->table) return -ENOMEM; /* 0th UAR is taken by the device. */ - set_bit(0, tbl->table); + __set_bit(0, tbl->table); return 0; } @@ -77,7 +77,7 @@ void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev) { struct pvrdma_id_table *tbl = &dev->uar_table.tbl; - kfree(tbl->table); + bitmap_free(tbl->table); } int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) @@ -100,7 +100,7 @@ int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) return -ENOMEM; } - set_bit(obj, tbl->table); + __set_bit(obj, tbl->table); obj |= tbl->top; spin_unlock_irqrestore(&tbl->lock, flags); @@ -120,7 +120,7 @@ void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) obj = uar->index & (tbl->max - 1); spin_lock_irqsave(&tbl->lock, flags); - clear_bit(obj, tbl->table); + __clear_bit(obj, tbl->table); tbl->last = min(tbl->last, obj); tbl->top = (tbl->top + tbl->max) & tbl->mask; spin_unlock_irqrestore(&tbl->lock, flags); |