From 8dc2ed3f3e5ba245828ad89968f6818be8996e9d Mon Sep 17 00:00:00 2001
From: Max Gurtovoy
Date: Mon, 8 Apr 2019 18:39:58 +0300
Subject: nvmet-rdma: remove p2p_client initialization from fast-path

Initialize it during command allocation.

Cc: Logan Gunthorpe
Cc: Stephen Bates
Signed-off-by: Max Gurtovoy
Reviewed-by: Logan Gunthorpe
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/rdma.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ef893addf341..b7275218dfa5 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -373,6 +373,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
 	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
 		goto out_free_rsp;
 
+	r->req.p2p_client = &ndev->device->dev;
 	r->send_sge.length = sizeof(*r->req.rsp);
 	r->send_sge.lkey = ndev->pd->local_dma_lkey;
 
@@ -763,8 +764,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 			cmd->send_sge.addr, cmd->send_sge.length,
 			DMA_TO_DEVICE);
 
-	cmd->req.p2p_client = &queue->dev->device->dev;
-
 	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
 			&queue->nvme_sq, &nvmet_rdma_ops))
 		return;
-- 
cgit

From fc6c9730725d5cc57c851d0e261a5682bba913a7 Mon Sep 17 00:00:00 2001
From: Max Gurtovoy
Date: Mon, 8 Apr 2019 18:39:59 +0300
Subject: nvmet: rename nvme_completion instances from rsp to cqe

Use NVMe naming conventions to improve code readability.

Signed-off-by: Max Gurtovoy
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/core.c        | 22 +++++++++++-----------
 drivers/nvme/target/fabrics-cmd.c | 16 ++++++++--------
 drivers/nvme/target/fc.c          |  2 +-
 drivers/nvme/target/loop.c        |  6 +++---
 drivers/nvme/target/nvmet.h       |  4 ++--
 drivers/nvme/target/rdma.c        | 18 +++++++++---------
 drivers/nvme/target/tcp.c         |  8 ++++----
 7 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 4d8dd29479c0..1c1776c3e316 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -647,7 +647,7 @@ static void nvmet_update_sq_head(struct nvmet_req *req)
 		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) != old_sqhd);
 	}
-	req->rsp->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
+	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
 }
 
 static void nvmet_set_error(struct nvmet_req *req, u16 status)
@@ -656,7 +656,7 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
 	struct nvme_error_slot *new_error_slot;
 	unsigned long flags;
 
-	req->rsp->status = cpu_to_le16(status << 1);
+	req->cqe->status = cpu_to_le16(status << 1);
 
 	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
 		return;
@@ -676,15 +676,15 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
 	spin_unlock_irqrestore(&ctrl->error_lock, flags);
 
 	/* set the more bit for this request */
-	req->rsp->status |= cpu_to_le16(1 << 14);
+	req->cqe->status |= cpu_to_le16(1 << 14);
 }
 
 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
 {
 	if (!req->sq->sqhd_disabled)
 		nvmet_update_sq_head(req);
-	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
-	req->rsp->command_id = req->cmd->common.command_id;
+	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
+	req->cqe->command_id = req->cmd->common.command_id;
 
 	if (unlikely(status))
 		nvmet_set_error(req, status);
@@ -841,8 +841,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 	req->sg = NULL;
 	req->sg_cnt = 0;
 	req->transfer_len = 0;
-	req->rsp->status = 0;
-	req->rsp->sq_head = 0;
+	req->cqe->status = 0;
+	req->cqe->sq_head = 0;
 	req->ns = NULL;
 	req->error_loc = NVMET_NO_ERROR_LOC;
 	req->error_slba = 0;
@@ -1069,7 +1069,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 	}
 
@@ -1090,7 +1090,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
 	pr_warn("could not find controller %d for subsys %s / host %s\n",
 		cntlid, subsysnqn, hostnqn);
-	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 
 out:
@@ -1188,7 +1188,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		goto out;
 	}
 
@@ -1197,7 +1197,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	if (!nvmet_host_allowed(subsys, hostnqn)) {
 		pr_info("connect by host %s for subsystem %s not allowed\n",
 			hostnqn, subsysnqn);
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
 		up_read(&nvmet_config_sem);
 		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
 		goto out_put_subsystem;
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 3a76ebc3d155..3b9f79aba98f 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -72,7 +72,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
 			offsetof(struct nvmf_property_get_command, attrib);
 	}
 
-	req->rsp->result.u64 = cpu_to_le64(val);
+	req->cqe->result.u64 = cpu_to_le64(val);
 	nvmet_req_complete(req, status);
 }
 
@@ -124,7 +124,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 	if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
 		req->sq->sqhd_disabled = true;
-		req->rsp->sq_head = cpu_to_le16(0xffff);
+		req->cqe->sq_head = cpu_to_le16(0xffff);
 	}
 
 	if (ctrl->ops->install_queue) {
@@ -158,7 +158,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		goto out;
 
 	/* zero out initial completion result, assign values as needed */
-	req->rsp->result.u32 = 0;
+	req->cqe->result.u32 = 0;
 
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
@@ -172,7 +172,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		pr_warn("connect attempt for invalid controller ID %#x\n",
 			d->cntlid);
 		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 		goto out;
 	}
 
@@ -195,7 +195,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	pr_info("creating controller %d for subsystem %s for NQN %s.\n",
 		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
-	req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
+	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
 
 out:
 	kfree(d);
@@ -222,7 +222,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 		goto out;
 
 	/* zero out initial completion result, assign values as needed */
-	req->rsp->result.u32 = 0;
+	req->cqe->result.u32 = 0;
 
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
@@ -240,14 +240,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	if (unlikely(qid > ctrl->subsys->max_qid)) {
 		pr_warn("invalid queue id (%d)\n", qid);
 		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
 		goto out_ctrl_put;
 	}
 
 	status = nvmet_install_queue(ctrl, req);
 	if (status) {
 		/* pass back cntlid that had the issue of installing queue */
-		req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
+		req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
 		goto out_ctrl_put;
 	}
 
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 9369a11fe7a9..508661af0f50 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2184,7 +2184,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	}
 
 	fod->req.cmd = &fod->cmdiubuf.sqe;
-	fod->req.rsp = &fod->rspiubuf.cqe;
+	fod->req.cqe = &fod->rspiubuf.cqe;
 	fod->req.port = tgtport->pe->port;
 
 	/* clear any response payload */
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index b9f623ab01f3..a3ae491fa20e 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -18,7 +18,7 @@ struct nvme_loop_iod {
 	struct nvme_request	nvme_req;
 	struct nvme_command	cmd;
-	struct nvme_completion	rsp;
+	struct nvme_completion	cqe;
 	struct nvmet_req	req;
 	struct nvme_loop_queue	*queue;
 	struct work_struct	work;
@@ -94,7 +94,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
 {
 	struct nvme_loop_queue *queue =
 		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
-	struct nvme_completion *cqe = req->rsp;
+	struct nvme_completion *cqe = req->cqe;
 
 	/*
 	 * AEN requests are special as they don't time out and can
@@ -207,7 +207,7 @@ static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
 		struct nvme_loop_iod *iod, unsigned int queue_idx)
 {
 	iod->req.cmd = &iod->cmd;
-	iod->req.rsp = &iod->rsp;
+	iod->req.cqe = &iod->cqe;
 	iod->queue = &ctrl->queues[queue_idx];
 	INIT_WORK(&iod->work, nvme_loop_execute_work);
 	return 0;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 1653d19b187f..c25d88fc9dec 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -284,7 +284,7 @@ struct nvmet_fabrics_ops {
 struct nvmet_req {
 	struct nvme_command	*cmd;
-	struct nvme_completion	*rsp;
+	struct nvme_completion	*cqe;
 	struct nvmet_sq		*sq;
 	struct nvmet_cq		*cq;
 	struct nvmet_ns		*ns;
@@ -322,7 +322,7 @@ extern struct workqueue_struct *buffered_io_wq;
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {
-	req->rsp->result.u32 = cpu_to_le32(result);
+	req->cqe->result.u32 = cpu_to_le32(result);
 }
 
 /*
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index b7275218dfa5..36d906a7f70d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -160,7 +160,7 @@ static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
 {
 	return !nvme_is_write(rsp->req.cmd) &&
 		rsp->req.transfer_len &&
-		!rsp->req.rsp->status &&
+		!rsp->req.cqe->status &&
 		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
 }
 
@@ -364,17 +364,17 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
 		struct nvmet_rdma_rsp *r)
 {
 	/* NVMe CQE / RDMA SEND */
-	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
-	if (!r->req.rsp)
+	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
+	if (!r->req.cqe)
 		goto out;
 
-	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
-			sizeof(*r->req.rsp), DMA_TO_DEVICE);
+	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
+			sizeof(*r->req.cqe), DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
 		goto out_free_rsp;
 
 	r->req.p2p_client = &ndev->device->dev;
-	r->send_sge.length = sizeof(*r->req.rsp);
+	r->send_sge.length = sizeof(*r->req.cqe);
 	r->send_sge.lkey = ndev->pd->local_dma_lkey;
 
 	r->send_cqe.done = nvmet_rdma_send_done;
@@ -389,7 +389,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
 	return 0;
 
 out_free_rsp:
-	kfree(r->req.rsp);
+	kfree(r->req.cqe);
 out:
 	return -ENOMEM;
 }
@@ -398,8 +398,8 @@ static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
 		struct nvmet_rdma_rsp *r)
 {
 	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
-				sizeof(*r->req.rsp), DMA_TO_DEVICE);
-	kfree(r->req.rsp);
+				sizeof(*r->req.cqe), DMA_TO_DEVICE);
+	kfree(r->req.cqe);
 }
 
 static int
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 0a941abf56ec..17cf137dc88c 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -161,14 +161,14 @@ static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
 
 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
 {
-	return nvmet_tcp_has_data_in(cmd) && !cmd->req.rsp->status;
+	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
 }
 
 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
 {
 	return !nvme_is_write(cmd->req.cmd) &&
 		cmd->req.transfer_len > 0 &&
-		!cmd->req.rsp->status;
+		!cmd->req.cqe->status;
 }
 
 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
@@ -378,7 +378,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 	pdu->hdr.plen =
 		cpu_to_le32(pdu->hdr.hlen + hdgst + cmd->req.transfer_len + ddgst);
-	pdu->command_id = cmd->req.rsp->command_id;
+	pdu->command_id = cmd->req.cqe->command_id;
 	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
 	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
@@ -1224,7 +1224,7 @@ static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
 			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
 	if (!c->rsp_pdu)
 		goto out_free_cmd;
-	c->req.rsp = &c->rsp_pdu->cqe;
+	c->req.cqe = &c->rsp_pdu->cqe;
 
 	c->data_pdu = page_frag_alloc(&queue->pf_cache,
 			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
-- 
cgit

From 6b7e631b927ca1266b2695307ab71ed7764af75e Mon Sep 17 00:00:00 2001
From: Minwoo Im
Date: Sun, 7 Apr 2019 15:28:06 +0900
Subject: nvmet: return a specified error if subsys_alloc fails

nvmet_subsys_alloc() returns a pointer to the new subsystem, or NULL on
failure. The function can fail at three different steps:

  1. memory allocation
  2. argument check
  3. memory allocation for a string

But the callers currently return -ENOMEM for all of them, even when the
failure was an invalid parameter (case 2). Return specific error codes
so that each caller can propagate the real cause of the failure to its
own caller.

Signed-off-by: Minwoo Im
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/configfs.c  | 4 ++--
 drivers/nvme/target/core.c      | 6 +++---
 drivers/nvme/target/discovery.c | 4 ++--
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index adb79545cdd7..08dd5af357f7 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -898,8 +898,8 @@ static struct config_group *nvmet_subsys_make(struct config_group *group,
 	}
 
 	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
-	if (!subsys)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(subsys))
+		return ERR_CAST(subsys);
 
 	config_group_init_type_name(&subsys->group, name,
 			&nvmet_subsys_type);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 1c1776c3e316..24e0a36392d9 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1367,7 +1367,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
 	if (!subsys)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
 
 	/* generate a random serial number as our controllers are ephemeral: */
@@ -1383,14 +1383,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 	default:
 		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
 		kfree(subsys);
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	}
 	subsys->type = type;
 	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
 			GFP_KERNEL);
 	if (!subsys->subsysnqn) {
 		kfree(subsys);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	kref_init(&subsys->ref);
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 33ed95e72d6b..e8e09266bfa5 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -372,8 +372,8 @@ int __init nvmet_init_discovery(void)
 {
 	nvmet_disc_subsys =
 		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
-	if (!nvmet_disc_subsys)
-		return -ENOMEM;
+	if (IS_ERR(nvmet_disc_subsys))
+		return PTR_ERR(nvmet_disc_subsys);
 	return 0;
 }
-- 
cgit
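For reference, the ERR_PTR() convention the patch above switches to looks
roughly as follows. This is a minimal sketch: foo_alloc() and struct foo
are hypothetical stand-ins, not part of the patch, while ERR_PTR(),
IS_ERR(), PTR_ERR() and ERR_CAST() are the real helpers from <linux/err.h>.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

struct foo {
	char *name;
};

/* Encode the failure reason in the returned pointer instead of NULL. */
static struct foo *foo_alloc(const char *name)
{
	struct foo *f;

	if (!name)
		return ERR_PTR(-EINVAL);	/* bad argument, not a memory error */

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return ERR_PTR(-ENOMEM);

	f->name = kstrdup(name, GFP_KERNEL);
	if (!f->name) {
		kfree(f);
		return ERR_PTR(-ENOMEM);
	}
	return f;
}

/* An int-returning caller propagates the encoded error with PTR_ERR(). */
static int foo_init(const char *name)
{
	struct foo *f = foo_alloc(name);

	if (IS_ERR(f))
		return PTR_ERR(f);
	return 0;
}

A caller that itself returns a pointer forwards the encoded error with
ERR_CAST() instead, which is exactly what nvmet_subsys_make() does above.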
From a5dffbb66d250a7ef07e27a2e75b8d9d7af2ab41 Mon Sep 17 00:00:00 2001
From: "Enrico Weigelt, metux IT consult"
Date: Wed, 24 Apr 2019 12:34:39 +0200
Subject: nvmet: include <linux/scatterlist.h>

Build breaks:

  drivers/nvme/target/core.c: In function 'nvmet_req_alloc_sgl':
  drivers/nvme/target/core.c:939:12: error: implicit declaration of \
      function 'sgl_alloc'; did you mean 'bio_alloc'? \
      [-Werror=implicit-function-declaration]
    req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
              ^~~~~~~~~
              bio_alloc
  drivers/nvme/target/core.c:939:10: warning: assignment makes pointer \
      from integer without a cast [-Wint-conversion]
    req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
            ^
  drivers/nvme/target/core.c: In function 'nvmet_req_free_sgl':
  drivers/nvme/target/core.c:952:3: error: implicit declaration of \
      function 'sgl_free'; did you mean 'ida_free'? \
      [-Werror=implicit-function-declaration]
    sgl_free(req->sg);
    ^~~~~~~~
    ida_free

Cause:

  1. missing include of <linux/scatterlist.h>
  2. SGL_ALLOC needs to be enabled

Therefore add the missing include as well as the Kconfig dependency.

Signed-off-by: Enrico Weigelt, metux IT consult
Reviewed-by: Sagi Grimberg
Reviewed-by: Minwoo Im
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/Kconfig | 1 +
 drivers/nvme/target/core.c  | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index d94f25cde019..3ef0a4e5eed6 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -3,6 +3,7 @@ config NVME_TARGET
 	tristate "NVMe Target support"
 	depends on BLOCK
 	depends on CONFIGFS_FS
+	select SGL_ALLOC
 	help
 	  This enabled target side support for the NVMe protocol, that is
 	  it allows the Linux kernel to implement NVMe subsystems and
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 24e0a36392d9..7734a6acff85 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/rculist.h>
+#include <linux/scatterlist.h>
 
 #include "nvmet.h"
-- 
cgit
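For context, sgl_alloc() and sgl_free() are the helpers that the
CONFIG_SGL_ALLOC option provides via <linux/scatterlist.h>. A minimal
sketch of their use, mirroring the nvmet_req_alloc_sgl() call quoted in
the build log above (sgl_roundtrip() is a hypothetical name, not kernel
code):

#include <linux/gfp.h>
#include <linux/scatterlist.h>

/* Allocate a scatterlist covering @len bytes, then release it again. */
static int sgl_roundtrip(unsigned long long len)
{
	unsigned int nents;
	struct scatterlist *sgl;

	sgl = sgl_alloc(len, GFP_KERNEL, &nents);	/* NULL on failure */
	if (!sgl)
		return -ENOMEM;

	/* ... hand sgl/nents to DMA mapping or block layer code ... */

	sgl_free(sgl);
	return 0;
}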
From 525ec495e021068aa8635a0e18ff60695f5b1f4f Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Wed, 24 Apr 2019 11:43:23 -0700
Subject: nvmet-file: clamp-down file namespace lba_shift

When the backing file is a tempfile, for example, the inode i_blkbits
can correspond to a block size as large as 1M, which hosts cannot
support as the disk block size. Instead, expose the minimum of
i_blkbits and 12 (4K sector size).

Signed-off-by: Sagi Grimberg
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/io-cmd-file.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index bc6ebb51b0bf..05453f5d1448 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -49,7 +49,12 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
 		goto err;
 
 	ns->size = stat.size;
-	ns->blksize_shift = file_inode(ns->file)->i_blkbits;
+	/*
+	 * i_blkbits can be greater than the universally accepted upper bound,
+	 * so make sure we export a sane namespace lba_shift.
+	 */
+	ns->blksize_shift = min_t(u8,
+			file_inode(ns->file)->i_blkbits, 12);
 
 	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
 			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
-- 
cgit

From 569b3d3db1aac8586a16df1745c9e5a99ff47253 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Wed, 24 Apr 2019 11:53:16 -0700
Subject: nvmet-tcp: don't fail maxr2t greater than 1

The host may support a larger maxr2t, but nothing prevents us from
sending a single R2T at a time, as we do anyway.

Signed-off-by: Sagi Grimberg
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/tcp.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 17cf137dc88c..69b83fa0c76c 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -774,12 +774,6 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
 		return -EPROTO;
 	}
 
-	if (icreq->maxr2t != 0) {
-		pr_err("queue %d: unsupported maxr2t %d\n", queue->idx,
-			le32_to_cpu(icreq->maxr2t) + 1);
-		return -EPROTO;
-	}
-
 	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
 	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
 	if (queue->hdr_digest || queue->data_digest) {
-- 
cgit

From 7a42589654ae79e1177f0d74306a02d6cef7bddf Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Wed, 24 Apr 2019 11:53:17 -0700
Subject: nvme-tcp: fix a NULL deref when an admin connect times out

If we time out during the admin startup sequence, we might not yet have
an I/O tagset allocated, which causes the teardown sequence to crash.
Make nvme_tcp_teardown_io_queues safe by not iterating inflight tags if
the tagset wasn't allocated.

Fixes: 39d57757467b ("nvme-tcp: fix timeout handler")
Signed-off-by: Sagi Grimberg
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/tcp.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 68c49dd67210..aae5374d2b93 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1710,7 +1710,9 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
 {
 	blk_mq_quiesce_queue(ctrl->admin_q);
 	nvme_tcp_stop_queue(ctrl, 0);
-	blk_mq_tagset_busy_iter(ctrl->admin_tagset, nvme_cancel_request, ctrl);
+	if (ctrl->admin_tagset)
+		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
+			nvme_cancel_request, ctrl);
 	blk_mq_unquiesce_queue(ctrl->admin_q);
 	nvme_tcp_destroy_admin_queue(ctrl, remove);
 }
@@ -1722,7 +1724,9 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
 		return;
 	nvme_stop_queues(ctrl);
 	nvme_tcp_stop_io_queues(ctrl);
-	blk_mq_tagset_busy_iter(ctrl->tagset, nvme_cancel_request, ctrl);
+	if (ctrl->tagset)
+		blk_mq_tagset_busy_iter(ctrl->tagset,
+			nvme_cancel_request, ctrl);
 	if (remove)
 		nvme_start_queues(ctrl);
 	nvme_tcp_destroy_io_queues(ctrl, remove);
-- 
cgit

From 1007709d7d06fab09bf2d007657575958676282b Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Wed, 24 Apr 2019 11:53:18 -0700
Subject: nvme-rdma: fix a NULL deref when an admin connect times out

If we time out during the admin startup sequence, we might not yet have
an I/O tagset allocated, which causes the teardown sequence to crash.
Make nvme_rdma_teardown_io_queues safe by not iterating inflight tags if
the tagset wasn't allocated.
Fixes: 4c174e636674 ("nvme-rdma: fix timeout handler")
Signed-off-by: Sagi Grimberg
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/rdma.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 11a5ecae78c8..e1824c2e0a1c 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -914,8 +914,9 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 {
 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
-	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request,
-		&ctrl->ctrl);
+	if (ctrl->ctrl.admin_tagset)
+		blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
+			nvme_cancel_request, &ctrl->ctrl);
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_destroy_admin_queue(ctrl, remove);
 }
@@ -926,8 +927,9 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 	if (ctrl->ctrl.queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		nvme_rdma_stop_io_queues(ctrl);
-		blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request,
-			&ctrl->ctrl);
+		if (ctrl->ctrl.tagset)
+			blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
+				nvme_cancel_request, &ctrl->ctrl);
 		if (remove)
 			nvme_start_queues(&ctrl->ctrl);
 		nvme_rdma_destroy_io_queues(ctrl, remove);
-- 
cgit

From efb973b19b88642bb7e08b8ce8e03b0bbd2a7e2a Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Wed, 24 Apr 2019 11:53:19 -0700
Subject: nvme-tcp: rename function to have nvme_tcp prefix

Usually the nvme_ prefix is reserved for core functions. While we're
cleaning up, remove redundant empty lines.

Signed-off-by: Sagi Grimberg
Reviewed-by: Minwoo Im
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/tcp.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index aae5374d2b93..2405bb9c63cc 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -473,7 +473,6 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
 	}
 
 	return 0;
-
 }
 
 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
@@ -634,7 +633,6 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status)
 	nvme_end_request(rq, cpu_to_le16(status << 1), res);
 }
 
-
 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 		unsigned int *offset, size_t *len)
 {
@@ -1535,7 +1533,7 @@ out_free_queue:
 	return ret;
 }
 
-static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
+static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 {
 	int i, ret;
 
@@ -1565,7 +1563,7 @@ static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
 	return nr_io_queues;
 }
 
-static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
+static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 {
 	unsigned int nr_io_queues;
 	int ret;
@@ -1582,7 +1580,7 @@ static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
 	dev_info(ctrl->device,
 		"creating %d I/O queues.\n", nr_io_queues);
 
-	return nvme_tcp_alloc_io_queues(ctrl);
+	return __nvme_tcp_alloc_io_queues(ctrl);
 }
 
 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
@@ -1599,7 +1597,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 {
 	int ret;
 
-	ret = nvme_alloc_io_queues(ctrl);
+	ret = nvme_tcp_alloc_io_queues(ctrl);
 	if (ret)
 		return ret;
-- 
cgit

From 663d6fee66b555f6a080104751be0b54e0bca78a Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Mon, 15 Apr 2019 09:51:46 +0800
Subject: nvme-loop: kill timeout handler
Firstly, it doesn't make sense to handle timeouts for loop:

1) for the admin queue, the request is always completed in the code
   path that queues the I/O.

2) for normal I/O requests, timeouts on these I/Os are already handled
   by the underlying queue.

Secondly, nvme-loop's timeout handler is simply broken and can easily
cause issues:

1) there is no synchronization/protection between timeout and normal
   completion, and it is now the driver's responsibility to deal with
   that;

2) the reset implementation is bad: blk_mq_update_nr_hw_queues() is
   called after every namespace's queue has been stopped (quiesced),
   which can easily trigger a deadlock.

So kill the timeout handler.

Signed-off-by: Ming Lei
Reviewed-by: Keith Busch
Reviewed-by: Sagi Grimberg
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/loop.c | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index a3ae491fa20e..9e211ad6bdd3 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -129,20 +129,6 @@ static void nvme_loop_execute_work(struct work_struct *work)
 	nvmet_req_execute(&iod->req);
 }
 
-static enum blk_eh_timer_return
-nvme_loop_timeout(struct request *rq, bool reserved)
-{
-	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);
-
-	/* queue error recovery */
-	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);
-
-	/* fail with DNR on admin cmd timeout */
-	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
-
-	return BLK_EH_DONE;
-}
-
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
@@ -253,7 +239,6 @@ static const struct blk_mq_ops nvme_loop_mq_ops = {
 	.complete	= nvme_loop_complete_rq,
 	.init_request	= nvme_loop_init_request,
 	.init_hctx	= nvme_loop_init_hctx,
-	.timeout	= nvme_loop_timeout,
 };
 
 static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
@@ -261,7 +246,6 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 	.complete	= nvme_loop_complete_rq,
 	.init_request	= nvme_loop_init_request,
 	.init_hctx	= nvme_loop_init_admin_hctx,
-	.timeout	= nvme_loop_timeout,
 };
-- 
cgit

From 82bebbde02e24ad7b641eca25e632f32579ed52f Mon Sep 17 00:00:00 2001
From: Minwoo Im
Date: Wed, 10 Apr 2019 23:48:59 +0900
Subject: nvme-rdma: fix typo in struct comment

struct nvme_rdma_cm_rej has two attributes: recfmt and sts. It is sts
that holds the error status this comment describes.

Signed-off-by: Minwoo Im
Reviewed-by: Sagi Grimberg
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 include/linux/nvme-rdma.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
index 3aa97b98dc89..3ec8e50efa16 100644
--- a/include/linux/nvme-rdma.h
+++ b/include/linux/nvme-rdma.h
@@ -77,7 +77,7 @@ struct nvme_rdma_cm_rep {
  * struct nvme_rdma_cm_rej - rdma connect reject
  *
  * @recfmt: format of the RDMA Private Data
- * @fsts: error status for the associated connect request
+ * @sts: error status for the associated connect request
 */
 struct nvme_rdma_cm_rej {
 	__le16		recfmt;
-- 
cgit

From 01fa017484ad98fccdeaab32db0077c574b6bd6f Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Mon, 11 Mar 2019 15:02:25 -0700
Subject: nvme: set 0 capacity if namespace block size exceeds PAGE_SIZE

If our target exposed a namespace with a block size that is greater
than PAGE_SIZE, set 0 capacity on the namespace, as we do not support
it. This issue was encountered when the nvmet namespace was backed by
a tempfile.
Signed-off-by: Sagi Grimberg
Reviewed-by: Keith Busch
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/core.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 248ff3b48041..3dd043aa6d1f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1591,6 +1591,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
 	sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
 	unsigned short bs = 1 << ns->lba_shift;
 
+	if (ns->lba_shift > PAGE_SHIFT) {
+		/* unsupported block size, set capacity to 0 later */
+		bs = (1 << 9);
+	}
 	blk_mq_freeze_queue(disk->queue);
 	blk_integrity_unregister(disk);
 
@@ -1601,7 +1605,8 @@ static void nvme_update_disk_info(struct gendisk *disk,
 	if (ns->ms && !ns->ext &&
 	    (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
 		nvme_init_integrity(disk, ns->ms, ns->pi_type);
-	if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk))
+	if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
+	    ns->lba_shift > PAGE_SHIFT)
 		capacity = 0;
 
 	set_capacity(disk, capacity);
-- 
cgit
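Taken together with the nvmet-file clamp earlier in this series, the two
block-size policies are plain shift arithmetic. A small standalone sketch
(userspace C, illustrative values only, assuming 4K pages):

#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;		/* 4K pages */
	unsigned int i_blkbits = 20;		/* 1M-block tempfile backing store */

	/* target side (nvmet-file): clamp the exported lba_shift to 4K */
	unsigned int blksize_shift = i_blkbits < 12 ? i_blkbits : 12;

	/* host side: zero the capacity when lba_shift exceeds PAGE_SHIFT,
	 * as an unfixed target could still report an oversized block size */
	unsigned int lba_shift = 20;
	unsigned long long nsze = 1ULL << 20;	/* LBA count from Identify */
	unsigned long long sectors = lba_shift > page_shift ?
			0 : nsze << (lba_shift - 9);	/* 512-byte sectors */

	printf("exported blksize_shift=%u, capacity=%llu sectors\n",
	       blksize_shift, sectors);
	return 0;
}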