Diffstat (limited to 'drivers/nvme')
-rw-r--r--	drivers/nvme/host/auth.c	6
-rw-r--r--	drivers/nvme/host/core.c	72
-rw-r--r--	drivers/nvme/host/fabrics.c	51
-rw-r--r--	drivers/nvme/host/fabrics.h	2
-rw-r--r--	drivers/nvme/host/fc.c	8
-rw-r--r--	drivers/nvme/host/ioctl.c	15
-rw-r--r--	drivers/nvme/host/multipath.c	3
-rw-r--r--	drivers/nvme/host/nvme.h	40
-rw-r--r--	drivers/nvme/host/pci.c	22
-rw-r--r--	drivers/nvme/host/rdma.c	23
-rw-r--r--	drivers/nvme/host/tcp.c	40
-rw-r--r--	drivers/nvme/host/zns.c	33
-rw-r--r--	drivers/nvme/target/auth.c	32
-rw-r--r--	drivers/nvme/target/configfs.c	81
-rw-r--r--	drivers/nvme/target/core.c	15
-rw-r--r--	drivers/nvme/target/fabrics-cmd-auth.c	49
-rw-r--r--	drivers/nvme/target/fabrics-cmd.c	11
-rw-r--r--	drivers/nvme/target/fc.c	17
-rw-r--r--	drivers/nvme/target/nvmet.h	9
-rw-r--r--	drivers/nvme/target/rdma.c	32
-rw-r--r--	drivers/nvme/target/tcp.c	11
-rw-r--r--	drivers/nvme/target/zns.c	10
22 files changed, 371 insertions, 211 deletions
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index a264b3ae078b..371e14f0a203 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -730,7 +730,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
 			NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
 	if (ret) {
 		chap->status = ret;
-		chap->error = -ECONNREFUSED;
+		chap->error = -EKEYREJECTED;
 		return;
 	}
 
@@ -797,7 +797,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
 			NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
 	if (ret) {
 		chap->status = ret;
-		chap->error = -ECONNREFUSED;
+		chap->error = -EKEYREJECTED;
 		return;
 	}
 
@@ -818,7 +818,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
 	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
 	if (ret) {
 		/* Controller authentication failed */
-		chap->error = -ECONNREFUSED;
+		chap->error = -EKEYREJECTED;
 		goto fail2;
 	}
 
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 943d72bdd794..954f850f113a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -383,14 +383,14 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
 	if (likely(nvme_req(req)->status == 0))
 		return COMPLETE;
 
-	if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
-		return AUTHENTICATE;
-
 	if (blk_noretry_request(req) ||
 	    (nvme_req(req)->status & NVME_SC_DNR) ||
 	    nvme_req(req)->retries >= nvme_max_retries)
 		return COMPLETE;
 
+	if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
+		return AUTHENTICATE;
+
 	if (req->cmd_flags & REQ_NVME_MPATH) {
 		if (nvme_is_path_error(nvme_req(req)->status) ||
 		    blk_queue_dying(req->q))
@@ -629,27 +629,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
 
 /*
- * Returns true for sink states that can't ever transition back to live.
- */
-static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
-{
-	switch (nvme_ctrl_state(ctrl)) {
-	case NVME_CTRL_NEW:
-	case NVME_CTRL_LIVE:
-	case NVME_CTRL_RESETTING:
-	case NVME_CTRL_CONNECTING:
-		return false;
-	case NVME_CTRL_DELETING:
-	case NVME_CTRL_DELETING_NOIO:
-	case NVME_CTRL_DEAD:
-		return true;
-	default:
-		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
-		return true;
-	}
-}
-
-/*
  * Waits for the controller state to be resetting, or returns false if it is
  * not possible to ever transition to that state.
  */
@@ -2076,6 +2055,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 	bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT;
 	struct queue_limits lim;
 	struct nvme_id_ns_nvm *nvm = NULL;
+	struct nvme_zone_info zi = {};
 	struct nvme_id_ns *id;
 	sector_t capacity;
 	unsigned lbaf;
@@ -2088,9 +2068,10 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 	if (id->ncap == 0) {
 		/* namespace not allocated or attached */
 		info->is_removed = true;
-		ret = -ENODEV;
+		ret = -ENXIO;
 		goto out;
 	}
+	lbaf = nvme_lbaf_index(id->flbas);
 
 	if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
 		ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
@@ -2098,8 +2079,14 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 			goto out;
 	}
 
+	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+	    ns->head->ids.csi == NVME_CSI_ZNS) {
+		ret = nvme_query_zone_info(ns, lbaf, &zi);
+		if (ret < 0)
+			goto out;
+	}
+
 	blk_mq_freeze_queue(ns->disk->queue);
-	lbaf = nvme_lbaf_index(id->flbas);
 	ns->head->lba_shift = id->lbaf[lbaf].ds;
 	ns->head->nuse = le64_to_cpu(id->nuse);
 	capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
@@ -2112,13 +2099,8 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 		capacity = 0;
 	nvme_config_discard(ns, &lim);
 	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
-	    ns->head->ids.csi == NVME_CSI_ZNS) {
-		ret = nvme_update_zone_info(ns, lbaf, &lim);
-		if (ret) {
-			blk_mq_unfreeze_queue(ns->disk->queue);
-			goto out;
-		}
-	}
+	    ns->head->ids.csi == NVME_CSI_ZNS)
+		nvme_update_zone_info(ns, &lim, &zi);
 	ret = queue_limits_commit_update(ns->disk->queue, &lim);
 	if (ret) {
 		blk_mq_unfreeze_queue(ns->disk->queue);
@@ -2150,7 +2132,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 	blk_mq_unfreeze_queue(ns->disk->queue);
 
 	if (blk_queue_is_zoned(ns->queue)) {
-		ret = blk_revalidate_disk_zones(ns->disk, NULL);
+		ret = blk_revalidate_disk_zones(ns->disk);
 		if (ret && !nvme_first_scan(ns->disk))
 			goto out;
 	}
@@ -2201,6 +2183,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
 	}
 
 	if (!ret && nvme_ns_head_multipath(ns->head)) {
+		struct queue_limits *ns_lim = &ns->disk->queue->limits;
 		struct queue_limits lim;
 
 		blk_mq_freeze_queue(ns->head->disk->queue);
@@ -2212,7 +2195,26 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
 		set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
 		nvme_mpath_revalidate_paths(ns);
 
+		/*
+		 * queue_limits mixes values that are the hardware limitations
+		 * for bio splitting with what is the device configuration.
+		 *
+		 * For NVMe the device configuration can change after e.g. a
+		 * Format command, and we really want to pick up the new format
+		 * value here. But we must still stack the queue limits to the
+		 * least common denominator for multipathing to split the bios
+		 * properly.
+		 *
+		 * To work around this, we explicitly set the device
+		 * configuration to those that we just queried, but only stack
+		 * the splitting limits in to make sure we still obey possibly
+		 * lower limitations of other controllers.
+		 */
 		lim = queue_limits_start_update(ns->head->disk->queue);
+		lim.logical_block_size = ns_lim->logical_block_size;
+		lim.physical_block_size = ns_lim->physical_block_size;
+		lim.io_min = ns_lim->io_min;
+		lim.io_opt = ns_lim->io_opt;
 		queue_limits_stack_bdev(&lim, ns->disk->part0, 0,
 					ns->head->disk->disk_name);
 		ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
@@ -3658,7 +3660,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
 			"Found shared namespace %d, but multipathing not supported.\n",
 			info->nsid);
 		dev_warn_once(ctrl->device,
-			"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
+			"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
 	}
 }
 
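The reordering in nvme_decide_disposition() above means a command that fails authentication but carries DNR, or that has exhausted its retry budget, now completes instead of looping through re-authentication forever. A condensed userspace model of the resulting decision order; the status bit values below are stand-ins for illustration, not the kernel's definitions:

#include <stdio.h>

#define SC_DNR		0x4000	/* stand-in for NVME_SC_DNR */
#define SC_AUTH_REQ	0x191	/* stand-in for NVME_SC_AUTH_REQUIRED */

enum disposition { COMPLETE, RETRY, AUTHENTICATE };

static enum disposition decide(unsigned int status, int retries, int max)
{
	if (status == 0)
		return COMPLETE;
	/* DNR/retry exhaustion now wins over AUTH_REQUIRED, so a
	 * persistently failing re-authentication terminates. */
	if ((status & SC_DNR) || retries >= max)
		return COMPLETE;
	if ((status & 0x7ff) == SC_AUTH_REQ)
		return AUTHENTICATE;
	return RETRY;
}

int main(void)
{
	/* AUTH_REQUIRED with DNR set: completes instead of looping */
	printf("%d\n", decide(SC_AUTH_REQ | SC_DNR, 0, 5) == COMPLETE);
	return 0;
}
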
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 1f0ea1f32d22..c6ad2148c2e0 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -428,12 +428,6 @@ static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid,
  * fabrics-protocol connection of the NVMe Admin queue between the
  * host system device and the allocated NVMe controller on the
  * target system via a NVMe Fabrics "Connect" command.
- *
- * Return:
- *	0: success
- *	> 0: NVMe error status code
- *	< 0: Linux errno error code
- *
  */
 int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 {
@@ -467,7 +461,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 		if (result & NVME_CONNECT_AUTHREQ_ASCR) {
 			dev_warn(ctrl->device,
 				 "qid 0: secure concatenation is not supported\n");
-			ret = NVME_SC_AUTH_REQUIRED;
+			ret = -EOPNOTSUPP;
 			goto out_free_data;
 		}
 		/* Authentication required */
@@ -475,14 +469,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 		if (ret) {
 			dev_warn(ctrl->device,
 				 "qid 0: authentication setup failed\n");
-			ret = NVME_SC_AUTH_REQUIRED;
 			goto out_free_data;
 		}
 		ret = nvme_auth_wait(ctrl, 0);
-		if (ret)
+		if (ret) {
 			dev_warn(ctrl->device,
-				 "qid 0: authentication failed\n");
-		else
+				 "qid 0: authentication failed, error %d\n",
+				 ret);
+		} else
 			dev_info(ctrl->device,
 				 "qid 0: authenticated\n");
 	}
@@ -542,7 +536,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 		if (result & NVME_CONNECT_AUTHREQ_ASCR) {
 			dev_warn(ctrl->device,
 				 "qid 0: secure concatenation is not supported\n");
-			ret = NVME_SC_AUTH_REQUIRED;
+			ret = -EOPNOTSUPP;
 			goto out_free_data;
 		}
 		/* Authentication required */
@@ -550,12 +544,13 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 		if (ret) {
 			dev_warn(ctrl->device,
 				 "qid %d: authentication setup failed\n", qid);
-			ret = NVME_SC_AUTH_REQUIRED;
-		} else {
-			ret = nvme_auth_wait(ctrl, qid);
-			if (ret)
-				dev_warn(ctrl->device,
-					 "qid %u: authentication failed\n", qid);
+			goto out_free_data;
+		}
+		ret = nvme_auth_wait(ctrl, qid);
+		if (ret) {
+			dev_warn(ctrl->device,
+				 "qid %u: authentication failed, error %d\n",
+				 qid, ret);
 		}
 	}
 out_free_data:
@@ -564,8 +559,26 @@ out_free_data:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
 
-bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
+/*
+ * Evaluate the status information returned by the transport in order to
+ * decide if a reconnect attempt should be scheduled.
+ *
+ * Do not retry when:
+ *
+ * - the DNR bit is set and the specification states no further connect
+ *   attempts with the same set of parameters should be attempted.
+ *
+ * - when the authentication attempt fails, because the key was invalid.
+ *   This error code is set on the host side.
+ */
+bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
 {
+	if (status > 0 && (status & NVME_SC_DNR))
+		return false;
+
+	if (status == -EKEYREJECTED)
+		return false;
+
 	if (ctrl->opts->max_reconnects == -1 ||
 	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
 		return true;
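nvmf_should_reconnect() now folds the DNR check (previously open-coded in the FC transport) and host-side key rejection into a single policy that all fabrics transports share. A minimal standalone sketch of that policy in isolation, with stand-in constants for NVME_SC_DNR and -EKEYREJECTED:

#include <stdbool.h>

#define SC_DNR		0x4000	/* stand-in for NVME_SC_DNR */
#define EKEYREJECTED_ERR (-129)	/* stand-in for -EKEYREJECTED */

static bool should_reconnect(int status, int nr_reconnects,
			     int max_reconnects)
{
	if (status > 0 && (status & SC_DNR))
		return false;	/* target said: do not retry */
	if (status == EKEYREJECTED_ERR)
		return false;	/* local key failure: retry is futile */
	return max_reconnects == -1 || nr_reconnects < max_reconnects;
}

int main(void)
{
	/* a rejected key is never retried even with attempts left */
	return should_reconnect(EKEYREJECTED_ERR, 0, 5) ? 1 : 0;
}
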
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 37c974c38dcb..602135910ae9 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -223,7 +223,7 @@ int nvmf_register_transport(struct nvmf_transport_ops *ops);
 void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
-bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
+bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status);
 bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
 		struct nvmf_ctrl_options *opts);
 void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 68a5d971657b..f0b081332749 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2428,7 +2428,7 @@ nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
  * controller. Called after last nvme_put_ctrl() call
  */
 static void
-nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
+nvme_fc_free_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
@@ -3310,12 +3310,10 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 		dev_info(ctrl->ctrl.device,
 			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
 			ctrl->cnum, status);
-		if (status > 0 && (status & NVME_SC_DNR))
-			recon = false;
 	} else if (time_after_eq(jiffies, rport->dev_loss_end))
 		recon = false;
 
-	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
+	if (recon && nvmf_should_reconnect(&ctrl->ctrl, status)) {
 		if (portptr->port_state == FC_OBJSTATE_ONLINE)
 			dev_info(ctrl->ctrl.device,
 				"NVME-FC{%d}: Reconnect attempt in %ld "
@@ -3384,7 +3382,7 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
 	.reg_read32		= nvmf_reg_read32,
 	.reg_read64		= nvmf_reg_read64,
 	.reg_write32		= nvmf_reg_write32,
-	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
+	.free_ctrl		= nvme_fc_free_ctrl,
 	.submit_async_event	= nvme_fc_submit_async_event,
 	.delete_ctrl		= nvme_fc_delete_ctrl,
 	.get_address		= nvmf_get_address,
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 3dfd5ae99ae0..499a8bb7cac7 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -423,13 +423,20 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
 	pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
 
 	/*
-	 * For iopoll, complete it directly.
+	 * For iopoll, complete it directly. Note that using the uring_cmd
+	 * helper for this is safe only because we check blk_rq_is_poll().
+	 * As that returns false if we're NOT on a polled queue, then it's
+	 * safe to use the polled completion helper.
+	 *
 	 * Otherwise, move the completion to task work.
 	 */
-	if (blk_rq_is_poll(req))
-		nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
-	else
+	if (blk_rq_is_poll(req)) {
+		if (pdu->bio)
+			blk_rq_unmap_user(pdu->bio);
+		io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
+	} else {
 		io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
+	}
 
 	return RQ_END_IO_FREE;
 }
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5397fb428b24..d16e976ae1a4 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -247,7 +247,8 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
 		if (nvme_path_is_disabled(ns))
 			continue;
 
-		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
+		if (ns->ctrl->numa_node != NUMA_NO_NODE &&
+		    READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
 			distance = node_distance(node, ns->ctrl->numa_node);
 		else
 			distance = LOCAL_DISTANCE;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 24193fcb8bd5..cacc56f4bbf4 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -162,6 +162,11 @@ enum nvme_quirks {
 	 * Disables simple suspend/resume path.
 	 */
 	NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND	= (1 << 20),
+
+	/*
+	 * MSI (but not MSI-X) interrupts are broken and never fire.
+	 */
+	NVME_QUIRK_BROKEN_MSI			= (1 << 21),
 };
 
 /*
@@ -741,6 +746,27 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
 		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
 }
 
+/*
+ * Returns true for sink states that can't ever transition back to live.
+ */
+static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+{
+	switch (nvme_ctrl_state(ctrl)) {
+	case NVME_CTRL_NEW:
+	case NVME_CTRL_LIVE:
+	case NVME_CTRL_RESETTING:
+	case NVME_CTRL_CONNECTING:
+		return false;
+	case NVME_CTRL_DELETING:
+	case NVME_CTRL_DELETING_NOIO:
+	case NVME_CTRL_DEAD:
+		return true;
+	default:
+		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
+		return true;
+	}
+}
+
 void nvme_complete_rq(struct request *req);
 void nvme_complete_batch_req(struct request *req);
 
@@ -1036,10 +1062,18 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
 }
 #endif /* CONFIG_NVME_MULTIPATH */
 
+struct nvme_zone_info {
+	u64 zone_size;
+	unsigned int max_open_zones;
+	unsigned int max_active_zones;
+};
+
 int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
 		unsigned int nr_zones, report_zones_cb cb, void *data);
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
-		struct queue_limits *lim);
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+		struct nvme_zone_info *zi);
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+		struct nvme_zone_info *zi);
 #ifdef CONFIG_BLK_DEV_ZONED
 blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
 				       struct nvme_command *cmnd,
@@ -1114,7 +1148,7 @@ static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
 }
 static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
 {
-	return NVME_SC_AUTH_REQUIRED;
+	return -EPROTONOSUPPORT;
 }
 static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
 #endif
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8e0bb9692685..710043086dff 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1286,6 +1286,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
 	u8 opcode;
 
+	if (nvme_state_terminal(&dev->ctrl))
+		goto disable;
+
 	/* If PCI error recovery process is happening, we cannot reset or
 	 * the recovery mechanism will surely fail.
 	 */
@@ -1390,8 +1393,11 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
 	return BLK_EH_RESET_TIMER;
 
 disable:
-	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
+		if (nvme_state_terminal(&dev->ctrl))
+			nvme_dev_disable(dev, true);
 		return BLK_EH_DONE;
+	}
 
 	nvme_dev_disable(dev, false);
 	if (nvme_try_sched_reset(&dev->ctrl))
@@ -2218,6 +2224,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 		.priv		= dev,
 	};
 	unsigned int irq_queues, poll_queues;
+	unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY;
 
 	/*
 	 * Poll queues don't need interrupts, but we need at least one I/O queue
@@ -2241,8 +2248,10 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 	irq_queues = 1;
 	if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
 		irq_queues += (nr_io_queues - poll_queues);
-	return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
-			      PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+	if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
+		flags &= ~PCI_IRQ_MSI;
+	return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags,
+					      &affd);
 }
 
 static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
@@ -2471,6 +2480,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 {
 	int result = -ENOMEM;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
+	unsigned int flags = PCI_IRQ_ALL_TYPES;
 
 	if (pci_enable_device_mem(pdev))
 		return result;
@@ -2487,7 +2497,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
 	 * adjust this later.
 	 */
-	result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+	if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
+		flags &= ~PCI_IRQ_MSI;
+	result = pci_alloc_irq_vectors(pdev, 1, 1, flags);
 	if (result < 0)
 		goto disable;
 
@@ -3384,6 +3396,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
 				NVME_QUIRK_DISABLE_WRITE_ZEROES|
 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+	{ PCI_DEVICE(0x15b7, 0x5008),	/* Sandisk SN530 */
+		.driver_data = NVME_QUIRK_BROKEN_MSI },
 	{ PCI_DEVICE(0x1987, 0x5012),	/* Phison E12 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1987, 0x5016),	/* Phison E16 */
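For reference, the quirk handling above only narrows the set of interrupt types handed to pci_alloc_irq_vectors(); MSI-X and legacy INTx remain available. An illustrative standalone model with stand-in flag values (the real PCI_IRQ_* masks live in linux/pci.h):

#include <stdio.h>

#define IRQ_INTX	(1u << 0)	/* stand-ins for PCI_IRQ_* */
#define IRQ_MSI		(1u << 1)
#define IRQ_MSIX	(1u << 2)
#define IRQ_ALL		(IRQ_INTX | IRQ_MSI | IRQ_MSIX)
#define QUIRK_BROKEN_MSI (1u << 21)

static unsigned int irq_flags(unsigned int quirks)
{
	unsigned int flags = IRQ_ALL;

	if (quirks & QUIRK_BROKEN_MSI)
		flags &= ~IRQ_MSI;	/* MSI-X and INTx still allowed */
	return flags;
}

int main(void)
{
	printf("%#x\n", irq_flags(QUIRK_BROKEN_MSI));	/* prints 0x5 */
	return 0;
}
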
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 366f0bb4ebfc..51a62b0c645a 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -982,7 +982,8 @@ free_ctrl:
 	kfree(ctrl);
 }
 
-static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
+static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl,
+		int status)
 {
 	enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
 
@@ -992,7 +993,7 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
 		return;
 	}
 
-	if (nvmf_should_reconnect(&ctrl->ctrl)) {
+	if (nvmf_should_reconnect(&ctrl->ctrl, status)) {
 		dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
 			ctrl->ctrl.opts->reconnect_delay);
 		queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
@@ -1104,10 +1105,12 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 {
 	struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
 			struct nvme_rdma_ctrl, reconnect_work);
+	int ret;
 
 	++ctrl->ctrl.nr_reconnects;
 
-	if (nvme_rdma_setup_ctrl(ctrl, false))
+	ret = nvme_rdma_setup_ctrl(ctrl, false);
+	if (ret)
 		goto requeue;
 
 	dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
@@ -1118,9 +1121,9 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 	return;
 
 requeue:
-	dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
-			ctrl->ctrl.nr_reconnects);
-	nvme_rdma_reconnect_or_remove(ctrl);
+	dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d/%d\n",
+		 ctrl->ctrl.nr_reconnects, ctrl->ctrl.opts->max_reconnects);
+	nvme_rdma_reconnect_or_remove(ctrl, ret);
 }
 
 static void nvme_rdma_error_recovery_work(struct work_struct *work)
@@ -1145,7 +1148,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 		return;
 	}
 
-	nvme_rdma_reconnect_or_remove(ctrl);
+	nvme_rdma_reconnect_or_remove(ctrl, 0);
 }
 
 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
@@ -2169,6 +2172,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 {
 	struct nvme_rdma_ctrl *ctrl = container_of(work,
 			struct nvme_rdma_ctrl, ctrl.reset_work);
+	int ret;
 
 	nvme_stop_ctrl(&ctrl->ctrl);
 	nvme_rdma_shutdown_ctrl(ctrl, false);
@@ -2179,14 +2183,15 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 		return;
 	}
 
-	if (nvme_rdma_setup_ctrl(ctrl, false))
+	ret = nvme_rdma_setup_ctrl(ctrl, false);
+	if (ret)
 		goto out_fail;
 
 	return;
 
 out_fail:
 	++ctrl->ctrl.nr_reconnects;
-	nvme_rdma_reconnect_or_remove(ctrl);
+	nvme_rdma_reconnect_or_remove(ctrl, ret);
 }
 
 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index fdbcdcedcee9..8b5e4327fe83 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -360,12 +360,18 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
 	} while (ret > 0);
 }
 
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue)
 {
 	return !list_empty(&queue->send_list) ||
 		!llist_empty(&queue->req_list);
 }
 
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+	return !nvme_tcp_tls(&queue->ctrl->ctrl) &&
+		nvme_tcp_queue_has_pending(queue);
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		bool sync, bool last)
 {
@@ -386,7 +392,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		mutex_unlock(&queue->send_mutex);
 	}
 
-	if (last && nvme_tcp_queue_more(queue))
+	if (last && nvme_tcp_queue_has_pending(queue))
 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 }
 
@@ -2155,7 +2161,8 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
 	nvme_tcp_destroy_io_queues(ctrl, remove);
 }
 
-static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
+static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
+		int status)
 {
 	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
 
@@ -2165,13 +2172,14 @@ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
 		return;
 	}
 
-	if (nvmf_should_reconnect(ctrl)) {
+	if (nvmf_should_reconnect(ctrl, status)) {
 		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
 			ctrl->opts->reconnect_delay);
 		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
 				ctrl->opts->reconnect_delay * HZ);
 	} else {
-		dev_info(ctrl->device, "Removing controller...\n");
+		dev_info(ctrl->device, "Removing controller (%d)...\n",
+			 status);
 		nvme_delete_ctrl(ctrl);
 	}
 }
@@ -2252,23 +2260,25 @@ static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
 			struct nvme_tcp_ctrl, connect_work);
 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+	int ret;
 
 	++ctrl->nr_reconnects;
 
-	if (nvme_tcp_setup_ctrl(ctrl, false))
+	ret = nvme_tcp_setup_ctrl(ctrl, false);
+	if (ret)
 		goto requeue;
 
-	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
-			ctrl->nr_reconnects);
+	dev_info(ctrl->device, "Successfully reconnected (attempt %d/%d)\n",
+		 ctrl->nr_reconnects, ctrl->opts->max_reconnects);
 
 	ctrl->nr_reconnects = 0;
 
 	return;
 
 requeue:
-	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
-			ctrl->nr_reconnects);
-	nvme_tcp_reconnect_or_remove(ctrl);
+	dev_info(ctrl->device, "Failed reconnect attempt %d/%d\n",
+		 ctrl->nr_reconnects, ctrl->opts->max_reconnects);
+	nvme_tcp_reconnect_or_remove(ctrl, ret);
 }
 
 static void nvme_tcp_error_recovery_work(struct work_struct *work)
@@ -2295,7 +2305,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
 		return;
 	}
 
-	nvme_tcp_reconnect_or_remove(ctrl);
+	nvme_tcp_reconnect_or_remove(ctrl, 0);
 }
 
 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
@@ -2315,6 +2325,7 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl =
 		container_of(work, struct nvme_ctrl, reset_work);
+	int ret;
 
 	nvme_stop_ctrl(ctrl);
 	nvme_tcp_teardown_ctrl(ctrl, false);
@@ -2328,14 +2339,15 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
 		return;
 	}
 
-	if (nvme_tcp_setup_ctrl(ctrl, false))
+	ret = nvme_tcp_setup_ctrl(ctrl, false);
+	if (ret)
 		goto out_fail;
 
 	return;
 
 out_fail:
 	++ctrl->nr_reconnects;
-	nvme_tcp_reconnect_or_remove(ctrl);
+	nvme_tcp_reconnect_or_remove(ctrl, ret);
 }
 
 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
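The tcp.c split above separates two questions: whether io_work needs a kick (nvme_tcp_queue_has_pending) and whether the submitter may keep sending inline from its own context (nvme_tcp_queue_more, which now also requires TLS to be off, so with TLS every send funnels through the io_work context). A reduced model with stub types, for illustration only:

#include <stdbool.h>
#include <stdio.h>

struct queue { bool tls; int pending; };

static bool queue_has_pending(const struct queue *q)
{
	return q->pending > 0;	/* drives the "kick io_work" decision */
}

static bool queue_more(const struct queue *q)
{
	/* with TLS, inline sends are disallowed; io_work does them all */
	return !q->tls && queue_has_pending(q);
}

int main(void)
{
	struct queue q = { .tls = true, .pending = 3 };

	printf("kick=%d inline=%d\n", queue_has_pending(&q), queue_more(&q));
	return 0;
}
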
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 722384bcc765..77aa0f440a6d 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -35,8 +35,8 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
 	return 0;
 }
 
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
-		struct queue_limits *lim)
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+		struct nvme_zone_info *zi)
 {
 	struct nvme_effects_log *log = ns->head->effects;
 	struct nvme_command c = { };
@@ -89,27 +89,34 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
 		goto free_data;
 	}
 
-	ns->head->zsze =
-		nvme_lba_to_sect(ns->head, le64_to_cpu(id->lbafe[lbaf].zsze));
-	if (!is_power_of_2(ns->head->zsze)) {
+	zi->zone_size = le64_to_cpu(id->lbafe[lbaf].zsze);
+	if (!is_power_of_2(zi->zone_size)) {
 		dev_warn(ns->ctrl->device,
-			"invalid zone size:%llu for namespace:%u\n",
-			ns->head->zsze, ns->head->ns_id);
+			 "invalid zone size: %llu for namespace: %u\n",
+			 zi->zone_size, ns->head->ns_id);
 		status = -ENODEV;
 		goto free_data;
 	}
+	zi->max_open_zones = le32_to_cpu(id->mor) + 1;
+	zi->max_active_zones = le32_to_cpu(id->mar) + 1;
 
-	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
-	lim->zoned = 1;
-	lim->max_open_zones = le32_to_cpu(id->mor) + 1;
-	lim->max_active_zones = le32_to_cpu(id->mar) + 1;
-	lim->chunk_sectors = ns->head->zsze;
-	lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
 free_data:
 	kfree(id);
 	return status;
 }
 
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+		struct nvme_zone_info *zi)
+{
+	lim->zoned = 1;
+	lim->max_open_zones = zi->max_open_zones;
+	lim->max_active_zones = zi->max_active_zones;
+	lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
+	lim->chunk_sectors = ns->head->zsze =
+		nvme_lba_to_sect(ns->head, zi->zone_size);
+	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
+}
+
 static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
 					  unsigned int nr_zones, size_t *buflen)
 {
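Together with the core.c hunk earlier, this split yields a query-then-apply pattern: the Identify (CSI=ZNS) round trip happens before the queue is frozen, and only the cached values are applied under the freeze. A condensed restatement of the call sequence from the core.c hunk (error paths trimmed; this sketch is not compilable outside the kernel tree):

	struct nvme_zone_info zi = {};

	/* phase 1: issue Identify before freezing the queue */
	ret = nvme_query_zone_info(ns, lbaf, &zi);
	if (ret < 0)
		goto out;

	blk_mq_freeze_queue(ns->disk->queue);
	/* phase 2: apply cached geometry; no I/O while frozen */
	nvme_update_zone_info(ns, &lim, &zi);
	ret = queue_limits_commit_update(ns->disk->queue, &lim);
	blk_mq_unfreeze_queue(ns->disk->queue);
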
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 3ddbc3880cac..7d2633940f9b 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -44,6 +44,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
 	dhchap_secret = kstrdup(secret, GFP_KERNEL);
 	if (!dhchap_secret)
 		return -ENOMEM;
+	down_write(&nvmet_config_sem);
 	if (set_ctrl) {
 		kfree(host->dhchap_ctrl_secret);
 		host->dhchap_ctrl_secret = strim(dhchap_secret);
@@ -53,6 +54,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
 		host->dhchap_secret = strim(dhchap_secret);
 		host->dhchap_key_hash = key_hash;
 	}
+	up_write(&nvmet_config_sem);
 	return 0;
 }
 
@@ -124,12 +126,11 @@ int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
 	return ret;
 }
 
-int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 {
 	int ret = 0;
 	struct nvmet_host_link *p;
 	struct nvmet_host *host = NULL;
-	const char *hash_name;
 
 	down_read(&nvmet_config_sem);
 	if (nvmet_is_disc_subsys(ctrl->subsys))
@@ -147,13 +148,16 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 	}
 	if (!host) {
 		pr_debug("host %s not found\n", ctrl->hostnqn);
-		ret = -EPERM;
+		ret = NVME_AUTH_DHCHAP_FAILURE_FAILED;
 		goto out_unlock;
 	}
 
 	ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
-	if (ret < 0)
+	if (ret < 0) {
 		pr_warn("Failed to setup DH group");
+		ret = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+		goto out_unlock;
+	}
 
 	if (!host->dhchap_secret) {
 		pr_debug("No authentication provided\n");
@@ -164,12 +168,6 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 		pr_debug("Re-use existing hash ID %d\n",
 			 ctrl->shash_id);
 	} else {
-		hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
-		if (!hash_name) {
-			pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id);
-			ret = -EINVAL;
-			goto out_unlock;
-		}
 		ctrl->shash_id = host->dhchap_hash_id;
 	}
 
@@ -178,7 +176,7 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 	ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10,
 					       host->dhchap_key_hash);
 	if (IS_ERR(ctrl->host_key)) {
-		ret = PTR_ERR(ctrl->host_key);
+		ret = NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE;
 		ctrl->host_key = NULL;
 		goto out_free_hash;
 	}
@@ -196,7 +194,7 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 	ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10,
 					       host->dhchap_ctrl_key_hash);
 	if (IS_ERR(ctrl->ctrl_key)) {
-		ret = PTR_ERR(ctrl->ctrl_key);
+		ret = NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE;
 		ctrl->ctrl_key = NULL;
 		goto out_free_hash;
 	}
@@ -285,9 +283,9 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
 	}
 
 	if (shash_len != crypto_shash_digestsize(shash_tfm)) {
-		pr_debug("%s: hash len mismatch (len %d digest %d)\n",
-			 __func__, shash_len,
-			 crypto_shash_digestsize(shash_tfm));
+		pr_err("%s: hash len mismatch (len %d digest %d)\n",
+		       __func__, shash_len,
+		       crypto_shash_digestsize(shash_tfm));
 		ret = -EINVAL;
 		goto out_free_tfm;
 	}
@@ -370,7 +368,7 @@ out_free_response:
 	nvme_auth_free_key(transformed_key);
 out_free_tfm:
 	crypto_free_shash(shash_tfm);
-	return 0;
+	return ret;
 }
 
 int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
@@ -480,7 +478,7 @@ out_free_response:
 	nvme_auth_free_key(transformed_key);
 out_free_tfm:
 	crypto_free_shash(shash_tfm);
-	return 0;
+	return ret;
 }
 
 int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
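nvmet_setup_auth() now reports protocol-level DH-CHAP failure reason codes rather than negative errnos, so callers can map the result directly onto the failure message sent back to the host. An illustrative mapping with stand-in values; the real NVME_AUTH_DHCHAP_FAILURE_* constants live in the NVMe auth headers:

#include <stdio.h>

/* stand-ins for the NVME_AUTH_DHCHAP_FAILURE_* reason codes */
enum { DHCHAP_OK = 0, DHCHAP_FAILED = 1, DHCHAP_NOT_USABLE = 2,
       DHCHAP_DHGROUP_UNUSABLE = 3 };

static const char *dhchap_reason(unsigned char st)
{
	switch (st) {
	case DHCHAP_OK:			return "ok";
	case DHCHAP_FAILED:		return "authentication failed";
	case DHCHAP_NOT_USABLE:		return "secret not usable";
	case DHCHAP_DHGROUP_UNUSABLE:	return "DH group unusable";
	default:			return "unknown";
	}
}

int main(void)
{
	printf("%s\n", dhchap_reason(DHCHAP_NOT_USABLE));
	return 0;
}
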
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 77a6e817b315..7c43a0ad6877 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -754,6 +754,18 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
 	NULL,
 };
 
+bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
+{
+	struct config_item *ns_item;
+	char name[12];
+
+	snprintf(name, sizeof(name), "%u", nsid);
+	mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
+	ns_item = config_group_find_item(&subsys->namespaces_group, name);
+	mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
+	return ns_item != NULL;
+}
+
 static void nvmet_ns_release(struct config_item *item)
 {
 	struct nvmet_ns *ns = to_nvmet_ns(item);
@@ -1613,6 +1625,11 @@ static struct config_group *nvmet_subsys_make(struct config_group *group,
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (sysfs_streq(name, nvmet_disc_subsys->subsysnqn)) {
+		pr_err("can't create subsystem using unique discovery NQN\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
 	if (IS_ERR(subsys))
 		return ERR_CAST(subsys);
@@ -1990,11 +2007,17 @@ static struct config_group nvmet_ports_group;
 static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
 		char *page)
 {
-	u8 *dhchap_secret = to_host(item)->dhchap_secret;
+	u8 *dhchap_secret;
+	ssize_t ret;
 
+	down_read(&nvmet_config_sem);
+	dhchap_secret = to_host(item)->dhchap_secret;
 	if (!dhchap_secret)
-		return sprintf(page, "\n");
-	return sprintf(page, "%s\n", dhchap_secret);
+		ret = sprintf(page, "\n");
+	else
+		ret = sprintf(page, "%s\n", dhchap_secret);
+	up_read(&nvmet_config_sem);
+	return ret;
 }
 
 static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
@@ -2018,10 +2041,16 @@ static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
 		char *page)
 {
 	u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
+	ssize_t ret;
 
+	down_read(&nvmet_config_sem);
+	dhchap_secret = to_host(item)->dhchap_ctrl_secret;
 	if (!dhchap_secret)
-		return sprintf(page, "\n");
-	return sprintf(page, "%s\n", dhchap_secret);
+		ret = sprintf(page, "\n");
+	else
+		ret = sprintf(page, "%s\n", dhchap_secret);
+	up_read(&nvmet_config_sem);
+	return ret;
 }
 
 static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
@@ -2159,7 +2188,49 @@ static const struct config_item_type nvmet_hosts_type = {
 
 static struct config_group nvmet_hosts_group;
 
+static ssize_t nvmet_root_discovery_nqn_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%s\n",
+			nvmet_disc_subsys->subsysnqn);
+}
+
+static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct list_head *entry;
+	size_t len;
+
+	len = strcspn(page, "\n");
+	if (!len || len > NVMF_NQN_FIELD_LEN - 1)
+		return -EINVAL;
+
+	down_write(&nvmet_config_sem);
+	list_for_each(entry, &nvmet_subsystems_group.cg_children) {
+		struct config_item *item =
+			container_of(entry, struct config_item, ci_entry);
+
+		if (!strncmp(config_item_name(item), page, len)) {
+			pr_err("duplicate NQN %s\n", config_item_name(item));
+			up_write(&nvmet_config_sem);
+			return -EINVAL;
+		}
+	}
+	memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
+	memcpy(nvmet_disc_subsys->subsysnqn, page, len);
+	up_write(&nvmet_config_sem);
+
+	return len;
+}
+
+CONFIGFS_ATTR(nvmet_root_, discovery_nqn);
+
+static struct configfs_attribute *nvmet_root_attrs[] = {
+	&nvmet_root_attr_discovery_nqn,
+	NULL,
+};
+
 static const struct config_item_type nvmet_root_type = {
+	.ct_attrs		= nvmet_root_attrs,
 	.ct_owner		= THIS_MODULE,
 };
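Assuming configfs is mounted in the usual place, the new discovery_nqn attribute can be driven from userspace like any other configfs file. A small illustrative C program; the mount point and the NQN below are examples only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *nqn = "nqn.2014-08.org.example:discovery\n";
	int fd = open("/sys/kernel/config/nvmet/discovery_nqn", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, nqn, strlen(nqn)) < 0)
		perror("write");
	close(fd);
	return 0;
}
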
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6bbe4df0166c..2fde22323622 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -437,10 +437,13 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
 u16 nvmet_req_find_ns(struct nvmet_req *req)
 {
 	u32 nsid = le32_to_cpu(req->cmd->common.nsid);
+	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
 
-	req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
+	req->ns = xa_load(&subsys->namespaces, nsid);
 	if (unlikely(!req->ns)) {
 		req->error_loc = offsetof(struct nvme_common_command, nsid);
+		if (nvmet_subsys_nsid_exists(subsys, nsid))
+			return NVME_SC_INTERNAL_PATH_ERROR;
 		return NVME_SC_INVALID_NS | NVME_SC_DNR;
 	}
 
@@ -1541,6 +1544,13 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
 	}
 
 	down_read(&nvmet_config_sem);
+	if (!strncmp(nvmet_disc_subsys->subsysnqn, subsysnqn,
+		     NVMF_NQN_SIZE)) {
+		if (kref_get_unless_zero(&nvmet_disc_subsys->ref)) {
+			up_read(&nvmet_config_sem);
+			return nvmet_disc_subsys;
+		}
+	}
 	list_for_each_entry(p, &port->subsystems, entry) {
 		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
 			     NVMF_NQN_SIZE)) {
@@ -1676,7 +1686,8 @@ static int __init nvmet_init(void)
 	if (!buffered_io_wq)
 		goto out_free_zbd_work_queue;
 
-	nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+	nvmet_wq = alloc_workqueue("nvmet-wq",
+			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 	if (!nvmet_wq)
 		goto out_free_buffered_work_queue;
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index eb7785be0ca7..d61b8c6ff3b2 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -31,7 +31,7 @@ void nvmet_auth_sq_init(struct nvmet_sq *sq)
 	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
 }
 
-static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
+static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmf_auth_dhchap_negotiate_data *data = d;
@@ -109,7 +109,7 @@ static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
 	return 0;
 }
 
-static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
+static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmf_auth_dhchap_reply_data *data = d;
@@ -172,7 +172,7 @@ static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
 	return 0;
 }
 
-static u16 nvmet_auth_failure2(void *d)
+static u8 nvmet_auth_failure2(void *d)
 {
 	struct nvmf_auth_dhchap_failure_data *data = d;
 
@@ -186,6 +186,7 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 	void *d;
 	u32 tl;
 	u16 status = 0;
+	u8 dhchap_status;
 
 	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
@@ -237,30 +238,32 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
 		if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
 			/* Restart negotiation */
-			pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
-				 ctrl->cntlid, req->sq->qid);
+			pr_debug("%s: ctrl %d qid %d reset negotiation\n",
+				 __func__, ctrl->cntlid, req->sq->qid);
 			if (!req->sq->qid) {
-				if (nvmet_setup_auth(ctrl) < 0) {
-					status = NVME_SC_INTERNAL;
-					pr_err("ctrl %d qid 0 failed to setup"
-					       "re-authentication",
+				dhchap_status = nvmet_setup_auth(ctrl);
+				if (dhchap_status) {
+					pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
 					       ctrl->cntlid);
-					goto done_failure1;
+					req->sq->dhchap_status = dhchap_status;
+					req->sq->dhchap_step =
+						NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+					goto done_kfree;
 				}
 			}
-			req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+			req->sq->dhchap_step =
+				NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
 		} else if (data->auth_id != req->sq->dhchap_step)
 			goto done_failure1;
 		/* Validate negotiation parameters */
-		status = nvmet_auth_negotiate(req, d);
-		if (status == 0)
+		dhchap_status = nvmet_auth_negotiate(req, d);
+		if (dhchap_status == 0)
 			req->sq->dhchap_step =
 				NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
 		else {
 			req->sq->dhchap_step =
 				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
-			req->sq->dhchap_status = status;
-			status = 0;
+			req->sq->dhchap_status = dhchap_status;
 		}
 		goto done_kfree;
 	}
@@ -284,15 +287,14 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 
 	switch (data->auth_id) {
 	case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
-		status = nvmet_auth_reply(req, d);
-		if (status == 0)
+		dhchap_status = nvmet_auth_reply(req, d);
+		if (dhchap_status == 0)
 			req->sq->dhchap_step =
 				NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
 		else {
 			req->sq->dhchap_step =
 				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
-			req->sq->dhchap_status = status;
-			status = 0;
+			req->sq->dhchap_status = dhchap_status;
 		}
 		goto done_kfree;
 	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
@@ -301,13 +303,12 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 			 __func__, ctrl->cntlid, req->sq->qid);
 		goto done_kfree;
 	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
-		status = nvmet_auth_failure2(d);
-		if (status) {
+		dhchap_status = nvmet_auth_failure2(d);
+		if (dhchap_status) {
 			pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
-				ctrl->cntlid, req->sq->qid, status);
-			req->sq->dhchap_status = status;
+				ctrl->cntlid, req->sq->qid, dhchap_status);
+			req->sq->dhchap_status = dhchap_status;
 			req->sq->authenticated = false;
-			status = 0;
 		}
 		goto done_kfree;
 	default:
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index b23f4cf840bd..042b379cbb36 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -211,7 +211,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	struct nvmf_connect_data *d;
 	struct nvmet_ctrl *ctrl = NULL;
 	u16 status;
-	int ret;
+	u8 dhchap_status;
 
 	if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
 		return;
@@ -254,11 +254,12 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 
 	uuid_copy(&ctrl->hostid, &d->hostid);
 
-	ret = nvmet_setup_auth(ctrl);
-	if (ret < 0) {
-		pr_err("Failed to setup authentication, error %d\n", ret);
+	dhchap_status = nvmet_setup_auth(ctrl);
+	if (dhchap_status) {
+		pr_err("Failed to setup authentication, dhchap status %u\n",
+		       dhchap_status);
 		nvmet_ctrl_put(ctrl);
-		if (ret == -EPERM)
+		if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
 			status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
 		else
 			status = NVME_SC_INTERNAL;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index fd229f310c93..337ee1cb09ae 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1115,16 +1115,21 @@ nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
 }
 
 static bool
-nvmet_fc_assoc_exits(struct nvmet_fc_tgtport *tgtport, u64 association_id)
+nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
 {
 	struct nvmet_fc_tgt_assoc *a;
+	bool found = false;
 
+	rcu_read_lock();
 	list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
-		if (association_id == a->association_id)
-			return true;
+		if (association_id == a->association_id) {
+			found = true;
+			break;
+		}
 	}
+	rcu_read_unlock();
 
-	return false;
+	return found;
 }
 
 static struct nvmet_fc_tgt_assoc *
@@ -1164,13 +1169,11 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 		ran = ran << BYTES_FOR_QID_SHIFT;
 
 		spin_lock_irqsave(&tgtport->lock, flags);
-		rcu_read_lock();
-		if (!nvmet_fc_assoc_exits(tgtport, ran)) {
+		if (!nvmet_fc_assoc_exists(tgtport, ran)) {
 			assoc->association_id = ran;
 			list_add_tail_rcu(&assoc->a_list,
 					  &tgtport->assoc_list);
 			done = true;
 		}
-		rcu_read_unlock();
 		spin_unlock_irqrestore(&tgtport->lock, flags);
 	} while (!done);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index f460728e1df1..2f22b07eab29 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -113,8 +113,8 @@ struct nvmet_sq {
 	bool			authenticated;
 	struct delayed_work	auth_expired_work;
 	u16			dhchap_tid;
-	u16			dhchap_status;
-	int			dhchap_step;
+	u8			dhchap_status;
+	u8			dhchap_step;
 	u8			*dhchap_c1;
 	u8			*dhchap_c2;
 	u32			dhchap_s1;
@@ -543,6 +543,7 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
 		struct nvmet_host *host);
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 		u8 event_info, u8 log_page);
+bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
 
 #define NVMET_MIN_QUEUE_SIZE	16
 #define NVMET_MAX_QUEUE_SIZE	1024
@@ -713,7 +714,7 @@ void nvmet_execute_auth_receive(struct nvmet_req *req);
 int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
 		       bool set_ctrl);
 int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
-int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
+u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl);
 void nvmet_auth_sq_init(struct nvmet_sq *sq);
 void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
 void nvmet_auth_sq_free(struct nvmet_sq *sq);
@@ -732,7 +733,7 @@ int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
 int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
 			    u8 *buf, int buf_size);
 #else
-static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 {
 	return 0;
 }
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 5b8c63e74639..689bb5d3cfdc 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -474,12 +474,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
 	return 0;
 
 out_free:
-	while (--i >= 0) {
-		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
-
-		list_del(&rsp->free_list);
-		nvmet_rdma_free_rsp(ndev, rsp);
-	}
+	while (--i >= 0)
+		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
 	kfree(queue->rsps);
 out:
 	return ret;
@@ -490,12 +486,8 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
 	struct nvmet_rdma_device *ndev = queue->dev;
 	int i, nr_rsps = queue->recv_queue_size * 2;
 
-	for (i = 0; i < nr_rsps; i++) {
-		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
-
-		list_del(&rsp->free_list);
-		nvmet_rdma_free_rsp(ndev, rsp);
-	}
+	for (i = 0; i < nr_rsps; i++)
+		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
 	kfree(queue->rsps);
 }
 
@@ -1814,18 +1806,14 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
 
 static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
 {
-	struct nvmet_rdma_queue *queue;
+	struct nvmet_rdma_queue *queue, *n;
 
-restart:
 	mutex_lock(&nvmet_rdma_queue_mutex);
-	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
-		if (queue->nvme_sq.ctrl == ctrl) {
-			list_del_init(&queue->queue_list);
-			mutex_unlock(&nvmet_rdma_queue_mutex);
-
-			__nvmet_rdma_queue_disconnect(queue);
-			goto restart;
-		}
+	list_for_each_entry_safe(queue, n, &nvmet_rdma_queue_list, queue_list) {
+		if (queue->nvme_sq.ctrl != ctrl)
+			continue;
+		list_del_init(&queue->queue_list);
+		__nvmet_rdma_queue_disconnect(queue);
 	}
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 }
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index a5422e2c979a..380f22ee3ebb 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -348,6 +348,7 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
 	return 0;
 }
 
+/* If cmd buffers are NULL, no operation is performed */
 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
 {
 	kfree(cmd->iov);
@@ -1581,13 +1582,9 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
 	struct nvmet_tcp_cmd *cmd = queue->cmds;
 	int i;
 
-	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
-		if (nvmet_tcp_need_data_in(cmd))
-			nvmet_tcp_free_cmd_buffers(cmd);
-	}
-
-	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
-		nvmet_tcp_free_cmd_buffers(&queue->connect);
+	for (i = 0; i < queue->nr_cmds; i++, cmd++)
+		nvmet_tcp_free_cmd_buffers(cmd);
+	nvmet_tcp_free_cmd_buffers(&queue->connect);
 }
 
 static void nvmet_tcp_release_queue_work(struct work_struct *w)
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 3148d9f1bde6..0021d06041c1 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -52,14 +52,10 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
 	if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
 		return false;
 	/*
-	 * ZNS does not define a conventional zone type. If the underlying
-	 * device has a bitmap set indicating the existence of conventional
-	 * zones, reject the device. Otherwise, use report zones to detect if
-	 * the device has conventional zones.
+	 * ZNS does not define a conventional zone type. Use report zones
+	 * to detect if the device has conventional zones and reject it if
+	 * it does.
	 */
-	if (ns->bdev->bd_disk->conv_zones_bitmap)
-		return false;
-
 	ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
 				  validate_conv_zones_cb, NULL);
 	if (ret < 0)