Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/Makefile       |    2
-rw-r--r--  drivers/nvme/target/admin-cmd.c    |  307
-rw-r--r--  drivers/nvme/target/configfs.c     |   50
-rw-r--r--  drivers/nvme/target/core.c         |  158
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c  |    7
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c  |    2
-rw-r--r--  drivers/nvme/target/nvmet.h        |   74
-rw-r--r--  drivers/nvme/target/passthru.c     |    2
-rw-r--r--  drivers/nvme/target/pr.c           | 1155
-rw-r--r--  drivers/nvme/target/trace.c        |  108
-rw-r--r--  drivers/nvme/target/zns.c          |   21
11 files changed, 1800 insertions(+), 86 deletions(-)
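The bulk of the change is the new pr.c, which implements persistent reservations for the target. The admin-cmd.c hunks below also begin reporting endurance-group statistics, with each namespace emulated as its own endurance group; data units follow the SMART convention of thousands of 512-byte sectors, rounded up. A minimal userspace model of that accounting (the helper name and the open-coded DIV_ROUND_UP are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* data units are thousands of 512-byte units, rounded up */
static uint64_t data_units(uint64_t sectors)
{
	return (sectors + 999) / 1000;	/* DIV_ROUND_UP(sectors, 1000) */
}

int main(void)
{
	uint64_t read_sectors = 123456789;

	/* same value the patch stores via put_unaligned_le64() in log->dur */
	printf("%llu sectors read -> %llu data units\n",
	       (unsigned long long)read_sectors,
	       (unsigned long long)data_units(read_sectors));
	return 0;
}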
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile index c402c44350b2..f2b025bbe10c 100644 --- a/drivers/nvme/target/Makefile +++ b/drivers/nvme/target/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \ - discovery.o io-cmd-file.o io-cmd-bdev.o + discovery.o io-cmd-file.o io-cmd-bdev.o pr.o nvmet-$(CONFIG_NVME_TARGET_DEBUGFS) += debugfs.o nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 081f0473cd9e..fa89b0549c36 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -71,6 +71,35 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req) nvmet_req_complete(req, 0); } +static void nvmet_execute_get_supported_log_pages(struct nvmet_req *req) +{ + struct nvme_supported_log *logs; + u16 status; + + logs = kzalloc(sizeof(*logs), GFP_KERNEL); + if (!logs) { + status = NVME_SC_INTERNAL; + goto out; + } + + logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP); + + status = nvmet_copy_to_sgl(req, 0, logs, sizeof(*logs)); + kfree(logs); +out: + nvmet_req_complete(req, status); +} + static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, struct nvme_smart_log *slog) { @@ -110,7 +139,7 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req, unsigned long idx; ctrl = req->sq->ctrl; - xa_for_each(&ctrl->subsys->namespaces, idx, ns) { + nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { /* we don't have the right data for file backed ns */ if (!ns->bdev) continue; @@ -130,6 +159,45 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req, return NVME_SC_SUCCESS; } +static void nvmet_execute_get_log_page_rmi(struct nvmet_req *req) +{ + struct nvme_rotational_media_log *log; + struct gendisk *disk; + u16 status; + + req->cmd->common.nsid = cpu_to_le32(le16_to_cpu( + req->cmd->get_log_page.lsi)); + status = nvmet_req_find_ns(req); + if (status) + goto out; + + if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) { + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto out; + } + + if (req->transfer_len != sizeof(*log)) { + status = NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; + goto out; + } + + log = kzalloc(sizeof(*log), GFP_KERNEL); + if (!log) + goto out; + + log->endgid = req->cmd->get_log_page.lsi; + disk = req->ns->bdev->bd_disk; + if (disk && disk->ia_ranges) + log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges); + else + log->numa = cpu_to_le16(1); + + status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); + kfree(log); +out: + nvmet_req_complete(req, status); +} + static void nvmet_execute_get_log_page_smart(struct nvmet_req *req) { struct nvme_smart_log *log; @@ -176,6 +244,10 @@ static void 
nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log) log->iocs[nvme_cmd_read] = log->iocs[nvme_cmd_flush] = log->iocs[nvme_cmd_dsm] = + log->iocs[nvme_cmd_resv_acquire] = + log->iocs[nvme_cmd_resv_register] = + log->iocs[nvme_cmd_resv_release] = + log->iocs[nvme_cmd_resv_report] = cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); log->iocs[nvme_cmd_write] = log->iocs[nvme_cmd_write_zeroes] = @@ -259,9 +331,10 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid, u32 count = 0; if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) { - xa_for_each(&ctrl->subsys->namespaces, idx, ns) + nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { if (ns->anagrpid == grpid) desc->nsids[count++] = cpu_to_le32(ns->nsid); + } } desc->grpid = cpu_to_le32(grpid); @@ -272,6 +345,49 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid, return struct_size(desc, nsids, count); } +static void nvmet_execute_get_log_page_endgrp(struct nvmet_req *req) +{ + u64 host_reads, host_writes, data_units_read, data_units_written; + struct nvme_endurance_group_log *log; + u16 status; + + /* + * The target driver emulates each endurance group as its own + * namespace, reusing the nsid as the endurance group identifier. + */ + req->cmd->common.nsid = cpu_to_le32(le16_to_cpu( + req->cmd->get_log_page.lsi)); + status = nvmet_req_find_ns(req); + if (status) + goto out; + + log = kzalloc(sizeof(*log), GFP_KERNEL); + if (!log) { + status = NVME_SC_INTERNAL; + goto out; + } + + if (!req->ns->bdev) + goto copy; + + host_reads = part_stat_read(req->ns->bdev, ios[READ]); + data_units_read = + DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000); + host_writes = part_stat_read(req->ns->bdev, ios[WRITE]); + data_units_written = + DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000); + + put_unaligned_le64(host_reads, &log->hrc[0]); + put_unaligned_le64(data_units_read, &log->dur[0]); + put_unaligned_le64(host_writes, &log->hwc[0]); + put_unaligned_le64(data_units_written, &log->duw[0]); +copy: + status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); + kfree(log); +out: + nvmet_req_complete(req, status); +} + static void nvmet_execute_get_log_page_ana(struct nvmet_req *req) { struct nvme_ana_rsp_hdr hdr = { 0, }; @@ -317,12 +433,44 @@ out: nvmet_req_complete(req, status); } +static void nvmet_execute_get_log_page_features(struct nvmet_req *req) +{ + struct nvme_supported_features_log *features; + u16 status; + + features = kzalloc(sizeof(*features), GFP_KERNEL); + if (!features) { + status = NVME_SC_INTERNAL; + goto out; + } + + features->fis[NVME_FEAT_NUM_QUEUES] = + cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE); + features->fis[NVME_FEAT_KATO] = + cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE); + features->fis[NVME_FEAT_ASYNC_EVENT] = + cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE); + features->fis[NVME_FEAT_HOST_ID] = + cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE); + features->fis[NVME_FEAT_WRITE_PROTECT] = + cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE); + features->fis[NVME_FEAT_RESV_MASK] = + cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE); + + status = nvmet_copy_to_sgl(req, 0, features, sizeof(*features)); + kfree(features); +out: + nvmet_req_complete(req, status); +} + static void nvmet_execute_get_log_page(struct nvmet_req *req) { if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd))) return; switch (req->cmd->get_log_page.lid) { + case NVME_LOG_SUPPORTED: + return nvmet_execute_get_supported_log_pages(req); case NVME_LOG_ERROR: return 
nvmet_execute_get_log_page_error(req); case NVME_LOG_SMART: @@ -338,8 +486,16 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req) return nvmet_execute_get_log_changed_ns(req); case NVME_LOG_CMD_EFFECTS: return nvmet_execute_get_log_cmd_effects_ns(req); + case NVME_LOG_ENDURANCE_GROUP: + return nvmet_execute_get_log_page_endgrp(req); case NVME_LOG_ANA: return nvmet_execute_get_log_page_ana(req); + case NVME_LOG_FEATURES: + return nvmet_execute_get_log_page_features(req); + case NVME_LOG_RMI: + return nvmet_execute_get_log_page_rmi(req); + case NVME_LOG_RESERVATION: + return nvmet_execute_get_log_page_resv(req); } pr_debug("unhandled lid %d on qid %d\n", req->cmd->get_log_page.lid, req->sq->qid); @@ -433,7 +589,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES); id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES); id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM | - NVME_CTRL_ONCS_WRITE_ZEROES); + NVME_CTRL_ONCS_WRITE_ZEROES | + NVME_CTRL_ONCS_RESERVATIONS); /* XXX: don't report vwc if the underlying device is write through */ id->vwc = NVME_CTRL_VWC_PRESENT; @@ -445,11 +602,12 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->awun = 0; id->awupf = 0; - id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ + /* we always support SGLs */ + id->sgls = cpu_to_le32(NVME_CTRL_SGLS_BYTE_ALIGNED); if (ctrl->ops->flags & NVMF_KEYED_SGLS) - id->sgls |= cpu_to_le32(1 << 2); + id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_KSDBDS); if (req->port->inline_data_size) - id->sgls |= cpu_to_le32(1 << 20); + id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_SAOS); strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn)); @@ -467,6 +625,13 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->msdbd = ctrl->ops->msdbd; + /* + * Endurance group identifier is 16 bits, so we can't let namespaces + * overflow that since we reuse the nsid + */ + BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX); + id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES); + id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4); id->anatt = 10; /* random value */ id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS); @@ -551,6 +716,21 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) id->nmic = NVME_NS_NMIC_SHARED; id->anagrpid = cpu_to_le32(req->ns->anagrpid); + if (req->ns->pr.enable) + id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE | + NVME_PR_SUPPORT_EXCLUSIVE_ACCESS | + NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY | + NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY | + NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS | + NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS | + NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF; + + /* + * Since we don't know any better, every namespace is its own endurance + * group. 
+ */ + id->endgid = cpu_to_le16(req->ns->nsid); + memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid)); id->lbaf[0].ds = req->ns->blksize_shift; @@ -576,7 +756,40 @@ out: nvmet_req_complete(req, status); } -static void nvmet_execute_identify_nslist(struct nvmet_req *req) +static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req) +{ + u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid); + static const int buf_size = NVME_IDENTIFY_DATA_SIZE; + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_ns *ns; + unsigned long idx; + __le16 *list; + u16 status; + int i = 1; + + list = kzalloc(buf_size, GFP_KERNEL); + if (!list) { + status = NVME_SC_INTERNAL; + goto out; + } + + nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { + if (ns->nsid <= min_endgid) + continue; + + list[i++] = cpu_to_le16(ns->nsid); + if (i == buf_size / sizeof(__le16)) + break; + } + + list[0] = cpu_to_le16(i - 1); + status = nvmet_copy_to_sgl(req, 0, list, buf_size); + kfree(list); +out: + nvmet_req_complete(req, status); +} + +static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css) { static const int buf_size = NVME_IDENTIFY_DATA_SIZE; struct nvmet_ctrl *ctrl = req->sq->ctrl; @@ -603,9 +816,11 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req) goto out; } - xa_for_each(&ctrl->subsys->namespaces, idx, ns) { + nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { if (ns->nsid <= min_nsid) continue; + if (match_css && req->ns->csi != req->cmd->identify.csi) + continue; list[i++] = cpu_to_le32(ns->nsid); if (i == buf_size / sizeof(__le32)) break; @@ -685,6 +900,61 @@ static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req) nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm))); } +static void nvme_execute_identify_ns_nvm(struct nvmet_req *req) +{ + u16 status; + struct nvme_id_ns_nvm *id; + + status = nvmet_req_find_ns(req); + if (status) + goto out; + + id = kzalloc(sizeof(*id), GFP_KERNEL); + if (!id) { + status = NVME_SC_INTERNAL; + goto out; + } + status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); +out: + nvmet_req_complete(req, status); +} + +static void nvmet_execute_id_cs_indep(struct nvmet_req *req) +{ + struct nvme_id_ns_cs_indep *id; + u16 status; + + status = nvmet_req_find_ns(req); + if (status) + goto out; + + id = kzalloc(sizeof(*id), GFP_KERNEL); + if (!id) { + status = NVME_SC_INTERNAL; + goto out; + } + + id->nstat = NVME_NSTAT_NRDY; + id->anagrpid = cpu_to_le32(req->ns->anagrpid); + id->nmic = NVME_NS_NMIC_SHARED; + if (req->ns->readonly) + id->nsattr |= NVME_NS_ATTR_RO; + if (req->ns->bdev && !bdev_nonrot(req->ns->bdev)) + id->nsfeat |= NVME_NS_ROTATIONAL; + /* + * We need flush command to flush the file's metadata, + * so report supporting vwc if backend is file, even + * though buffered_io is disable. 
+ */ + if (req->ns->bdev && !bdev_write_cache(req->ns->bdev)) + id->nsfeat |= NVME_NS_VWC_NOT_PRESENT; + + status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); + kfree(id); +out: + nvmet_req_complete(req, status); +} + static void nvmet_execute_identify(struct nvmet_req *req) { if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE)) @@ -698,7 +968,7 @@ static void nvmet_execute_identify(struct nvmet_req *req) nvmet_execute_identify_ctrl(req); return; case NVME_ID_CNS_NS_ACTIVE_LIST: - nvmet_execute_identify_nslist(req); + nvmet_execute_identify_nslist(req, false); return; case NVME_ID_CNS_NS_DESC_LIST: nvmet_execute_identify_desclist(req); @@ -706,8 +976,8 @@ static void nvmet_execute_identify(struct nvmet_req *req) case NVME_ID_CNS_CS_NS: switch (req->cmd->identify.csi) { case NVME_CSI_NVM: - /* Not supported */ - break; + nvme_execute_identify_ns_nvm(req); + return; case NVME_CSI_ZNS: if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { nvmet_execute_identify_ns_zns(req); @@ -729,6 +999,15 @@ static void nvmet_execute_identify(struct nvmet_req *req) break; } break; + case NVME_ID_CNS_NS_ACTIVE_LIST_CS: + nvmet_execute_identify_nslist(req, true); + return; + case NVME_ID_CNS_NS_CS_INDEP: + nvmet_execute_id_cs_indep(req); + return; + case NVME_ID_CNS_ENDGRP_LIST: + nvmet_execute_identify_endgrp_list(req); + return; } pr_debug("unhandled identify cns %d on qid %d\n", @@ -861,6 +1140,9 @@ void nvmet_execute_set_features(struct nvmet_req *req) case NVME_FEAT_WRITE_PROTECT: status = nvmet_set_feat_write_protect(req); break; + case NVME_FEAT_RESV_MASK: + status = nvmet_set_feat_resv_notif_mask(req, cdw11); + break; default: req->error_loc = offsetof(struct nvme_common_command, cdw10); status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; @@ -959,6 +1241,9 @@ void nvmet_execute_get_features(struct nvmet_req *req) case NVME_FEAT_WRITE_PROTECT: status = nvmet_get_feat_write_protect(req); break; + case NVME_FEAT_RESV_MASK: + status = nvmet_get_feat_resv_notif_mask(req); + break; default: req->error_loc = offsetof(struct nvme_common_command, cdw10); diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 685e89b35d33..2b030f0efc38 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -769,6 +769,32 @@ static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item, CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size); +static ssize_t nvmet_ns_resv_enable_show(struct config_item *item, char *page) +{ + return sysfs_emit(page, "%d\n", to_nvmet_ns(item)->pr.enable); +} + +static ssize_t nvmet_ns_resv_enable_store(struct config_item *item, + const char *page, size_t count) +{ + struct nvmet_ns *ns = to_nvmet_ns(item); + bool val; + + if (kstrtobool(page, &val)) + return -EINVAL; + + mutex_lock(&ns->subsys->lock); + if (ns->enabled) { + pr_err("the ns:%d is already enabled.\n", ns->nsid); + mutex_unlock(&ns->subsys->lock); + return -EINVAL; + } + ns->pr.enable = val; + mutex_unlock(&ns->subsys->lock); + return count; +} +CONFIGFS_ATTR(nvmet_ns_, resv_enable); + static struct configfs_attribute *nvmet_ns_attrs[] = { &nvmet_ns_attr_device_path, &nvmet_ns_attr_device_nguid, @@ -777,24 +803,13 @@ static struct configfs_attribute *nvmet_ns_attrs[] = { &nvmet_ns_attr_enable, &nvmet_ns_attr_buffered_io, &nvmet_ns_attr_revalidate_size, + &nvmet_ns_attr_resv_enable, #ifdef CONFIG_PCI_P2PDMA &nvmet_ns_attr_p2pmem, #endif NULL, }; -bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid) -{ - struct config_item *ns_item; - char name[12]; - - snprintf(name, 
sizeof(name), "%u", nsid); - mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex); - ns_item = config_group_find_item(&subsys->namespaces_group, name); - mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex); - return ns_item != NULL; -} - static void nvmet_ns_release(struct config_item *item) { struct nvmet_ns *ns = to_nvmet_ns(item); @@ -2227,12 +2242,17 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item, const char *page, size_t count) { struct list_head *entry; + char *old_nqn, *new_nqn; size_t len; len = strcspn(page, "\n"); if (!len || len > NVMF_NQN_FIELD_LEN - 1) return -EINVAL; + new_nqn = kstrndup(page, len, GFP_KERNEL); + if (!new_nqn) + return -ENOMEM; + down_write(&nvmet_config_sem); list_for_each(entry, &nvmet_subsystems_group.cg_children) { struct config_item *item = @@ -2241,13 +2261,15 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item, if (!strncmp(config_item_name(item), page, len)) { pr_err("duplicate NQN %s\n", config_item_name(item)); up_write(&nvmet_config_sem); + kfree(new_nqn); return -EINVAL; } } - memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN); - memcpy(nvmet_disc_subsys->subsysnqn, page, len); + old_nqn = nvmet_disc_subsys->subsysnqn; + nvmet_disc_subsys->subsysnqn = new_nqn; up_write(&nvmet_config_sem); + kfree(old_nqn); return len; } diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index ed2424f8a396..fde6c555af61 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -127,7 +127,7 @@ static u32 nvmet_max_nsid(struct nvmet_subsys *subsys) unsigned long idx; u32 nsid = 0; - xa_for_each(&subsys->namespaces, idx, cur) + nvmet_for_each_enabled_ns(&subsys->namespaces, idx, cur) nsid = cur->nsid; return nsid; @@ -441,11 +441,14 @@ u16 nvmet_req_find_ns(struct nvmet_req *req) struct nvmet_subsys *subsys = nvmet_req_subsys(req); req->ns = xa_load(&subsys->namespaces, nsid); - if (unlikely(!req->ns)) { + if (unlikely(!req->ns || !req->ns->enabled)) { req->error_loc = offsetof(struct nvme_common_command, nsid); - if (nvmet_subsys_nsid_exists(subsys, nsid)) - return NVME_SC_INTERNAL_PATH_ERROR; - return NVME_SC_INVALID_NS | NVME_STATUS_DNR; + if (!req->ns) /* ns doesn't exist! 
*/ + return NVME_SC_INVALID_NS | NVME_STATUS_DNR; + + /* ns exists but it's disabled */ + req->ns = NULL; + return NVME_SC_INTERNAL_PATH_ERROR; } percpu_ref_get(&req->ns->ref); @@ -583,8 +586,6 @@ int nvmet_ns_enable(struct nvmet_ns *ns) goto out_unlock; ret = -EMFILE; - if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES) - goto out_unlock; ret = nvmet_bdev_ns_enable(ns); if (ret == -ENOTBLK) @@ -599,30 +600,19 @@ int nvmet_ns_enable(struct nvmet_ns *ns) list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) nvmet_p2pmem_ns_add_p2p(ctrl, ns); - ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace, - 0, GFP_KERNEL); - if (ret) - goto out_dev_put; - - if (ns->nsid > subsys->max_nsid) - subsys->max_nsid = ns->nsid; - - ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL); - if (ret) - goto out_restore_subsys_maxnsid; - - subsys->nr_namespaces++; + if (ns->pr.enable) { + ret = nvmet_pr_init_ns(ns); + if (ret) + goto out_dev_put; + } nvmet_ns_changed(subsys, ns->nsid); ns->enabled = true; + xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED); ret = 0; out_unlock: mutex_unlock(&subsys->lock); return ret; - -out_restore_subsys_maxnsid: - subsys->max_nsid = nvmet_max_nsid(subsys); - percpu_ref_exit(&ns->ref); out_dev_put: list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); @@ -641,15 +631,37 @@ void nvmet_ns_disable(struct nvmet_ns *ns) goto out_unlock; ns->enabled = false; - xa_erase(&ns->subsys->namespaces, ns->nsid); - if (ns->nsid == subsys->max_nsid) - subsys->max_nsid = nvmet_max_nsid(subsys); + xa_clear_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED); list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); mutex_unlock(&subsys->lock); + if (ns->pr.enable) + nvmet_pr_exit_ns(ns); + + mutex_lock(&subsys->lock); + nvmet_ns_changed(subsys, ns->nsid); + nvmet_ns_dev_disable(ns); +out_unlock: + mutex_unlock(&subsys->lock); +} + +void nvmet_ns_free(struct nvmet_ns *ns) +{ + struct nvmet_subsys *subsys = ns->subsys; + + nvmet_ns_disable(ns); + + mutex_lock(&subsys->lock); + + xa_erase(&subsys->namespaces, ns->nsid); + if (ns->nsid == subsys->max_nsid) + subsys->max_nsid = nvmet_max_nsid(subsys); + + mutex_unlock(&subsys->lock); + /* * Now that we removed the namespaces from the lookup list, we * can kill the per_cpu ref and wait for any remaining references @@ -664,17 +676,8 @@ void nvmet_ns_disable(struct nvmet_ns *ns) percpu_ref_exit(&ns->ref); mutex_lock(&subsys->lock); - subsys->nr_namespaces--; - nvmet_ns_changed(subsys, ns->nsid); - nvmet_ns_dev_disable(ns); -out_unlock: mutex_unlock(&subsys->lock); -} - -void nvmet_ns_free(struct nvmet_ns *ns) -{ - nvmet_ns_disable(ns); down_write(&nvmet_ana_sem); nvmet_ana_group_enabled[ns->anagrpid]--; @@ -688,15 +691,33 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) { struct nvmet_ns *ns; + mutex_lock(&subsys->lock); + + if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES) + goto out_unlock; + ns = kzalloc(sizeof(*ns), GFP_KERNEL); if (!ns) - return NULL; + goto out_unlock; init_completion(&ns->disable_done); ns->nsid = nsid; ns->subsys = subsys; + if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL)) + goto out_free; + + if (ns->nsid > subsys->max_nsid) + subsys->max_nsid = nsid; + + if (xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL)) + goto out_exit; + + subsys->nr_namespaces++; + + mutex_unlock(&subsys->lock); + down_write(&nvmet_ana_sem); 
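/*
 * [Illustrative aside, not part of the patch] The rework above keeps
 * every namespace in subsys->namespaces for its entire lifetime and
 * toggles visibility through an xarray search mark (NVMET_NS_ENABLED,
 * defined as XA_MARK_1 in the nvmet.h hunk below) instead of inserting
 * and erasing the entry on enable/disable. A minimal sketch of the
 * pattern, assuming the surrounding nvmet types:
 */
static u32 sketch_count_enabled(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 n = 0;

	/* the patch wraps this iteration as nvmet_for_each_enabled_ns() */
	xa_for_each_marked(&subsys->namespaces, idx, ns, NVMET_NS_ENABLED)
		n++;
	return n;
}
/*
 * Enable and disable thus reduce to xa_set_mark()/xa_clear_mark() on the
 * existing entry, while xa_load() in nvmet_req_find_ns() still sees a
 * disabled namespace and can return "internal path error" instead of
 * "invalid namespace".
 */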
ns->anagrpid = NVMET_DEFAULT_ANA_GRPID; nvmet_ana_group_enabled[ns->anagrpid]++; @@ -707,6 +728,14 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) ns->csi = NVME_CSI_NVM; return ns; +out_exit: + subsys->max_nsid = nvmet_max_nsid(subsys); + percpu_ref_exit(&ns->ref); +out_free: + kfree(ns); +out_unlock: + mutex_unlock(&subsys->lock); + return NULL; } static void nvmet_update_sq_head(struct nvmet_req *req) @@ -754,6 +783,7 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status) static void __nvmet_req_complete(struct nvmet_req *req, u16 status) { struct nvmet_ns *ns = req->ns; + struct nvmet_pr_per_ctrl_ref *pc_ref = req->pc_ref; if (!req->sq->sqhd_disabled) nvmet_update_sq_head(req); @@ -766,6 +796,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) trace_nvmet_req_complete(req); req->ops->queue_response(req); + + if (pc_ref) + nvmet_pr_put_ns_pc_ref(pc_ref); if (ns) nvmet_put_namespace(ns); } @@ -929,18 +962,39 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req) return ret; } + if (req->ns->pr.enable) { + ret = nvmet_parse_pr_cmd(req); + if (!ret) + return ret; + } + switch (req->ns->csi) { case NVME_CSI_NVM: if (req->ns->file) - return nvmet_file_parse_io_cmd(req); - return nvmet_bdev_parse_io_cmd(req); + ret = nvmet_file_parse_io_cmd(req); + else + ret = nvmet_bdev_parse_io_cmd(req); + break; case NVME_CSI_ZNS: if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) - return nvmet_bdev_zns_parse_io_cmd(req); - return NVME_SC_INVALID_IO_CMD_SET; + ret = nvmet_bdev_zns_parse_io_cmd(req); + else + ret = NVME_SC_INVALID_IO_CMD_SET; + break; default: - return NVME_SC_INVALID_IO_CMD_SET; + ret = NVME_SC_INVALID_IO_CMD_SET; } + if (ret) + return ret; + + if (req->ns->pr.enable) { + ret = nvmet_pr_check_cmd_access(req); + if (ret) + return ret; + + ret = nvmet_pr_get_ns_pc_ref(req); + } + return ret; } bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, @@ -964,6 +1018,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, req->ns = NULL; req->error_loc = NVMET_NO_ERROR_LOC; req->error_slba = 0; + req->pc_ref = NULL; /* no support for fused commands yet */ if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) { @@ -1015,6 +1070,8 @@ EXPORT_SYMBOL_GPL(nvmet_req_init); void nvmet_req_uninit(struct nvmet_req *req) { percpu_ref_put(&req->sq->ref); + if (req->pc_ref) + nvmet_pr_put_ns_pc_ref(req->pc_ref); if (req->ns) nvmet_put_namespace(req->ns); } @@ -1355,7 +1412,7 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl, ctrl->p2p_client = get_device(req->p2p_client); - xa_for_each(&ctrl->subsys->namespaces, idx, ns) + nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) nvmet_p2pmem_ns_add_p2p(ctrl, ns); } @@ -1383,7 +1440,8 @@ static void nvmet_fatal_error_handler(struct work_struct *work) } u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, - struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp) + struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp, + uuid_t *hostid) { struct nvmet_subsys *subsys; struct nvmet_ctrl *ctrl; @@ -1462,6 +1520,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, } ctrl->cntlid = ret; + uuid_copy(&ctrl->hostid, hostid); + /* * Discovery controllers may use some arbitrary high value * in order to cleanup stale discovery sessions @@ -1478,6 +1538,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, nvmet_start_keep_alive_timer(ctrl); mutex_lock(&subsys->lock); + ret = nvmet_ctrl_init_pr(ctrl); + if (ret) + goto 
init_pr_fail; list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); nvmet_setup_p2p_ns_map(ctrl, req); nvmet_debugfs_ctrl_setup(ctrl); @@ -1486,6 +1549,10 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, *ctrlp = ctrl; return 0; +init_pr_fail: + mutex_unlock(&subsys->lock); + nvmet_stop_keep_alive_timer(ctrl); + ida_free(&cntlid_ida, ctrl->cntlid); out_free_sqs: kfree(ctrl->sqs); out_free_changed_ns_list: @@ -1504,6 +1571,7 @@ static void nvmet_ctrl_free(struct kref *ref) struct nvmet_subsys *subsys = ctrl->subsys; mutex_lock(&subsys->lock); + nvmet_ctrl_destroy_pr(ctrl); nvmet_release_p2p_ns_map(ctrl); list_del(&ctrl->subsys_entry); mutex_unlock(&subsys->lock); @@ -1717,7 +1785,7 @@ static int __init nvmet_init(void) goto out_free_zbd_work_queue; nvmet_wq = alloc_workqueue("nvmet-wq", - WQ_MEM_RECLAIM | WQ_UNBOUND, 0); + WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 0); if (!nvmet_wq) goto out_free_buffered_work_queue; diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index c4b2eddd5666..c49904ebb6c2 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -64,6 +64,9 @@ static void nvmet_execute_prop_get(struct nvmet_req *req) case NVME_REG_CSTS: val = ctrl->csts; break; + case NVME_REG_CRTO: + val = NVME_CAP_TIMEOUT(ctrl->csts); + break; default: status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; @@ -245,12 +248,10 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req, - le32_to_cpu(c->kato), &ctrl); + le32_to_cpu(c->kato), &ctrl, &d->hostid); if (status) goto out; - uuid_copy(&ctrl->hostid, &d->hostid); - dhchap_status = nvmet_setup_auth(ctrl); if (dhchap_status) { pr_err("Failed to setup authentication, dhchap status %u\n", diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 0bda83d0fc3e..eaf31c823cbe 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) */ id->nsfeat |= 1 << 4; /* NPWG = Namespace Preferred Write Granularity. 0's based */ - id->npwg = lpp0b; + id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev)); /* NPWA = Namespace Preferred Write Alignment. 0's based */ id->npwa = id->npwg; /* NPDG = Namespace Preferred Deallocate Granularity. 
0's based */ diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 190f55e6d753..7233549f7c8a 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -20,9 +20,11 @@ #include <linux/blkdev.h> #include <linux/radix-tree.h> #include <linux/t10-pi.h> +#include <linux/kfifo.h> -#define NVMET_DEFAULT_VS NVME_VS(1, 3, 0) +#define NVMET_DEFAULT_VS NVME_VS(2, 1, 0) +#define NVMET_NS_ENABLED XA_MARK_1 #define NVMET_ASYNC_EVENTS 4 #define NVMET_ERROR_LOG_SLOTS 128 #define NVMET_NO_ERROR_LOC ((u16)-1) @@ -30,6 +32,13 @@ #define NVMET_MN_MAX_SIZE 40 #define NVMET_SN_MAX_SIZE 20 #define NVMET_FR_MAX_SIZE 8 +#define NVMET_PR_LOG_QUEUE_SIZE 64 + +#define nvmet_for_each_ns(xa, index, entry) \ + xa_for_each(xa, index, entry) + +#define nvmet_for_each_enabled_ns(xa, index, entry) \ + xa_for_each_marked(xa, index, entry, NVMET_NS_ENABLED) /* * Supported optional AENs: @@ -56,6 +65,38 @@ #define IPO_IATTR_CONNECT_SQE(x) \ (cpu_to_le32(offsetof(struct nvmf_connect_command, x))) +struct nvmet_pr_registrant { + u64 rkey; + uuid_t hostid; + enum nvme_pr_type rtype; + struct list_head entry; + struct rcu_head rcu; +}; + +struct nvmet_pr { + bool enable; + unsigned long notify_mask; + atomic_t generation; + struct nvmet_pr_registrant __rcu *holder; + /* + * During the execution of the reservation command, mutual + * exclusion is required throughout the process. However, + * while waiting asynchronously for the 'per controller + * percpu_ref' to complete before the 'preempt and abort' + * command finishes, a semaphore is needed to ensure mutual + * exclusion instead of a mutex. + */ + struct semaphore pr_sem; + struct list_head registrant_list; +}; + +struct nvmet_pr_per_ctrl_ref { + struct percpu_ref ref; + struct completion free_done; + struct completion confirm_done; + uuid_t hostid; +}; + struct nvmet_ns { struct percpu_ref ref; struct file *bdev_file; @@ -85,6 +126,8 @@ struct nvmet_ns { int pi_type; int metadata_size; u8 csi; + struct nvmet_pr pr; + struct xarray pr_per_ctrl_refs; }; static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item) @@ -191,6 +234,13 @@ static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port) return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED; } +struct nvmet_pr_log_mgr { + struct mutex lock; + u64 lost_count; + u64 counter; + DECLARE_KFIFO(log_queue, struct nvme_pr_log, NVMET_PR_LOG_QUEUE_SIZE); +}; + struct nvmet_ctrl { struct nvmet_subsys *subsys; struct nvmet_sq **sqs; @@ -246,6 +296,7 @@ struct nvmet_ctrl { u8 *dh_key; size_t dh_keysize; #endif + struct nvmet_pr_log_mgr pr_log_mgr; }; struct nvmet_subsys { @@ -396,6 +447,9 @@ struct nvmet_req { struct work_struct zmgmt_work; } z; #endif /* CONFIG_BLK_DEV_ZONED */ + struct { + struct work_struct abort_work; + } r; }; int sg_cnt; int metadata_sg_cnt; @@ -412,6 +466,7 @@ struct nvmet_req { struct device *p2p_client; u16 error_loc; u64 error_slba; + struct nvmet_pr_per_ctrl_ref *pc_ref; }; #define NVMET_MAX_MPOOL_BVEC 16 @@ -498,7 +553,8 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl); void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new); u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, - struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp); + struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp, + uuid_t *hostid); struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid, struct nvmet_req *req); @@ -761,4 +817,18 @@ static inline bool nvmet_has_auth(struct 
nvmet_ctrl *ctrl) static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; } #endif +int nvmet_pr_init_ns(struct nvmet_ns *ns); +u16 nvmet_parse_pr_cmd(struct nvmet_req *req); +u16 nvmet_pr_check_cmd_access(struct nvmet_req *req); +int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl); +void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl); +void nvmet_pr_exit_ns(struct nvmet_ns *ns); +void nvmet_execute_get_log_page_resv(struct nvmet_req *req); +u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask); +u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req); +u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req); +static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref) +{ + percpu_ref_put(&pc_ref->ref); +} #endif /* _NVMET_H */ diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 0f9b280c438d..30b21936b0c6 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -13,7 +13,7 @@ #include "../host/nvme.h" #include "nvmet.h" -MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU); +MODULE_IMPORT_NS("NVME_TARGET_PASSTHRU"); /* * xarray to maintain one passthru subsystem per nvme controller. diff --git a/drivers/nvme/target/pr.c b/drivers/nvme/target/pr.c new file mode 100644 index 000000000000..cd22d8333314 --- /dev/null +++ b/drivers/nvme/target/pr.c @@ -0,0 +1,1155 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVMe over Fabrics Persist Reservation. + * Copyright (c) 2024 Guixin Liu, Alibaba Group. + * All rights reserved. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/unaligned.h> +#include "nvmet.h" + +#define NVMET_PR_NOTIFI_MASK_ALL \ + (1 << NVME_PR_NOTIFY_BIT_REG_PREEMPTED | \ + 1 << NVME_PR_NOTIFY_BIT_RESV_RELEASED | \ + 1 << NVME_PR_NOTIFY_BIT_RESV_PREEMPTED) + +static inline bool nvmet_pr_parse_ignore_key(u32 cdw10) +{ + /* Ignore existing key, bit 03. 
*/ + return (cdw10 >> 3) & 1; +} + +static inline struct nvmet_ns *nvmet_pr_to_ns(struct nvmet_pr *pr) +{ + return container_of(pr, struct nvmet_ns, pr); +} + +static struct nvmet_pr_registrant * +nvmet_pr_find_registrant(struct nvmet_pr *pr, uuid_t *hostid) +{ + struct nvmet_pr_registrant *reg; + + list_for_each_entry_rcu(reg, &pr->registrant_list, entry) { + if (uuid_equal(®->hostid, hostid)) + return reg; + } + return NULL; +} + +u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask) +{ + u32 nsid = le32_to_cpu(req->cmd->common.nsid); + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_ns *ns; + unsigned long idx; + u16 status; + + if (mask & ~(NVMET_PR_NOTIFI_MASK_ALL)) { + req->error_loc = offsetof(struct nvme_common_command, cdw11); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + if (nsid != U32_MAX) { + status = nvmet_req_find_ns(req); + if (status) + return status; + if (!req->ns->pr.enable) + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + + WRITE_ONCE(req->ns->pr.notify_mask, mask); + goto success; + } + + nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { + if (ns->pr.enable) + WRITE_ONCE(ns->pr.notify_mask, mask); + } + +success: + nvmet_set_result(req, mask); + return NVME_SC_SUCCESS; +} + +u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req) +{ + u16 status; + + status = nvmet_req_find_ns(req); + if (status) + return status; + + if (!req->ns->pr.enable) + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + + nvmet_set_result(req, READ_ONCE(req->ns->pr.notify_mask)); + return status; +} + +void nvmet_execute_get_log_page_resv(struct nvmet_req *req) +{ + struct nvmet_pr_log_mgr *log_mgr = &req->sq->ctrl->pr_log_mgr; + struct nvme_pr_log next_log = {0}; + struct nvme_pr_log log = {0}; + u16 status = NVME_SC_SUCCESS; + u64 lost_count; + u64 cur_count; + u64 next_count; + + mutex_lock(&log_mgr->lock); + if (!kfifo_get(&log_mgr->log_queue, &log)) + goto out; + + /* + * We can't get the last in kfifo. + * Utilize the current count and the count from the next log to + * calculate the number of lost logs, while also addressing cases + * of overflow. If there is no subsequent log, the number of lost + * logs is equal to the lost_count within the nvmet_pr_log_mgr. + */ + cur_count = le64_to_cpu(log.count); + if (kfifo_peek(&log_mgr->log_queue, &next_log)) { + next_count = le64_to_cpu(next_log.count); + if (next_count > cur_count) + lost_count = next_count - cur_count - 1; + else + lost_count = U64_MAX - cur_count + next_count - 1; + } else { + lost_count = log_mgr->lost_count; + } + + log.count = cpu_to_le64((cur_count + lost_count) == 0 ? 
+ 1 : (cur_count + lost_count)); + log_mgr->lost_count -= lost_count; + + log.nr_pages = kfifo_len(&log_mgr->log_queue); + +out: + status = nvmet_copy_to_sgl(req, 0, &log, sizeof(log)); + mutex_unlock(&log_mgr->lock); + nvmet_req_complete(req, status); +} + +static void nvmet_pr_add_resv_log(struct nvmet_ctrl *ctrl, u8 log_type, + u32 nsid) +{ + struct nvmet_pr_log_mgr *log_mgr = &ctrl->pr_log_mgr; + struct nvme_pr_log log = {0}; + + mutex_lock(&log_mgr->lock); + log_mgr->counter++; + if (log_mgr->counter == 0) + log_mgr->counter = 1; + + log.count = cpu_to_le64(log_mgr->counter); + log.type = log_type; + log.nsid = cpu_to_le32(nsid); + + if (!kfifo_put(&log_mgr->log_queue, log)) { + pr_info("a reservation log lost, cntlid:%d, log_type:%d, nsid:%d\n", + ctrl->cntlid, log_type, nsid); + log_mgr->lost_count++; + } + + mutex_unlock(&log_mgr->lock); +} + +static void nvmet_pr_resv_released(struct nvmet_pr *pr, uuid_t *hostid) +{ + struct nvmet_ns *ns = nvmet_pr_to_ns(pr); + struct nvmet_subsys *subsys = ns->subsys; + struct nvmet_ctrl *ctrl; + + if (test_bit(NVME_PR_NOTIFY_BIT_RESV_RELEASED, &pr->notify_mask)) + return; + + mutex_lock(&subsys->lock); + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { + if (!uuid_equal(&ctrl->hostid, hostid) && + nvmet_pr_find_registrant(pr, &ctrl->hostid)) { + nvmet_pr_add_resv_log(ctrl, + NVME_PR_LOG_RESERVATION_RELEASED, ns->nsid); + nvmet_add_async_event(ctrl, NVME_AER_CSS, + NVME_AEN_RESV_LOG_PAGE_AVALIABLE, + NVME_LOG_RESERVATION); + } + } + mutex_unlock(&subsys->lock); +} + +static void nvmet_pr_send_event_to_host(struct nvmet_pr *pr, uuid_t *hostid, + u8 log_type) +{ + struct nvmet_ns *ns = nvmet_pr_to_ns(pr); + struct nvmet_subsys *subsys = ns->subsys; + struct nvmet_ctrl *ctrl; + + mutex_lock(&subsys->lock); + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { + if (uuid_equal(hostid, &ctrl->hostid)) { + nvmet_pr_add_resv_log(ctrl, log_type, ns->nsid); + nvmet_add_async_event(ctrl, NVME_AER_CSS, + NVME_AEN_RESV_LOG_PAGE_AVALIABLE, + NVME_LOG_RESERVATION); + } + } + mutex_unlock(&subsys->lock); +} + +static void nvmet_pr_resv_preempted(struct nvmet_pr *pr, uuid_t *hostid) +{ + if (test_bit(NVME_PR_NOTIFY_BIT_RESV_PREEMPTED, &pr->notify_mask)) + return; + + nvmet_pr_send_event_to_host(pr, hostid, + NVME_PR_LOG_RESERVATOIN_PREEMPTED); +} + +static void nvmet_pr_registration_preempted(struct nvmet_pr *pr, + uuid_t *hostid) +{ + if (test_bit(NVME_PR_NOTIFY_BIT_REG_PREEMPTED, &pr->notify_mask)) + return; + + nvmet_pr_send_event_to_host(pr, hostid, + NVME_PR_LOG_REGISTRATION_PREEMPTED); +} + +static inline void nvmet_pr_set_new_holder(struct nvmet_pr *pr, u8 new_rtype, + struct nvmet_pr_registrant *reg) +{ + reg->rtype = new_rtype; + rcu_assign_pointer(pr->holder, reg); +} + +static u16 nvmet_pr_register(struct nvmet_req *req, + struct nvmet_pr_register_data *d) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_pr_registrant *new, *reg; + struct nvmet_pr *pr = &req->ns->pr; + u16 status = NVME_SC_SUCCESS; + u64 nrkey = le64_to_cpu(d->nrkey); + + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NVME_SC_INTERNAL; + + down(&pr->pr_sem); + reg = nvmet_pr_find_registrant(pr, &ctrl->hostid); + if (reg) { + if (reg->rkey != nrkey) + status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + kfree(new); + goto out; + } + + memset(new, 0, sizeof(*new)); + INIT_LIST_HEAD(&new->entry); + new->rkey = nrkey; + uuid_copy(&new->hostid, &ctrl->hostid); + list_add_tail_rcu(&new->entry, &pr->registrant_list); + +out: + up(&pr->pr_sem); 
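/*
 * [Illustrative aside, not part of the patch] The lost-log arithmetic in
 * nvmet_execute_get_log_page_resv() above exploits the fact that
 * consecutive reservation log entries carry consecutive 64-bit counts:
 * any larger gap between the entry just read and the next one queued
 * means entries were dropped from the fixed-size kfifo. A self-contained
 * restatement of that computation, including counter wraparound:
 */
static inline u64 sketch_pr_log_lost(u64 cur_count, u64 next_count)
{
	if (next_count > cur_count)
		return next_count - cur_count - 1;
	/* the counter wrapped past U64_MAX between the two entries */
	return U64_MAX - cur_count + next_count - 1;
}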
+ return status; +} + +static void nvmet_pr_unregister_one(struct nvmet_pr *pr, + struct nvmet_pr_registrant *reg) +{ + struct nvmet_pr_registrant *first_reg; + struct nvmet_pr_registrant *holder; + u8 original_rtype; + + list_del_rcu(®->entry); + + holder = rcu_dereference_protected(pr->holder, 1); + if (reg != holder) + goto out; + + original_rtype = holder->rtype; + if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS || + original_rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) { + first_reg = list_first_or_null_rcu(&pr->registrant_list, + struct nvmet_pr_registrant, entry); + if (first_reg) + first_reg->rtype = original_rtype; + rcu_assign_pointer(pr->holder, first_reg); + } else { + rcu_assign_pointer(pr->holder, NULL); + + if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_REG_ONLY || + original_rtype == NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY) + nvmet_pr_resv_released(pr, ®->hostid); + } +out: + kfree_rcu(reg, rcu); +} + +static u16 nvmet_pr_unregister(struct nvmet_req *req, + struct nvmet_pr_register_data *d, + bool ignore_key) +{ + u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_registrant *reg; + + down(&pr->pr_sem); + list_for_each_entry_rcu(reg, &pr->registrant_list, entry) { + if (uuid_equal(®->hostid, &ctrl->hostid)) { + if (ignore_key || reg->rkey == le64_to_cpu(d->crkey)) { + status = NVME_SC_SUCCESS; + nvmet_pr_unregister_one(pr, reg); + } + break; + } + } + up(&pr->pr_sem); + + return status; +} + +static void nvmet_pr_update_reg_rkey(struct nvmet_pr_registrant *reg, + void *attr) +{ + reg->rkey = *(u64 *)attr; +} + +static u16 nvmet_pr_update_reg_attr(struct nvmet_pr *pr, + struct nvmet_pr_registrant *reg, + void (*change_attr)(struct nvmet_pr_registrant *reg, + void *attr), + void *attr) +{ + struct nvmet_pr_registrant *holder; + struct nvmet_pr_registrant *new; + + holder = rcu_dereference_protected(pr->holder, 1); + if (reg != holder) { + change_attr(reg, attr); + return NVME_SC_SUCCESS; + } + + new = kmalloc(sizeof(*new), GFP_ATOMIC); + if (!new) + return NVME_SC_INTERNAL; + + new->rkey = holder->rkey; + new->rtype = holder->rtype; + uuid_copy(&new->hostid, &holder->hostid); + INIT_LIST_HEAD(&new->entry); + + change_attr(new, attr); + list_replace_rcu(&holder->entry, &new->entry); + rcu_assign_pointer(pr->holder, new); + kfree_rcu(holder, rcu); + + return NVME_SC_SUCCESS; +} + +static u16 nvmet_pr_replace(struct nvmet_req *req, + struct nvmet_pr_register_data *d, + bool ignore_key) +{ + u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_registrant *reg; + u64 nrkey = le64_to_cpu(d->nrkey); + + down(&pr->pr_sem); + list_for_each_entry_rcu(reg, &pr->registrant_list, entry) { + if (uuid_equal(®->hostid, &ctrl->hostid)) { + if (ignore_key || reg->rkey == le64_to_cpu(d->crkey)) + status = nvmet_pr_update_reg_attr(pr, reg, + nvmet_pr_update_reg_rkey, + &nrkey); + break; + } + } + up(&pr->pr_sem); + return status; +} + +static void nvmet_execute_pr_register(struct nvmet_req *req) +{ + u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); + bool ignore_key = nvmet_pr_parse_ignore_key(cdw10); + struct nvmet_pr_register_data *d; + u8 reg_act = cdw10 & 0x07; /* Reservation Register Action, bit 02:00 */ + u16 status; + + d = kmalloc(sizeof(*d), GFP_KERNEL); + if (!d) { + status = NVME_SC_INTERNAL; + goto out; + } + + status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d)); + if 
(status) + goto free_data; + + switch (reg_act) { + case NVME_PR_REGISTER_ACT_REG: + status = nvmet_pr_register(req, d); + break; + case NVME_PR_REGISTER_ACT_UNREG: + status = nvmet_pr_unregister(req, d, ignore_key); + break; + case NVME_PR_REGISTER_ACT_REPLACE: + status = nvmet_pr_replace(req, d, ignore_key); + break; + default: + req->error_loc = offsetof(struct nvme_common_command, cdw10); + status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; + break; + } +free_data: + kfree(d); +out: + if (!status) + atomic_inc(&req->ns->pr.generation); + nvmet_req_complete(req, status); +} + +static u16 nvmet_pr_acquire(struct nvmet_req *req, + struct nvmet_pr_registrant *reg, + u8 rtype) +{ + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_registrant *holder; + + holder = rcu_dereference_protected(pr->holder, 1); + if (holder && reg != holder) + return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + if (holder && reg == holder) { + if (holder->rtype == rtype) + return NVME_SC_SUCCESS; + return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + } + + nvmet_pr_set_new_holder(pr, rtype, reg); + return NVME_SC_SUCCESS; +} + +static void nvmet_pr_confirm_ns_pc_ref(struct percpu_ref *ref) +{ + struct nvmet_pr_per_ctrl_ref *pc_ref = + container_of(ref, struct nvmet_pr_per_ctrl_ref, ref); + + complete(&pc_ref->confirm_done); +} + +static void nvmet_pr_set_ctrl_to_abort(struct nvmet_req *req, uuid_t *hostid) +{ + struct nvmet_pr_per_ctrl_ref *pc_ref; + struct nvmet_ns *ns = req->ns; + unsigned long idx; + + xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) { + if (uuid_equal(&pc_ref->hostid, hostid)) { + percpu_ref_kill_and_confirm(&pc_ref->ref, + nvmet_pr_confirm_ns_pc_ref); + wait_for_completion(&pc_ref->confirm_done); + } + } +} + +static u16 nvmet_pr_unreg_all_host_by_prkey(struct nvmet_req *req, u64 prkey, + uuid_t *send_hostid, + bool abort) +{ + u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + struct nvmet_pr_registrant *reg, *tmp; + struct nvmet_pr *pr = &req->ns->pr; + uuid_t hostid; + + list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) { + if (reg->rkey == prkey) { + status = NVME_SC_SUCCESS; + uuid_copy(&hostid, ®->hostid); + if (abort) + nvmet_pr_set_ctrl_to_abort(req, &hostid); + nvmet_pr_unregister_one(pr, reg); + if (!uuid_equal(&hostid, send_hostid)) + nvmet_pr_registration_preempted(pr, &hostid); + } + } + return status; +} + +static void nvmet_pr_unreg_all_others_by_prkey(struct nvmet_req *req, + u64 prkey, + uuid_t *send_hostid, + bool abort) +{ + struct nvmet_pr_registrant *reg, *tmp; + struct nvmet_pr *pr = &req->ns->pr; + uuid_t hostid; + + list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) { + if (reg->rkey == prkey && + !uuid_equal(®->hostid, send_hostid)) { + uuid_copy(&hostid, ®->hostid); + if (abort) + nvmet_pr_set_ctrl_to_abort(req, &hostid); + nvmet_pr_unregister_one(pr, reg); + nvmet_pr_registration_preempted(pr, &hostid); + } + } +} + +static void nvmet_pr_unreg_all_others(struct nvmet_req *req, + uuid_t *send_hostid, + bool abort) +{ + struct nvmet_pr_registrant *reg, *tmp; + struct nvmet_pr *pr = &req->ns->pr; + uuid_t hostid; + + list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) { + if (!uuid_equal(®->hostid, send_hostid)) { + uuid_copy(&hostid, ®->hostid); + if (abort) + nvmet_pr_set_ctrl_to_abort(req, &hostid); + nvmet_pr_unregister_one(pr, reg); + nvmet_pr_registration_preempted(pr, &hostid); + } + } +} + +static void nvmet_pr_update_holder_rtype(struct nvmet_pr_registrant *reg, + void *attr) +{ + u8 new_rtype = 
*(u8 *)attr; + + reg->rtype = new_rtype; +} + +static u16 nvmet_pr_preempt(struct nvmet_req *req, + struct nvmet_pr_registrant *reg, + u8 rtype, + struct nvmet_pr_acquire_data *d, + bool abort) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_registrant *holder; + enum nvme_pr_type original_rtype; + u64 prkey = le64_to_cpu(d->prkey); + u16 status; + + holder = rcu_dereference_protected(pr->holder, 1); + if (!holder) + return nvmet_pr_unreg_all_host_by_prkey(req, prkey, + &ctrl->hostid, abort); + + original_rtype = holder->rtype; + if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS || + original_rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) { + if (!prkey) { + /* + * To prevent possible access from other hosts, and + * avoid terminate the holder, set the new holder + * first before unregistering. + */ + nvmet_pr_set_new_holder(pr, rtype, reg); + nvmet_pr_unreg_all_others(req, &ctrl->hostid, abort); + return NVME_SC_SUCCESS; + } + return nvmet_pr_unreg_all_host_by_prkey(req, prkey, + &ctrl->hostid, abort); + } + + if (holder == reg) { + status = nvmet_pr_update_reg_attr(pr, holder, + nvmet_pr_update_holder_rtype, &rtype); + if (!status && original_rtype != rtype) + nvmet_pr_resv_released(pr, ®->hostid); + return status; + } + + if (prkey == holder->rkey) { + /* + * Same as before, set the new holder first. + */ + nvmet_pr_set_new_holder(pr, rtype, reg); + nvmet_pr_unreg_all_others_by_prkey(req, prkey, &ctrl->hostid, + abort); + if (original_rtype != rtype) + nvmet_pr_resv_released(pr, ®->hostid); + return NVME_SC_SUCCESS; + } + + if (prkey) + return nvmet_pr_unreg_all_host_by_prkey(req, prkey, + &ctrl->hostid, abort); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; +} + +static void nvmet_pr_do_abort(struct work_struct *w) +{ + struct nvmet_req *req = container_of(w, struct nvmet_req, r.abort_work); + struct nvmet_pr_per_ctrl_ref *pc_ref; + struct nvmet_ns *ns = req->ns; + unsigned long idx; + + /* + * The target does not support abort, just wait per-controller ref to 0. 
+ */ + xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) { + if (percpu_ref_is_dying(&pc_ref->ref)) { + wait_for_completion(&pc_ref->free_done); + reinit_completion(&pc_ref->confirm_done); + reinit_completion(&pc_ref->free_done); + percpu_ref_resurrect(&pc_ref->ref); + } + } + + up(&ns->pr.pr_sem); + nvmet_req_complete(req, NVME_SC_SUCCESS); +} + +static u16 __nvmet_execute_pr_acquire(struct nvmet_req *req, + struct nvmet_pr_registrant *reg, + u8 acquire_act, + u8 rtype, + struct nvmet_pr_acquire_data *d) +{ + u16 status; + + switch (acquire_act) { + case NVME_PR_ACQUIRE_ACT_ACQUIRE: + status = nvmet_pr_acquire(req, reg, rtype); + goto out; + case NVME_PR_ACQUIRE_ACT_PREEMPT: + status = nvmet_pr_preempt(req, reg, rtype, d, false); + goto inc_gen; + case NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT: + status = nvmet_pr_preempt(req, reg, rtype, d, true); + goto inc_gen; + default: + req->error_loc = offsetof(struct nvme_common_command, cdw10); + status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; + goto out; + } +inc_gen: + if (!status) + atomic_inc(&req->ns->pr.generation); +out: + return status; +} + +static void nvmet_execute_pr_acquire(struct nvmet_req *req) +{ + u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); + bool ignore_key = nvmet_pr_parse_ignore_key(cdw10); + /* Reservation type, bit 15:08 */ + u8 rtype = (u8)((cdw10 >> 8) & 0xff); + /* Reservation acquire action, bit 02:00 */ + u8 acquire_act = cdw10 & 0x07; + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_pr_acquire_data *d = NULL; + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_registrant *reg; + u16 status = NVME_SC_SUCCESS; + + if (ignore_key || + rtype < NVME_PR_WRITE_EXCLUSIVE || + rtype > NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) { + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto out; + } + + d = kmalloc(sizeof(*d), GFP_KERNEL); + if (!d) { + status = NVME_SC_INTERNAL; + goto out; + } + + status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d)); + if (status) + goto free_data; + + status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + down(&pr->pr_sem); + list_for_each_entry_rcu(reg, &pr->registrant_list, entry) { + if (uuid_equal(®->hostid, &ctrl->hostid) && + reg->rkey == le64_to_cpu(d->crkey)) { + status = __nvmet_execute_pr_acquire(req, reg, + acquire_act, rtype, d); + break; + } + } + + if (!status && acquire_act == NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT) { + kfree(d); + INIT_WORK(&req->r.abort_work, nvmet_pr_do_abort); + queue_work(nvmet_wq, &req->r.abort_work); + return; + } + + up(&pr->pr_sem); + +free_data: + kfree(d); +out: + nvmet_req_complete(req, status); +} + +static u16 nvmet_pr_release(struct nvmet_req *req, + struct nvmet_pr_registrant *reg, + u8 rtype) +{ + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_registrant *holder; + u8 original_rtype; + + holder = rcu_dereference_protected(pr->holder, 1); + if (!holder || reg != holder) + return NVME_SC_SUCCESS; + + original_rtype = holder->rtype; + if (original_rtype != rtype) + return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + + rcu_assign_pointer(pr->holder, NULL); + + if (original_rtype != NVME_PR_WRITE_EXCLUSIVE && + original_rtype != NVME_PR_EXCLUSIVE_ACCESS) + nvmet_pr_resv_released(pr, ®->hostid); + + return NVME_SC_SUCCESS; +} + +static void nvmet_pr_clear(struct nvmet_req *req) +{ + struct nvmet_pr_registrant *reg, *tmp; + struct nvmet_pr *pr = &req->ns->pr; + + rcu_assign_pointer(pr->holder, NULL); + + list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) { + list_del_rcu(®->entry); + if 
(!uuid_equal(&req->sq->ctrl->hostid, ®->hostid)) + nvmet_pr_resv_preempted(pr, ®->hostid); + kfree_rcu(reg, rcu); + } + + atomic_inc(&pr->generation); +} + +static u16 __nvmet_execute_pr_release(struct nvmet_req *req, + struct nvmet_pr_registrant *reg, + u8 release_act, u8 rtype) +{ + switch (release_act) { + case NVME_PR_RELEASE_ACT_RELEASE: + return nvmet_pr_release(req, reg, rtype); + case NVME_PR_RELEASE_ACT_CLEAR: + nvmet_pr_clear(req); + return NVME_SC_SUCCESS; + default: + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; + } +} + +static void nvmet_execute_pr_release(struct nvmet_req *req) +{ + u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); + bool ignore_key = nvmet_pr_parse_ignore_key(cdw10); + u8 rtype = (u8)((cdw10 >> 8) & 0xff); /* Reservation type, bit 15:08 */ + u8 release_act = cdw10 & 0x07; /* Reservation release action, bit 02:00 */ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_release_data *d; + struct nvmet_pr_registrant *reg; + u16 status; + + if (ignore_key) { + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto out; + } + + d = kmalloc(sizeof(*d), GFP_KERNEL); + if (!d) { + status = NVME_SC_INTERNAL; + goto out; + } + + status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d)); + if (status) + goto free_data; + + status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + down(&pr->pr_sem); + list_for_each_entry_rcu(reg, &pr->registrant_list, entry) { + if (uuid_equal(®->hostid, &ctrl->hostid) && + reg->rkey == le64_to_cpu(d->crkey)) { + status = __nvmet_execute_pr_release(req, reg, + release_act, rtype); + break; + } + } + up(&pr->pr_sem); +free_data: + kfree(d); +out: + nvmet_req_complete(req, status); +} + +static void nvmet_execute_pr_report(struct nvmet_req *req) +{ + u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); + u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); + u32 num_bytes = 4 * (cdw10 + 1); /* cdw10 is number of dwords */ + u8 eds = cdw11 & 1; /* Extended data structure, bit 00 */ + struct nvme_registered_ctrl_ext *ctrl_eds; + struct nvme_reservation_status_ext *data; + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_registrant *holder; + struct nvmet_pr_registrant *reg; + u16 num_ctrls = 0; + u16 status; + u8 rtype; + + /* nvmet hostid(uuid_t) is 128 bit. */ + if (!eds) { + req->error_loc = offsetof(struct nvme_common_command, cdw11); + status = NVME_SC_HOST_ID_INCONSIST | NVME_STATUS_DNR; + goto out; + } + + if (num_bytes < sizeof(struct nvme_reservation_status_ext)) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto out; + } + + data = kzalloc(num_bytes, GFP_KERNEL); + if (!data) { + status = NVME_SC_INTERNAL; + goto out; + } + data->gen = cpu_to_le32(atomic_read(&pr->generation)); + data->ptpls = 0; + ctrl_eds = data->regctl_eds; + + rcu_read_lock(); + holder = rcu_dereference(pr->holder); + rtype = holder ? holder->rtype : 0; + data->rtype = rtype; + + list_for_each_entry_rcu(reg, &pr->registrant_list, entry) { + num_ctrls++; + /* + * continue to get the number of all registrans. + */ + if (((void *)ctrl_eds + sizeof(*ctrl_eds)) > + ((void *)data + num_bytes)) + continue; + /* + * Dynamic controller, set cntlid to 0xffff. 
+ */ + ctrl_eds->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC); + if (rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS || + rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) + ctrl_eds->rcsts = 1; + if (reg == holder) + ctrl_eds->rcsts = 1; + uuid_copy((uuid_t *)&ctrl_eds->hostid, ®->hostid); + ctrl_eds->rkey = cpu_to_le64(reg->rkey); + ctrl_eds++; + } + rcu_read_unlock(); + + put_unaligned_le16(num_ctrls, data->regctl); + status = nvmet_copy_to_sgl(req, 0, data, num_bytes); + kfree(data); +out: + nvmet_req_complete(req, status); +} + +u16 nvmet_parse_pr_cmd(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + + switch (cmd->common.opcode) { + case nvme_cmd_resv_register: + req->execute = nvmet_execute_pr_register; + break; + case nvme_cmd_resv_acquire: + req->execute = nvmet_execute_pr_acquire; + break; + case nvme_cmd_resv_release: + req->execute = nvmet_execute_pr_release; + break; + case nvme_cmd_resv_report: + req->execute = nvmet_execute_pr_report; + break; + default: + return 1; + } + return NVME_SC_SUCCESS; +} + +static bool nvmet_is_req_write_cmd_group(struct nvmet_req *req) +{ + u8 opcode = req->cmd->common.opcode; + + if (req->sq->qid) { + switch (opcode) { + case nvme_cmd_flush: + case nvme_cmd_write: + case nvme_cmd_write_zeroes: + case nvme_cmd_dsm: + case nvme_cmd_zone_append: + case nvme_cmd_zone_mgmt_send: + return true; + default: + return false; + } + } + return false; +} + +static bool nvmet_is_req_read_cmd_group(struct nvmet_req *req) +{ + u8 opcode = req->cmd->common.opcode; + + if (req->sq->qid) { + switch (opcode) { + case nvme_cmd_read: + case nvme_cmd_zone_mgmt_recv: + return true; + default: + return false; + } + } + return false; +} + +u16 nvmet_pr_check_cmd_access(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_pr_registrant *holder; + struct nvmet_ns *ns = req->ns; + struct nvmet_pr *pr = &ns->pr; + u16 status = NVME_SC_SUCCESS; + + rcu_read_lock(); + holder = rcu_dereference(pr->holder); + if (!holder) + goto unlock; + if (uuid_equal(&ctrl->hostid, &holder->hostid)) + goto unlock; + + /* + * The Reservation command group is checked in executing, + * allow it here. 
+static bool nvmet_is_req_write_cmd_group(struct nvmet_req *req)
+{
+	u8 opcode = req->cmd->common.opcode;
+
+	if (req->sq->qid) {
+		switch (opcode) {
+		case nvme_cmd_flush:
+		case nvme_cmd_write:
+		case nvme_cmd_write_zeroes:
+		case nvme_cmd_dsm:
+		case nvme_cmd_zone_append:
+		case nvme_cmd_zone_mgmt_send:
+			return true;
+		default:
+			return false;
+		}
+	}
+	return false;
+}
+
+static bool nvmet_is_req_read_cmd_group(struct nvmet_req *req)
+{
+	u8 opcode = req->cmd->common.opcode;
+
+	if (req->sq->qid) {
+		switch (opcode) {
+		case nvme_cmd_read:
+		case nvme_cmd_zone_mgmt_recv:
+			return true;
+		default:
+			return false;
+		}
+	}
+	return false;
+}
+
+u16 nvmet_pr_check_cmd_access(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmet_pr_registrant *holder;
+	struct nvmet_ns *ns = req->ns;
+	struct nvmet_pr *pr = &ns->pr;
+	u16 status = NVME_SC_SUCCESS;
+
+	rcu_read_lock();
+	holder = rcu_dereference(pr->holder);
+	if (!holder)
+		goto unlock;
+	if (uuid_equal(&ctrl->hostid, &holder->hostid))
+		goto unlock;
+
+	/*
+	 * The reservation command group is validated while executing,
+	 * so allow it here.
+	 */
+	switch (holder->rtype) {
+	case NVME_PR_WRITE_EXCLUSIVE:
+		if (nvmet_is_req_write_cmd_group(req))
+			status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+		break;
+	case NVME_PR_EXCLUSIVE_ACCESS:
+		if (nvmet_is_req_read_cmd_group(req) ||
+		    nvmet_is_req_write_cmd_group(req))
+			status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+		break;
+	case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
+	case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
+		if ((nvmet_is_req_write_cmd_group(req)) &&
+		    !nvmet_pr_find_registrant(pr, &ctrl->hostid))
+			status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+		break;
+	case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
+	case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
+		if ((nvmet_is_req_read_cmd_group(req) ||
+		    nvmet_is_req_write_cmd_group(req)) &&
+		    !nvmet_pr_find_registrant(pr, &ctrl->hostid))
+			status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
+		break;
+	default:
+		pr_warn("unexpected reservation type: %d\n", holder->rtype);
+		break;
+	}
+
+unlock:
+	rcu_read_unlock();
+	if (status)
+		req->error_loc = offsetof(struct nvme_common_command, opcode);
+	return status;
+}
+
+u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req)
+{
+	struct nvmet_pr_per_ctrl_ref *pc_ref;
+
+	pc_ref = xa_load(&req->ns->pr_per_ctrl_refs,
+			req->sq->ctrl->cntlid);
+	if (unlikely(!percpu_ref_tryget_live(&pc_ref->ref)))
+		return NVME_SC_INTERNAL;
+	req->pc_ref = pc_ref;
+	return NVME_SC_SUCCESS;
+}
+
+static void nvmet_pr_ctrl_ns_all_cmds_done(struct percpu_ref *ref)
+{
+	struct nvmet_pr_per_ctrl_ref *pc_ref =
+		container_of(ref, struct nvmet_pr_per_ctrl_ref, ref);
+
+	complete(&pc_ref->free_done);
+}
+
+static int nvmet_pr_alloc_and_insert_pc_ref(struct nvmet_ns *ns,
+					    unsigned long idx,
+					    uuid_t *hostid)
+{
+	struct nvmet_pr_per_ctrl_ref *pc_ref;
+	int ret;
+
+	pc_ref = kmalloc(sizeof(*pc_ref), GFP_ATOMIC);
+	if (!pc_ref)
+		return -ENOMEM;
+
+	ret = percpu_ref_init(&pc_ref->ref, nvmet_pr_ctrl_ns_all_cmds_done,
+			PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+	if (ret)
+		goto free;
+
+	init_completion(&pc_ref->free_done);
+	init_completion(&pc_ref->confirm_done);
+	uuid_copy(&pc_ref->hostid, hostid);
+
+	ret = xa_insert(&ns->pr_per_ctrl_refs, idx, pc_ref, GFP_KERNEL);
+	if (ret)
+		goto exit;
+	return ret;
+exit:
+	percpu_ref_exit(&pc_ref->ref);
+free:
+	kfree(pc_ref);
+	return ret;
+}
+
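The pc_ref machinery above is an instance of the usual percpu_ref quiesce pattern: a cheap per-CPU get/put on the I/O path, and a kill-then-wait on teardown. A self-contained sketch of the pattern, with illustrative names that are not taken from this patch:

	#include <linux/percpu-refcount.h>
	#include <linux/completion.h>

	struct demo_ref {
		struct percpu_ref ref;
		struct completion free_done;
	};

	static void demo_release(struct percpu_ref *ref)
	{
		struct demo_ref *d = container_of(ref, struct demo_ref, ref);

		complete(&d->free_done);	/* last user dropped the ref */
	}

	/* I/O path: cheap per-CPU get/put around each command. */
	static int demo_io(struct demo_ref *d)
	{
		if (!percpu_ref_tryget_live(&d->ref))
			return -ENXIO;		/* teardown already started */
		/* ... process the command ... */
		percpu_ref_put(&d->ref);
		return 0;
	}

	/* Teardown: stop new users, wait for in-flight ones, then free. */
	static void demo_teardown(struct demo_ref *d)
	{
		percpu_ref_kill(&d->ref);
		wait_for_completion(&d->free_done);
		percpu_ref_exit(&d->ref);
	}

Setup is the mirror image: percpu_ref_init() with a release callback plus init_completion(), exactly as nvmet_pr_alloc_and_insert_pc_ref() does above.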
+int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
+{
+	struct nvmet_subsys *subsys = ctrl->subsys;
+	struct nvmet_pr_per_ctrl_ref *pc_ref;
+	struct nvmet_ns *ns = NULL;
+	unsigned long idx;
+	int ret;
+
+	ctrl->pr_log_mgr.counter = 0;
+	ctrl->pr_log_mgr.lost_count = 0;
+	mutex_init(&ctrl->pr_log_mgr.lock);
+	INIT_KFIFO(ctrl->pr_log_mgr.log_queue);
+
+	/*
+	 * We hold the subsys lock here: a namespace that is not yet in
+	 * subsys->namespaces cannot be enabled, so nvmet_pr_init_ns() has
+	 * not run for it (see nvmet_ns_enable() for details). Checking
+	 * ns->pr.enable is therefore sufficient.
+	 */
+	nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns) {
+		if (ns->pr.enable) {
+			ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
+							&ctrl->hostid);
+			if (ret)
+				goto free_per_ctrl_refs;
+		}
+	}
+	return 0;
+
+free_per_ctrl_refs:
+	nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns) {
+		if (ns->pr.enable) {
+			pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
+			if (pc_ref)
+				percpu_ref_exit(&pc_ref->ref);
+			kfree(pc_ref);
+		}
+	}
+	return ret;
+}
+
+void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl)
+{
+	struct nvmet_pr_per_ctrl_ref *pc_ref;
+	struct nvmet_ns *ns;
+	unsigned long idx;
+
+	kfifo_free(&ctrl->pr_log_mgr.log_queue);
+	mutex_destroy(&ctrl->pr_log_mgr.lock);
+
+	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
+		if (ns->pr.enable) {
+			pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
+			if (pc_ref)
+				percpu_ref_exit(&pc_ref->ref);
+			kfree(pc_ref);
+		}
+	}
+}
+
+int nvmet_pr_init_ns(struct nvmet_ns *ns)
+{
+	struct nvmet_subsys *subsys = ns->subsys;
+	struct nvmet_pr_per_ctrl_ref *pc_ref;
+	struct nvmet_ctrl *ctrl = NULL;
+	unsigned long idx;
+	int ret;
+
+	ns->pr.holder = NULL;
+	atomic_set(&ns->pr.generation, 0);
+	sema_init(&ns->pr.pr_sem, 1);
+	INIT_LIST_HEAD(&ns->pr.registrant_list);
+	ns->pr.notify_mask = 0;
+
+	xa_init(&ns->pr_per_ctrl_refs);
+
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+		ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
+						&ctrl->hostid);
+		if (ret)
+			goto free_per_ctrl_refs;
+	}
+	return 0;
+
+free_per_ctrl_refs:
+	xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
+		xa_erase(&ns->pr_per_ctrl_refs, idx);
+		percpu_ref_exit(&pc_ref->ref);
+		kfree(pc_ref);
+	}
+	return ret;
+}
+
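Both directions of initialization maintain one invariant: each (enabled namespace, controller) pair owns exactly one pc_ref, indexed by cntlid in the namespace's xarray. The xarray idiom in play, reduced to a minimal sketch with illustrative names:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(demo_refs);

	/* xa_insert() refuses duplicate indices with -EBUSY, so each
	 * cntlid maps to at most one entry. */
	static int demo_track(unsigned long cntlid, void *pc_ref)
	{
		return xa_insert(&demo_refs, cntlid, pc_ref, GFP_KERNEL);
	}

	/* xa_erase() hands back the old entry (or NULL) for freeing. */
	static void *demo_untrack(unsigned long cntlid)
	{
		return xa_erase(&demo_refs, cntlid);
	}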
+void nvmet_pr_exit_ns(struct nvmet_ns *ns)
+{
+	struct nvmet_pr_registrant *reg, *tmp;
+	struct nvmet_pr_per_ctrl_ref *pc_ref;
+	struct nvmet_pr *pr = &ns->pr;
+	unsigned long idx;
+
+	list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
+		list_del(&reg->entry);
+		kfree(reg);
+	}
+
+	xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
+		/*
+		 * No commands are outstanding on this namespace, so
+		 * pc_ref can be freed safely.
+		 */
+		pc_ref = xa_erase(&ns->pr_per_ctrl_refs, idx);
+		percpu_ref_exit(&pc_ref->ref);
+		kfree(pc_ref);
+	}
+
+	xa_destroy(&ns->pr_per_ctrl_refs);
+}
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index 9a3548179a8e..6dbc7036f2e4 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -180,6 +180,106 @@ static const char *nvmet_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
 	return ret;
 }
 
+static const char *nvmet_trace_resv_reg(struct trace_seq *p, u8 *cdw10)
+{
+	static const char * const rrega_strs[] = {
+		[0x00] = "register",
+		[0x01] = "unregister",
+		[0x02] = "replace",
+	};
+	const char *ret = trace_seq_buffer_ptr(p);
+	u8 rrega = cdw10[0] & 0x7;
+	u8 iekey = (cdw10[0] >> 3) & 0x1;
+	u8 ptpl = (cdw10[3] >> 6) & 0x3;
+	const char *rrega_str;
+
+	if (rrega < ARRAY_SIZE(rrega_strs) && rrega_strs[rrega])
+		rrega_str = rrega_strs[rrega];
+	else
+		rrega_str = "reserved";
+
+	trace_seq_printf(p, "rrega=%u:%s, iekey=%u, ptpl=%u",
+			 rrega, rrega_str, iekey, ptpl);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char * const rtype_strs[] = {
+	[0x00] = "reserved",
+	[0x01] = "write exclusive",
+	[0x02] = "exclusive access",
+	[0x03] = "write exclusive registrants only",
+	[0x04] = "exclusive access registrants only",
+	[0x05] = "write exclusive all registrants",
+	[0x06] = "exclusive access all registrants",
+};
+
+static const char *nvmet_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
+{
+	static const char * const racqa_strs[] = {
+		[0x00] = "acquire",
+		[0x01] = "preempt",
+		[0x02] = "preempt and abort",
+	};
+	const char *ret = trace_seq_buffer_ptr(p);
+	u8 racqa = cdw10[0] & 0x7;
+	u8 iekey = (cdw10[0] >> 3) & 0x1;
+	u8 rtype = cdw10[1];
+	const char *racqa_str = "reserved";
+	const char *rtype_str = "reserved";
+
+	if (racqa < ARRAY_SIZE(racqa_strs) && racqa_strs[racqa])
+		racqa_str = racqa_strs[racqa];
+
+	if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
+		rtype_str = rtype_strs[rtype];
+
+	trace_seq_printf(p, "racqa=%u:%s, iekey=%u, rtype=%u:%s",
+			 racqa, racqa_str, iekey, rtype, rtype_str);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *nvmet_trace_resv_rel(struct trace_seq *p, u8 *cdw10)
+{
+	static const char * const rrela_strs[] = {
+		[0x00] = "release",
+		[0x01] = "clear",
+	};
+	const char *ret = trace_seq_buffer_ptr(p);
+	u8 rrela = cdw10[0] & 0x7;
+	u8 iekey = (cdw10[0] >> 3) & 0x1;
+	u8 rtype = cdw10[1];
+	const char *rrela_str = "reserved";
+	const char *rtype_str = "reserved";
+
+	if (rrela < ARRAY_SIZE(rrela_strs) && rrela_strs[rrela])
+		rrela_str = rrela_strs[rrela];
+
+	if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
+		rtype_str = rtype_strs[rtype];
+
+	trace_seq_printf(p, "rrela=%u:%s, iekey=%u, rtype=%u:%s",
+			 rrela, rrela_str, iekey, rtype, rtype_str);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *nvmet_trace_resv_report(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u32 numd = get_unaligned_le32(cdw10);
+	u8 eds = cdw10[4] & 0x1;
+
+	trace_seq_printf(p, "numd=%u, eds=%u", numd, eds);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
 const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
 		u8 opcode, u8 *cdw10)
 {
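With these decoders wired into nvmet_trace_parse_nvm_cmd() below, the cdw10 fields of reservation commands appear pre-decoded in the trace buffer. For instance, judging from the format strings above, an acquire that preempts an Exclusive Access reservation would render its payload as:

	racqa=1:preempt, iekey=0, rtype=2:exclusive access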
@@ -195,6 +295,14 @@ const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
 		return nvmet_trace_zone_mgmt_send(p, cdw10);
 	case nvme_cmd_zone_mgmt_recv:
 		return nvmet_trace_zone_mgmt_recv(p, cdw10);
+	case nvme_cmd_resv_register:
+		return nvmet_trace_resv_reg(p, cdw10);
+	case nvme_cmd_resv_acquire:
+		return nvmet_trace_resv_acq(p, cdw10);
+	case nvme_cmd_resv_release:
+		return nvmet_trace_resv_rel(p, cdw10);
+	case nvme_cmd_resv_report:
+		return nvmet_trace_resv_report(p, cdw10);
 	default:
 		return nvmet_trace_common(p, cdw10);
 	}
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index af9e13be7678..3aef35b05111 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -537,6 +537,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 	u16 status = NVME_SC_SUCCESS;
 	unsigned int total_len = 0;
 	struct scatterlist *sg;
+	u32 data_len = nvmet_rw_data_len(req);
 	struct bio *bio;
 	int sg_cnt;
 
@@ -544,6 +545,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
 		return;
 
+	if (data_len >
+	    bdev_max_zone_append_sectors(req->ns->bdev) << SECTOR_SHIFT) {
+		req->error_loc = offsetof(struct nvme_rw_command, length);
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+		goto out;
+	}
+
 	if (!req->sg_cnt) {
 		nvmet_req_complete(req, 0);
 		return;
@@ -576,20 +584,17 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 		bio->bi_opf |= REQ_FUA;
 
 	for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
-		struct page *p = sg_page(sg);
-		unsigned int l = sg->length;
-		unsigned int o = sg->offset;
-		unsigned int ret;
+		unsigned int len = sg->length;
 
-		ret = bio_add_zone_append_page(bio, p, l, o);
-		if (ret != sg->length) {
+		if (bio_add_pc_page(bdev_get_queue(bio->bi_bdev), bio,
+				sg_page(sg), len, sg->offset) != len) {
 			status = NVME_SC_INTERNAL;
 			goto out_put_bio;
 		}
-		total_len += sg->length;
+		total_len += len;
 	}
 
-	if (total_len != nvmet_rw_data_len(req)) {
+	if (total_len != data_len) {
 		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
 		goto out_put_bio;
 	}
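The new data_len check bounds each zone append to the backing queue's zone-append limit, the same quantity nvmet derives its advertised ZASL from. Factored as a hypothetical helper, illustrative only and mirroring the check added above:

	#include <linux/blkdev.h>

	/* Hypothetical helper equivalent to the bound enforced above. */
	static inline bool demo_zone_append_fits(struct block_device *bdev,
						 u32 data_len)
	{
		/* The queue limit is in 512-byte sectors; compare in bytes. */
		return data_len <=
		       bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT;
	}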