block-6.7-2023-11-23

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmVfrJIQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpqKID/oCNxih4ErRdCYJOZA+tR0YkuWJBKmF9KSe
 w94GnsxpCdPFMo+Lr2J3WoHsxH/xTFiT5BcLMTKb5Rju7zSIvf40xZxmizwhnU3p
 aA1WhWjo2Qu20p35wbKy23frSOZIZAj3+PXUxOG9hso6fY96JVpDToBQ4I9k5OG1
 1KHXorlktQgDWk1XryI0z9F73Q3Ja3Bt2G4aXRAGwnTyvvIHJd+jm8ksjaEKKkmQ
 YXv1+ZJXhHkaeSfcwYcOFDffx+O1dooWnHjcKbgyXu0UQq0kxxIwBkF5C9ltZVVT
 TW2WusplldV4EU/Z3ck7E03Kbmk81iTN9yyh7wAr0TmkEEOdx5Mv/PtqUHme0A4Z
 PJEBR7g6AVnERttGk4H2MOiPBsMOKBFT2UIVSqUTMsij2a5uaKjGvBXXDELdf4g9
 45QKuP1QflEImtKm6HtELA9gWuAF1rGOrH7PMLLPhmQck643cgVIcw6Tz3eyaJMe
 cBuvTsejkbQVeLSiX1mdvZk0gxuowCP/A8+CXyODdhl+H/mrDnMgR6HCV07VOy6F
 lVXeXjQaqwd9fdQ2kxfjbstNkjE3Z/NRDGJ+y4oNFNj6YmLoVHivBAXDHhHIVpXb
 u/F+IUm7I2M8G3DfqS0VOABauqzbDxe7c8j10OzJeDskd06t1prt6IY/qV4uhImM
 G6XNMzeHjw==
 =EShg
 -----END PGP SIGNATURE-----

Merge tag 'block-6.7-2023-11-23' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "A bit bigger than usual at this time, but nothing really earth
  shattering:

   - NVMe pull request via Keith:
       - TCP TLS fixes (Hannes)
       - Authentication fixes (Mark, Hannes)
       - Properly terminate target names (Christoph)

   - MD pull request via Song, fixing a raid5 corruption issue

   - Disentanglement of the dependency mess in nvme introduced with the
     tls additions. Now it should actually build on all configs (Arnd)

   - Series of bcache fixes (Coly)

   - Removal of a dead helper (Damien)

   - s390 dasd fix (Muhammad, Jan)

   - lockdep blk-cgroup fixes (Ming)"

* tag 'block-6.7-2023-11-23' of git://git.kernel.dk/linux: (33 commits)
  nvme: tcp: fix compile-time checks for TLS mode
  nvme: target: fix Kconfig select statements
  nvme: target: fix nvme_keyring_id() references
  nvme: move nvme_stop_keep_alive() back to original position
  nbd: pass nbd_sock to nbd_read_reply() instead of index
  s390/dasd: protect device queue against concurrent access
  s390/dasd: resolve spelling mistake
  block/null_blk: Fix double blk_mq_start_request() warning
  nvmet-tcp: always initialize tls_handshake_tmo_work
  nvmet: nul-terminate the NQNs passed in the connect command
  nvme: blank out authentication fabrics options if not configured
  nvme: catch errors from nvme_configure_metadata()
  nvme-tcp: only evaluate 'tls' option if TLS is selected
  nvme-auth: set explanation code for failure2 msgs
  nvme-auth: unlock mutex in one place only
  block: Remove blk_set_runtime_active()
  nbd: fix null-ptr-dereference while accessing 'nbd->config'
  nbd: factor out a helper to get nbd_config without holding 'config_lock'
  nbd: fold nbd config initialization into nbd_alloc_config()
  bcache: avoid NULL checking to c->root in run_cache_set()
  ...
Linus Torvalds 2023-11-23 17:40:15 -08:00
commit bc893f744e
25 changed files with 210 additions and 148 deletions

block/blk-cgroup.c

@ -577,6 +577,7 @@ static void blkg_destroy_all(struct gendisk *disk)
struct request_queue *q = disk->queue; struct request_queue *q = disk->queue;
struct blkcg_gq *blkg, *n; struct blkcg_gq *blkg, *n;
int count = BLKG_DESTROY_BATCH_SIZE; int count = BLKG_DESTROY_BATCH_SIZE;
int i;
restart: restart:
spin_lock_irq(&q->queue_lock); spin_lock_irq(&q->queue_lock);
@ -602,6 +603,18 @@ restart:
} }
} }
/*
* Mark policy deactivated since policy offline has been done, and
* the free is scheduled, so future blkcg_deactivate_policy() can
* be bypassed
*/
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
if (pol)
__clear_bit(pol->plid, q->blkcg_pols);
}
q->root_blkg = NULL; q->root_blkg = NULL;
spin_unlock_irq(&q->queue_lock); spin_unlock_irq(&q->queue_lock);
} }

block/blk-cgroup.h

@ -249,8 +249,6 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
{ {
struct blkcg_gq *blkg; struct blkcg_gq *blkg;
WARN_ON_ONCE(!rcu_read_lock_held());
if (blkcg == &blkcg_root) if (blkcg == &blkcg_root)
return q->root_blkg; return q->root_blkg;

block/blk-pm.c

@ -163,38 +163,15 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
* @q: the queue of the device * @q: the queue of the device
* *
* Description: * Description:
* For historical reasons, this routine merely calls blk_set_runtime_active() * Restart the queue of a runtime suspended device. It does this regardless
* to do the real work of restarting the queue. It does this regardless of * of whether the device's runtime-resume succeeded; even if it failed the
* whether the device's runtime-resume succeeded; even if it failed the
* driver or error handler will need to communicate with the device. * driver or error handler will need to communicate with the device.
* *
* This function should be called near the end of the device's * This function should be called near the end of the device's
* runtime_resume callback. * runtime_resume callback to correct queue runtime PM status and re-enable
* peeking requests from the queue.
*/ */
void blk_post_runtime_resume(struct request_queue *q) void blk_post_runtime_resume(struct request_queue *q)
{
blk_set_runtime_active(q);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
/**
* blk_set_runtime_active - Force runtime status of the queue to be active
* @q: the queue of the device
*
* If the device is left runtime suspended during system suspend the resume
* hook typically resumes the device and corrects runtime status
* accordingly. However, that does not affect the queue runtime PM status
* which is still "suspended". This prevents processing requests from the
* queue.
*
* This function can be used in driver's resume hook to correct queue
* runtime PM status and re-enable peeking requests from the queue. It
* should be called before first request is added to the queue.
*
* This function is also called by blk_post_runtime_resume() for
* runtime resumes. It does everything necessary to restart the queue.
*/
void blk_set_runtime_active(struct request_queue *q)
{ {
int old_status; int old_status;
@ -211,4 +188,4 @@ void blk_set_runtime_active(struct request_queue *q)
if (old_status != RPM_ACTIVE) if (old_status != RPM_ACTIVE)
blk_clear_pm_only(q); blk_clear_pm_only(q);
} }
EXPORT_SYMBOL(blk_set_runtime_active); EXPORT_SYMBOL(blk_post_runtime_resume);

block/blk-throttle.c

@ -1320,6 +1320,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
rcu_read_lock();
/* /*
* Update has_rules[] flags for the updated tg's subtree. A tg is * Update has_rules[] flags for the updated tg's subtree. A tg is
* considered to have rules if either the tg itself or any of its * considered to have rules if either the tg itself or any of its
@ -1347,6 +1348,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
this_tg->latency_target = max(this_tg->latency_target, this_tg->latency_target = max(this_tg->latency_target,
parent_tg->latency_target); parent_tg->latency_target);
} }
rcu_read_unlock();
/* /*
* We're already holding queue_lock and know @tg is valid. Let's * We're already holding queue_lock and know @tg is valid. Let's

drivers/block/nbd.c

@ -67,6 +67,7 @@ struct nbd_sock {
struct recv_thread_args { struct recv_thread_args {
struct work_struct work; struct work_struct work;
struct nbd_device *nbd; struct nbd_device *nbd;
struct nbd_sock *nsock;
int index; int index;
}; };
@ -395,6 +396,22 @@ static u32 req_to_nbd_cmd_type(struct request *req)
} }
} }
static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
{
if (refcount_inc_not_zero(&nbd->config_refs)) {
/*
* Add smp_mb__after_atomic to ensure that reading nbd->config_refs
* and reading nbd->config is ordered. The pair is the barrier in
* nbd_alloc_and_init_config(), avoid nbd->config_refs is set
* before nbd->config.
*/
smp_mb__after_atomic();
return nbd->config;
}
return NULL;
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req) static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
{ {
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@ -409,13 +426,13 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
return BLK_EH_DONE; return BLK_EH_DONE;
} }
if (!refcount_inc_not_zero(&nbd->config_refs)) { config = nbd_get_config_unlocked(nbd);
if (!config) {
cmd->status = BLK_STS_TIMEOUT; cmd->status = BLK_STS_TIMEOUT;
__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
mutex_unlock(&cmd->lock); mutex_unlock(&cmd->lock);
goto done; goto done;
} }
config = nbd->config;
if (config->num_connections > 1 || if (config->num_connections > 1 ||
(config->num_connections == 1 && nbd->tag_set.timeout)) { (config->num_connections == 1 && nbd->tag_set.timeout)) {
@ -489,15 +506,9 @@ done:
return BLK_EH_DONE; return BLK_EH_DONE;
} }
/* static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
* Send or receive packet. Return a positive value on success and struct iov_iter *iter, int msg_flags, int *sent)
* negtive value on failue, and never return 0.
*/
static int sock_xmit(struct nbd_device *nbd, int index, int send,
struct iov_iter *iter, int msg_flags, int *sent)
{ {
struct nbd_config *config = nbd->config;
struct socket *sock = config->socks[index]->sock;
int result; int result;
struct msghdr msg; struct msghdr msg;
unsigned int noreclaim_flag; unsigned int noreclaim_flag;
@ -540,6 +551,19 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
return result; return result;
} }
/*
* Send or receive packet. Return a positive value on success and
* negtive value on failure, and never return 0.
*/
static int sock_xmit(struct nbd_device *nbd, int index, int send,
struct iov_iter *iter, int msg_flags, int *sent)
{
struct nbd_config *config = nbd->config;
struct socket *sock = config->socks[index]->sock;
return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
}
/* /*
* Different settings for sk->sk_sndtimeo can result in different return values * Different settings for sk->sk_sndtimeo can result in different return values
* if there is a signal pending when we enter sendmsg, because reasons? * if there is a signal pending when we enter sendmsg, because reasons?
@ -696,7 +720,7 @@ out:
return 0; return 0;
} }
static int nbd_read_reply(struct nbd_device *nbd, int index, static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
struct nbd_reply *reply) struct nbd_reply *reply)
{ {
struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)}; struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
@ -705,7 +729,7 @@ static int nbd_read_reply(struct nbd_device *nbd, int index,
reply->magic = 0; reply->magic = 0;
iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply)); iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
if (result < 0) { if (result < 0) {
if (!nbd_disconnected(nbd->config)) if (!nbd_disconnected(nbd->config))
dev_err(disk_to_dev(nbd->disk), dev_err(disk_to_dev(nbd->disk),
@ -829,14 +853,14 @@ static void recv_work(struct work_struct *work)
struct nbd_device *nbd = args->nbd; struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config; struct nbd_config *config = nbd->config;
struct request_queue *q = nbd->disk->queue; struct request_queue *q = nbd->disk->queue;
struct nbd_sock *nsock; struct nbd_sock *nsock = args->nsock;
struct nbd_cmd *cmd; struct nbd_cmd *cmd;
struct request *rq; struct request *rq;
while (1) { while (1) {
struct nbd_reply reply; struct nbd_reply reply;
if (nbd_read_reply(nbd, args->index, &reply)) if (nbd_read_reply(nbd, nsock->sock, &reply))
break; break;
/* /*
@ -871,7 +895,6 @@ static void recv_work(struct work_struct *work)
percpu_ref_put(&q->q_usage_counter); percpu_ref_put(&q->q_usage_counter);
} }
nsock = config->socks[args->index];
mutex_lock(&nsock->tx_lock); mutex_lock(&nsock->tx_lock);
nbd_mark_nsock_dead(nbd, nsock, 1); nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock); mutex_unlock(&nsock->tx_lock);
@ -977,12 +1000,12 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
struct nbd_sock *nsock; struct nbd_sock *nsock;
int ret; int ret;
if (!refcount_inc_not_zero(&nbd->config_refs)) { config = nbd_get_config_unlocked(nbd);
if (!config) {
dev_err_ratelimited(disk_to_dev(nbd->disk), dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n"); "Socks array is empty\n");
return -EINVAL; return -EINVAL;
} }
config = nbd->config;
if (index >= config->num_connections) { if (index >= config->num_connections) {
dev_err_ratelimited(disk_to_dev(nbd->disk), dev_err_ratelimited(disk_to_dev(nbd->disk),
@ -1215,6 +1238,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
INIT_WORK(&args->work, recv_work); INIT_WORK(&args->work, recv_work);
args->index = i; args->index = i;
args->nbd = nbd; args->nbd = nbd;
args->nsock = nsock;
nsock->cookie++; nsock->cookie++;
mutex_unlock(&nsock->tx_lock); mutex_unlock(&nsock->tx_lock);
sockfd_put(old); sockfd_put(old);
@ -1397,6 +1421,7 @@ static int nbd_start_device(struct nbd_device *nbd)
refcount_inc(&nbd->config_refs); refcount_inc(&nbd->config_refs);
INIT_WORK(&args->work, recv_work); INIT_WORK(&args->work, recv_work);
args->nbd = nbd; args->nbd = nbd;
args->nsock = config->socks[i];
args->index = i; args->index = i;
queue_work(nbd->recv_workq, &args->work); queue_work(nbd->recv_workq, &args->work);
} }
@ -1530,17 +1555,20 @@ static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
return error; return error;
} }
static struct nbd_config *nbd_alloc_config(void) static int nbd_alloc_and_init_config(struct nbd_device *nbd)
{ {
struct nbd_config *config; struct nbd_config *config;
if (WARN_ON(nbd->config))
return -EINVAL;
if (!try_module_get(THIS_MODULE)) if (!try_module_get(THIS_MODULE))
return ERR_PTR(-ENODEV); return -ENODEV;
config = kzalloc(sizeof(struct nbd_config), GFP_NOFS); config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
if (!config) { if (!config) {
module_put(THIS_MODULE); module_put(THIS_MODULE);
return ERR_PTR(-ENOMEM); return -ENOMEM;
} }
atomic_set(&config->recv_threads, 0); atomic_set(&config->recv_threads, 0);
@ -1548,12 +1576,24 @@ static struct nbd_config *nbd_alloc_config(void)
init_waitqueue_head(&config->conn_wait); init_waitqueue_head(&config->conn_wait);
config->blksize_bits = NBD_DEF_BLKSIZE_BITS; config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
atomic_set(&config->live_connections, 0); atomic_set(&config->live_connections, 0);
return config;
nbd->config = config;
/*
* Order refcount_set(&nbd->config_refs, 1) and nbd->config assignment,
* its pair is the barrier in nbd_get_config_unlocked().
* So nbd_get_config_unlocked() won't see nbd->config as null after
* refcount_inc_not_zero() succeed.
*/
smp_mb__before_atomic();
refcount_set(&nbd->config_refs, 1);
return 0;
} }
static int nbd_open(struct gendisk *disk, blk_mode_t mode) static int nbd_open(struct gendisk *disk, blk_mode_t mode)
{ {
struct nbd_device *nbd; struct nbd_device *nbd;
struct nbd_config *config;
int ret = 0; int ret = 0;
mutex_lock(&nbd_index_mutex); mutex_lock(&nbd_index_mutex);
@ -1566,27 +1606,25 @@ static int nbd_open(struct gendisk *disk, blk_mode_t mode)
ret = -ENXIO; ret = -ENXIO;
goto out; goto out;
} }
if (!refcount_inc_not_zero(&nbd->config_refs)) {
struct nbd_config *config;
config = nbd_get_config_unlocked(nbd);
if (!config) {
mutex_lock(&nbd->config_lock); mutex_lock(&nbd->config_lock);
if (refcount_inc_not_zero(&nbd->config_refs)) { if (refcount_inc_not_zero(&nbd->config_refs)) {
mutex_unlock(&nbd->config_lock); mutex_unlock(&nbd->config_lock);
goto out; goto out;
} }
config = nbd_alloc_config(); ret = nbd_alloc_and_init_config(nbd);
if (IS_ERR(config)) { if (ret) {
ret = PTR_ERR(config);
mutex_unlock(&nbd->config_lock); mutex_unlock(&nbd->config_lock);
goto out; goto out;
} }
nbd->config = config;
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs); refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock); mutex_unlock(&nbd->config_lock);
if (max_part) if (max_part)
set_bit(GD_NEED_PART_SCAN, &disk->state); set_bit(GD_NEED_PART_SCAN, &disk->state);
} else if (nbd_disconnected(nbd->config)) { } else if (nbd_disconnected(config)) {
if (max_part) if (max_part)
set_bit(GD_NEED_PART_SCAN, &disk->state); set_bit(GD_NEED_PART_SCAN, &disk->state);
} }
@ -1990,22 +2028,17 @@ again:
pr_err("nbd%d already in use\n", index); pr_err("nbd%d already in use\n", index);
return -EBUSY; return -EBUSY;
} }
if (WARN_ON(nbd->config)) {
mutex_unlock(&nbd->config_lock); ret = nbd_alloc_and_init_config(nbd);
nbd_put(nbd); if (ret) {
return -EINVAL;
}
config = nbd_alloc_config();
if (IS_ERR(config)) {
mutex_unlock(&nbd->config_lock); mutex_unlock(&nbd->config_lock);
nbd_put(nbd); nbd_put(nbd);
pr_err("couldn't allocate config\n"); pr_err("couldn't allocate config\n");
return PTR_ERR(config); return ret;
} }
nbd->config = config;
refcount_set(&nbd->config_refs, 1);
set_bit(NBD_RT_BOUND, &config->runtime_flags);
config = nbd->config;
set_bit(NBD_RT_BOUND, &config->runtime_flags);
ret = nbd_genl_size_set(info, nbd); ret = nbd_genl_size_set(info, nbd);
if (ret) if (ret)
goto out; goto out;
@ -2208,7 +2241,8 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
} }
mutex_unlock(&nbd_index_mutex); mutex_unlock(&nbd_index_mutex);
if (!refcount_inc_not_zero(&nbd->config_refs)) { config = nbd_get_config_unlocked(nbd);
if (!config) {
dev_err(nbd_to_dev(nbd), dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n"); "not configured, cannot reconfigure\n");
nbd_put(nbd); nbd_put(nbd);
@ -2216,7 +2250,6 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
} }
mutex_lock(&nbd->config_lock); mutex_lock(&nbd->config_lock);
config = nbd->config;
if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) || if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
!nbd->pid) { !nbd->pid) {
dev_err(nbd_to_dev(nbd), dev_err(nbd_to_dev(nbd),
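
The nbd changes above replace open-coded "refcount_inc_not_zero() then read nbd->config" sequences with nbd_get_config_unlocked(), and pair smp_mb__before_atomic()/refcount_set() in nbd_alloc_and_init_config() with smp_mb__after_atomic() in the reader, so a non-zero config_refs can never be observed before nbd->config itself is visible. A minimal userspace sketch of the same publish-then-acquire idea, using C11 atomics instead of the kernel primitives (struct dev, publish_config() and get_config() are invented names for illustration):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cfg { int num_connections; };

struct dev {
	struct cfg *config;       /* only dereferenced while config_refs > 0 */
	atomic_int  config_refs;  /* 0 means "no config published yet" */
};

/* Publisher: fully initialise the config, then make it reachable by raising
 * the refcount with release ordering (the patch uses smp_mb__before_atomic()
 * followed by refcount_set() for the same effect). */
static int publish_config(struct dev *d)
{
	struct cfg *c = calloc(1, sizeof(*c));

	if (!c)
		return -1;
	d->config = c;
	atomic_store_explicit(&d->config_refs, 1, memory_order_release);
	return 0;
}

/* Reader: only touch config after an "increment if not zero" succeeds; the
 * acquire ordering pairs with the release above, mirroring
 * refcount_inc_not_zero() + smp_mb__after_atomic() in nbd_get_config_unlocked(). */
static struct cfg *get_config(struct dev *d)
{
	int refs = atomic_load_explicit(&d->config_refs, memory_order_relaxed);

	while (refs > 0) {
		if (atomic_compare_exchange_weak_explicit(&d->config_refs,
							  &refs, refs + 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return d->config;
	}
	return NULL;
}

int main(void)
{
	struct dev d = { .config = NULL, .config_refs = 0 };

	printf("before publish: %p\n", (void *)get_config(&d));
	publish_config(&d);
	printf("after publish:  %p\n", (void *)get_config(&d));
	return 0;
}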

drivers/block/null_blk/main.c

@ -1464,19 +1464,13 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
return BLK_STS_OK; return BLK_STS_OK;
} }
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector, static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
sector_t nr_sectors, enum req_op op) sector_t nr_sectors, enum req_op op)
{ {
struct nullb_device *dev = cmd->nq->dev; struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb; struct nullb *nullb = dev->nullb;
blk_status_t sts; blk_status_t sts;
if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
sts = null_handle_throttled(cmd);
if (sts != BLK_STS_OK)
return sts;
}
if (op == REQ_OP_FLUSH) { if (op == REQ_OP_FLUSH) {
cmd->error = errno_to_blk_status(null_handle_flush(nullb)); cmd->error = errno_to_blk_status(null_handle_flush(nullb));
goto out; goto out;
@ -1493,7 +1487,6 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
out: out:
nullb_complete_cmd(cmd); nullb_complete_cmd(cmd);
return BLK_STS_OK;
} }
static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer) static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
@ -1724,8 +1717,6 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->fake_timeout = should_timeout_request(rq) || cmd->fake_timeout = should_timeout_request(rq) ||
blk_should_fake_timeout(rq->q); blk_should_fake_timeout(rq->q);
blk_mq_start_request(rq);
if (should_requeue_request(rq)) { if (should_requeue_request(rq)) {
/* /*
* Alternate between hitting the core BUSY path, and the * Alternate between hitting the core BUSY path, and the
@ -1738,6 +1729,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK; return BLK_STS_OK;
} }
if (test_bit(NULLB_DEV_FL_THROTTLED, &nq->dev->flags)) {
blk_status_t sts = null_handle_throttled(cmd);
if (sts != BLK_STS_OK)
return sts;
}
blk_mq_start_request(rq);
if (is_poll) { if (is_poll) {
spin_lock(&nq->poll_lock); spin_lock(&nq->poll_lock);
list_add_tail(&rq->queuelist, &nq->poll_list); list_add_tail(&rq->queuelist, &nq->poll_list);
@ -1747,7 +1747,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
if (cmd->fake_timeout) if (cmd->fake_timeout)
return BLK_STS_OK; return BLK_STS_OK;
return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq)); null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
return BLK_STS_OK;
} }
static void null_queue_rqs(struct request **rqlist) static void null_queue_rqs(struct request **rqlist)

drivers/md/bcache/bcache.h

@ -265,6 +265,7 @@ struct bcache_device {
#define BCACHE_DEV_WB_RUNNING 3 #define BCACHE_DEV_WB_RUNNING 3
#define BCACHE_DEV_RATE_DW_RUNNING 4 #define BCACHE_DEV_RATE_DW_RUNNING 4
int nr_stripes; int nr_stripes;
#define BCH_MIN_STRIPE_SZ ((4 << 20) >> SECTOR_SHIFT)
unsigned int stripe_size; unsigned int stripe_size;
atomic_t *stripe_sectors_dirty; atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes; unsigned long *full_dirty_stripes;

drivers/md/bcache/btree.c

@ -1000,6 +1000,9 @@ err:
* *
* The btree node will have either a read or a write lock held, depending on * The btree node will have either a read or a write lock held, depending on
* level and op->lock. * level and op->lock.
*
* Note: Only error code or btree pointer will be returned, it is unncessary
* for callers to check NULL pointer.
*/ */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
struct bkey *k, int level, bool write, struct bkey *k, int level, bool write,
@ -1111,6 +1114,10 @@ retry:
mutex_unlock(&b->c->bucket_lock); mutex_unlock(&b->c->bucket_lock);
} }
/*
* Only error code or btree pointer will be returned, it is unncessary for
* callers to check NULL pointer.
*/
struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
int level, bool wait, int level, bool wait,
struct btree *parent) struct btree *parent)
@ -1368,7 +1375,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
memset(new_nodes, 0, sizeof(new_nodes)); memset(new_nodes, 0, sizeof(new_nodes));
closure_init_stack(&cl); closure_init_stack(&cl);
while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b)) while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
keys += r[nodes++].keys; keys += r[nodes++].keys;
blocks = btree_default_blocks(b->c) * 2 / 3; blocks = btree_default_blocks(b->c) * 2 / 3;
@ -1532,6 +1539,8 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
return 0; return 0;
n = btree_node_alloc_replacement(replace, NULL); n = btree_node_alloc_replacement(replace, NULL);
if (IS_ERR(n))
return 0;
/* recheck reserve after allocating replacement node */ /* recheck reserve after allocating replacement node */
if (btree_check_reserve(b, NULL)) { if (btree_check_reserve(b, NULL)) {
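
The new comments above record a contract: bch_btree_node_get() and __bch_btree_node_alloc() return either a valid btree pointer or an ERR_PTR()-encoded error, never NULL, which is why callers touched by this series (btree_gc_coalesce(), run_cache_set()) can drop IS_ERR_OR_NULL() in favour of plain IS_ERR(). A rough userspace restatement of that encoding, with the helpers written from memory of the kernel's definitions and node_alloc() invented for the example:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* errors live in the top MAX_ERRNO values of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Returns a buffer or an encoded errno -- never NULL, so IS_ERR() suffices. */
static void *node_alloc(int simulate_failure)
{
	void *n;

	if (simulate_failure)
		return ERR_PTR(-ENOMEM);
	n = malloc(16);
	return n ? n : ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *n = node_alloc(1);

	if (IS_ERR(n))
		printf("alloc failed: %ld\n", PTR_ERR(n));
	else
		free(n);
	return 0;
}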

drivers/md/bcache/super.c

@ -905,6 +905,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
if (!d->stripe_size) if (!d->stripe_size)
d->stripe_size = 1 << 31; d->stripe_size = 1 << 31;
else if (d->stripe_size < BCH_MIN_STRIPE_SZ)
d->stripe_size = roundup(BCH_MIN_STRIPE_SZ, d->stripe_size);
n = DIV_ROUND_UP_ULL(sectors, d->stripe_size); n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
if (!n || n > max_stripes) { if (!n || n > max_stripes) {
@ -2016,7 +2018,7 @@ static int run_cache_set(struct cache_set *c)
c->root = bch_btree_node_get(c, NULL, k, c->root = bch_btree_node_get(c, NULL, k,
j->btree_level, j->btree_level,
true, NULL); true, NULL);
if (IS_ERR_OR_NULL(c->root)) if (IS_ERR(c->root))
goto err; goto err;
list_del_init(&c->root->list); list_del_init(&c->root->list);

drivers/md/bcache/sysfs.c

@ -1104,7 +1104,7 @@ SHOW(__bch_cache)
sum += INITIAL_PRIO - cached[i]; sum += INITIAL_PRIO - cached[i];
if (n) if (n)
do_div(sum, n); sum = div64_u64(sum, n);
for (i = 0; i < ARRAY_SIZE(q); i++) for (i = 0; i < ARRAY_SIZE(q); i++)
q[i] = INITIAL_PRIO - cached[n * (i + 1) / q[i] = INITIAL_PRIO - cached[n * (i + 1) /

drivers/md/bcache/writeback.c

@ -913,7 +913,7 @@ static int bch_dirty_init_thread(void *arg)
int cur_idx, prev_idx, skip_nr; int cur_idx, prev_idx, skip_nr;
k = p = NULL; k = p = NULL;
cur_idx = prev_idx = 0; prev_idx = 0;
bch_btree_iter_init(&c->root->keys, &iter, NULL); bch_btree_iter_init(&c->root->keys, &iter, NULL);
k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
@ -977,24 +977,35 @@ static int bch_btre_dirty_init_thread_nr(void)
void bch_sectors_dirty_init(struct bcache_device *d) void bch_sectors_dirty_init(struct bcache_device *d)
{ {
int i; int i;
struct btree *b = NULL;
struct bkey *k = NULL; struct bkey *k = NULL;
struct btree_iter iter; struct btree_iter iter;
struct sectors_dirty_init op; struct sectors_dirty_init op;
struct cache_set *c = d->c; struct cache_set *c = d->c;
struct bch_dirty_init_state state; struct bch_dirty_init_state state;
retry_lock:
b = c->root;
rw_lock(0, b, b->level);
if (b != c->root) {
rw_unlock(0, b);
goto retry_lock;
}
/* Just count root keys if no leaf node */ /* Just count root keys if no leaf node */
rw_lock(0, c->root, c->root->level);
if (c->root->level == 0) { if (c->root->level == 0) {
bch_btree_op_init(&op.op, -1); bch_btree_op_init(&op.op, -1);
op.inode = d->id; op.inode = d->id;
op.count = 0; op.count = 0;
for_each_key_filter(&c->root->keys, for_each_key_filter(&c->root->keys,
k, &iter, bch_ptr_invalid) k, &iter, bch_ptr_invalid) {
if (KEY_INODE(k) != op.inode)
continue;
sectors_dirty_init_fn(&op.op, c->root, k); sectors_dirty_init_fn(&op.op, c->root, k);
}
rw_unlock(0, c->root); rw_unlock(0, b);
return; return;
} }
@ -1014,23 +1025,24 @@ void bch_sectors_dirty_init(struct bcache_device *d)
if (atomic_read(&state.enough)) if (atomic_read(&state.enough))
break; break;
atomic_inc(&state.started);
state.infos[i].state = &state; state.infos[i].state = &state;
state.infos[i].thread = state.infos[i].thread =
kthread_run(bch_dirty_init_thread, &state.infos[i], kthread_run(bch_dirty_init_thread, &state.infos[i],
"bch_dirtcnt[%d]", i); "bch_dirtcnt[%d]", i);
if (IS_ERR(state.infos[i].thread)) { if (IS_ERR(state.infos[i].thread)) {
pr_err("fails to run thread bch_dirty_init[%d]\n", i); pr_err("fails to run thread bch_dirty_init[%d]\n", i);
atomic_dec(&state.started);
for (--i; i >= 0; i--) for (--i; i >= 0; i--)
kthread_stop(state.infos[i].thread); kthread_stop(state.infos[i].thread);
goto out; goto out;
} }
atomic_inc(&state.started);
} }
out: out:
/* Must wait for all threads to stop. */ /* Must wait for all threads to stop. */
wait_event(state.wait, atomic_read(&state.started) == 0); wait_event(state.wait, atomic_read(&state.started) == 0);
rw_unlock(0, c->root); rw_unlock(0, b);
} }
void bch_cached_dev_writeback_init(struct cached_dev *dc) void bch_cached_dev_writeback_init(struct cached_dev *dc)
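
The bch_sectors_dirty_init() hunk above turns a plain rw_lock(0, c->root, c->root->level) into a retry loop, because c->root may be replaced by another thread between reading the pointer and acquiring its lock. A small pthread sketch of that lock-then-recheck idiom, under the simplifying assumption that the old node stays valid long enough to be unlocked (lock_current_root() and the structs are invented for illustration):

#include <pthread.h>

struct node {
	pthread_rwlock_t lock;
	int level;
};

struct cache_set {
	struct node *root;	/* may be swapped for a new node at any time */
};

/* Lock what we believe is the root, then re-check that it is still the root;
 * if it was replaced while we slept on the lock, drop it and try again. */
static struct node *lock_current_root(struct cache_set *c)
{
	struct node *b;

	for (;;) {
		b = c->root;
		pthread_rwlock_rdlock(&b->lock);
		if (b == c->root)
			return b;	/* still current; caller unlocks later */
		pthread_rwlock_unlock(&b->lock);
	}
}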

drivers/md/md.c

@ -8666,7 +8666,8 @@ static void md_end_clone_io(struct bio *bio)
struct bio *orig_bio = md_io_clone->orig_bio; struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev; struct mddev *mddev = md_io_clone->mddev;
orig_bio->bi_status = bio->bi_status; if (bio->bi_status && !orig_bio->bi_status)
orig_bio->bi_status = bio->bi_status;
if (md_io_clone->start_time) if (md_io_clone->start_time)
bio_end_io_acct(orig_bio, md_io_clone->start_time); bio_end_io_acct(orig_bio, md_io_clone->start_time);
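
The md_end_clone_io() change above copies a clone's error into the original bio only when the original has not already recorded one, so the first failure sticks and a later successful clone cannot clear it. The pattern in isolation (accumulate_status() is a made-up name):

/* First error wins; a later success never overwrites an earlier failure. */
static void accumulate_status(int *orig_status, int clone_status)
{
	if (clone_status && !*orig_status)
		*orig_status = clone_status;
}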

drivers/nvme/host/auth.c

@ -757,12 +757,11 @@ static void nvme_queue_auth_work(struct work_struct *work)
__func__, chap->qid); __func__, chap->qid);
mutex_lock(&ctrl->dhchap_auth_mutex); mutex_lock(&ctrl->dhchap_auth_mutex);
ret = nvme_auth_dhchap_setup_host_response(ctrl, chap); ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
mutex_unlock(&ctrl->dhchap_auth_mutex);
if (ret) { if (ret) {
mutex_unlock(&ctrl->dhchap_auth_mutex);
chap->error = ret; chap->error = ret;
goto fail2; goto fail2;
} }
mutex_unlock(&ctrl->dhchap_auth_mutex);
/* DH-HMAC-CHAP Step 3: send reply */ /* DH-HMAC-CHAP Step 3: send reply */
dev_dbg(ctrl->device, "%s: qid %d send reply\n", dev_dbg(ctrl->device, "%s: qid %d send reply\n",
@ -839,6 +838,8 @@ static void nvme_queue_auth_work(struct work_struct *work)
} }
fail2: fail2:
if (chap->status == 0)
chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n", dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
__func__, chap->qid, chap->status); __func__, chap->qid, chap->status);
tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap); tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);

drivers/nvme/host/core.c

@ -482,7 +482,6 @@ EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl) void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{ {
nvme_stop_keep_alive(ctrl);
if (ctrl->admin_tagset) { if (ctrl->admin_tagset) {
blk_mq_tagset_busy_iter(ctrl->admin_tagset, blk_mq_tagset_busy_iter(ctrl->admin_tagset,
nvme_cancel_request, ctrl); nvme_cancel_request, ctrl);
@ -1814,16 +1813,18 @@ set_pi:
return ret; return ret;
} }
static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{ {
struct nvme_ctrl *ctrl = ns->ctrl; struct nvme_ctrl *ctrl = ns->ctrl;
int ret;
if (nvme_init_ms(ns, id)) ret = nvme_init_ms(ns, id);
return; if (ret)
return ret;
ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
return; return 0;
if (ctrl->ops->flags & NVME_F_FABRICS) { if (ctrl->ops->flags & NVME_F_FABRICS) {
/* /*
@ -1832,7 +1833,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
* remap the separate metadata buffer from the block layer. * remap the separate metadata buffer from the block layer.
*/ */
if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
return; return 0;
ns->features |= NVME_NS_EXT_LBAS; ns->features |= NVME_NS_EXT_LBAS;
@ -1859,6 +1860,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
else else
ns->features |= NVME_NS_METADATA_SUPPORTED; ns->features |= NVME_NS_METADATA_SUPPORTED;
} }
return 0;
} }
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
@ -2032,7 +2034,11 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ns->lba_shift = id->lbaf[lbaf].ds; ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue); nvme_set_queue_limits(ns->ctrl, ns->queue);
nvme_configure_metadata(ns, id); ret = nvme_configure_metadata(ns, id);
if (ret < 0) {
blk_mq_unfreeze_queue(ns->disk->queue);
goto out;
}
nvme_set_chunk_sectors(ns, id); nvme_set_chunk_sectors(ns, id);
nvme_update_disk_info(ns->disk, ns, id); nvme_update_disk_info(ns->disk, ns, id);
@ -4348,6 +4354,7 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{ {
nvme_mpath_stop(ctrl); nvme_mpath_stop(ctrl);
nvme_auth_stop(ctrl); nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
nvme_stop_failfast_work(ctrl); nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work); flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fw_act_work); cancel_work_sync(&ctrl->fw_act_work);

drivers/nvme/host/fabrics.c

@ -667,8 +667,10 @@ static const match_table_t opt_tokens = {
#endif #endif
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" }, { NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_DISCOVERY, "discovery" }, { NVMF_OPT_DISCOVERY, "discovery" },
#ifdef CONFIG_NVME_HOST_AUTH
{ NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" }, { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
{ NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" }, { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
#endif
#ifdef CONFIG_NVME_TCP_TLS #ifdef CONFIG_NVME_TCP_TLS
{ NVMF_OPT_TLS, "tls" }, { NVMF_OPT_TLS, "tls" },
#endif #endif

drivers/nvme/host/fc.c

@ -2530,12 +2530,6 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
* clean up the admin queue. Same thing as above. * clean up the admin queue. Same thing as above.
*/ */
nvme_quiesce_admin_queue(&ctrl->ctrl); nvme_quiesce_admin_queue(&ctrl->ctrl);
/*
* Open-coding nvme_cancel_admin_tagset() as fc
* is not using nvme_cancel_request().
*/
nvme_stop_keep_alive(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q); blk_sync_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl); nvme_fc_terminate_exchange, &ctrl->ctrl);
@ -3138,11 +3132,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
nvme_unquiesce_admin_queue(&ctrl->ctrl); nvme_unquiesce_admin_queue(&ctrl->ctrl);
ret = nvme_init_ctrl_finish(&ctrl->ctrl, false); ret = nvme_init_ctrl_finish(&ctrl->ctrl, false);
if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
ret = -EIO;
if (ret) if (ret)
goto out_disconnect_admin_queue; goto out_disconnect_admin_queue;
if (test_bit(ASSOC_FAILED, &ctrl->flags)) {
ret = -EIO;
goto out_stop_keep_alive;
}
/* sanity checks */ /* sanity checks */
/* FC-NVME does not have other data in the capsule */ /* FC-NVME does not have other data in the capsule */
@ -3150,7 +3145,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
ctrl->ctrl.icdoff); ctrl->ctrl.icdoff);
ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto out_disconnect_admin_queue; goto out_stop_keep_alive;
} }
/* FC-NVME supports normal SGL Data Block Descriptors */ /* FC-NVME supports normal SGL Data Block Descriptors */
@ -3158,7 +3153,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
dev_err(ctrl->ctrl.device, dev_err(ctrl->ctrl.device,
"Mandatory sgls are not supported!\n"); "Mandatory sgls are not supported!\n");
ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto out_disconnect_admin_queue; goto out_stop_keep_alive;
} }
if (opts->queue_size > ctrl->ctrl.maxcmd) { if (opts->queue_size > ctrl->ctrl.maxcmd) {
@ -3205,6 +3200,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
out_term_aen_ops: out_term_aen_ops:
nvme_fc_term_aen_ops(ctrl); nvme_fc_term_aen_ops(ctrl);
out_stop_keep_alive:
nvme_stop_keep_alive(&ctrl->ctrl);
out_disconnect_admin_queue: out_disconnect_admin_queue:
dev_warn(ctrl->ctrl.device, dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n", "NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n",

drivers/nvme/host/rdma.c

@ -1080,6 +1080,7 @@ destroy_io:
nvme_rdma_free_io_queues(ctrl); nvme_rdma_free_io_queues(ctrl);
} }
destroy_admin: destroy_admin:
nvme_stop_keep_alive(&ctrl->ctrl);
nvme_quiesce_admin_queue(&ctrl->ctrl); nvme_quiesce_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q); blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]); nvme_rdma_stop_queue(&ctrl->queues[0]);

drivers/nvme/host/tcp.c

@ -36,11 +36,11 @@ static int so_priority;
module_param(so_priority, int, 0644); module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority"); MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
#ifdef CONFIG_NVME_TCP_TLS
/* /*
* TLS handshake timeout * TLS handshake timeout
*/ */
static int tls_handshake_timeout = 10; static int tls_handshake_timeout = 10;
#ifdef CONFIG_NVME_TCP_TLS
module_param(tls_handshake_timeout, int, 0644); module_param(tls_handshake_timeout, int, 0644);
MODULE_PARM_DESC(tls_handshake_timeout, MODULE_PARM_DESC(tls_handshake_timeout,
"nvme TLS handshake timeout in seconds (default 10)"); "nvme TLS handshake timeout in seconds (default 10)");
@ -161,10 +161,8 @@ struct nvme_tcp_queue {
struct ahash_request *snd_hash; struct ahash_request *snd_hash;
__le32 exp_ddgst; __le32 exp_ddgst;
__le32 recv_ddgst; __le32 recv_ddgst;
#ifdef CONFIG_NVME_TCP_TLS
struct completion tls_complete; struct completion tls_complete;
int tls_err; int tls_err;
#endif
struct page_frag_cache pf_cache; struct page_frag_cache pf_cache;
void (*state_change)(struct sock *); void (*state_change)(struct sock *);
@ -207,6 +205,14 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
return queue - queue->ctrl->queues; return queue - queue->ctrl->queues;
} }
static inline bool nvme_tcp_tls(struct nvme_ctrl *ctrl)
{
if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
return 0;
return ctrl->opts->tls;
}
static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{ {
u32 queue_idx = nvme_tcp_queue_id(queue); u32 queue_idx = nvme_tcp_queue_id(queue);
@ -1412,7 +1418,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
memset(&msg, 0, sizeof(msg)); memset(&msg, 0, sizeof(msg));
iov.iov_base = icresp; iov.iov_base = icresp;
iov.iov_len = sizeof(*icresp); iov.iov_len = sizeof(*icresp);
if (queue->ctrl->ctrl.opts->tls) { if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
msg.msg_control = cbuf; msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf); msg.msg_controllen = sizeof(cbuf);
} }
@ -1424,7 +1430,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
goto free_icresp; goto free_icresp;
} }
ret = -ENOTCONN; ret = -ENOTCONN;
if (queue->ctrl->ctrl.opts->tls) { if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
ctype = tls_get_record_type(queue->sock->sk, ctype = tls_get_record_type(queue->sock->sk,
(struct cmsghdr *)cbuf); (struct cmsghdr *)cbuf);
if (ctype != TLS_RECORD_TYPE_DATA) { if (ctype != TLS_RECORD_TYPE_DATA) {
@ -1548,7 +1554,6 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
} }
#ifdef CONFIG_NVME_TCP_TLS
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid) static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
{ {
struct nvme_tcp_queue *queue = data; struct nvme_tcp_queue *queue = data;
@ -1625,14 +1630,6 @@ static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
} }
return ret; return ret;
} }
#else
static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
struct nvme_tcp_queue *queue,
key_serial_t pskid)
{
return -EPROTONOSUPPORT;
}
#endif
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
key_serial_t pskid) key_serial_t pskid)
@ -1759,7 +1756,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
} }
/* If PSKs are configured try to start TLS */ /* If PSKs are configured try to start TLS */
if (pskid) { if (IS_ENABLED(CONFIG_NVME_TCP_TLS) && pskid) {
ret = nvme_tcp_start_tls(nctrl, queue, pskid); ret = nvme_tcp_start_tls(nctrl, queue, pskid);
if (ret) if (ret)
goto err_init_connect; goto err_init_connect;
@ -1916,7 +1913,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
int ret; int ret;
key_serial_t pskid = 0; key_serial_t pskid = 0;
if (ctrl->opts->tls) { if (nvme_tcp_tls(ctrl)) {
if (ctrl->opts->tls_key) if (ctrl->opts->tls_key)
pskid = key_serial(ctrl->opts->tls_key); pskid = key_serial(ctrl->opts->tls_key);
else else
@ -1949,7 +1946,7 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{ {
int i, ret; int i, ret;
if (ctrl->opts->tls && !ctrl->tls_key) { if (nvme_tcp_tls(ctrl) && !ctrl->tls_key) {
dev_err(ctrl->device, "no PSK negotiated\n"); dev_err(ctrl->device, "no PSK negotiated\n");
return -ENOKEY; return -ENOKEY;
} }
@ -2237,6 +2234,7 @@ destroy_io:
nvme_tcp_destroy_io_queues(ctrl, new); nvme_tcp_destroy_io_queues(ctrl, new);
} }
destroy_admin: destroy_admin:
nvme_stop_keep_alive(ctrl);
nvme_tcp_teardown_admin_queue(ctrl, false); nvme_tcp_teardown_admin_queue(ctrl, false);
return ret; return ret;
} }
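
Several hunks above replace direct ctrl->opts->tls tests and #ifdef CONFIG_NVME_TCP_TLS regions with a small nvme_tcp_tls() helper that checks IS_ENABLED(CONFIG_NVME_TCP_TLS) first, so the TLS-only branches are always compiled and type-checked but folded away as dead code when the option is off. A standalone C sketch of the same constant-folding trick, with CONFIG_DEMO_TLS standing in for the Kconfig symbol:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the Kconfig-generated symbol; flip to 1 to "enable" TLS. */
enum { CONFIG_DEMO_TLS = 0 };

struct ctrl_opts { bool tls; };

static inline bool demo_tls(const struct ctrl_opts *opts)
{
	if (!CONFIG_DEMO_TLS)
		return false;	/* constant-folds; TLS-only callers become dead code */
	return opts->tls;
}

int main(void)
{
	struct ctrl_opts opts = { .tls = true };

	printf("tls in effect: %d\n", demo_tls(&opts));
	return 0;
}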

drivers/nvme/target/Kconfig

@ -4,6 +4,8 @@ config NVME_TARGET
tristate "NVMe Target support" tristate "NVMe Target support"
depends on BLOCK depends on BLOCK
depends on CONFIGFS_FS depends on CONFIGFS_FS
select NVME_KEYRING if NVME_TARGET_TCP_TLS
select KEYS if NVME_TARGET_TCP_TLS
select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
select SGL_ALLOC select SGL_ALLOC
help help
@ -87,9 +89,7 @@ config NVME_TARGET_TCP
config NVME_TARGET_TCP_TLS config NVME_TARGET_TCP_TLS
bool "NVMe over Fabrics TCP target TLS encryption support" bool "NVMe over Fabrics TCP target TLS encryption support"
depends on NVME_TARGET_TCP depends on NVME_TARGET_TCP
select NVME_KEYRING
select NET_HANDSHAKE select NET_HANDSHAKE
select KEYS
help help
Enables TLS encryption for the NVMe TCP target using the netlink handshake API. Enables TLS encryption for the NVMe TCP target using the netlink handshake API.

drivers/nvme/target/configfs.c

@ -1893,7 +1893,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
if (nvme_keyring_id()) { if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS) && nvme_keyring_id()) {
port->keyring = key_lookup(nvme_keyring_id()); port->keyring = key_lookup(nvme_keyring_id());
if (IS_ERR(port->keyring)) { if (IS_ERR(port->keyring)) {
pr_warn("NVMe keyring not available, disabling TLS\n"); pr_warn("NVMe keyring not available, disabling TLS\n");

drivers/nvme/target/fabrics-cmd.c

@ -244,6 +244,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out; goto out;
} }
d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req, status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
le32_to_cpu(c->kato), &ctrl); le32_to_cpu(c->kato), &ctrl);
if (status) if (status)
@ -313,6 +315,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out; goto out;
} }
d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn, ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
le16_to_cpu(d->cntlid), req); le16_to_cpu(d->cntlid), req);
if (!ctrl) { if (!ctrl) {
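
The fabrics-cmd change above forces a trailing NUL onto the subsysnqn and hostnqn fields taken from the connect command, since a fixed-width field arriving off the wire is not guaranteed to be terminated before it is used as a string. A small illustration of the same defensive step (NQN_FIELD_LEN and handle_connect() are invented for the example; the real code uses NVMF_NQN_FIELD_LEN):

#include <stdio.h>
#include <string.h>

#define NQN_FIELD_LEN 32	/* illustrative size only */

/* Treat a possibly unterminated wire field as a C string only after
 * forcing a terminator, as the nvmet fix does for the NQNs. */
static void handle_connect(const unsigned char *wire)
{
	char subsysnqn[NQN_FIELD_LEN];

	memcpy(subsysnqn, wire, sizeof(subsysnqn));
	subsysnqn[sizeof(subsysnqn) - 1] = '\0';
	printf("connect request for %s\n", subsysnqn);
}

int main(void)
{
	unsigned char wire[NQN_FIELD_LEN];

	memset(wire, 'A', sizeof(wire));	/* no NUL anywhere in the field */
	handle_connect(wire);
	return 0;
}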

drivers/nvme/target/tcp.c

@ -1854,6 +1854,8 @@ static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
} }
return ret; return ret;
} }
#else
static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {}
#endif #endif
static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
@ -1911,9 +1913,9 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list); list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
mutex_unlock(&nvmet_tcp_queue_mutex); mutex_unlock(&nvmet_tcp_queue_mutex);
#ifdef CONFIG_NVME_TARGET_TCP_TLS
INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work, INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
nvmet_tcp_tls_handshake_timeout); nvmet_tcp_tls_handshake_timeout);
#ifdef CONFIG_NVME_TARGET_TCP_TLS
if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
struct sock *sk = queue->sock->sk; struct sock *sk = queue->sock->sk;

drivers/s390/block/dasd.c

@ -676,18 +676,20 @@ static void dasd_profile_start(struct dasd_block *block,
* we count each request only once. * we count each request only once.
*/ */
device = cqr->startdev; device = cqr->startdev;
if (device->profile.data) { if (!device->profile.data)
counter = 1; /* request is not yet queued on the start device */ return;
list_for_each(l, &device->ccw_queue)
if (++counter >= 31) spin_lock(get_ccwdev_lock(device->cdev));
break; counter = 1; /* request is not yet queued on the start device */
} list_for_each(l, &device->ccw_queue)
if (++counter >= 31)
break;
spin_unlock(get_ccwdev_lock(device->cdev));
spin_lock(&device->profile.lock); spin_lock(&device->profile.lock);
if (device->profile.data) { device->profile.data->dasd_io_nr_req[counter]++;
device->profile.data->dasd_io_nr_req[counter]++; if (rq_data_dir(req) == READ)
if (rq_data_dir(req) == READ) device->profile.data->dasd_read_nr_req[counter]++;
device->profile.data->dasd_read_nr_req[counter]++;
}
spin_unlock(&device->profile.lock); spin_unlock(&device->profile.lock);
} }

drivers/s390/block/dasd_eckd.h

@ -283,7 +283,7 @@ struct dasd_pprc_dev_info {
__u8 secondary; /* 7 Secondary device address */ __u8 secondary; /* 7 Secondary device address */
__u16 pprc_id; /* 8-9 Peer-to-Peer Remote Copy ID */ __u16 pprc_id; /* 8-9 Peer-to-Peer Remote Copy ID */
__u8 reserved2[12]; /* 10-21 reserved */ __u8 reserved2[12]; /* 10-21 reserved */
__u16 prim_cu_ssid; /* 22-23 Pimary Control Unit SSID */ __u16 prim_cu_ssid; /* 22-23 Primary Control Unit SSID */
__u8 reserved3[12]; /* 24-35 reserved */ __u8 reserved3[12]; /* 24-35 reserved */
__u16 sec_cu_ssid; /* 36-37 Secondary Control Unit SSID */ __u16 sec_cu_ssid; /* 36-37 Secondary Control Unit SSID */
__u8 reserved4[90]; /* 38-127 reserved */ __u8 reserved4[90]; /* 38-127 reserved */

include/linux/blk-pm.h

@ -15,7 +15,6 @@ extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err); extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q); extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q); extern void blk_post_runtime_resume(struct request_queue *q);
extern void blk_set_runtime_active(struct request_queue *q);
#else #else
static inline void blk_pm_runtime_init(struct request_queue *q, static inline void blk_pm_runtime_init(struct request_queue *q,
struct device *dev) {} struct device *dev) {}