Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c | 171
1 file changed, 92 insertions(+), 79 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7d52a11e1b61..532304d42f00 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -53,49 +53,16 @@
 #endif
 
 static struct kmem_cache *scsi_sense_cache;
-static struct kmem_cache *scsi_sense_isadma_cache;
 static DEFINE_MUTEX(scsi_sense_cache_mutex);
 
 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);
 
-static inline struct kmem_cache *
-scsi_select_sense_cache(bool unchecked_isa_dma)
-{
-	return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
-}
-
-static void scsi_free_sense_buffer(bool unchecked_isa_dma,
-				   unsigned char *sense_buffer)
-{
-	kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
-			sense_buffer);
-}
-
-static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
-	gfp_t gfp_mask, int numa_node)
-{
-	return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
-				     gfp_mask, numa_node);
-}
-
 int scsi_init_sense_cache(struct Scsi_Host *shost)
 {
-	struct kmem_cache *cache;
 	int ret = 0;
 
 	mutex_lock(&scsi_sense_cache_mutex);
-	cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
-	if (cache)
-		goto exit;
-
-	if (shost->unchecked_isa_dma) {
-		scsi_sense_isadma_cache =
-			kmem_cache_create("scsi_sense_cache(DMA)",
-				SCSI_SENSE_BUFFERSIZE, 0,
-				SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
-		if (!scsi_sense_isadma_cache)
-			ret = -ENOMEM;
-	} else {
+	if (!scsi_sense_cache) {
 		scsi_sense_cache =
 			kmem_cache_create_usercopy("scsi_sense_cache",
 				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
@@ -103,7 +70,6 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
 		if (!scsi_sense_cache)
 			ret = -ENOMEM;
 	}
-exit:
 	mutex_unlock(&scsi_sense_cache_mutex);
 	return ret;
 }
@@ -328,7 +294,8 @@ void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
 	if (starget->can_queue > 0)
 		atomic_dec(&starget->target_busy);
 
-	atomic_dec(&sdev->device_busy);
+	sbitmap_put(&sdev->budget_map, cmd->budget_token);
+	cmd->budget_token = -1;
 }
 
 static void scsi_kick_queue(struct request_queue *q)
@@ -384,7 +351,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
 
 static inline bool scsi_device_is_busy(struct scsi_device *sdev)
 {
-	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
+	if (scsi_device_busy(sdev) >= sdev->queue_depth)
 		return true;
 	if (atomic_read(&sdev->device_blocked) > 0)
 		return true;
@@ -998,8 +965,11 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
 }
 
 /**
- * scsi_alloc_sgtables - allocate S/G tables for a command
- * @cmd: command descriptor we wish to initialize
+ * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists
+ * @cmd: SCSI command data structure to initialize.
+ *
+ * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled
+ * for @cmd.
  *
  * Returns:
  * * BLK_STS_OK       - on success
@@ -1143,6 +1113,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
 	unsigned long jiffies_at_alloc;
 	int retries, to_clear;
 	bool in_flight;
+	int budget_token = cmd->budget_token;
 
 	if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
 		flags |= SCMD_INITIALIZED;
@@ -1171,6 +1142,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
 	cmd->retries = retries;
 	if (in_flight)
 		__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+	cmd->budget_token = budget_token;
 
 }
 
@@ -1254,19 +1226,20 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req)
 }
 
 /*
- * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
- * return 0.
- *
- * Called with the queue_lock held.
+ * scsi_dev_queue_ready: if we can send requests to sdev, assign one token
+ * and return the token else return -1.
  */
 static inline int scsi_dev_queue_ready(struct request_queue *q,
 				  struct scsi_device *sdev)
 {
-	unsigned int busy;
+	int token;
 
-	busy = atomic_inc_return(&sdev->device_busy) - 1;
+	token = sbitmap_get(&sdev->budget_map);
 	if (atomic_read(&sdev->device_blocked)) {
-		if (busy)
+		if (token < 0)
+			goto out;
+
+		if (scsi_device_busy(sdev) > 1)
 			goto out_dec;
 
 		/*
@@ -1278,13 +1251,12 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
 				   "unblocking device at zero depth\n"));
 	}
 
-	if (busy >= sdev->queue_depth)
-		goto out_dec;
-
-	return 1;
+	return token;
 out_dec:
-	atomic_dec(&sdev->device_busy);
-	return 0;
+	if (token >= 0)
+		sbitmap_put(&sdev->budget_map, token);
+out:
+	return -1;
 }
 
 /*
@@ -1428,10 +1400,14 @@ static bool scsi_mq_lld_busy(struct request_queue *q)
 	return false;
 }
 
-static void scsi_softirq_done(struct request *rq)
+/*
+ * Block layer request completion callback. May be called from interrupt
+ * context.
+ */
+static void scsi_complete(struct request *rq)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
-	int disposition;
+	enum scsi_disposition disposition;
 
 	INIT_LIST_HEAD(&cmd->eh_entry);
 
@@ -1605,19 +1581,20 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
 	blk_mq_complete_request(cmd->request);
 }
 
-static void scsi_mq_put_budget(struct request_queue *q)
+static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
 {
 	struct scsi_device *sdev = q->queuedata;
 
-	atomic_dec(&sdev->device_busy);
+	sbitmap_put(&sdev->budget_map, budget_token);
 }
 
-static bool scsi_mq_get_budget(struct request_queue *q)
+static int scsi_mq_get_budget(struct request_queue *q)
 {
 	struct scsi_device *sdev = q->queuedata;
+	int token = scsi_dev_queue_ready(q, sdev);
 
-	if (scsi_dev_queue_ready(q, sdev))
-		return true;
+	if (token >= 0)
+		return token;
 
 	atomic_inc(&sdev->restarts);
 
@@ -1636,10 +1613,24 @@ static bool scsi_mq_get_budget(struct request_queue *q)
 	 * the .restarts flag, and the request queue will be run for handling
 	 * this request, see scsi_end_request().
 	 */
-	if (unlikely(atomic_read(&sdev->device_busy) == 0 &&
+	if (unlikely(scsi_device_busy(sdev) == 0 &&
 				!scsi_device_blocked(sdev)))
 		blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
-	return false;
+	return -1;
+}
+
+static void scsi_mq_set_rq_budget_token(struct request *req, int token)
+{
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+
+	cmd->budget_token = token;
+}
+
+static int scsi_mq_get_rq_budget_token(struct request *req)
+{
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+
+	return cmd->budget_token;
 }
 
 static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1653,6 +1644,8 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_status_t ret;
 	int reason;
 
+	WARN_ON_ONCE(cmd->budget_token < 0);
+
 	/*
 	 * If the device is not in running state we will reject some or all
 	 * commands.
@@ -1704,7 +1697,8 @@ out_dec_target_busy:
 	if (scsi_target(sdev)->can_queue > 0)
 		atomic_dec(&scsi_target(sdev)->target_busy);
 out_put_budget:
-	scsi_mq_put_budget(q);
+	scsi_mq_put_budget(q, cmd->budget_token);
+	cmd->budget_token = -1;
 	switch (ret) {
 	case BLK_STS_OK:
 		break;
@@ -1748,15 +1742,12 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 				unsigned int hctx_idx, unsigned int numa_node)
 {
 	struct Scsi_Host *shost = set->driver_data;
-	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 	struct scatterlist *sg;
 	int ret = 0;
 
-	if (unchecked_isa_dma)
-		cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
-	cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
-						    GFP_KERNEL, numa_node);
+	cmd->sense_buffer =
+		kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
 	if (!cmd->sense_buffer)
 		return -ENOMEM;
 	cmd->req.sense = cmd->sense_buffer;
@@ -1770,8 +1761,7 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	if (shost->hostt->init_cmd_priv) {
 		ret = shost->hostt->init_cmd_priv(shost, cmd);
 		if (ret < 0)
-			scsi_free_sense_buffer(unchecked_isa_dma,
-					       cmd->sense_buffer);
+			kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
 	}
 
 	return ret;
@@ -1785,8 +1775,27 @@ static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
 
 	if (shost->hostt->exit_cmd_priv)
 		shost->hostt->exit_cmd_priv(shost, cmd);
-	scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
-			       cmd->sense_buffer);
+	kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
+}
+
+
+static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx)
+{
+	struct Scsi_Host *shost = hctx->driver_data;
+
+	if (shost->hostt->mq_poll)
+		return shost->hostt->mq_poll(shost, hctx->queue_num);
+
+	return 0;
+}
+
+static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+			  unsigned int hctx_idx)
+{
+	struct Scsi_Host *shost = data;
+
+	hctx->driver_data = shost;
+	return 0;
 }
 
 static int scsi_map_queues(struct blk_mq_tag_set *set)
@@ -1821,8 +1830,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 				dma_max_mapping_size(dev) >> SECTOR_SHIFT);
 	}
 	blk_queue_max_hw_sectors(q, shost->max_sectors);
-	if (shost->unchecked_isa_dma)
-		blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 	blk_queue_segment_boundary(q, shost->dma_boundary);
 	dma_set_seg_boundary(dev, shost->dma_boundary);
 
@@ -1845,7 +1852,7 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = {
 	.get_budget	= scsi_mq_get_budget,
 	.put_budget	= scsi_mq_put_budget,
 	.queue_rq	= scsi_queue_rq,
-	.complete	= scsi_softirq_done,
+	.complete	= scsi_complete,
 	.timeout	= scsi_timeout,
 #ifdef CONFIG_BLK_DEBUG_FS
 	.show_rq	= scsi_show_rq,
@@ -1856,14 +1863,16 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = {
 	.cleanup_rq	= scsi_cleanup_rq,
 	.busy		= scsi_mq_lld_busy,
 	.map_queues	= scsi_map_queues,
+	.init_hctx	= scsi_init_hctx,
+	.poll		= scsi_mq_poll,
+	.set_rq_budget_token = scsi_mq_set_rq_budget_token,
+	.get_rq_budget_token = scsi_mq_get_rq_budget_token,
 };
 
 static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
 {
-	struct request_queue *q = hctx->queue;
-	struct scsi_device *sdev = q->queuedata;
-	struct Scsi_Host *shost = sdev->host;
+	struct Scsi_Host *shost = hctx->driver_data;
 
 	shost->hostt->commit_rqs(shost, hctx->queue_num);
 }
@@ -1873,7 +1882,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
 	.put_budget	= scsi_mq_put_budget,
 	.queue_rq	= scsi_queue_rq,
 	.commit_rqs	= scsi_commit_rqs,
-	.complete	= scsi_softirq_done,
+	.complete	= scsi_complete,
 	.timeout	= scsi_timeout,
 #ifdef CONFIG_BLK_DEBUG_FS
 	.show_rq	= scsi_show_rq,
@@ -1884,6 +1893,10 @@ static const struct blk_mq_ops scsi_mq_ops = {
 	.cleanup_rq	= scsi_cleanup_rq,
 	.busy		= scsi_mq_lld_busy,
 	.map_queues	= scsi_map_queues,
+	.init_hctx	= scsi_init_hctx,
+	.poll		= scsi_mq_poll,
+	.set_rq_budget_token = scsi_mq_set_rq_budget_token,
+	.get_rq_budget_token = scsi_mq_get_rq_budget_token,
 };
 
 struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
@@ -1916,6 +1929,7 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
 	else
 		tag_set->ops = &scsi_mq_ops_no_commit;
 	tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
+	tag_set->nr_maps = shost->nr_maps ? : 1;
 	tag_set->queue_depth = shost->can_queue;
 	tag_set->cmd_size = cmd_size;
 	tag_set->numa_node = NUMA_NO_NODE;
@@ -1988,7 +2002,6 @@ EXPORT_SYMBOL(scsi_unblock_requests);
 void scsi_exit_queue(void)
 {
 	kmem_cache_destroy(scsi_sense_cache);
-	kmem_cache_destroy(scsi_sense_isadma_cache);
 }
 
 /**
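
The central change above is worth spelling out: the per-device in-flight counter (an atomic_t incremented optimistically and decremented on failure) is replaced by a pool of budget tokens allocated from sdev->budget_map, a struct sbitmap sized to the queue depth. A request only reaches ->queue_rq once it holds a token, and the token rides along in cmd->budget_token until completion. Below is a minimal userspace model of that get/put pattern, assuming C11 atomics and GCC/Clang builtins; budget_get(), budget_put(), and QUEUE_DEPTH are illustrative stand-ins, not the kernel's sbitmap API, which shards the bitmap across cache lines to avoid the contention this single word would reintroduce.

/* budget_model.c -- userspace model of the sbitmap budget tokens above.
 * Illustrative sketch only; build with: cc -std=c11 budget_model.c
 */
#include <stdatomic.h>
#include <stdio.h>

#define QUEUE_DEPTH 32			/* stand-in for sdev->queue_depth */
#define FULL_MASK   (~0u >> (32 - QUEUE_DEPTH))

static atomic_uint budget_map;		/* bit N set => token N in use */

/* Like sbitmap_get(): claim a free token, or -1 when the device is full. */
static int budget_get(void)
{
	unsigned int old = atomic_load(&budget_map);

	for (;;) {
		if (old == FULL_MASK)
			return -1;	/* queue depth reached: no budget */
		int bit = __builtin_ctz(~old);	/* lowest clear bit */
		if (atomic_compare_exchange_weak(&budget_map, &old,
						 old | (1u << bit)))
			return bit;	/* the token is the bit index */
		/* CAS failed: 'old' has been reloaded, retry */
	}
}

/* Like sbitmap_put(): hand the token back on completion. */
static void budget_put(int token)
{
	atomic_fetch_and(&budget_map, ~(1u << token));
}

/* Like scsi_device_busy(): number of tokens currently handed out. */
static int device_busy(void)
{
	return __builtin_popcount(atomic_load(&budget_map));
}

int main(void)
{
	int t1 = budget_get();
	int t2 = budget_get();

	printf("tokens %d and %d, busy=%d\n", t1, t2, device_busy());
	budget_put(t2);
	budget_put(t1);
	printf("busy after put: %d\n", device_busy());
	return 0;
}

One design consequence falls out for free: the depth check scsi_dev_queue_ready used to do by hand (busy >= sdev->queue_depth) disappears, because once every bit is set the bitmap simply refuses to hand out another token.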
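The new scsi_mq_poll() also illustrates the optional-callback convention used throughout the blk_mq_ops tables: scsi_init_hctx() stashes the Scsi_Host in hctx->driver_data once, and each hook checks whether the host template supplies an implementation before forwarding to it. A standalone sketch of that convention follows, with hypothetical names (host_template, core_poll, my_driver_poll) assumed purely for illustration:

/* poll_hook.c -- the optional-callback pattern, modeled in plain C. */
#include <stdio.h>
#include <stddef.h>

struct host_template {
	/* NULL means the driver has no poll support. */
	int (*mq_poll)(void *host, unsigned int queue_num);
};

/* Mirrors scsi_mq_poll(): call the hook if present, else report
 * "nothing completed". */
static int core_poll(const struct host_template *hostt, void *host,
		     unsigned int queue_num)
{
	if (hostt->mq_poll)
		return hostt->mq_poll(host, queue_num);
	return 0;
}

static int my_driver_poll(void *host, unsigned int queue_num)
{
	(void)host;
	printf("polled hw queue %u\n", queue_num);
	return 1;		/* pretend one command completed */
}

int main(void)
{
	struct host_template polling = { .mq_poll = my_driver_poll };
	struct host_template plain = { 0 };

	printf("polling driver: %d, plain driver: %d\n",
	       core_poll(&polling, NULL, 0), core_poll(&plain, NULL, 0));
	return 0;
}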