Diffstat (limited to 'block')
-rw-r--r-- | block/bfq-cgroup.c   |  4
-rw-r--r-- | block/bfq-iosched.h  |  4
-rw-r--r-- | block/bio.c          |  2
-rw-r--r-- | block/blk-cgroup.c   |  2
-rw-r--r-- | block/blk-core.c     |  1
-rw-r--r-- | block/blk-mq.c       | 19
-rw-r--r-- | block/blk-settings.c |  9
-rw-r--r-- | block/blk.h          |  1
-rw-r--r-- | block/genhd.c        | 13
-rw-r--r-- | block/sed-opal.c     | 32
10 files changed, 62 insertions, 25 deletions
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 144bca006463..7d624a3a3f0f 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -610,6 +610,10 @@ struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
 	struct bfq_group *bfqg;
 
 	while (blkg) {
+		if (!blkg->online) {
+			blkg = blkg->parent;
+			continue;
+		}
 		bfqg = blkg_to_bfqg(blkg);
 		if (bfqg->online) {
 			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 64ee618064ba..71f721670ab6 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -369,12 +369,8 @@ struct bfq_queue {
 	unsigned long split_time; /* time of last split */
 
 	unsigned long first_IO_time; /* time of first I/O for this queue */
-	unsigned long creation_time; /* when this queue is created */
-	/* max service rate measured so far */
-	u32 max_service_rate;
-
 	/*
 	 * Pointer to the waker queue for this queue, i.e., to the
 	 * queue Q such that this queue happens to get new I/O right
diff --git a/block/bio.c b/block/bio.c
index 633a902468ec..57c2f327225b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -741,7 +741,7 @@ void bio_put(struct bio *bio)
 			return;
 	}
 
-	if (bio->bi_opf & REQ_ALLOC_CACHE) {
+	if ((bio->bi_opf & REQ_ALLOC_CACHE) && !WARN_ON_ONCE(in_interrupt())) {
 		struct bio_alloc_cache *cache;
 
 		bio_uninit(bio);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 6a5c849ee061..ed761c62ad0a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1213,7 +1213,7 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
 	 * parent so that offline always happens towards the root.
 	 */
 	if (parent)
-		blkcg_pin_online(css);
+		blkcg_pin_online(&parent->css);
 
 	return 0;
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index 17667159482e..5487912befe8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -425,7 +425,6 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
 				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
 		goto fail_stats;
 
-	blk_queue_dma_alignment(q, 511);
 	blk_set_default_limits(&q->limits);
 	q->nr_requests = BLKDEV_DEFAULT_RQ;
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8070b6c10e8d..6a789cda68a5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -611,6 +611,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 		.nr_tags	= 1,
 	};
 	u64 alloc_time_ns = 0;
+	struct request *rq;
 	unsigned int cpu;
 	unsigned int tag;
 	int ret;
@@ -660,8 +661,12 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	tag = blk_mq_get_tag(&data);
 	if (tag == BLK_MQ_NO_TAG)
 		goto out_queue_exit;
-	return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
+	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
 					alloc_time_ns);
+	rq->__data_len = 0;
+	rq->__sector = (sector_t) -1;
+	rq->bio = rq->biotail = NULL;
+	return rq;
 
 out_queue_exit:
 	blk_queue_exit(q);
@@ -1257,6 +1262,7 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 			(!blk_queue_nomerges(rq->q) &&
 			blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
 		blk_mq_flush_plug_list(plug, false);
+		last = NULL;
 		trace_block_plug(rq->q);
 	}
 
@@ -3112,8 +3118,11 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
 	struct page *page;
 	unsigned long flags;
 
-	/* There is no need to clear a driver tags own mapping */
-	if (drv_tags == tags)
+	/*
+	 * There is no need to clear mapping if driver tags is not initialized
+	 * or the mapping belongs to the driver tags.
+	 */
+	if (!drv_tags || drv_tags == tags)
 		return;
 
 	list_for_each_entry(page, &tags->page_list, lru) {
@@ -4185,9 +4194,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	return 0;
 
 err_hctxs:
-	xa_destroy(&q->hctx_table);
-	q->nr_hw_queues = 0;
-	blk_mq_sysfs_deinit(q);
+	blk_mq_release(q);
 err_poll:
 	blk_stat_free_callback(q->poll_cb);
 	q->poll_cb = NULL;
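The bfq-cgroup.c hunk above makes bfq_bio_bfqg() skip blkcg groups that have gone offline by walking up to the nearest online ancestor before associating the bio. A minimal userspace sketch of that parent-walk pattern, with a toy node type standing in for the kernel's blkg hierarchy (all names here are illustrative, not kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for a blkg-style hierarchy node. */
struct node {
	struct node *parent;	/* NULL at the root */
	bool online;
};

/*
 * Mirror of the loop added in bfq_bio_bfqg(): if the current node is
 * going offline, retry with its parent instead of using stale state.
 */
static struct node *nearest_online(struct node *n)
{
	while (n) {
		if (!n->online) {
			n = n->parent;
			continue;
		}
		return n;	/* first online node on the path to the root */
	}
	return NULL;
}

int main(void)
{
	struct node root  = { .parent = NULL,  .online = true };
	struct node child = { .parent = &root, .online = false };	/* mid-teardown */

	/* The offline child is skipped and the walk resolves to the root. */
	printf("resolved root? %s\n",
	       nearest_online(&child) == &root ? "yes" : "no");
	return 0;
}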
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 8bb9eef5310e..8ac1038d0c79 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -57,8 +57,8 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->misaligned = 0;
 	lim->zoned = BLK_ZONED_NONE;
 	lim->zone_write_granularity = 0;
+	lim->dma_alignment = 511;
 }
-EXPORT_SYMBOL(blk_set_default_limits);
 
 /**
  * blk_set_stacking_limits - set default limits for stacking devices
@@ -600,6 +600,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
+	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
 
 	/* Set non-power-of-2 compatible chunk_sectors boundary */
 	if (b->chunk_sectors)
@@ -773,7 +774,7 @@ EXPORT_SYMBOL(blk_queue_virt_boundary);
  **/
 void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
-	q->dma_alignment = mask;
+	q->limits.dma_alignment = mask;
 }
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
@@ -795,8 +796,8 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 {
 	BUG_ON(mask > PAGE_SIZE);
 
-	if (mask > q->dma_alignment)
-		q->dma_alignment = mask;
+	if (mask > q->limits.dma_alignment)
+		q->limits.dma_alignment = mask;
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
diff --git a/block/blk.h b/block/blk.h
index d6ea0d1a6db0..a186ea20f39d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -331,6 +331,7 @@ void blk_rq_set_mixed_merge(struct request *rq);
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
 
+void blk_set_default_limits(struct queue_limits *lim);
 int blk_dev_init(void);
 
 /*
diff --git a/block/genhd.c b/block/genhd.c
index 17b33c62423d..0f9769db2de8 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -410,9 +410,10 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
 	 * Otherwise just allocate the device numbers for both the whole device
 	 * and all partitions from the extended dev_t space.
 	 */
+	ret = -EINVAL;
 	if (disk->major) {
 		if (WARN_ON(!disk->minors))
-			return -EINVAL;
+			goto out_exit_elevator;
 
 		if (disk->minors > DISK_MAX_PARTS) {
 			pr_err("block: can't allocate more than %d partitions\n",
@@ -420,14 +421,14 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
 			disk->minors = DISK_MAX_PARTS;
 		}
 
 		if (disk->first_minor + disk->minors > MINORMASK + 1)
-			return -EINVAL;
+			goto out_exit_elevator;
 	} else {
 		if (WARN_ON(disk->minors))
-			return -EINVAL;
+			goto out_exit_elevator;
 
 		ret = blk_alloc_ext_minor();
 		if (ret < 0)
-			return ret;
+			goto out_exit_elevator;
 		disk->major = BLOCK_EXT_MAJOR;
 		disk->first_minor = ret;
 	}
@@ -526,6 +527,7 @@ out_unregister_bdi:
 		bdi_unregister(disk->bdi);
out_unregister_queue:
 	blk_unregister_queue(disk);
+	rq_qos_exit(disk->queue);
out_put_slave_dir:
 	kobject_put(disk->slave_dir);
out_put_holder_dir:
@@ -540,6 +542,9 @@ out_device_del:
out_free_ext_minor:
 	if (disk->major == BLOCK_EXT_MAJOR)
 		blk_free_ext_minor(disk->first_minor);
+out_exit_elevator:
+	if (disk->queue->elevator)
+		elevator_exit(disk->queue);
 	return ret;
 }
 EXPORT_SYMBOL(device_add_disk);
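The blk-settings.c hunks move dma_alignment into struct queue_limits so that blk_stack_limits() can combine it like the other limits when devices are stacked. The value is an alignment mask (511 for 512-byte alignment), so for the usual power-of-two-minus-one masks taking the max of the two yields the stricter requirement. A quick userspace sketch of that stacking rule (toy struct, not the kernel's queue_limits):

#include <stdio.h>

/*
 * Toy subset of queue_limits: dma_alignment is a mask, i.e.
 * (alignment - 1) for power-of-two alignments.
 */
struct limits {
	unsigned int dma_alignment;
};

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/*
 * Mirrors the line added to blk_stack_limits(): the stacked (top)
 * device must honour the stricter of its own mask and the bottom
 * device's mask.
 */
static void stack(struct limits *t, const struct limits *b)
{
	t->dma_alignment = max_u(t->dma_alignment, b->dma_alignment);
}

int main(void)
{
	struct limits top    = { .dma_alignment = 3 };		/* 4-byte alignment */
	struct limits bottom = { .dma_alignment = 511 };	/* 512-byte alignment */

	stack(&top, &bottom);
	printf("stacked mask: %u\n", top.dma_alignment);	/* prints 511 */
	return 0;
}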
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 2c5327a0543a..9bdb833e5817 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -87,8 +87,8 @@ struct opal_dev {
 	u64 lowest_lba;
 
 	size_t pos;
-	u8 cmd[IO_BUFFER_LENGTH];
-	u8 resp[IO_BUFFER_LENGTH];
+	u8 *cmd;
+	u8 *resp;
 
 	struct parsed_resp parsed;
 	size_t prev_d_len;
@@ -2175,6 +2175,8 @@ void free_opal_dev(struct opal_dev *dev)
 		return;
 
 	clean_opal_dev(dev);
+	kfree(dev->resp);
+	kfree(dev->cmd);
 	kfree(dev);
 }
 EXPORT_SYMBOL(free_opal_dev);
@@ -2187,6 +2189,18 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv)
 	if (!dev)
 		return NULL;
 
+	/*
+	 * Presumably DMA-able buffers must be cache-aligned. Kmalloc makes
+	 * sure the allocated buffer is DMA-safe in that regard.
+	 */
+	dev->cmd = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
+	if (!dev->cmd)
+		goto err_free_dev;
+
+	dev->resp = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
+	if (!dev->resp)
+		goto err_free_cmd;
+
 	INIT_LIST_HEAD(&dev->unlk_lst);
 	mutex_init(&dev->dev_lock);
 	dev->flags = 0;
@@ -2194,11 +2208,21 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv)
 	dev->send_recv = send_recv;
 	if (check_opal_support(dev) != 0) {
 		pr_debug("Opal is not supported on this device\n");
-		kfree(dev);
-		return NULL;
+		goto err_free_resp;
 	}
 
 	return dev;
+
+err_free_resp:
+	kfree(dev->resp);
+
+err_free_cmd:
+	kfree(dev->cmd);
+
+err_free_dev:
+	kfree(dev);
+
+	return NULL;
 }
 EXPORT_SYMBOL(init_opal_dev);
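The sed-opal.c change converts the embedded cmd/resp arrays into kmalloc'ed buffers, which forces the paired cleanup seen above: a labelled unwind in init_opal_dev() and matching kfree() calls in free_opal_dev(). A userspace sketch of that allocate/unwind/free pairing, using malloc in place of kmalloc (the dev type, BUF_LEN, and function names are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define BUF_LEN 2048	/* stand-in for IO_BUFFER_LENGTH */

struct dev {
	unsigned char *cmd;
	unsigned char *resp;
};

/*
 * Mirrors init_opal_dev(): each failure jumps to a label that frees
 * exactly what was already allocated, falling through in reverse
 * acquisition order.
 */
static struct dev *dev_init(void)
{
	struct dev *d = malloc(sizeof(*d));

	if (!d)
		return NULL;

	d->cmd = malloc(BUF_LEN);
	if (!d->cmd)
		goto err_free_dev;

	d->resp = malloc(BUF_LEN);
	if (!d->resp)
		goto err_free_cmd;

	return d;

err_free_cmd:
	free(d->cmd);
err_free_dev:
	free(d);
	return NULL;
}

/* Mirrors free_opal_dev(): release the members before the container. */
static void dev_free(struct dev *d)
{
	if (!d)
		return;
	free(d->resp);
	free(d->cmd);
	free(d);
}

int main(void)
{
	struct dev *d = dev_init();

	printf("init %s\n", d ? "ok" : "failed");
	dev_free(d);
	return 0;
}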