-rw-r--r--   block/bfq-cgroup.c        |  10
-rw-r--r--   block/blk-cgroup-rwstat.c |   2
-rw-r--r--   block/blk-cgroup.c        | 111
-rw-r--r--   block/blk-cgroup.h        |  25
-rw-r--r--   block/blk-iocost.c        |   2
-rw-r--r--   block/blk-iolatency.c     |   6
-rw-r--r--   block/blk-throttle.c      |  23
-rw-r--r--   block/genhd.c             |  17
-rw-r--r--   include/linux/blkdev.h    |  12
9 files changed, 102 insertions, 106 deletions
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 624530643a05..ea3638e06e04 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -405,7 +405,7 @@ static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
 
 	parent = bfqg_parent(bfqg);
 
-	lockdep_assert_held(&bfqg_to_blkg(bfqg)->disk->queue->queue_lock);
+	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
 
 	if (unlikely(!parent))
 		return;
@@ -536,7 +536,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
 {
 	struct blkcg_gq *blkg = pd_to_blkg(pd);
 	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
-	struct bfq_data *bfqd = blkg->disk->queue->elevator->elevator_data;
+	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
 	struct bfq_entity *entity = &bfqg->entity;
 	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
 
@@ -999,7 +999,7 @@ void bfq_end_wr_async(struct bfq_data *bfqd)
 {
 	struct blkcg_gq *blkg;
 
-	list_for_each_entry(blkg, &bfqd->queue->disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
 		struct bfq_group *bfqg = blkg_to_bfqg(blkg);
 
 		bfq_end_wr_async_queues(bfqd, bfqg);
@@ -1199,7 +1199,7 @@ static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
 	struct cgroup_subsys_state *pos_css;
 	u64 sum = 0;
 
-	lockdep_assert_held(&blkg->disk->queue->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -1293,7 +1293,7 @@ struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
 	if (ret)
 		return NULL;
 
-	return blkg_to_bfqg(bfqd->queue->disk->root_blkg);
+	return blkg_to_bfqg(bfqd->queue->root_blkg);
 }
 
 struct blkcg_policy blkcg_policy_bfq = {
diff --git a/block/blk-cgroup-rwstat.c b/block/blk-cgroup-rwstat.c
index b8b8c82e667a..3304e841df7c 100644
--- a/block/blk-cgroup-rwstat.c
+++ b/block/blk-cgroup-rwstat.c
@@ -107,7 +107,7 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
 	struct cgroup_subsys_state *pos_css;
 	unsigned int i;
 
-	lockdep_assert_held(&blkg->disk->queue->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 
 	memset(sum, 0, sizeof(*sum));
 	rcu_read_lock();
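Annotation (not part of the patch): the bfq hunks above restore iteration over the queue-rooted blkg list. A minimal sketch of that traversal pattern after this series, with a hypothetical function name; the patch itself takes q->queue_lock around such walks:

/*
 * Sketch only: after this change every blkcg_gq of a queue hangs off
 * q->blkg_list via blkg->q_node, and the walks in this series happen
 * under q->queue_lock.
 */
static void for_each_blkg_sketch(struct request_queue *q)
{
	struct blkcg_gq *blkg;

	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* per-blkg work, e.g. what bfq_end_wr_async() does per group */
	}
	spin_unlock_irq(&q->queue_lock);
}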
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 935028912e7a..981ebe003b1c 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -108,35 +108,36 @@ static struct cgroup_subsys_state *blkcg_css(void)
 	return task_css(current, io_cgrp_id);
 }
 
-static bool blkcg_policy_enabled(struct gendisk *disk,
+static bool blkcg_policy_enabled(struct request_queue *q,
 				 const struct blkcg_policy *pol)
 {
-	return pol && test_bit(pol->plid, disk->blkcg_pols);
+	return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
 static void blkg_free_workfn(struct work_struct *work)
 {
 	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
 					     free_work);
+	struct request_queue *q = blkg->q;
 	int i;
 
 	/*
 	 * pd_free_fn() can also be called from blkcg_deactivate_policy(),
 	 * in order to make sure pd_free_fn() is called in order, the deletion
-	 * of the list blkg->entry is delayed to here from blkg_destroy(), and
+	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
 	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
 	 * blkcg_deactivate_policy().
 	 */
-	mutex_lock(&blkg->disk->blkcg_mutex);
+	mutex_lock(&q->blkcg_mutex);
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 	if (blkg->parent)
 		blkg_put(blkg->parent);
-	list_del_init(&blkg->entry);
-	mutex_unlock(&blkg->disk->blkcg_mutex);
+	list_del_init(&blkg->q_node);
+	mutex_unlock(&q->blkcg_mutex);
 
-	put_disk(blkg->disk);
+	blk_put_queue(q);
 	free_percpu(blkg->iostat_cpu);
 	percpu_ref_exit(&blkg->refcnt);
 	kfree(blkg);
@@ -263,13 +264,11 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
 	if (!blkg->iostat_cpu)
 		goto out_exit_refcnt;
-
-	if (test_bit(GD_DEAD, &disk->state))
+	if (!blk_get_queue(disk->queue))
 		goto out_free_iostat;
 
-	get_device(disk_to_dev(disk));
-	blkg->disk = disk;
-	INIT_LIST_HEAD(&blkg->entry);
+	blkg->q = disk->queue;
+	INIT_LIST_HEAD(&blkg->q_node);
 	spin_lock_init(&blkg->async_bio_lock);
 	bio_list_init(&blkg->async_bios);
 	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
@@ -285,7 +284,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 		struct blkcg_policy *pol = blkcg_policy[i];
 		struct blkg_policy_data *pd;
 
-		if (!blkcg_policy_enabled(disk, pol))
+		if (!blkcg_policy_enabled(disk->queue, pol))
 			continue;
 
 		/* alloc per-policy data and attach it to blkg */
@@ -304,7 +303,7 @@ out_free_pds:
 	while (--i >= 0)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
-	put_disk(blkg->disk);
+	blk_put_queue(disk->queue);
 out_free_iostat:
 	free_percpu(blkg->iostat_cpu);
 out_exit_refcnt:
@@ -350,7 +349,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
 
 	/* link parent */
 	if (blkcg_parent(blkcg)) {
-		blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk);
+		blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
 		if (WARN_ON_ONCE(!blkg->parent)) {
 			ret = -ENODEV;
 			goto err_put_css;
@@ -371,7 +370,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
 	ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
 	if (likely(!ret)) {
 		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
-		list_add(&blkg->entry, &disk->blkg_list);
+		list_add(&blkg->q_node, &disk->queue->blkg_list);
 
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkcg_policy *pol = blkcg_policy[i];
@@ -423,12 +422,12 @@ static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
-	blkg = blkg_lookup(blkcg, disk);
+	blkg = blkg_lookup(blkcg, q);
 	if (blkg)
 		return blkg;
 
 	spin_lock_irqsave(&q->queue_lock, flags);
-	blkg = blkg_lookup(blkcg, disk);
+	blkg = blkg_lookup(blkcg, q);
 	if (blkg) {
 		if (blkcg != &blkcg_root &&
 		    blkg != rcu_dereference(blkcg->blkg_hint))
@@ -444,10 +443,10 @@ static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	while (true) {
 		struct blkcg *pos = blkcg;
 		struct blkcg *parent = blkcg_parent(blkcg);
-		struct blkcg_gq *ret_blkg = disk->root_blkg;
+		struct blkcg_gq *ret_blkg = q->root_blkg;
 
 		while (parent) {
-			blkg = blkg_lookup(parent, disk);
+			blkg = blkg_lookup(parent, q);
 			if (blkg) {
 				/* remember closest blkg */
 				ret_blkg = blkg;
@@ -476,7 +475,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 	struct blkcg *blkcg = blkg->blkcg;
 	int i;
 
-	lockdep_assert_held(&blkg->disk->queue->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 	lockdep_assert_held(&blkcg->lock);
 
 	/*
@@ -500,7 +499,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 
 	blkg->online = false;
 
-	radix_tree_delete(&blkcg->blkg_tree, blkg->disk->queue->id);
+	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
 	hlist_del_init_rcu(&blkg->blkcg_node);
 
 	/*
@@ -526,7 +525,7 @@ static void blkg_destroy_all(struct gendisk *disk)
 
 restart:
 	spin_lock_irq(&q->queue_lock);
-	list_for_each_entry_safe(blkg, n, &disk->blkg_list, entry) {
+	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -545,7 +544,7 @@ restart:
 		}
 	}
 
-	disk->root_blkg = NULL;
+	q->root_blkg = NULL;
 	spin_unlock_irq(&q->queue_lock);
 }
 
@@ -587,7 +586,9 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 
 const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
-	return bdi_dev_name(blkg->disk->bdi);
+	if (!blkg->q->disk)
+		return NULL;
+	return bdi_dev_name(blkg->q->disk->bdi);
 }
 
 /**
@@ -619,10 +620,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-		spin_lock_irq(&blkg->disk->queue->queue_lock);
-		if (blkcg_policy_enabled(blkg->disk, pol))
+		spin_lock_irq(&blkg->q->queue_lock);
+		if (blkcg_policy_enabled(blkg->q, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
-		spin_unlock_irq(&blkg->disk->queue->queue_lock);
+		spin_unlock_irq(&blkg->q->queue_lock);
 	}
 	rcu_read_unlock();
 
@@ -728,12 +729,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	rcu_read_lock();
 	spin_lock_irq(&q->queue_lock);
 
-	if (!blkcg_policy_enabled(disk, pol)) {
+	if (!blkcg_policy_enabled(q, pol)) {
 		ret = -EOPNOTSUPP;
 		goto fail_unlock;
 	}
 
-	blkg = blkg_lookup(blkcg, disk);
+	blkg = blkg_lookup(blkcg, q);
 	if (blkg)
 		goto success;
 
@@ -747,7 +748,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		struct blkcg_gq *new_blkg;
 
 		parent = blkcg_parent(blkcg);
-		while (parent && !blkg_lookup(parent, disk)) {
+		while (parent && !blkg_lookup(parent, q)) {
 			pos = parent;
 			parent = blkcg_parent(parent);
 		}
@@ -771,13 +772,13 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		rcu_read_lock();
 		spin_lock_irq(&q->queue_lock);
 
-		if (!blkcg_policy_enabled(disk, pol)) {
+		if (!blkcg_policy_enabled(q, pol)) {
 			blkg_free(new_blkg);
 			ret = -EOPNOTSUPP;
 			goto fail_preloaded;
 		}
 
-		blkg = blkg_lookup(pos, disk);
+		blkg = blkg_lookup(pos, q);
 		if (blkg) {
 			blkg_free(new_blkg);
 		} else {
@@ -951,7 +952,7 @@ static void blkcg_fill_root_iostats(void)
 	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
 	while ((dev = class_dev_iter_next(&iter))) {
 		struct block_device *bdev = dev_to_bdev(dev);
-		struct blkcg_gq *blkg = bdev->bd_disk->root_blkg;
+		struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
 		struct blkg_iostat tmp;
 		int cpu;
 		unsigned long flags;
@@ -1046,9 +1047,9 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-		spin_lock_irq(&blkg->disk->queue->queue_lock);
+		spin_lock_irq(&blkg->q->queue_lock);
 		blkcg_print_one_stat(blkg, sf);
-		spin_unlock_irq(&blkg->disk->queue->queue_lock);
+		spin_unlock_irq(&blkg->q->queue_lock);
 	}
 	rcu_read_unlock();
 	return 0;
@@ -1118,7 +1119,7 @@ static void blkcg_destroy_blkgs(struct blkcg *blkcg)
 	while (!hlist_empty(&blkcg->blkg_list)) {
 		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
 						struct blkcg_gq, blkcg_node);
-		struct request_queue *q = blkg->disk->queue;
+		struct request_queue *q = blkg->q;
 
 		if (need_resched() || !spin_trylock(&q->queue_lock)) {
 			/*
@@ -1298,8 +1299,8 @@ int blkcg_init_disk(struct gendisk *disk)
 	bool preloaded;
 	int ret;
 
-	INIT_LIST_HEAD(&disk->blkg_list);
-	mutex_init(&disk->blkcg_mutex);
+	INIT_LIST_HEAD(&q->blkg_list);
+	mutex_init(&q->blkcg_mutex);
 
 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
 	if (!new_blkg)
@@ -1313,7 +1314,7 @@ int blkcg_init_disk(struct gendisk *disk)
 	blkg = blkg_create(&blkcg_root, disk, new_blkg);
 	if (IS_ERR(blkg))
 		goto err_unlock;
-	disk->root_blkg = blkg;
+	q->root_blkg = blkg;
 	spin_unlock_irq(&q->queue_lock);
 
 	if (preloaded)
@@ -1426,7 +1427,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 	struct blkcg_gq *blkg, *pinned_blkg = NULL;
 	int ret;
 
-	if (blkcg_policy_enabled(disk, pol))
+	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
 	if (queue_is_mq(q))
@@ -1435,7 +1436,7 @@ retry:
 	spin_lock_irq(&q->queue_lock);
 
 	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
-	list_for_each_entry_reverse(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
 		struct blkg_policy_data *pd;
 
 		if (blkg->pd[pol->plid])
@@ -1480,16 +1481,16 @@ retry:
 
 	/* all allocated, init in the same order */
 	if (pol->pd_init_fn)
-		list_for_each_entry_reverse(blkg, &disk->blkg_list, entry)
+		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
 			pol->pd_init_fn(blkg->pd[pol->plid]);
 
-	list_for_each_entry_reverse(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
 		if (pol->pd_online_fn)
 			pol->pd_online_fn(blkg->pd[pol->plid]);
 		blkg->pd[pol->plid]->online = true;
 	}
 
-	__set_bit(pol->plid, disk->blkcg_pols);
+	__set_bit(pol->plid, q->blkcg_pols);
 	ret = 0;
 
 	spin_unlock_irq(&q->queue_lock);
@@ -1505,7 +1506,7 @@ out:
 enomem:
 	/* alloc failed, nothing's initialized yet, free everything */
 	spin_lock_irq(&q->queue_lock);
-	list_for_each_entry(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -1535,18 +1536,18 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 	struct request_queue *q = disk->queue;
 	struct blkcg_gq *blkg;
 
-	if (!blkcg_policy_enabled(disk, pol))
+	if (!blkcg_policy_enabled(q, pol))
 		return;
 
 	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 
-	mutex_lock(&disk->blkcg_mutex);
+	mutex_lock(&q->blkcg_mutex);
 	spin_lock_irq(&q->queue_lock);
 
-	__clear_bit(pol->plid, disk->blkcg_pols);
+	__clear_bit(pol->plid, q->blkcg_pols);
 
-	list_for_each_entry(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -1560,7 +1561,7 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 	}
 
 	spin_unlock_irq(&q->queue_lock);
-	mutex_unlock(&disk->blkcg_mutex);
+	mutex_unlock(&q->blkcg_mutex);
 
 	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
@@ -1852,7 +1853,7 @@ void blkcg_maybe_throttle_current(void)
 	blkcg = css_to_blkcg(blkcg_css());
 	if (!blkcg)
 		goto out;
-	blkg = blkg_lookup(blkcg, disk);
+	blkg = blkg_lookup(blkcg, disk->queue);
 	if (!blkg)
 		goto out;
 	if (!blkg_tryget(blkg))
@@ -1957,7 +1958,7 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
  * Associate @bio with the blkg found by combining the css's blkg and the
  * request_queue of the @bio. An association failure is handled by walking up
  * the blkg tree. Therefore, the blkg associated can be anything between @blkg
- * and disk->root_blkg. This situation only happens when a cgroup is dying and
+ * and q->root_blkg. This situation only happens when a cgroup is dying and
  * then the remaining bios will spill to the closest alive blkg.
 *
  * A reference will be taken on the blkg and will be released when @bio is
@@ -1972,8 +1973,8 @@ void bio_associate_blkg_from_css(struct bio *bio,
 	if (css && css->parent) {
 		bio->bi_blkg = blkg_tryget_closest(bio, css);
 	} else {
-		blkg_get(bio->bi_bdev->bd_disk->root_blkg);
-		bio->bi_blkg = bio->bi_bdev->bd_disk->root_blkg;
+		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
+		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
 	}
 }
 EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
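Annotation (not part of the patch): the bio_associate_blkg_from_css() hunk above moves the root-blkg fallback from the gendisk to the queue. A condensed sketch of just that fallback path, assuming bio->bi_bdev is already set (function name hypothetical):

/*
 * Sketch only: with no usable css, the bio is pinned to the root blkg
 * of its bdev's request_queue; the reference is dropped on completion.
 */
static void bio_associate_root_sketch(struct bio *bio)
{
	struct blkcg_gq *blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;

	blkg_get(blkg);
	bio->bi_blkg = blkg;
}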
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index e442b406ca0d..9c5078755e5e 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -53,8 +53,9 @@ struct blkg_iostat_set {
 
 /* association between a blk cgroup and a request queue */
 struct blkcg_gq {
-	struct gendisk			*disk;
-	struct list_head		entry;
+	/* Pointer to the associated request_queue */
+	struct request_queue		*q;
+	struct list_head		q_node;
 	struct hlist_node		blkcg_node;
 	struct blkcg			*blkcg;
 
@@ -234,30 +235,30 @@ static inline bool bio_issue_as_root_blkg(struct bio *bio)
 }
 
 /**
- * blkg_lookup - lookup blkg for the specified blkcg - disk pair
+ * blkg_lookup - lookup blkg for the specified blkcg - q pair
  * @blkcg: blkcg of interest
- * @disk: gendisk of interest
+ * @q: request_queue of interest
 *
- * Lookup blkg for the @blkcg - @disk pair.
+ * Lookup blkg for the @blkcg - @q pair.
  * Must be called in a RCU critical section.
 */
 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
-					   struct gendisk *disk)
+					   struct request_queue *q)
 {
 	struct blkcg_gq *blkg;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	if (blkcg == &blkcg_root)
-		return disk->root_blkg;
+		return q->root_blkg;
 
 	blkg = rcu_dereference(blkcg->blkg_hint);
-	if (blkg && blkg->disk == disk)
+	if (blkg && blkg->q == q)
 		return blkg;
 
-	blkg = radix_tree_lookup(&blkcg->blkg_tree, disk->queue->id);
-	if (blkg && blkg->disk != disk)
+	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
+	if (blkg && blkg->q != q)
 		blkg = NULL;
 	return blkg;
 }
@@ -357,7 +358,7 @@ static inline void blkg_put(struct blkcg_gq *blkg)
 #define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
 	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
 		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
-					    (p_blkg)->disk)))
+					    (p_blkg)->q)))
 
 /**
  * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
@@ -372,7 +373,7 @@ static inline void blkg_put(struct blkcg_gq *blkg)
 #define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
 	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
 		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
-					    (p_blkg)->disk)))
+					    (p_blkg)->q)))
 
 bool __blkcg_punt_bio_submit(struct bio *bio);
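Annotation (not part of the patch): blkg_lookup() above again takes a request_queue and, per its kerneldoc, must be called in an RCU critical section; it returns the blkg without taking a reference. A sketch of a well-formed caller (function name hypothetical):

/*
 * Sketch only: the result of blkg_lookup() may only be dereferenced
 * inside the RCU section unless a reference is taken (blkg_tryget()).
 */
static bool blkg_online_sketch(struct blkcg *blkcg, struct request_queue *q)
{
	struct blkcg_gq *blkg;
	bool online = false;

	rcu_read_lock();
	blkg = blkg_lookup(blkcg, q);	/* may be NULL */
	if (blkg)
		online = blkg->online;
	rcu_read_unlock();

	return online;
}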
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 7a2dc9dc8e3b..ff534e9d92dc 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2947,7 +2947,7 @@ static void ioc_pd_init(struct blkg_policy_data *pd)
 {
 	struct ioc_gq *iocg = pd_to_iocg(pd);
 	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
-	struct ioc *ioc = q_to_ioc(blkg->disk->queue);
+	struct ioc *ioc = q_to_ioc(blkg->q);
 	struct ioc_now now;
 	struct blkcg_gq *tblkg;
 	unsigned long flags;
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 5d5aa1e526b7..0dc910568b31 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -665,7 +665,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(blkg, pos_css,
-				     blkiolat->rqos.disk->root_blkg) {
+				     blkiolat->rqos.disk->queue->root_blkg) {
 		struct iolatency_grp *iolat;
 		struct child_latency_info *lat_info;
 		unsigned long flags;
@@ -967,12 +967,12 @@ static void iolatency_pd_init(struct blkg_policy_data *pd)
 {
 	struct iolatency_grp *iolat = pd_to_lat(pd);
 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
-	struct rq_qos *rqos = blkcg_rq_qos(blkg->disk->queue);
+	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	if (blk_queue_nonrot(blkg->disk->queue))
+	if (blk_queue_nonrot(blkg->q))
 		iolat->ssd = true;
 	else
 		iolat->ssd = false;
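Annotation (not part of the patch): blkiolatency_timer_fn() above uses the stock pattern for visiting a whole blkg subtree, now rooted at the queue's root_blkg. A generic sketch of that walk (function name hypothetical):

/*
 * Sketch only: RCU-protected pre-order walk over every descendant
 * blkg; cgroups without a blkg on this queue are skipped because the
 * macro's embedded blkg_lookup() returns NULL for them.
 */
static void walk_blkgs_sketch(struct request_queue *q)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css, q->root_blkg) {
		/* per-blkg inspection goes here */
	}
	rcu_read_unlock();
}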
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e7bd7050d684..47e9d8be68f3 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -387,7 +387,7 @@ static void throtl_pd_init(struct blkg_policy_data *pd)
 {
 	struct throtl_grp *tg = pd_to_tg(pd);
 	struct blkcg_gq *blkg = tg_to_blkg(tg);
-	struct throtl_data *td = blkg->disk->queue->td;
+	struct throtl_data *td = blkg->q->td;
 	struct throtl_service_queue *sq = &tg->service_queue;
 
 	/*
@@ -451,8 +451,7 @@ static void blk_throtl_update_limit_valid(struct throtl_data *td)
 	bool low_valid = false;
 
 	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css,
-				      td->queue->disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
 		struct throtl_grp *tg = blkg_to_tg(blkg);
 
 		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
@@ -1175,13 +1174,13 @@ static void throtl_pending_timer_fn(struct timer_list *t)
 
 	/* throtl_data may be gone, so figure out request queue by blkg */
 	if (tg)
-		q = tg->pd.blkg->disk->queue;
+		q = tg->pd.blkg->q;
 	else
 		q = td->queue;
 
 	spin_lock_irq(&q->queue_lock);
 
-	if (!q->disk->root_blkg)
+	if (!q->root_blkg)
 		goto out_unlock;
 
 	if (throtl_can_upgrade(td, NULL))
@@ -1323,8 +1322,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
 	 * blk-throttle.
 	 */
 	blkg_for_each_descendant_pre(blkg, pos_css,
-				     global ? tg->td->queue->disk->root_blkg :
-				     tg_to_blkg(tg)) {
+			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
 		struct throtl_grp *this_tg = blkg_to_tg(blkg);
 		struct throtl_grp *parent_tg;
 
@@ -1719,7 +1717,7 @@ void blk_throtl_cancel_bios(struct gendisk *disk)
 	 * path need RCU protection and to prevent warning from lockdep.
 	 */
 	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css, disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
 		struct throtl_grp *tg = blkg_to_tg(blkg);
 		struct throtl_service_queue *sq = &tg->service_queue;
 
@@ -1873,8 +1871,7 @@ static bool throtl_can_upgrade(struct throtl_data *td,
 		return false;
 
 	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css,
-				      td->queue->disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
 		struct throtl_grp *tg = blkg_to_tg(blkg);
 
 		if (tg == this_tg)
@@ -1920,8 +1917,7 @@ static void throtl_upgrade_state(struct throtl_data *td)
 	td->low_upgrade_time = jiffies;
 	td->scale = 0;
 	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css,
-				      td->queue->disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
 		struct throtl_grp *tg = blkg_to_tg(blkg);
 		struct throtl_service_queue *sq = &tg->service_queue;
 
@@ -2411,8 +2407,7 @@ void blk_throtl_exit(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
 
-	if (!q->td)
-		return;
+	BUG_ON(!q->td);
 	del_timer_sync(&q->td->service_queue.pending_timer);
 	throtl_shutdown_wq(q);
 	blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
diff --git a/block/genhd.c b/block/genhd.c
index 65373738c70b..093ef292e98f 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -466,13 +466,9 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
 	 */
 	pm_runtime_set_memalloc_noio(ddev, true);
 
-	ret = blkcg_init_disk(disk);
-	if (ret)
-		goto out_del_block_link;
-
 	ret = blk_integrity_add(disk);
 	if (ret)
-		goto out_blkcg_exit;
+		goto out_del_block_link;
 
 	disk->part0->bd_holder_dir =
 		kobject_create_and_add("holders", &ddev->kobj);
@@ -538,8 +534,6 @@ out_put_holder_dir:
 	kobject_put(disk->part0->bd_holder_dir);
 out_del_integrity:
 	blk_integrity_del(disk);
-out_blkcg_exit:
-	blkcg_exit_disk(disk);
 out_del_block_link:
 	if (!sysfs_deprecated)
 		sysfs_remove_link(block_depr, dev_name(ddev));
@@ -1164,8 +1158,6 @@ static void disk_release(struct device *dev)
 	might_sleep();
 	WARN_ON_ONCE(disk_live(disk));
 
-	blkcg_exit_disk(disk);
-
 	/*
 	 * To undo the all initialization from blk_mq_init_allocated_queue in
 	 * case of a probe failure where add_disk is never called we have to
@@ -1178,6 +1170,8 @@ static void disk_release(struct device *dev)
 	    !test_bit(GD_ADDED, &disk->state))
 		blk_mq_exit_queue(disk->queue);
 
+	blkcg_exit_disk(disk);
+
 	bioset_exit(&disk->bio_split);
 
 	disk_release_events(disk);
@@ -1390,6 +1384,9 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
 	if (xa_insert(&disk->part_tbl, 0, disk->part0, GFP_KERNEL))
 		goto out_destroy_part_tbl;
 
+	if (blkcg_init_disk(disk))
+		goto out_erase_part0;
+
 	rand_initialize_disk(disk);
 	disk_to_dev(disk)->class = &block_class;
 	disk_to_dev(disk)->type = &disk_type;
@@ -1402,6 +1399,8 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
 #endif
 	return disk;
 
+out_erase_part0:
+	xa_erase(&disk->part_tbl, 0);
 out_destroy_part_tbl:
 	xa_destroy(&disk->part_tbl);
 	disk->part0->bd_disk = NULL;
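Annotation (not part of the patch): the genhd.c hunks above move blkcg setup to disk allocation and delay teardown until after the queue is dismantled, so q->blkg_list stays valid for the disk's whole lifetime. Reduced to just the ordering, with heavily simplified hypothetical stand-ins for __alloc_disk_node() and disk_release():

/*
 * Sketch only: setup at allocation, teardown strictly after
 * blk_mq_exit_queue(). The error paths and state-bit checks
 * (e.g. GD_ADDED) of the real functions are omitted.
 */
static struct gendisk *alloc_disk_sketch(struct request_queue *q)
{
	struct gendisk *disk = kzalloc(sizeof(*disk), GFP_KERNEL);

	if (!disk)
		return NULL;
	disk->queue = q;
	if (blkcg_init_disk(disk)) {	/* now done at allocation time */
		kfree(disk);
		return NULL;
	}
	return disk;
}

static void release_disk_sketch(struct gendisk *disk)
{
	blk_mq_exit_queue(disk->queue);	/* queue goes down first ...  */
	blkcg_exit_disk(disk);		/* ... blkcg state only after */
	kfree(disk);
}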
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 79aec4ebadb9..b9637d63e6f0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -163,12 +163,6 @@ struct gendisk {
 	struct timer_rand_state *random;
 	atomic_t sync_io;		/* RAID */
 	struct disk_events *ev;
-#ifdef CONFIG_BLK_CGROUP
-	DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
-	struct blkcg_gq *root_blkg;
-	struct list_head blkg_list;
-	struct mutex blkcg_mutex;
-#endif /* CONFIG_BLK_CGROUP */
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 	struct kobject integrity_kobj;
 #endif	/* CONFIG_BLK_DEV_INTEGRITY */
@@ -487,6 +481,12 @@ struct request_queue {
 	struct blk_mq_tags	*sched_shared_tags;
 
 	struct list_head	icq_list;
+#ifdef CONFIG_BLK_CGROUP
+	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
+	struct blkcg_gq		*root_blkg;
+	struct list_head	blkg_list;
+	struct mutex		blkcg_mutex;
+#endif
 
 	struct queue_limits	limits;