-rw-r--r--   block/blk-cgroup.c                 |  4
-rw-r--r--   block/blk-cgroup.h                 |  3
-rw-r--r--   block/blk-iocost.c                 |  2
-rw-r--r--   block/blk-mq.c                     |  9
-rw-r--r--   block/blk-settings.c               |  3
-rw-r--r--   block/blk-zoned.c                  | 23
-rw-r--r--   block/ioprio.c                     | 26
-rw-r--r--   drivers/block/xen-blkback/common.h |  2
-rw-r--r--   drivers/md/md.c                    | 40
-rw-r--r--   drivers/md/raid1.c                 | 12
-rw-r--r--   drivers/scsi/sd.c                  |  7
-rw-r--r--   include/linux/blkdev.h             |  1
-rw-r--r--   include/linux/ioprio.h             | 25
13 files changed, 77 insertions(+), 80 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 58b13ef23821..e303fd317313 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -575,13 +575,13 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 static void blkg_destroy_all(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
-	struct blkcg_gq *blkg, *n;
+	struct blkcg_gq *blkg;
 	int count = BLKG_DESTROY_BATCH_SIZE;
 	int i;
 
 restart:
 	spin_lock_irq(&q->queue_lock);
-	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		if (hlist_unhashed(&blkg->blkcg_node))
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index fd482439afbc..b927a4a0ad03 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -252,7 +252,8 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
 	if (blkcg == &blkcg_root)
 		return q->root_blkg;
 
-	blkg = rcu_dereference(blkcg->blkg_hint);
+	blkg = rcu_dereference_check(blkcg->blkg_hint,
+			lockdep_is_held(&q->queue_lock));
 	if (blkg && blkg->q == q)
 		return blkg;
 
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 089fcb9cfce3..c8beec6d7df0 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1261,7 +1261,7 @@ static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 {
 	struct ioc *ioc = iocg->ioc;
-	u64 last_period, cur_period;
+	u64 __maybe_unused last_period, cur_period;
 	u64 vtime, vtarget;
 	int i;
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index fb29ff5cc281..aa9a05fdd023 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -772,11 +772,16 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 		/*
 		 * Partial zone append completions cannot be supported as the
 		 * BIO fragments may end up not being written sequentially.
+		 * For such case, force the completed nbytes to be equal to
+		 * the BIO size so that bio_advance() sets the BIO remaining
+		 * size to 0 and we end up calling bio_endio() before returning.
 		 */
-		if (bio->bi_iter.bi_size != nbytes)
+		if (bio->bi_iter.bi_size != nbytes) {
 			bio->bi_status = BLK_STS_IOERR;
-		else
+			nbytes = bio->bi_iter.bi_size;
+		} else {
 			bio->bi_iter.bi_sector = rq->__sector;
+		}
 	}
 
 	bio_advance(bio, nbytes);
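Note on the blk-mq.c hunk above: a partial zone append completion is turned into an error, but nbytes is forced to the full BIO size so bio_advance() leaves nothing remaining and bio_endio() still runs. A minimal userspace model of that control flow follows; struct model_bio and the helpers are illustrative stand-ins, not the kernel's types.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct model_bio {
	unsigned int remaining;	/* models bio->bi_iter.bi_size */
	bool failed;		/* models bio->bi_status != BLK_STS_OK */
	bool ended;		/* models bio_endio() having run */
};

static void model_advance(struct model_bio *bio, unsigned int nbytes)
{
	bio->remaining -= nbytes;	/* models bio_advance() */
	if (bio->remaining == 0)
		bio->ended = true;	/* models the bio_endio() call */
}

/* Partial zone-append completion: error the BIO but still consume it fully. */
static void complete_zone_append(struct model_bio *bio, unsigned int nbytes)
{
	if (bio->remaining != nbytes) {
		bio->failed = true;
		nbytes = bio->remaining;	/* the fix: force full completion */
	}
	model_advance(bio, nbytes);
}

int main(void)
{
	struct model_bio bio = { .remaining = 4096 };

	complete_zone_append(&bio, 2048);	/* partial completion */
	assert(bio.failed && bio.ended);	/* errored, but not left hanging */
	printf("ended=%d failed=%d remaining=%u\n",
	       bio.ended, bio.failed, bio.remaining);
	return 0;
}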
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d993d20dab3c..06ea91e51b8b 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -342,6 +342,9 @@ void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
 	if (q->limits.physical_block_size < q->limits.logical_block_size)
 		q->limits.physical_block_size = q->limits.logical_block_size;
 
+	if (q->limits.discard_granularity < q->limits.physical_block_size)
+		q->limits.discard_granularity = q->limits.physical_block_size;
+
 	if (q->limits.io_min < q->limits.physical_block_size)
 		q->limits.io_min = q->limits.physical_block_size;
 }
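Note on the blk-settings.c hunk above: blk_queue_physical_block_size() now also rounds discard_granularity up to the physical block size, alongside the existing logical-block-size and io_min clamps. A small userspace model of that clamp ordering; struct limits and set_physical_block_size() are local stand-ins, not the kernel API.

#include <stdio.h>

struct limits {
	unsigned int logical_block_size;
	unsigned int physical_block_size;
	unsigned int discard_granularity;
	unsigned int io_min;
};

static void set_physical_block_size(struct limits *lim, unsigned int size)
{
	lim->physical_block_size = size;
	/* physical size can never be smaller than the logical size */
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;
	/* new in this patch: discard granularity >= one physical block */
	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;
	/* minimum I/O size >= one physical block */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;
}

int main(void)
{
	struct limits lim = { .logical_block_size = 512 };

	set_physical_block_size(&lim, 4096);
	/* prints: pbs=4096 discard_granularity=4096 io_min=4096 */
	printf("pbs=%u discard_granularity=%u io_min=%u\n",
	       lim.physical_block_size, lim.discard_granularity, lim.io_min);
	return 0;
}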
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index c59d44ee6b23..d343e5756a9c 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -498,7 +498,6 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
 		set_bit(idx, args->conv_zones_bitmap);
 		break;
 	case BLK_ZONE_TYPE_SEQWRITE_REQ:
-	case BLK_ZONE_TYPE_SEQWRITE_PREF:
 		if (!args->seq_zones_wlock) {
 			args->seq_zones_wlock = blk_alloc_zone_bitmap(q->node,
 							args->nr_zones);
@@ -506,6 +505,7 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
 				return -ENOMEM;
 		}
 		break;
+	case BLK_ZONE_TYPE_SEQWRITE_PREF:
 	default:
 		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
 			disk->disk_name, (int)zone->type, zone->start);
@@ -615,24 +615,3 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
 	return ret;
 }
 EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
-
-void disk_clear_zoned(struct gendisk *disk)
-{
-	struct request_queue *q = disk->queue;
-
-	blk_mq_freeze_queue(q);
-
-	q->limits.zoned = false;
-	disk_free_zone_bitmaps(disk);
-	blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
-	q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
-	disk->nr_zones = 0;
-	disk->max_open_zones = 0;
-	disk->max_active_zones = 0;
-	q->limits.chunk_sectors = 0;
-	q->limits.zone_write_granularity = 0;
-	q->limits.max_zone_append_sectors = 0;
-
-	blk_mq_unfreeze_queue(q);
-}
-EXPORT_SYMBOL_GPL(disk_clear_zoned);
diff --git a/block/ioprio.c b/block/ioprio.c
index b5a942519a79..73301a261429 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -139,32 +139,6 @@ out:
 	return ret;
 }
 
-/*
- * If the task has set an I/O priority, use that. Otherwise, return
- * the default I/O priority.
- *
- * Expected to be called for current task or with task_lock() held to keep
- * io_context stable.
- */
-int __get_task_ioprio(struct task_struct *p)
-{
-	struct io_context *ioc = p->io_context;
-	int prio;
-
-	if (p != current)
-		lockdep_assert_held(&p->alloc_lock);
-	if (ioc)
-		prio = ioc->ioprio;
-	else
-		prio = IOPRIO_DEFAULT;
-
-	if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
-		prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
-					 task_nice_ioprio(p));
-	return prio;
-}
-EXPORT_SYMBOL_GPL(__get_task_ioprio);
-
 static int get_task_ioprio(struct task_struct *p)
 {
 	int ret;
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 5ff50e76cee5..1432c83183d0 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -132,7 +132,7 @@ struct blkif_x86_32_request {
 struct blkif_x86_64_request_rw {
 	uint8_t        nr_segments;  /* number of segments */
 	blkif_vdev_t   handle;       /* only for read/write requests */
-	uint32_t       _pad1;        /* offsetof(blkif_reqest..,u.rw.id)==8 */
+	uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
 	uint64_t       id;
 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
 	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e351e6c51cc7..ff3057c787c1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8135,6 +8135,19 @@ static void status_unused(struct seq_file *seq)
 	seq_printf(seq, "\n");
 }
 
+static void status_personalities(struct seq_file *seq)
+{
+	struct md_personality *pers;
+
+	seq_puts(seq, "Personalities : ");
+	spin_lock(&pers_lock);
+	list_for_each_entry(pers, &pers_list, list)
+		seq_printf(seq, "[%s] ", pers->name);
+
+	spin_unlock(&pers_lock);
+	seq_puts(seq, "\n");
+}
+
 static int status_resync(struct seq_file *seq, struct mddev *mddev)
 {
 	sector_t max_sectors, resync, res;
@@ -8276,20 +8289,10 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(&all_mddevs_lock)
 {
-	struct md_personality *pers;
-
-	seq_puts(seq, "Personalities : ");
-	spin_lock(&pers_lock);
-	list_for_each_entry(pers, &pers_list, list)
-		seq_printf(seq, "[%s] ", pers->name);
-
-	spin_unlock(&pers_lock);
-	seq_puts(seq, "\n");
 	seq->poll_event = atomic_read(&md_event_count);
-
 	spin_lock(&all_mddevs_lock);
-	return seq_list_start(&all_mddevs, *pos);
+
+	return seq_list_start_head(&all_mddevs, *pos);
 }
 
 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -8300,16 +8303,23 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 static void md_seq_stop(struct seq_file *seq, void *v)
 	__releases(&all_mddevs_lock)
 {
-	status_unused(seq);
 	spin_unlock(&all_mddevs_lock);
 }
 
 static int md_seq_show(struct seq_file *seq, void *v)
 {
-	struct mddev *mddev = list_entry(v, struct mddev, all_mddevs);
+	struct mddev *mddev;
 	sector_t sectors;
 	struct md_rdev *rdev;
 
+	if (v == &all_mddevs) {
+		status_personalities(seq);
+		if (list_empty(&all_mddevs))
+			status_unused(seq);
+		return 0;
+	}
+
+	mddev = list_entry(v, struct mddev, all_mddevs);
 	if (!mddev_get(mddev))
 		return 0;
 
@@ -8385,6 +8395,10 @@ static int md_seq_show(struct seq_file *seq, void *v)
 	}
 	spin_unlock(&mddev->lock);
 	spin_lock(&all_mddevs_lock);
+
+	if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
+		status_unused(seq);
+
 	if (atomic_dec_and_test(&mddev->active))
 		__mddev_put(mddev);
 
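Note on the md.c hunks above: md_seq_start() switches from seq_list_start() to seq_list_start_head(), so md_seq_show() is called once with the list head itself before the first entry. That extra callback prints the "Personalities :" header, and the "unused devices" trailer moves from md_seq_stop() to the last-entry (or empty-list) case in show. A userspace sketch of the pattern; the names and output are illustrative only, not md's actual /proc/mdstat format.

#include <stdio.h>

static const char *devs[] = { "md0", "md1" };
#define NDEVS (sizeof(devs) / sizeof(devs[0]))

/* pos == 0 plays the role of the head sentinel returned by
 * seq_list_start_head(); pos == n maps to entry n - 1. */
static void show(size_t pos)
{
	if (pos == 0) {			/* v == &all_mddevs in md_seq_show() */
		printf("Personalities : [raid1]\n");
		if (NDEVS == 0)		/* empty list: trailer printed here */
			printf("unused devices: <none>\n");
		return;
	}
	printf("%s : active\n", devs[pos - 1]);
	if (pos == NDEVS)		/* models the list_last_entry() check */
		printf("unused devices: <none>\n");
}

int main(void)
{
	/* the seq_file core walks start/next over head + entries */
	for (size_t pos = 0; pos <= NDEVS; pos++)
		show(pos);
	return 0;
}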
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index aaa434f0c175..24f0d799fd98 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1968,12 +1968,12 @@ static void end_sync_write(struct bio *bio)
 }
 
 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
-			   int sectors, struct page *page, int rw)
+			   int sectors, struct page *page, blk_opf_t rw)
 {
 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
 		/* success */
 		return 1;
-	if (rw == WRITE) {
+	if (rw == REQ_OP_WRITE) {
 		set_bit(WriteErrorSeen, &rdev->flags);
 		if (!test_and_set_bit(WantReplacement,
 				      &rdev->flags))
@@ -2090,7 +2090,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 			rdev = conf->mirrors[d].rdev;
 			if (r1_sync_page_io(rdev, sect, s,
 					    pages[idx],
-					    WRITE) == 0) {
+					    REQ_OP_WRITE) == 0) {
 				r1_bio->bios[d]->bi_end_io = NULL;
 				rdev_dec_pending(rdev, mddev);
 			}
@@ -2105,7 +2105,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 			rdev = conf->mirrors[d].rdev;
 			if (r1_sync_page_io(rdev, sect, s,
 					    pages[idx],
-					    READ) != 0)
+					    REQ_OP_READ) != 0)
 				atomic_add(s, &rdev->corrected_errors);
 		}
 		sectors -= s;
@@ -2321,7 +2321,7 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			    !test_bit(Faulty, &rdev->flags)) {
 				atomic_inc(&rdev->nr_pending);
 				r1_sync_page_io(rdev, sect, s,
-						conf->tmppage, WRITE);
+						conf->tmppage, REQ_OP_WRITE);
 				rdev_dec_pending(rdev, mddev);
 			}
 		}
@@ -2335,7 +2335,7 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			    !test_bit(Faulty, &rdev->flags)) {
 				atomic_inc(&rdev->nr_pending);
 				if (r1_sync_page_io(rdev, sect, s,
-						    conf->tmppage, READ)) {
+						    conf->tmppage, REQ_OP_READ)) {
 					atomic_add(s, &rdev->corrected_errors);
 					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
 						mdname(mddev), s,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6bedd2d5298f..dace4aa8e353 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3149,12 +3149,11 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 		 * the device physical block size.
 		 */
 		blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
-	} else if (blk_queue_is_zoned(q)) {
+	} else {
 		/*
-		 * Anything else. This includes host-aware device that we treat
-		 * as conventional.
+		 * Host-aware devices are treated as conventional.
 		 */
-		disk_clear_zoned(sdkp->disk);
+		WARN_ON_ONCE(blk_queue_is_zoned(q));
 	}
 #endif /* CONFIG_BLK_DEV_ZONED */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 94701a63ad8a..e1e705aef51e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -318,7 +318,6 @@ typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
 			       void *data);
 
 void disk_set_zoned(struct gendisk *disk);
-void disk_clear_zoned(struct gendisk *disk);
 
 #define BLK_ALL_ZONES  ((unsigned int)-1)
 int blkdev_report_zones(struct block_device *bdev, sector_t sector,
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 7578d4f6a969..db1249cd9692 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -47,7 +47,30 @@ static inline int task_nice_ioclass(struct task_struct *task)
 }
 
 #ifdef CONFIG_BLOCK
-int __get_task_ioprio(struct task_struct *p);
+/*
+ * If the task has set an I/O priority, use that. Otherwise, return
+ * the default I/O priority.
+ *
+ * Expected to be called for current task or with task_lock() held to keep
+ * io_context stable.
+ */
+static inline int __get_task_ioprio(struct task_struct *p)
+{
+	struct io_context *ioc = p->io_context;
+	int prio;
+
+	if (!ioc)
+		return IOPRIO_DEFAULT;
+
+	if (p != current)
+		lockdep_assert_held(&p->alloc_lock);
+
+	prio = ioc->ioprio;
+	if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
+		prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
+					 task_nice_ioprio(p));
+	return prio;
+}
 #else
 static inline int __get_task_ioprio(struct task_struct *p)
 {
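Note on the ioprio.h hunk above: __get_task_ioprio() moves inline and now returns IOPRIO_DEFAULT as soon as there is no io_context, taking the lockdep assertion only on the path that actually dereferences it. The fallback for an unset priority class can be modeled in userspace as below; the macros mirror include/uapi/linux/ioprio.h, while struct task and its fields are simplified stand-ins for task_struct.

#include <stdio.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_CLASS_NONE	0
#define IOPRIO_CLASS_BE		2
#define IOPRIO_PRIO_CLASS(p)	((p) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_VALUE(c, d)	(((c) << IOPRIO_CLASS_SHIFT) | (d))
#define IOPRIO_DEFAULT		IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0)

struct task {
	int has_ioc;	/* models p->io_context != NULL */
	int ioprio;	/* models p->io_context->ioprio */
	int nice;	/* -20..19 */
};

static int get_task_ioprio(const struct task *p)
{
	int prio;

	if (!p->has_ioc)
		return IOPRIO_DEFAULT;	/* early exit, as in the new inline */

	prio = p->ioprio;
	if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
		/* models task_nice_ioclass()/task_nice_ioprio(): a normal
		 * task maps to best-effort, level derived from nice */
		prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, (p->nice + 20) / 5);
	return prio;
}

int main(void)
{
	struct task plain = { .has_ioc = 1, .ioprio = IOPRIO_DEFAULT, .nice = 0 };
	struct task set = { .has_ioc = 1,
			    .ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 7) };

	printf("derived class=%d\n", IOPRIO_PRIO_CLASS(get_task_ioprio(&plain)));
	printf("explicit class=%d\n", IOPRIO_PRIO_CLASS(get_task_ioprio(&set)));
	return 0;
}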