Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--	drivers/md/raid1.c	500
1 file changed, 299 insertions, 201 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 29e2df5cd77b..7b0f647bcccb 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -37,10 +37,15 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/ratelimit.h>
+#include <trace/events/block.h>
 #include "md.h"
 #include "raid1.h"
 #include "bitmap.h"
 
+#define UNSUPPORTED_MDDEV_FLAGS         \
+        ((1L << MD_HAS_JOURNAL) |       \
+         (1L << MD_JOURNAL_CLEAN))
+
 /*
  * Number of guaranteed r1bios in case of extreme VM load:
  */
@@ -70,6 +75,9 @@ static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
                          sector_t bi_sector);
 static void lower_barrier(struct r1conf *conf);
 
+#define raid1_log(md, fmt, args...)                             \
+        do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
+
 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
         struct pool_info *pi = data;
@@ -325,6 +333,11 @@ static void raid1_end_read_request(struct bio *bio)
 
         if (uptodate)
                 set_bit(R1BIO_Uptodate, &r1_bio->state);
+        else if (test_bit(FailFast, &rdev->flags) &&
+                 test_bit(R1BIO_FailFast, &r1_bio->state))
+                /* This was a fail-fast read so we definitely
+                 * want to retry */
+                ;
         else {
                 /* If all other devices have failed, we want to return
                  * the error upwards rather than fail the last device.
@@ -347,13 +360,10 @@ static void raid1_end_read_request(struct bio *bio)
                  * oops, read error:
                  */
                 char b[BDEVNAME_SIZE];
-                printk_ratelimited(
-                        KERN_ERR "md/raid1:%s: %s: "
-                        "rescheduling sector %llu\n",
-                        mdname(conf->mddev),
-                        bdevname(rdev->bdev,
-                                 b),
-                        (unsigned long long)r1_bio->sector);
+                pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
+                                   mdname(conf->mddev),
+                                   bdevname(rdev->bdev, b),
+                                   (unsigned long long)r1_bio->sector);
                 set_bit(R1BIO_ReadError, &r1_bio->state);
                 reschedule_retry(r1_bio);
                 /* don't drop the reference on read_disk yet */
@@ -416,7 +426,24 @@ static void raid1_end_write_request(struct bio *bio)
                         set_bit(MD_RECOVERY_NEEDED, &
                                 conf->mddev->recovery);
 
-                set_bit(R1BIO_WriteError, &r1_bio->state);
+                if (test_bit(FailFast, &rdev->flags) &&
+                    (bio->bi_opf & MD_FAILFAST) &&
+                    /* We never try FailFast to WriteMostly devices */
+                    !test_bit(WriteMostly, &rdev->flags)) {
+                        md_error(r1_bio->mddev, rdev);
+                        if (!test_bit(Faulty, &rdev->flags))
+                                /* This is the only remaining device,
+                                 * We need to retry the write without
+                                 * FailFast
+                                 */
+                                set_bit(R1BIO_WriteError, &r1_bio->state);
+                        else {
+                                /* Finished with this branch */
+                                r1_bio->bios[mirror] = NULL;
+                                to_put = bio;
+                        }
+                } else
+                        set_bit(R1BIO_WriteError, &r1_bio->state);
         } else {
                 /*
                  * Set R1BIO_Uptodate in our master bio, so that we
@@ -534,6 +561,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
         best_good_sectors = 0;
         has_nonrot_disk = 0;
         choose_next_idle = 0;
+        clear_bit(R1BIO_FailFast, &r1_bio->state);
 
         if ((conf->mddev->recovery_cp < this_sector + sectors) ||
             (mddev_is_clustered(conf->mddev) &&
@@ -607,6 +635,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                 } else
                         best_good_sectors = sectors;
 
+                if (best_disk >= 0)
+                        /* At least two disks to choose from so failfast is OK */
+                        set_bit(R1BIO_FailFast, &r1_bio->state);
+
                 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
                 has_nonrot_disk |= nonrot;
                 pending = atomic_read(&rdev->nr_pending);
@@ -645,11 +677,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                         }
                         break;
                 }
-                /* If device is idle, use it */
-                if (pending == 0) {
-                        best_disk = disk;
-                        break;
-                }
 
                 if (choose_next_idle)
                         continue;
@@ -672,7 +699,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
          * mixed ratation/non-rotational disks depending on workload.
          */
         if (best_disk == -1) {
-                if (has_nonrot_disk)
+                if (has_nonrot_disk || min_pending == 0)
                         best_disk = best_pending_disk;
                 else
                         best_disk = best_dist_disk;
@@ -745,9 +772,14 @@ static void flush_pending_writes(struct r1conf *conf)
 
                 while (bio) { /* submit pending writes */
                         struct bio *next = bio->bi_next;
+                        struct md_rdev *rdev = (void*)bio->bi_bdev;
                         bio->bi_next = NULL;
-                        if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-                            !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+                        bio->bi_bdev = rdev->bdev;
+                        if (test_bit(Faulty, &rdev->flags)) {
+                                bio->bi_error = -EIO;
+                                bio_endio(bio);
+                        } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+                                            !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
                                 /* Just ignore it */
                                 bio_endio(bio);
                         else
@@ -832,7 +864,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
         else if (conf->barrier && bio_data_dir(bio) == WRITE) {
                 if ((conf->mddev->curr_resync_completed
                      >= bio_end_sector(bio)) ||
-                    (conf->next_resync + NEXT_NORMALIO_DISTANCE
+                    (conf->start_next_window + NEXT_NORMALIO_DISTANCE
                      <= bio->bi_iter.bi_sector))
                         wait = false;
                 else
@@ -858,6 +890,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
                  * that queue to allow conf->start_next_window
                  * to increase.
                  */
+                raid1_log(conf->mddev, "wait barrier");
                 wait_event_lock_irq(conf->wait_barrier,
                                     !conf->array_frozen &&
                                     (!conf->barrier ||
@@ -937,6 +970,7 @@ static void freeze_array(struct r1conf *conf, int extra)
          */
         spin_lock_irq(&conf->resync_lock);
         conf->array_frozen = 1;
+        raid1_log(conf->mddev, "wait freeze");
         wait_event_lock_irq_cmd(conf->wait_barrier,
                                 conf->nr_pending == conf->nr_queued+extra,
                                 conf->resync_lock,
@@ -1019,9 +1053,14 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
 
         while (bio) { /* submit pending writes */
                 struct bio *next = bio->bi_next;
+                struct md_rdev *rdev = (void*)bio->bi_bdev;
                 bio->bi_next = NULL;
-                if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+                bio->bi_bdev = rdev->bdev;
+                if (test_bit(Faulty, &rdev->flags)) {
+                        bio->bi_error = -EIO;
+                        bio_endio(bio);
+                } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+                                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
                         /* Just ignore it */
                         bio_endio(bio);
                 else
@@ -1031,17 +1070,107 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
         kfree(plug);
 }
 
-static void raid1_make_request(struct mddev *mddev, struct bio * bio)
+static void raid1_read_request(struct mddev *mddev, struct bio *bio,
+                               struct r1bio *r1_bio)
 {
         struct r1conf *conf = mddev->private;
         struct raid1_info *mirror;
-        struct r1bio *r1_bio;
         struct bio *read_bio;
+        struct bitmap *bitmap = mddev->bitmap;
+        const int op = bio_op(bio);
+        const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+        int sectors_handled;
+        int max_sectors;
+        int rdisk;
+
+        wait_barrier(conf, bio);
+
+read_again:
+        rdisk = read_balance(conf, r1_bio, &max_sectors);
+
+        if (rdisk < 0) {
+                /* couldn't find anywhere to read from */
+                raid_end_bio_io(r1_bio);
+                return;
+        }
+        mirror = conf->mirrors + rdisk;
+
+        if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+            bitmap) {
+                /*
+                 * Reading from a write-mostly device must take care not to
+                 * over-take any writes that are 'behind'
+                 */
+                raid1_log(mddev, "wait behind writes");
+                wait_event(bitmap->behind_wait,
+                           atomic_read(&bitmap->behind_writes) == 0);
+        }
+        r1_bio->read_disk = rdisk;
+        r1_bio->start_next_window = 0;
+
+        read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+        bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
+                 max_sectors);
+
+        r1_bio->bios[rdisk] = read_bio;
+
+        read_bio->bi_iter.bi_sector = r1_bio->sector +
+                mirror->rdev->data_offset;
+        read_bio->bi_bdev = mirror->rdev->bdev;
+        read_bio->bi_end_io = raid1_end_read_request;
+        bio_set_op_attrs(read_bio, op, do_sync);
+        if (test_bit(FailFast, &mirror->rdev->flags) &&
+            test_bit(R1BIO_FailFast, &r1_bio->state))
+                read_bio->bi_opf |= MD_FAILFAST;
+        read_bio->bi_private = r1_bio;
+
+        if (mddev->gendisk)
+                trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+                                      read_bio, disk_devt(mddev->gendisk),
+                                      r1_bio->sector);
+
+        if (max_sectors < r1_bio->sectors) {
+                /*
+                 * could not read all from this device, so we will need another
+                 * r1_bio.
+                 */
+                sectors_handled = (r1_bio->sector + max_sectors
+                                   - bio->bi_iter.bi_sector);
+                r1_bio->sectors = max_sectors;
+                spin_lock_irq(&conf->device_lock);
+                if (bio->bi_phys_segments == 0)
+                        bio->bi_phys_segments = 2;
+                else
+                        bio->bi_phys_segments++;
+                spin_unlock_irq(&conf->device_lock);
+
+                /*
+                 * Cannot call generic_make_request directly as that will be
+                 * queued in __make_request and subsequent mempool_alloc might
+                 * block waiting for it.  So hand bio over to raid1d.
+                 */
+                reschedule_retry(r1_bio);
+
+                r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+                r1_bio->master_bio = bio;
+                r1_bio->sectors = bio_sectors(bio) - sectors_handled;
+                r1_bio->state = 0;
+                r1_bio->mddev = mddev;
+                r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
+                goto read_again;
+        } else
+                generic_make_request(read_bio);
+}
+
+static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+                                struct r1bio *r1_bio)
+{
+        struct r1conf *conf = mddev->private;
         int i, disks;
-        struct bitmap *bitmap;
+        struct bitmap *bitmap = mddev->bitmap;
         unsigned long flags;
         const int op = bio_op(bio);
-        const int rw = bio_data_dir(bio);
         const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
         const unsigned long do_flush_fua = (bio->bi_opf &
                                                 (REQ_PREFLUSH | REQ_FUA));
@@ -1061,15 +1190,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 
         md_write_start(mddev, bio); /* wait on superblock update early */
 
-        if (bio_data_dir(bio) == WRITE &&
-            ((bio_end_sector(bio) > mddev->suspend_lo &&
+        if ((bio_end_sector(bio) > mddev->suspend_lo &&
             bio->bi_iter.bi_sector < mddev->suspend_hi) ||
             (mddev_is_clustered(mddev) &&
              md_cluster_ops->area_resyncing(mddev, WRITE,
-                     bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
-                /* As the suspend_* range is controlled by
-                 * userspace, we want an interruptible
-                 * wait.
+                     bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
+
+                /*
+                 * As the suspend_* range is controlled by userspace, we want
+                 * an interruptible wait.
                  */
                 DEFINE_WAIT(w);
                 for (;;) {
@@ -1080,121 +1209,18 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
                             bio->bi_iter.bi_sector >= mddev->suspend_hi ||
                             (mddev_is_clustered(mddev) &&
                              !md_cluster_ops->area_resyncing(mddev, WRITE,
-                                     bio->bi_iter.bi_sector, bio_end_sector(bio))))
+                                        bio->bi_iter.bi_sector,
+                                        bio_end_sector(bio))))
                                 break;
                         schedule();
                 }
                 finish_wait(&conf->wait_barrier, &w);
         }
-
         start_next_window = wait_barrier(conf, bio);
 
-        bitmap = mddev->bitmap;
-
-        /*
-         * make_request() can abort the operation when read-ahead is being
-         * used and no empty request is available.
-         *
-         */
-        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
-
-        r1_bio->master_bio = bio;
-        r1_bio->sectors = bio_sectors(bio);
-        r1_bio->state = 0;
-        r1_bio->mddev = mddev;
-        r1_bio->sector = bio->bi_iter.bi_sector;
-
-        /* We might need to issue multiple reads to different
-         * devices if there are bad blocks around, so we keep
-         * track of the number of reads in bio->bi_phys_segments.
-         * If this is 0, there is only one r1_bio and no locking
-         * will be needed when requests complete.  If it is
-         * non-zero, then it is the number of not-completed requests.
-         */
-        bio->bi_phys_segments = 0;
-        bio_clear_flag(bio, BIO_SEG_VALID);
-
-        if (rw == READ) {
-                /*
-                 * read balancing logic:
-                 */
-                int rdisk;
-
-read_again:
-                rdisk = read_balance(conf, r1_bio, &max_sectors);
-
-                if (rdisk < 0) {
-                        /* couldn't find anywhere to read from */
-                        raid_end_bio_io(r1_bio);
-                        return;
-                }
-                mirror = conf->mirrors + rdisk;
-
-                if (test_bit(WriteMostly, &mirror->rdev->flags) &&
-                    bitmap) {
-                        /* Reading from a write-mostly device must
-                         * take care not to over-take any writes
-                         * that are 'behind'
-                         */
-                        wait_event(bitmap->behind_wait,
-                                   atomic_read(&bitmap->behind_writes) == 0);
-                }
-                r1_bio->read_disk = rdisk;
-                r1_bio->start_next_window = 0;
-
-                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-                bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
-                         max_sectors);
-
-                r1_bio->bios[rdisk] = read_bio;
-
-                read_bio->bi_iter.bi_sector = r1_bio->sector +
-                        mirror->rdev->data_offset;
-                read_bio->bi_bdev = mirror->rdev->bdev;
-                read_bio->bi_end_io = raid1_end_read_request;
-                bio_set_op_attrs(read_bio, op, do_sync);
-                read_bio->bi_private = r1_bio;
-
-                if (max_sectors < r1_bio->sectors) {
-                        /* could not read all from this device, so we will
-                         * need another r1_bio.
-                         */
-
-                        sectors_handled = (r1_bio->sector + max_sectors
-                                           - bio->bi_iter.bi_sector);
-                        r1_bio->sectors = max_sectors;
-                        spin_lock_irq(&conf->device_lock);
-                        if (bio->bi_phys_segments == 0)
-                                bio->bi_phys_segments = 2;
-                        else
-                                bio->bi_phys_segments++;
-                        spin_unlock_irq(&conf->device_lock);
-                        /* Cannot call generic_make_request directly
-                         * as that will be queued in __make_request
-                         * and subsequent mempool_alloc might block waiting
-                         * for it.  So hand bio over to raid1d.
-                         */
-                        reschedule_retry(r1_bio);
-
-                        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
-
-                        r1_bio->master_bio = bio;
-                        r1_bio->sectors = bio_sectors(bio) - sectors_handled;
-                        r1_bio->state = 0;
-                        r1_bio->mddev = mddev;
-                        r1_bio->sector = bio->bi_iter.bi_sector +
-                                sectors_handled;
-                        goto read_again;
-                } else
-                        generic_make_request(read_bio);
-                return;
-        }
-
-        /*
-         * WRITE:
-         */
         if (conf->pending_count >= max_queued_requests) {
                 md_wakeup_thread(mddev->thread);
+                raid1_log(mddev, "wait queued");
                 wait_event(conf->wait_barrier,
                            conf->pending_count < max_queued_requests);
         }
@@ -1235,8 +1261,7 @@ read_again:
                         int bad_sectors;
                         int is_bad;
 
-                        is_bad = is_badblock(rdev, r1_bio->sector,
-                                             max_sectors,
+                        is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
                                              &first_bad, &bad_sectors);
                         if (is_bad < 0) {
                                 /* mustn't write here until the bad block is
@@ -1286,6 +1311,7 @@ read_again:
                                 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
                 r1_bio->state = 0;
                 allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
+                raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
                 md_wait_for_blocked_rdev(blocked_rdev, mddev);
                 start_next_window = wait_barrier(conf, bio);
                 /*
@@ -1324,7 +1350,8 @@ read_again:
                         continue;
 
                 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-                bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
+                bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector,
+                         max_sectors);
 
                 if (first_clone) {
                         /* do behind I/O ?
@@ -1363,10 +1390,21 @@ read_again:
                 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
                 mbio->bi_end_io = raid1_end_write_request;
                 bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
+                if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
+                    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
+                    conf->raid_disks - mddev->degraded > 1)
+                        mbio->bi_opf |= MD_FAILFAST;
                 mbio->bi_private = r1_bio;
 
                 atomic_inc(&r1_bio->remaining);
 
+                if (mddev->gendisk)
+                        trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+                                              mbio, disk_devt(mddev->gendisk),
+                                              r1_bio->sector);
+                /* flush_pending_writes() needs access to the rdev so...*/
+                mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
+
                 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
                 if (cb)
                         plug = container_of(cb, struct raid1_plug_cb, cb);
@@ -1407,6 +1445,40 @@ read_again:
         wake_up(&conf->wait_barrier);
 }
 
+static void raid1_make_request(struct mddev *mddev, struct bio *bio)
+{
+        struct r1conf *conf = mddev->private;
+        struct r1bio *r1_bio;
+
+        /*
+         * make_request() can abort the operation when read-ahead is being
+         * used and no empty request is available.
+         *
+         */
+        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+        r1_bio->master_bio = bio;
+        r1_bio->sectors = bio_sectors(bio);
+        r1_bio->state = 0;
+        r1_bio->mddev = mddev;
+        r1_bio->sector = bio->bi_iter.bi_sector;
+
+        /*
+         * We might need to issue multiple reads to different devices if there
+         * are bad blocks around, so we keep track of the number of reads in
+         * bio->bi_phys_segments.  If this is 0, there is only one r1_bio and
+         * no locking will be needed when requests complete.  If it is
+         * non-zero, then it is the number of not-completed requests.
+         */
+        bio->bi_phys_segments = 0;
+        bio_clear_flag(bio, BIO_SEG_VALID);
+
+        if (bio_data_dir(bio) == READ)
+                raid1_read_request(mddev, bio, r1_bio);
+        else
+                raid1_write_request(mddev, bio, r1_bio);
+}
+
 static void raid1_status(struct seq_file *seq, struct mddev *mddev)
 {
         struct r1conf *conf = mddev->private;
@@ -1436,6 +1508,7 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
          * next level up know.
          * else mark the drive as failed
          */
+        spin_lock_irqsave(&conf->device_lock, flags);
         if (test_bit(In_sync, &rdev->flags)
             && (conf->raid_disks - mddev->degraded) == 1) {
                 /*
@@ -1445,10 +1518,10 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
                  * it is very likely to fail.
                  */
                 conf->recovery_disabled = mddev->recovery_disabled;
+                spin_unlock_irqrestore(&conf->device_lock, flags);
                 return;
         }
         set_bit(Blocked, &rdev->flags);
-        spin_lock_irqsave(&conf->device_lock, flags);
         if (test_and_clear_bit(In_sync, &rdev->flags)) {
                 mddev->degraded++;
                 set_bit(Faulty, &rdev->flags);
@@ -1459,36 +1532,35 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
          * if recovery is running, make sure it aborts.
          */
         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-        set_mask_bits(&mddev->flags, 0,
-                      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
-        printk(KERN_ALERT
-               "md/raid1:%s: Disk failure on %s, disabling device.\n"
-               "md/raid1:%s: Operation continuing on %d devices.\n",
-               mdname(mddev), bdevname(rdev->bdev, b),
-               mdname(mddev), conf->raid_disks - mddev->degraded);
+        set_mask_bits(&mddev->sb_flags, 0,
+                      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+        pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
+                "md/raid1:%s: Operation continuing on %d devices.\n",
+                mdname(mddev), bdevname(rdev->bdev, b),
+                mdname(mddev), conf->raid_disks - mddev->degraded);
 }
 
 static void print_conf(struct r1conf *conf)
 {
         int i;
 
-        printk(KERN_DEBUG "RAID1 conf printout:\n");
+        pr_debug("RAID1 conf printout:\n");
         if (!conf) {
-                printk(KERN_DEBUG "(!conf)\n");
+                pr_debug("(!conf)\n");
                 return;
         }
-        printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
-                conf->raid_disks);
+        pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+                 conf->raid_disks);
 
         rcu_read_lock();
         for (i = 0; i < conf->raid_disks; i++) {
                 char b[BDEVNAME_SIZE];
                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
                 if (rdev)
-                        printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
-                               i, !test_bit(In_sync, &rdev->flags),
-                               !test_bit(Faulty, &rdev->flags),
-                               bdevname(rdev->bdev,b));
+                        pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+                                 i, !test_bit(In_sync, &rdev->flags),
+                                 !test_bit(Faulty, &rdev->flags),
+                                 bdevname(rdev->bdev,b));
         }
         rcu_read_unlock();
 }
@@ -1788,12 +1860,24 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
         sector_t sect = r1_bio->sector;
         int sectors = r1_bio->sectors;
         int idx = 0;
+        struct md_rdev *rdev;
+
+        rdev = conf->mirrors[r1_bio->read_disk].rdev;
+        if (test_bit(FailFast, &rdev->flags)) {
+                /* Don't try recovering from here - just fail it
+                 * ... unless it is the last working device of course */
+                md_error(mddev, rdev);
+                if (test_bit(Faulty, &rdev->flags))
+                        /* Don't try to read from here, but make sure
+                         * put_buf does it's thing
+                         */
+                        bio->bi_end_io = end_sync_write;
+        }
 
         while(sectors) {
                 int s = sectors;
                 int d = r1_bio->read_disk;
                 int success = 0;
-                struct md_rdev *rdev;
                 int start;
 
                 if (s > (PAGE_SIZE>>9))
@@ -1825,11 +1909,10 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
                          * work just disable and interrupt the recovery.
                          * Don't fail devices as that won't really help.
                          */
-                        printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
-                               " for block %llu\n",
-                               mdname(mddev),
-                               bdevname(bio->bi_bdev, b),
-                               (unsigned long long)r1_bio->sector);
+                        pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+                                            mdname(mddev),
+                                            bdevname(bio->bi_bdev, b),
+                                            (unsigned long long)r1_bio->sector);
                         for (d = 0; d < conf->raid_disks * 2; d++) {
                                 rdev = conf->mirrors[d].rdev;
                                 if (!rdev || test_bit(Faulty, &rdev->flags))
@@ -2013,6 +2096,9 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
                         continue;
 
                 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
+                if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
+                        wbio->bi_opf |= MD_FAILFAST;
+
                 wbio->bi_end_io = end_sync_write;
                 atomic_inc(&r1_bio->remaining);
                 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
@@ -2122,13 +2208,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                                 if (r1_sync_page_io(rdev, sect, s,
                                                     conf->tmppage, READ)) {
                                         atomic_add(s, &rdev->corrected_errors);
-                                        printk(KERN_INFO
-                                               "md/raid1:%s: read error corrected "
-                                               "(%d sectors at %llu on %s)\n",
-                                               mdname(mddev), s,
-                                               (unsigned long long)(sect +
-                                                                    rdev->data_offset),
-                                               bdevname(rdev->bdev, b));
+                                        pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
+                                                mdname(mddev), s,
+                                                (unsigned long long)(sect +
+                                                                     rdev->data_offset),
+                                                bdevname(rdev->bdev, b));
                                 }
                                 rdev_dec_pending(rdev, mddev);
                         } else
@@ -2287,6 +2371,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
         struct bio *bio;
         char b[BDEVNAME_SIZE];
         struct md_rdev *rdev;
+        dev_t bio_dev;
+        sector_t bio_sector;
 
         clear_bit(R1BIO_ReadError, &r1_bio->state);
         /* we got a read error. Maybe the drive is bad.  Maybe just
@@ -2300,10 +2386,14 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 
         bio = r1_bio->bios[r1_bio->read_disk];
         bdevname(bio->bi_bdev, b);
+        bio_dev = bio->bi_bdev->bd_dev;
+        bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
         bio_put(bio);
         r1_bio->bios[r1_bio->read_disk] = NULL;
 
-        if (mddev->ro == 0) {
+        rdev = conf->mirrors[r1_bio->read_disk].rdev;
+        if (mddev->ro == 0
+            && !test_bit(FailFast, &rdev->flags)) {
                 freeze_array(conf, 1);
                 fix_read_error(conf, r1_bio->read_disk,
                                r1_bio->sector, r1_bio->sectors);
@@ -2312,14 +2402,13 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
         } else {
                 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
         }
-        rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
+        rdev_dec_pending(rdev, conf->mddev);
 
 read_more:
         disk = read_balance(conf, r1_bio, &max_sectors);
         if (disk == -1) {
-                printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
-                       " read error for block %llu\n",
-                       mdname(mddev), b, (unsigned long long)r1_bio->sector);
+                pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+                                    mdname(mddev), b, (unsigned long long)r1_bio->sector);
                 raid_end_bio_io(r1_bio);
         } else {
                 const unsigned long do_sync
@@ -2330,16 +2419,17 @@ read_more:
                          max_sectors);
                 r1_bio->bios[r1_bio->read_disk] = bio;
                 rdev = conf->mirrors[disk].rdev;
-                printk_ratelimited(KERN_ERR
-                                   "md/raid1:%s: redirecting sector %llu"
-                                   " to other mirror: %s\n",
-                                   mdname(mddev),
-                                   (unsigned long long)r1_bio->sector,
-                                   bdevname(rdev->bdev, b));
+                pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
+                                    mdname(mddev),
+                                    (unsigned long long)r1_bio->sector,
+                                    bdevname(rdev->bdev, b));
                 bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
                 bio->bi_bdev = rdev->bdev;
                 bio->bi_end_io = raid1_end_read_request;
                 bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
+                if (test_bit(FailFast, &rdev->flags) &&
+                    test_bit(R1BIO_FailFast, &r1_bio->state))
+                        bio->bi_opf |= MD_FAILFAST;
                 bio->bi_private = r1_bio;
                 if (max_sectors < r1_bio->sectors) {
                         /* Drat - have to split this up more */
@@ -2353,6 +2443,8 @@ read_more:
                         else
                                 mbio->bi_phys_segments++;
                         spin_unlock_irq(&conf->device_lock);
+                        trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+                                              bio, bio_dev, bio_sector);
                         generic_make_request(bio);
                         bio = NULL;
 
@@ -2367,8 +2459,11 @@ read_more:
                                 sectors_handled;
 
                         goto read_more;
-                } else
+                } else {
+                        trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+                                              bio, bio_dev, bio_sector);
                         generic_make_request(bio);
+                }
         }
 }
@@ -2384,10 +2479,10 @@ static void raid1d(struct md_thread *thread)
         md_check_recovery(mddev);
 
         if (!list_empty_careful(&conf->bio_end_io_list) &&
-            !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+            !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
                 LIST_HEAD(tmp);
                 spin_lock_irqsave(&conf->device_lock, flags);
-                if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+                if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
                         while (!list_empty(&conf->bio_end_io_list)) {
                                 list_move(conf->bio_end_io_list.prev, &tmp);
                                 conf->nr_queued--;
@@ -2441,7 +2536,7 @@ static void raid1d(struct md_thread *thread)
                         generic_make_request(r1_bio->bios[r1_bio->read_disk]);
 
                 cond_resched();
-                if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+                if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
                         md_check_recovery(mddev);
         }
         blk_finish_plug(&plug);
@@ -2623,6 +2718,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                         bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
                         bio->bi_bdev = rdev->bdev;
                         bio->bi_private = r1_bio;
+                        if (test_bit(FailFast, &rdev->flags))
+                                bio->bi_opf |= MD_FAILFAST;
                 }
         }
         rcu_read_unlock();
@@ -2642,7 +2739,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                                                 min_bad, 0
                                         ) && ok;
 
-                set_bit(MD_CHANGE_DEVS, &mddev->flags);
+                set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
                 *skipped = 1;
                 put_buf(r1_bio);
 
@@ -2753,6 +2850,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                         if (bio->bi_end_io == end_sync_read) {
                                 read_targets--;
                                 md_sync_acct(bio->bi_bdev, nr_sectors);
+                                if (read_targets == 1)
+                                        bio->bi_opf &= ~MD_FAILFAST;
                                 generic_make_request(bio);
                         }
                 }
@@ -2760,6 +2859,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                 atomic_set(&r1_bio->remaining, 1);
                 bio = r1_bio->bios[r1_bio->read_disk];
                 md_sync_acct(bio->bi_bdev, nr_sectors);
+                if (read_targets == 1)
+                        bio->bi_opf &= ~MD_FAILFAST;
                 generic_make_request(bio);
 
         }
@@ -2875,12 +2976,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 
         err = -ENOMEM;
         conf->thread = md_register_thread(raid1d, mddev, "raid1");
-        if (!conf->thread) {
-                printk(KERN_ERR
-                       "md/raid1:%s: couldn't allocate thread\n",
-                       mdname(mddev));
+        if (!conf->thread)
                 goto abort;
-        }
 
         return conf;
 
@@ -2905,13 +3002,13 @@ static int raid1_run(struct mddev *mddev)
         bool discard_supported = false;
 
         if (mddev->level != 1) {
-                printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
-                       mdname(mddev), mddev->level);
+                pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
+                        mdname(mddev), mddev->level);
                 return -EIO;
         }
         if (mddev->reshape_position != MaxSector) {
-                printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
-                       mdname(mddev));
+                pr_warn("md/raid1:%s: reshape_position set but not supported\n",
+                        mdname(mddev));
                 return -EIO;
         }
         /*
@@ -2950,11 +3047,9 @@ static int raid1_run(struct mddev *mddev)
                 mddev->recovery_cp = MaxSector;
 
         if (mddev->recovery_cp != MaxSector)
-                printk(KERN_NOTICE "md/raid1:%s: not clean"
-                       " -- starting background reconstruction\n",
-                       mdname(mddev));
-        printk(KERN_INFO
-                "md/raid1:%s: active with %d out of %d mirrors\n",
+                pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
+                        mdname(mddev));
+        pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
                 mdname(mddev), mddev->raid_disks - mddev->degraded,
                 mddev->raid_disks);
 
@@ -2964,6 +3059,7 @@ static int raid1_run(struct mddev *mddev)
         mddev->thread = conf->thread;
         conf->thread = NULL;
         mddev->private = conf;
+        set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
 
         md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
@@ -3107,9 +3203,8 @@ static int raid1_reshape(struct mddev *mddev)
                         rdev->raid_disk = d2;
                         sysfs_unlink_rdev(mddev, rdev);
                         if (sysfs_link_rdev(mddev, rdev))
-                                printk(KERN_WARNING
-                                       "md/raid1:%s: cannot register rd%d\n",
-                                       mdname(mddev), rdev->raid_disk);
+                                pr_warn("md/raid1:%s: cannot register rd%d\n",
+                                        mdname(mddev), rdev->raid_disk);
                 }
                 if (rdev)
                         newmirrors[d2++].rdev = rdev;
@@ -3163,9 +3258,12 @@ static void *raid1_takeover(struct mddev *mddev)
                 mddev->new_layout = 0;
                 mddev->new_chunk_sectors = 0;
                 conf = setup_conf(mddev);
-                if (!IS_ERR(conf))
+                if (!IS_ERR(conf)) {
                         /* Array must appear to be quiesced */
                         conf->array_frozen = 1;
+                        mddev_clear_unsupported_flags(mddev,
+                                UNSUPPORTED_MDDEV_FLAGS);
+                }
                 return conf;
         }
         return ERR_PTR(-EINVAL);
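One idiom in this diff is worth spelling out: the plug/flush path needs the md_rdev (to check its Faulty bit at submit time), but struct bio only carries a block-device pointer, so raid1_write_request() stashes the rdev in bio->bi_bdev ("mbio->bi_bdev = (void*)conf->mirrors[i].rdev;") before queueing, and flush_pending_writes()/raid1_unplug() swap the real bdev back in before submission, failing the bio with -EIO if the device has since been marked Faulty. Below is a minimal standalone sketch of that stash-and-restore pattern; the struct definitions are simplified stand-ins for illustration, not the kernel's types.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel structures involved. */
struct block_device { const char *name; };

struct md_rdev {
        struct block_device *bdev;
        int faulty;                     /* models test_bit(Faulty, &rdev->flags) */
};

struct bio {
        void *bi_bdev;                  /* repurposed to hold an rdev while queued */
        struct bio *bi_next;
        int bi_error;
};

/* Queue a write: stash the rdev pointer where the bdev normally lives. */
static void queue_pending(struct bio **list, struct bio *bio, struct md_rdev *rdev)
{
        bio->bi_bdev = rdev;
        bio->bi_next = *list;
        *list = bio;
}

/* Flush: recover the rdev, restore the real bdev, and complete the bio
 * with an error if the device went Faulty while the bio sat on the list. */
static void flush_pending(struct bio *bio)
{
        while (bio) {
                struct bio *next = bio->bi_next;
                struct md_rdev *rdev = bio->bi_bdev;

                bio->bi_next = NULL;
                bio->bi_bdev = rdev->bdev;      /* real device again */
                if (rdev->faulty) {
                        bio->bi_error = -5;     /* -EIO */
                        printf("failing bio on %s\n", rdev->bdev->name);
                } else {
                        printf("submitting bio to %s\n", rdev->bdev->name);
                }
                bio = next;
        }
}

int main(void)
{
        struct block_device bd = { "sda" };
        struct md_rdev rdev = { &bd, 0 };
        struct bio b = { NULL, NULL, 0 };
        struct bio *pending = NULL;

        queue_pending(&pending, &b, &rdev);
        rdev.faulty = 1;                /* device fails while the bio is queued */
        flush_pending(pending);
        return 0;
}

The payoff, visible in both flush_pending_writes() and raid1_unplug() in the diff above, is that writes queued before a failure are completed immediately with -EIO rather than being issued to a device already known to be dead.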