Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--  drivers/md/raid1.c  247
1 file changed, 165 insertions(+), 82 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 29e2df5cd77b..a1f3fbed9100 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -37,6 +37,7 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/ratelimit.h>
+#include <trace/events/block.h>
 #include "md.h"
 #include "raid1.h"
 #include "bitmap.h"
@@ -70,6 +71,9 @@ static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
 			  sector_t bi_sector);
 static void lower_barrier(struct r1conf *conf);
 
+#define raid1_log(md, fmt, args...)				\
+	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
+
 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct pool_info *pi = data;
@@ -325,6 +329,11 @@
 
 	if (uptodate)
 		set_bit(R1BIO_Uptodate, &r1_bio->state);
+	else if (test_bit(FailFast, &rdev->flags) &&
+		 test_bit(R1BIO_FailFast, &r1_bio->state))
+		/* This was a fail-fast read so we definitely
+		 * want to retry */
+		;
 	else {
 		/* If all other devices have failed, we want to return
 		 * the error upwards rather than fail the last device.
@@ -347,13 +356,10 @@
 		 * oops, read error:
 		 */
 		char b[BDEVNAME_SIZE];
-		printk_ratelimited(
-			KERN_ERR "md/raid1:%s: %s: "
-			"rescheduling sector %llu\n",
-			mdname(conf->mddev),
-			bdevname(rdev->bdev,
-				 b),
-			(unsigned long long)r1_bio->sector);
+		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
+				   mdname(conf->mddev),
+				   bdevname(rdev->bdev, b),
+				   (unsigned long long)r1_bio->sector);
 		set_bit(R1BIO_ReadError, &r1_bio->state);
 		reschedule_retry(r1_bio);
 		/* don't drop the reference on read_disk yet */
@@ -416,7 +422,24 @@
 			set_bit(MD_RECOVERY_NEEDED, &
 				conf->mddev->recovery);
 
-		set_bit(R1BIO_WriteError, &r1_bio->state);
+		if (test_bit(FailFast, &rdev->flags) &&
+		    (bio->bi_opf & MD_FAILFAST) &&
+		    /* We never try FailFast to WriteMostly devices */
+		    !test_bit(WriteMostly, &rdev->flags)) {
+			md_error(r1_bio->mddev, rdev);
+			if (!test_bit(Faulty, &rdev->flags))
+				/* This is the only remaining device,
+				 * We need to retry the write without
+				 * FailFast
+				 */
+				set_bit(R1BIO_WriteError, &r1_bio->state);
+			else {
+				/* Finished with this branch */
+				r1_bio->bios[mirror] = NULL;
+				to_put = bio;
+			}
+		} else
+			set_bit(R1BIO_WriteError, &r1_bio->state);
 	} else {
 		/*
 		 * Set R1BIO_Uptodate in our master bio, so that we
@@ -534,6 +557,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 	best_good_sectors = 0;
 	has_nonrot_disk = 0;
 	choose_next_idle = 0;
+	clear_bit(R1BIO_FailFast, &r1_bio->state);
 
 	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
 	    (mddev_is_clustered(conf->mddev) &&
@@ -607,6 +631,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 		} else
 			best_good_sectors = sectors;
 
+		if (best_disk >= 0)
+			/* At least two disks to choose from so failfast is OK */
+			set_bit(R1BIO_FailFast, &r1_bio->state);
+
 		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
 		has_nonrot_disk |= nonrot;
 		pending = atomic_read(&rdev->nr_pending);
@@ -645,11 +673,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 			}
 			break;
 		}
-		/* If device is idle, use it */
-		if (pending == 0) {
-			best_disk = disk;
-			break;
-		}
 
 		if (choose_next_idle)
 			continue;
@@ -672,7 +695,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 	 * mixed ratation/non-rotational disks depending on workload.
 	 */
 	if (best_disk == -1) {
-		if (has_nonrot_disk)
+		if (has_nonrot_disk || min_pending == 0)
 			best_disk = best_pending_disk;
 		else
 			best_disk = best_dist_disk;
@@ -745,9 +768,14 @@ static void flush_pending_writes(struct r1conf *conf)
 
 		while (bio) { /* submit pending writes */
 			struct bio *next = bio->bi_next;
+			struct md_rdev *rdev = (void*)bio->bi_bdev;
 			bio->bi_next = NULL;
-			if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+			bio->bi_bdev = rdev->bdev;
+			if (test_bit(Faulty, &rdev->flags)) {
+				bio->bi_error = -EIO;
+				bio_endio(bio);
+			} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+					    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 				/* Just ignore it */
 				bio_endio(bio);
 			else
@@ -832,7 +860,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
 	else if (conf->barrier && bio_data_dir(bio) == WRITE) {
 		if ((conf->mddev->curr_resync_completed
 		     >= bio_end_sector(bio)) ||
-		    (conf->next_resync + NEXT_NORMALIO_DISTANCE
+		    (conf->start_next_window + NEXT_NORMALIO_DISTANCE
 		     <= bio->bi_iter.bi_sector))
 			wait = false;
 		else
@@ -858,6 +886,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
 		 * that queue to allow conf->start_next_window
 		 * to increase.
 		 */
+		raid1_log(conf->mddev, "wait barrier");
 		wait_event_lock_irq(conf->wait_barrier,
 				    !conf->array_frozen &&
 				    (!conf->barrier ||
@@ -937,6 +966,7 @@ static void freeze_array(struct r1conf *conf, int extra)
 	 */
 	spin_lock_irq(&conf->resync_lock);
 	conf->array_frozen = 1;
+	raid1_log(conf->mddev, "wait freeze");
 	wait_event_lock_irq_cmd(conf->wait_barrier,
 				conf->nr_pending == conf->nr_queued+extra,
 				conf->resync_lock,
@@ -1019,9 +1049,14 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
 
 	while (bio) { /* submit pending writes */
 		struct bio *next = bio->bi_next;
+		struct md_rdev *rdev = (void*)bio->bi_bdev;
 		bio->bi_next = NULL;
-		if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+		bio->bi_bdev = rdev->bdev;
+		if (test_bit(Faulty, &rdev->flags)) {
+			bio->bi_error = -EIO;
+			bio_endio(bio);
+		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 			/* Just ignore it */
 			bio_endio(bio);
 		else
@@ -1136,6 +1171,7 @@ read_again:
 			 * take care not to over-take any writes
 			 * that are 'behind'
 			 */
+			raid1_log(mddev, "wait behind writes");
 			wait_event(bitmap->behind_wait,
 				   atomic_read(&bitmap->behind_writes) == 0);
 		}
@@ -1153,8 +1189,16 @@ read_again:
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid1_end_read_request;
 		bio_set_op_attrs(read_bio, op, do_sync);
+		if (test_bit(FailFast, &mirror->rdev->flags) &&
+		    test_bit(R1BIO_FailFast, &r1_bio->state))
+			read_bio->bi_opf |= MD_FAILFAST;
 		read_bio->bi_private = r1_bio;
 
+		if (mddev->gendisk)
+			trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+					      read_bio, disk_devt(mddev->gendisk),
+					      r1_bio->sector);
+
 		if (max_sectors < r1_bio->sectors) {
 			/* could not read all from this device, so we will
 			 * need another r1_bio.
@@ -1195,6 +1239,7 @@ read_again:
 	 */
 	if (conf->pending_count >= max_queued_requests) {
 		md_wakeup_thread(mddev->thread);
+		raid1_log(mddev, "wait queued");
 		wait_event(conf->wait_barrier,
 			   conf->pending_count < max_queued_requests);
 	}
@@ -1286,6 +1331,7 @@ read_again:
 				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
 		r1_bio->state = 0;
 		allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
+		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
 		start_next_window = wait_barrier(conf, bio);
 		/*
@@ -1363,10 +1409,21 @@ read_again:
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
 		bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
+		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
+		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
+		    conf->raid_disks - mddev->degraded > 1)
+			mbio->bi_opf |= MD_FAILFAST;
 		mbio->bi_private = r1_bio;
 
 		atomic_inc(&r1_bio->remaining);
+		if (mddev->gendisk)
+			trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+					      mbio, disk_devt(mddev->gendisk),
+					      r1_bio->sector);
+		/* flush_pending_writes() needs access to the rdev so...*/
+		mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
+
 		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
 		if (cb)
 			plug = container_of(cb, struct raid1_plug_cb, cb);
@@ -1436,6 +1493,7 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 	 * next level up know.
 	 * else mark the drive as failed
 	 */
+	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_bit(In_sync, &rdev->flags)
 	    && (conf->raid_disks - mddev->degraded) == 1) {
 		/*
@@ -1445,10 +1503,10 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 		 * it is very likely to fail.
 		 */
 		conf->recovery_disabled = mddev->recovery_disabled;
+		spin_unlock_irqrestore(&conf->device_lock, flags);
 		return;
 	}
 	set_bit(Blocked, &rdev->flags);
-	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
 		mddev->degraded++;
 		set_bit(Faulty, &rdev->flags);
@@ -1459,36 +1517,35 @@
 	 * if recovery is running, make sure it aborts.
 	 */
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-	set_mask_bits(&mddev->flags, 0,
-		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
-	printk(KERN_ALERT
-	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
-	       "md/raid1:%s: Operation continuing on %d devices.\n",
-	       mdname(mddev), bdevname(rdev->bdev, b),
-	       mdname(mddev), conf->raid_disks - mddev->degraded);
+	set_mask_bits(&mddev->sb_flags, 0,
+		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+	pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
+		"md/raid1:%s: Operation continuing on %d devices.\n",
+		mdname(mddev), bdevname(rdev->bdev, b),
+		mdname(mddev), conf->raid_disks - mddev->degraded);
 }
 
 static void print_conf(struct r1conf *conf)
 {
 	int i;
 
-	printk(KERN_DEBUG "RAID1 conf printout:\n");
+	pr_debug("RAID1 conf printout:\n");
 	if (!conf) {
-		printk(KERN_DEBUG "(!conf)\n");
+		pr_debug("(!conf)\n");
 		return;
 	}
-	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
-		conf->raid_disks);
+	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+		 conf->raid_disks);
 
 	rcu_read_lock();
 	for (i = 0; i < conf->raid_disks; i++) {
 		char b[BDEVNAME_SIZE];
 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev)
-			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
-			       i, !test_bit(In_sync, &rdev->flags),
-			       !test_bit(Faulty, &rdev->flags),
-			       bdevname(rdev->bdev,b));
+			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+				 i, !test_bit(In_sync, &rdev->flags),
+				 !test_bit(Faulty, &rdev->flags),
+				 bdevname(rdev->bdev,b));
 	}
 	rcu_read_unlock();
 }
@@ -1788,12 +1845,24 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 	sector_t sect = r1_bio->sector;
 	int sectors = r1_bio->sectors;
 	int idx = 0;
+	struct md_rdev *rdev;
+
+	rdev = conf->mirrors[r1_bio->read_disk].rdev;
+	if (test_bit(FailFast, &rdev->flags)) {
+		/* Don't try recovering from here - just fail it
+		 * ... unless it is the last working device of course */
+		md_error(mddev, rdev);
+		if (test_bit(Faulty, &rdev->flags))
+			/* Don't try to read from here, but make sure
+			 * put_buf does it's thing
+			 */
+			bio->bi_end_io = end_sync_write;
+	}
 
 	while(sectors) {
 		int s = sectors;
 		int d = r1_bio->read_disk;
 		int success = 0;
-		struct md_rdev *rdev;
 		int start;
 
 		if (s > (PAGE_SIZE>>9))
@@ -1825,11 +1894,10 @@
 			 * work just disable and interrupt the recovery.
 			 * Don't fail devices as that won't really help.
 			 */
-			printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
-			       " for block %llu\n",
-			       mdname(mddev),
-			       bdevname(bio->bi_bdev, b),
-			       (unsigned long long)r1_bio->sector);
+			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+					    mdname(mddev),
+					    bdevname(bio->bi_bdev, b),
+					    (unsigned long long)r1_bio->sector);
 			for (d = 0; d < conf->raid_disks * 2; d++) {
 				rdev = conf->mirrors[d].rdev;
 				if (!rdev || test_bit(Faulty, &rdev->flags))
@@ -2013,6 +2081,9 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 			continue;
 
 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
+		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
+			wbio->bi_opf |= MD_FAILFAST;
+
 		wbio->bi_end_io = end_sync_write;
 		atomic_inc(&r1_bio->remaining);
 		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
@@ -2122,13 +2193,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
 				if (r1_sync_page_io(rdev, sect, s,
 						    conf->tmppage, READ)) {
 					atomic_add(s, &rdev->corrected_errors);
-					printk(KERN_INFO
-					       "md/raid1:%s: read error corrected "
-					       "(%d sectors at %llu on %s)\n",
-					       mdname(mddev), s,
-					       (unsigned long long)(sect +
-								    rdev->data_offset),
-					       bdevname(rdev->bdev, b));
+					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
+						mdname(mddev), s,
+						(unsigned long long)(sect +
+								     rdev->data_offset),
+						bdevname(rdev->bdev, b));
 				}
 				rdev_dec_pending(rdev, mddev);
 			} else
@@ -2287,6 +2356,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 	struct bio *bio;
 	char b[BDEVNAME_SIZE];
 	struct md_rdev *rdev;
+	dev_t bio_dev;
+	sector_t bio_sector;
 
 	clear_bit(R1BIO_ReadError, &r1_bio->state);
 	/* we got a read error. Maybe the drive is bad.  Maybe just
@@ -2300,10 +2371,14 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 
 	bio = r1_bio->bios[r1_bio->read_disk];
 	bdevname(bio->bi_bdev, b);
+	bio_dev = bio->bi_bdev->bd_dev;
+	bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
 	bio_put(bio);
 	r1_bio->bios[r1_bio->read_disk] = NULL;
 
-	if (mddev->ro == 0) {
+	rdev = conf->mirrors[r1_bio->read_disk].rdev;
+	if (mddev->ro == 0
+	    && !test_bit(FailFast, &rdev->flags)) {
 		freeze_array(conf, 1);
 		fix_read_error(conf, r1_bio->read_disk,
 			       r1_bio->sector, r1_bio->sectors);
@@ -2312,14 +2387,13 @@
 		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
 	}
 
-	rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
+	rdev_dec_pending(rdev, conf->mddev);
 
read_more:
 	disk = read_balance(conf, r1_bio, &max_sectors);
 	if (disk == -1) {
-		printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
-		       " read error for block %llu\n",
-		       mdname(mddev), b, (unsigned long long)r1_bio->sector);
+		pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+				    mdname(mddev), b, (unsigned long long)r1_bio->sector);
 		raid_end_bio_io(r1_bio);
 	} else {
 		const unsigned long do_sync
@@ -2330,16 +2404,17 @@ read_more:
 			 max_sectors);
 		r1_bio->bios[r1_bio->read_disk] = bio;
 		rdev = conf->mirrors[disk].rdev;
-		printk_ratelimited(KERN_ERR
-				   "md/raid1:%s: redirecting sector %llu"
-				   " to other mirror: %s\n",
-				   mdname(mddev),
-				   (unsigned long long)r1_bio->sector,
-				   bdevname(rdev->bdev, b));
+		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
+				    mdname(mddev),
+				    (unsigned long long)r1_bio->sector,
+				    bdevname(rdev->bdev, b));
 		bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
 		bio->bi_bdev = rdev->bdev;
 		bio->bi_end_io = raid1_end_read_request;
 		bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
+		if (test_bit(FailFast, &rdev->flags) &&
+		    test_bit(R1BIO_FailFast, &r1_bio->state))
+			bio->bi_opf |= MD_FAILFAST;
 		bio->bi_private = r1_bio;
 		if (max_sectors < r1_bio->sectors) {
 			/* Drat - have to split this up more */
@@ -2353,6 +2428,8 @@ read_more:
 			else
 				mbio->bi_phys_segments++;
 			spin_unlock_irq(&conf->device_lock);
+			trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+					      bio, bio_dev, bio_sector);
 			generic_make_request(bio);
 			bio = NULL;
 
@@ -2367,8 +2444,11 @@ read_more:
 				sectors_handled;
 
 			goto read_more;
-		} else
+		} else {
+			trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+					      bio, bio_dev, bio_sector);
 			generic_make_request(bio);
+		}
 	}
 }
 
@@ -2384,10 +2464,10 @@ static void raid1d(struct md_thread *thread)
 	md_check_recovery(mddev);
 
 	if (!list_empty_careful(&conf->bio_end_io_list) &&
-	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 		LIST_HEAD(tmp);
 		spin_lock_irqsave(&conf->device_lock, flags);
-		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 			while (!list_empty(&conf->bio_end_io_list)) {
 				list_move(conf->bio_end_io_list.prev, &tmp);
 				conf->nr_queued--;
@@ -2441,7 +2521,7 @@
 			generic_make_request(r1_bio->bios[r1_bio->read_disk]);
 
 		cond_resched();
-		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
 			md_check_recovery(mddev);
 	}
 	blk_finish_plug(&plug);
@@ -2623,6 +2703,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
 			bio->bi_private = r1_bio;
+			if (test_bit(FailFast, &rdev->flags))
+				bio->bi_opf |= MD_FAILFAST;
 		}
 	}
 	rcu_read_unlock();
@@ -2642,7 +2724,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 							min_bad, 0
 					) && ok;
 			}
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		*skipped = 1;
 		put_buf(r1_bio);
 
@@ -2753,6 +2835,8 @@
 			if (bio->bi_end_io == end_sync_read) {
 				read_targets--;
 				md_sync_acct(bio->bi_bdev, nr_sectors);
+				if (read_targets == 1)
+					bio->bi_opf &= ~MD_FAILFAST;
 				generic_make_request(bio);
 			}
 		}
@@ -2760,6 +2844,8 @@
 		atomic_set(&r1_bio->remaining, 1);
 		bio = r1_bio->bios[r1_bio->read_disk];
 		md_sync_acct(bio->bi_bdev, nr_sectors);
+		if (read_targets == 1)
+			bio->bi_opf &= ~MD_FAILFAST;
 		generic_make_request(bio);
 	}
 
@@ -2875,12 +2961,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 
 	err = -ENOMEM;
 	conf->thread = md_register_thread(raid1d, mddev, "raid1");
-	if (!conf->thread) {
-		printk(KERN_ERR
-		       "md/raid1:%s: couldn't allocate thread\n",
-		       mdname(mddev));
+	if (!conf->thread)
 		goto abort;
-	}
 
 	return conf;
 
@@ -2905,13 +2987,13 @@ static int raid1_run(struct mddev *mddev)
 	bool discard_supported = false;
 
 	if (mddev->level != 1) {
-		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
-		       mdname(mddev), mddev->level);
+		pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
+			mdname(mddev), mddev->level);
 		return -EIO;
 	}
 	if (mddev->reshape_position != MaxSector) {
-		printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
-		       mdname(mddev));
+		pr_warn("md/raid1:%s: reshape_position set but not supported\n",
+			mdname(mddev));
 		return -EIO;
 	}
 	/*
@@ -2950,11 +3032,9 @@
 		mddev->recovery_cp = MaxSector;
 
 	if (mddev->recovery_cp != MaxSector)
-		printk(KERN_NOTICE "md/raid1:%s: not clean"
-		       " -- starting background reconstruction\n",
-		       mdname(mddev));
-	printk(KERN_INFO
-		"md/raid1:%s: active with %d out of %d mirrors\n",
+		pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
+			mdname(mddev));
+	pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
 		mdname(mddev), mddev->raid_disks - mddev->degraded,
 		mddev->raid_disks);
 
@@ -2964,6 +3044,7 @@
 	mddev->thread = conf->thread;
 	conf->thread = NULL;
 	mddev->private = conf;
+	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
 
 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
@@ -3107,9 +3188,8 @@ static int raid1_reshape(struct mddev *mddev)
 			rdev->raid_disk = d2;
 			sysfs_unlink_rdev(mddev, rdev);
 			if (sysfs_link_rdev(mddev, rdev))
-				printk(KERN_WARNING
-				       "md/raid1:%s: cannot register rd%d\n",
-				       mdname(mddev), rdev->raid_disk);
+				pr_warn("md/raid1:%s: cannot register rd%d\n",
+					mdname(mddev), rdev->raid_disk);
 		}
 		if (rdev)
 			newmirrors[d2++].rdev = rdev;
@@ -3163,9 +3243,12 @@ static void *raid1_takeover(struct mddev *mddev)
 		mddev->new_layout = 0;
 		mddev->new_chunk_sectors = 0;
 		conf = setup_conf(mddev);
-		if (!IS_ERR(conf))
+		if (!IS_ERR(conf)) {
 			/* Array must appear to be quiesced */
 			conf->array_frozen = 1;
+			clear_bit(MD_HAS_JOURNAL, &mddev->flags);
+			clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+		}
 		return conf;
 	}
 	return ERR_PTR(-EINVAL);
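
The fail-fast hunks above all follow one rule in both directions: only hint MD_FAILFAST when another in-sync mirror is available to absorb a retry. read_balance() records that condition in R1BIO_FailFast before the read is issued, and raid1_end_write_request() calls md_error() on a failed fail-fast write, falling back to a normal retry only when md_error() declined to fail the last working device. The following userspace sketch models that rule; every name in it (struct mirror, the DEV_* flags, both helpers) is invented for illustration and none of it is kernel API:

/* failfast_policy.c - userspace sketch of the fail-fast rules above.
 * All names are invented for illustration; none of this is kernel API.
 * Build: cc -Wall -o failfast_policy failfast_policy.c
 */
#include <stdbool.h>
#include <stdio.h>

enum {
	DEV_FAILFAST = 1 << 0,	/* device marked for fail-fast I/O */
	DEV_FAULTY   = 1 << 1,	/* device has been kicked from the array */
};

struct mirror {
	unsigned int flags;
};

/* Read side (cf. the read_balance()/R1BIO_FailFast hunks): a read may only
 * carry the fail-fast hint when at least one other in-sync mirror exists
 * to retry on. */
static bool read_may_failfast(const struct mirror *m, int in_sync_mirrors)
{
	return (m->flags & DEV_FAILFAST) && in_sync_mirrors > 1;
}

/* Write-error side (cf. the raid1_end_write_request() hunk): md_error()
 * will not fail the last in-sync device, so if the device is still not
 * Faulty afterwards, the write must be retried without the hint.
 * Returns true when that slow retry is needed. */
static bool failfast_write_error(struct mirror *m, int in_sync_mirrors)
{
	if (in_sync_mirrors > 1)
		m->flags |= DEV_FAULTY;	/* stand-in for md_error() */
	return !(m->flags & DEV_FAULTY);
}

int main(void)
{
	struct mirror two = { .flags = DEV_FAILFAST };
	struct mirror one = { .flags = DEV_FAILFAST };

	printf("two mirrors: read hint %d, slow write retry %d\n",
	       read_may_failfast(&two, 2), failfast_write_error(&two, 2));
	printf("one mirror:  read hint %d, slow write retry %d\n",
	       read_may_failfast(&one, 1), failfast_write_error(&one, 1));
	return 0;
}

The raid1_log() macro added at the top of the diff routes through blk_add_trace_msg(), so the "raid1 wait barrier" / "wait freeze" / "wait queued" annotations should show up as message events in a blktrace session against the array (for example, assuming the array is /dev/md0: blktrace -d /dev/md0 -o - | blkparse -i -).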