Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--  drivers/md/raid1.c  19
1 file changed, 11 insertions, 8 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f80f1af61ce7..967a4ed73929 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (r1_bio->mddev->degraded == conf->raid_disks ||
 		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
-		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
+		     test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
 			uptodate = 1;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
@@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 
 	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
 	    (mddev_is_clustered(conf->mddev) &&
-	    md_cluster_ops->area_resyncing(conf->mddev, this_sector,
+	    md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
 		    this_sector + sectors)))
 		choose_first = 1;
 	else
@@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	    ((bio_end_sector(bio) > mddev->suspend_lo &&
 	    bio->bi_iter.bi_sector < mddev->suspend_hi) ||
 	    (mddev_is_clustered(mddev) &&
-	     md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
+	     md_cluster_ops->area_resyncing(mddev, WRITE,
+		     bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
 		/* As the suspend_* range is controlled by
 		 * userspace, we want an interruptible
 		 * wait.
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 			if (bio_end_sector(bio) <= mddev->suspend_lo ||
 			    bio->bi_iter.bi_sector >= mddev->suspend_hi ||
 			    (mddev_is_clustered(mddev) &&
-			     !md_cluster_ops->area_resyncing(mddev,
+			     !md_cluster_ops->area_resyncing(mddev, WRITE,
 				     bio->bi_iter.bi_sector, bio_end_sector(bio))))
 				break;
 			schedule();
@@ -1475,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 {
 	char b[BDEVNAME_SIZE];
 	struct r1conf *conf = mddev->private;
+	unsigned long flags;
 
 	/*
 	 * If it is not operational, then we have already marked it as dead
@@ -1494,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 		return;
 	}
 	set_bit(Blocked, &rdev->flags);
+	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(&conf->device_lock, flags);
 		mddev->degraded++;
 		set_bit(Faulty, &rdev->flags);
-		spin_unlock_irqrestore(&conf->device_lock, flags);
 	} else
 		set_bit(Faulty, &rdev->flags);
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 	/*
 	 * if recovery is running, make sure it aborts.
 	 */
@@ -1567,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev)
 	 * Find all failed disks within the RAID1 configuration
 	 * and mark them readable.
 	 * Called under mddev lock, so rcu protection not needed.
+	 * device_lock used to avoid races with raid1_end_read_request
+	 * which expects 'In_sync' flags and ->degraded to be consistent.
 	 */
+	spin_lock_irqsave(&conf->device_lock, flags);
 	for (i = 0; i < conf->raid_disks; i++) {
 		struct md_rdev *rdev = conf->mirrors[i].rdev;
 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1598,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
 			sysfs_notify_dirent_safe(rdev->sysfs_state);
 		}
 	}
-	spin_lock_irqsave(&conf->device_lock, flags);
 	mddev->degraded -= count;
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 
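
The error() and raid1_spare_active() hunks above move the In_sync/Faulty flag updates and the ->degraded accounting into a single conf->device_lock critical section, because raid1_end_read_request() reads both under that lock and expects them to agree when deciding whether a failed read still came from the last working mirror. Below is a minimal user-space sketch of that pattern, with a pthread mutex standing in for the spinlock; the struct and function names are illustrative, not the kernel's.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for conf->mirrors[] and mddev->degraded. */
struct mirror {
	bool in_sync;
	bool faulty;
};

struct array_state {
	pthread_mutex_t lock;	/* plays the role of conf->device_lock */
	int raid_disks;
	int degraded;
	struct mirror mirrors[2];
};

/* Analogue of the error() change: the flag update and the degraded
 * count are changed inside one critical section, so no reader can
 * observe "degraded bumped but device still in-sync" or the reverse. */
static void mark_faulty(struct array_state *st, int i)
{
	pthread_mutex_lock(&st->lock);
	if (st->mirrors[i].in_sync) {
		st->mirrors[i].in_sync = false;
		st->degraded++;
	}
	st->mirrors[i].faulty = true;
	pthread_mutex_unlock(&st->lock);
}

/* Analogue of the raid1_end_read_request() test: a failed read may
 * still be reported as up to date only if the failing device was the
 * last one still marked in-sync. */
static bool read_was_from_last_working(struct array_state *st, int i)
{
	bool uptodate = false;

	pthread_mutex_lock(&st->lock);
	if (st->degraded == st->raid_disks ||
	    (st->degraded == st->raid_disks - 1 && st->mirrors[i].in_sync))
		uptodate = true;
	pthread_mutex_unlock(&st->lock);
	return uptodate;
}

int main(void)
{
	struct array_state st = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.raid_disks = 2,
		.mirrors = { { .in_sync = true }, { .in_sync = true } },
	};

	mark_faulty(&st, 1);
	printf("read from disk 0 treated as up to date: %d\n",
	       read_was_from_last_working(&st, 0));
	return 0;
}

Without the single critical section, a concurrent reader could see the degraded count already incremented while the device is still flagged in-sync (or vice versa), which is the race the comment added to raid1_spare_active() describes.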