Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--	drivers/md/raid1.c	83
1 file changed, 56 insertions, 27 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 85505424f7a4..e2d8acb1e988 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -929,8 +929,10 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
 	wake_up(&conf->wait_barrier);
 }
 
-static void _wait_barrier(struct r1conf *conf, int idx)
+static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
 {
+	bool ret = true;
+
 	/*
 	 * We need to increase conf->nr_pending[idx] very early here,
 	 * then raise_barrier() can be blocked when it waits for
@@ -961,7 +963,7 @@ static void _wait_barrier(struct r1conf *conf, int idx)
 	 */
 	if (!READ_ONCE(conf->array_frozen) &&
 	    !atomic_read(&conf->barrier[idx]))
-		return;
+		return ret;
 
 	/*
 	 * After holding conf->resync_lock, conf->nr_pending[idx]
@@ -979,18 +981,27 @@ static void _wait_barrier(struct r1conf *conf, int idx)
 	 */
 	wake_up(&conf->wait_barrier);
 	/* Wait for the barrier in same barrier unit bucket to drop. */
-	wait_event_lock_irq(conf->wait_barrier,
-			    !conf->array_frozen &&
-			     !atomic_read(&conf->barrier[idx]),
-			    conf->resync_lock);
-	atomic_inc(&conf->nr_pending[idx]);
+
+	/* Return false when nowait flag is set */
+	if (nowait) {
+		ret = false;
+	} else {
+		wait_event_lock_irq(conf->wait_barrier,
+				!conf->array_frozen &&
+				!atomic_read(&conf->barrier[idx]),
+				conf->resync_lock);
+		atomic_inc(&conf->nr_pending[idx]);
+	}
+
 	atomic_dec(&conf->nr_waiting[idx]);
 	spin_unlock_irq(&conf->resync_lock);
+	return ret;
 }
 
-static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
+static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
 {
 	int idx = sector_to_idx(sector_nr);
+	bool ret = true;
 
 	/*
 	 * Very similar to _wait_barrier(). The difference is, for read
@@ -1002,7 +1013,7 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
 	atomic_inc(&conf->nr_pending[idx]);
 
 	if (!READ_ONCE(conf->array_frozen))
-		return;
+		return ret;
 
 	spin_lock_irq(&conf->resync_lock);
 	atomic_inc(&conf->nr_waiting[idx]);
@@ -1013,19 +1024,28 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
 	 */
 	wake_up(&conf->wait_barrier);
 	/* Wait for array to be unfrozen */
-	wait_event_lock_irq(conf->wait_barrier,
-			    !conf->array_frozen,
-			    conf->resync_lock);
-	atomic_inc(&conf->nr_pending[idx]);
+
+	/* Return false when nowait flag is set */
+	if (nowait) {
+		/* Return false when nowait flag is set */
+		ret = false;
+	} else {
+		wait_event_lock_irq(conf->wait_barrier,
+				!conf->array_frozen,
+				conf->resync_lock);
+		atomic_inc(&conf->nr_pending[idx]);
+	}
+
 	atomic_dec(&conf->nr_waiting[idx]);
 	spin_unlock_irq(&conf->resync_lock);
+	return ret;
 }
 
-static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
+static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
 {
 	int idx = sector_to_idx(sector_nr);
 
-	_wait_barrier(conf, idx);
+	return _wait_barrier(conf, idx, nowait);
 }
 
 static void _allow_barrier(struct r1conf *conf, int idx)
@@ -1236,7 +1256,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 	 * Still need barrier for READ in case that whole
 	 * array is frozen.
 	 */
-	wait_read_barrier(conf, bio->bi_iter.bi_sector);
+	if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
+				bio->bi_opf & REQ_NOWAIT)) {
+		bio_wouldblock_error(bio);
+		return;
+	}
 
 	if (!r1_bio)
 		r1_bio = alloc_r1bio(mddev, bio);
@@ -1336,6 +1360,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		     bio->bi_iter.bi_sector, bio_end_sector(bio))) {
 
 		DEFINE_WAIT(w);
+		if (bio->bi_opf & REQ_NOWAIT) {
+			bio_wouldblock_error(bio);
+			return;
+		}
 		for (;;) {
 			prepare_to_wait(&conf->wait_barrier,
 					&w, TASK_IDLE);
@@ -1353,17 +1381,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	 * thread has put up a bar for new requests.
 	 * Continue immediately if no resync is active currently.
 	 */
-	wait_barrier(conf, bio->bi_iter.bi_sector);
+	if (!wait_barrier(conf, bio->bi_iter.bi_sector,
+				bio->bi_opf & REQ_NOWAIT)) {
+		bio_wouldblock_error(bio);
+		return;
+	}
 
 	r1_bio = alloc_r1bio(mddev, bio);
 	r1_bio->sectors = max_write_sectors;
 
-	if (conf->pending_count >= max_queued_requests) {
-		md_wakeup_thread(mddev->thread);
-		raid1_log(mddev, "wait queued");
-		wait_event(conf->wait_barrier,
-			   conf->pending_count < max_queued_requests);
-	}
 	/* first select target devices under rcu_lock and
 	 * inc refcount on their rdev.  Record them by setting
 	 * bios[x] to bio
@@ -1458,9 +1484,14 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
 		r1_bio->state = 0;
 		allow_barrier(conf, bio->bi_iter.bi_sector);
+
+		if (bio->bi_opf & REQ_NOWAIT) {
+			bio_wouldblock_error(bio);
+			return;
+		}
 		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
-		wait_barrier(conf, bio->bi_iter.bi_sector);
+		wait_barrier(conf, bio->bi_iter.bi_sector, false);
 		goto retry_write;
 	}
 
@@ -1688,7 +1719,7 @@ static void close_sync(struct r1conf *conf)
 	int idx;
 
 	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
-		_wait_barrier(conf, idx);
+		_wait_barrier(conf, idx, false);
 		_allow_barrier(conf, idx);
 	}
 
@@ -3410,5 +3441,3 @@ MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
 MODULE_ALIAS("md-personality-3"); /* RAID1 */
 MODULE_ALIAS("md-raid1");
 MODULE_ALIAS("md-level-1");
-
-module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
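In short: the barrier helpers _wait_barrier(), wait_read_barrier() and wait_barrier() gain a nowait flag and a bool return value. When a bio carries REQ_NOWAIT and taking the barrier would mean sleeping (resync active, array frozen, clustered-area resync, or a blocked rdev in the write path), the helpers return false and the bio is failed with bio_wouldblock_error(), i.e. BLK_STS_AGAIN, instead of blocking. The patch also drops the max_queued_requests throttle and its module_param from this file. From userspace, BLK_STS_AGAIN surfaces as EAGAIN on direct I/O submitted with RWF_NOWAIT. Below is a minimal userspace sketch of a caller hitting this path; it is not part of the patch, and /dev/md0 plus the 4 KiB buffer are assumptions for illustration (a raw write to an md device clobbers whatever is on it):

/*
 * Hypothetical sketch, not part of the patch: a direct write issued
 * with RWF_NOWAIT. With this change, if the raid1 barrier would have
 * to sleep, the kernel fails the bio via bio_wouldblock_error() and
 * the syscall returns -1 with errno == EAGAIN instead of blocking.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	/* RWF_NOWAIT writes on a block device require direct I/O. */
	int fd = open("/dev/md0", O_WRONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* O_DIRECT needs an aligned buffer. */
	void *buf;
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return 1;
	}
	memset(buf, 0xab, 4096);

	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
	ssize_t ret = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (ret < 0 && errno == EAGAIN)
		fprintf(stderr, "would block; retry or fall back to a blocking write\n");
	else if (ret < 0)
		perror("pwritev2");

	free(buf);
	close(fd);
	return 0;
}

io_uring benefits the same way: it issues its first attempt nowait and punts to a worker on EAGAIN, so honoring REQ_NOWAIT here lets that first attempt fail fast rather than blocking the submitting task inside the raid1 barrier code. Note that on the failure path the helpers have already undone their nr_pending/nr_waiting accounting, so a caller that sees false just fails the bio and must not call allow_barrier().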