Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	50
1 file changed, 45 insertions(+), 5 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7b820b81d8c2..4739ed891e75 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6079,6 +6079,38 @@ out_release:
 	return ret;
 }
 
+/*
+ * If the bio covers multiple data disks, find sector within the bio that has
+ * the lowest chunk offset in the first chunk.
+ */
+static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
+					      struct bio *bi)
+{
+	int sectors_per_chunk = conf->chunk_sectors;
+	int raid_disks = conf->raid_disks;
+	int dd_idx;
+	struct stripe_head sh;
+	unsigned int chunk_offset;
+	sector_t r_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
+	sector_t sector;
+
+	/* We pass in fake stripe_head to get back parity disk numbers */
+	sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh);
+	chunk_offset = sector_div(sector, sectors_per_chunk);
+	if (sectors_per_chunk - chunk_offset >= bio_sectors(bi))
+		return r_sector;
+	/*
+	 * Bio crosses to the next data disk. Check whether it's in the same
+	 * chunk.
+	 */
+	dd_idx++;
+	while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx)
+		dd_idx++;
+	if (dd_idx >= raid_disks)
+		return r_sector;
+	return r_sector + sectors_per_chunk - chunk_offset;
+}
+
 static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 {
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -6150,6 +6182,17 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 	}
 	md_account_bio(mddev, &bi);
 
+	/*
+	 * Lets start with the stripe with the lowest chunk offset in the first
+	 * chunk. That has the best chances of creating IOs adjacent to
+	 * previous IOs in case of sequential IO and thus creates the most
+	 * sequential IO pattern. We don't bother with the optimization when
+	 * reshaping as the performance benefit is not worth the complexity.
+	 */
+	if (likely(conf->reshape_progress == MaxSector))
+		logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
+	s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf);
+
 	add_wait_queue(&conf->wait_for_overlap, &wait);
 	while (1) {
 		res = make_stripe_request(mddev, conf, &ctx, logical_sector,
@@ -6178,7 +6221,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 			continue;
 		}
 
-		s = find_first_bit(ctx.sectors_to_do, stripe_cnt);
+		s = find_next_bit_wrap(ctx.sectors_to_do, stripe_cnt, s);
 		if (s == stripe_cnt)
 			break;
 
@@ -7716,7 +7759,6 @@ static void raid5_set_io_opt(struct r5conf *conf)
 static int raid5_run(struct mddev *mddev)
 {
 	struct r5conf *conf;
-	int working_disks = 0;
 	int dirty_parity_disks = 0;
 	struct md_rdev *rdev;
 	struct md_rdev *journal_dev = NULL;
@@ -7912,10 +7954,8 @@ static int raid5_run(struct mddev *mddev)
 			pr_warn("md: cannot handle concurrent replacement and reshape.\n");
 			goto abort;
 		}
-		if (test_bit(In_sync, &rdev->flags)) {
-			working_disks++;
+		if (test_bit(In_sync, &rdev->flags))
 			continue;
-		}
 		/* This disc is not fully in-sync.  However if it
 		 * just stored parity (beyond the recovery_offset),
 		 * when we don't need to be concerned about the
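
The first hunk's optimization is easiest to see with the geometry written out. Below is a minimal userspace sketch of the same idea, under simplifying assumptions: parity-disk rotation is ignored and the kernel helpers (raid5_compute_sector(), sector_div(), bio_sectors()) are reduced to plain modular arithmetic, so lowest_chunk_sector() and its parameters are illustrative stand-ins, not the kernel API.

/*
 * Sketch (not kernel code): given an IO starting at `start` and spanning
 * `len` sectors, return the logical sector within the IO that has the
 * lowest chunk offset, so successive sequential IOs issue their per-disk
 * requests in ascending order. Parity rotation is ignored here.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static sector_t lowest_chunk_sector(sector_t start, sector_t len,
				    sector_t chunk_sectors)
{
	sector_t chunk_offset = start % chunk_sectors;

	/* IO fits inside the current chunk: nothing to reorder. */
	if (chunk_sectors - chunk_offset >= len)
		return start;
	/*
	 * The IO crosses a chunk boundary; the next chunk starts at
	 * chunk offset 0 on the next data disk, so begin there.
	 */
	return start + chunk_sectors - chunk_offset;
}

int main(void)
{
	/*
	 * 64-sector (32 KiB) chunks; a 32-sector IO starting 48 sectors
	 * into a chunk spills onto the next data disk.
	 */
	sector_t s = lowest_chunk_sector(48, 32, 64);

	printf("start processing at sector %llu\n",
	       (unsigned long long)s);	/* prints 64 */
	return 0;
}

When the IO crosses a chunk boundary, processing starts at the sector that lands at chunk offset 0 on the next data disk; back-to-back sequential bios then emit per-disk requests in ascending sector order, which is the pattern the commit comment is after.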
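The find_first_bit() to find_next_bit_wrap() change makes the stripe loop resume scanning ctx.sectors_to_do just past the stripe it last handled, wrapping around to pick up stripes skipped earlier, instead of restarting at bit 0 and undoing the ordering established above. A rough userspace stand-in for the wrap-around scan follows; the kernel's helper of the same name (include/linux/find.h) works a word at a time, and this bit-at-a-time version only mirrors its semantics.

/*
 * Userspace stand-in for the kernel's find_next_bit_wrap(): find the
 * next set bit at or after `offset`, wrapping around to bit 0; return
 * `size` if no bit is set.
 */
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long find_next_bit_wrap(const unsigned long *map,
					unsigned long size,
					unsigned long offset)
{
	for (unsigned long n = 0; n < size; n++) {
		unsigned long bit = (offset + n) % size;

		if (map[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)))
			return bit;
	}
	return size;
}

int main(void)
{
	/* Stripes 1, 3 and 6 still need work; stripe 4 was just handled. */
	unsigned long sectors_to_do[1] = { (1UL << 1) | (1UL << 3) | (1UL << 6) };

	/* Continue at stripe 6 instead of rescanning from stripe 1. */
	printf("next stripe: %lu\n",
	       find_next_bit_wrap(sectors_to_do, 8, 5));	/* prints 6 */
	return 0;
}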