Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c | 39 +++++++++++++++++++++++----------------
1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index be117d0a65a8..2031506a0ecd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1139,6 +1139,9 @@ again:
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
 			bi->bi_iter.bi_size = STRIPE_SIZE;
+			bi->bi_write_hint = sh->dev[i].write_hint;
+			if (!rrdev)
+				sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -1190,6 +1193,8 @@ again:
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
 			rbi->bi_iter.bi_size = STRIPE_SIZE;
+			rbi->bi_write_hint = sh->dev[i].write_hint;
+			sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -2391,7 +2396,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 	 * is completely stalled, so now is a good time to resize
 	 * conf->disks and the scribble region
 	 */
-	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
+	ndisks = kcalloc(newsize, sizeof(struct disk_info), GFP_NOIO);
 	if (ndisks) {
 		for (i = 0; i < conf->pool_size; i++)
 			ndisks[i] = conf->disks[i];
@@ -3204,6 +3209,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
 		(unsigned long long)sh->sector);
 
 	spin_lock_irq(&sh->stripe_lock);
+	sh->dev[dd_idx].write_hint = bi->bi_write_hint;
 	/* Don't allow new IO added to stripes in batch list */
 	if (sh->batch_head)
 		goto overlap;
@@ -4614,15 +4620,15 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
 
 		sh->check_state = head_sh->check_state;
 		sh->reconstruct_state = head_sh->reconstruct_state;
+		spin_lock_irq(&sh->stripe_lock);
+		sh->batch_head = NULL;
+		spin_unlock_irq(&sh->stripe_lock);
 		for (i = 0; i < sh->disks; i++) {
 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 				do_wakeup = 1;
 			sh->dev[i].flags = head_sh->dev[i].flags &
 				(~((1 << R5_WriteError) | (1 << R5_Overlap)));
 		}
-		spin_lock_irq(&sh->stripe_lock);
-		sh->batch_head = NULL;
-		spin_unlock_irq(&sh->stripe_lock);
 		if (handle_flags == 0 ||
 		    sh->state & handle_flags)
 			set_bit(STRIPE_HANDLE, &sh->state);
@@ -5192,7 +5198,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 	/*
 	 * use bio_clone_fast to make a copy of the bio
 	 */
-	align_bi = bio_clone_fast(raid_bio, GFP_NOIO, mddev->bio_set);
+	align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
 	if (!align_bi)
 		return 0;
 	/*
@@ -5277,7 +5283,7 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
 
 	if (sectors < bio_sectors(raid_bio)) {
 		struct r5conf *conf = mddev->private;
-		split = bio_split(raid_bio, sectors, GFP_NOIO, conf->bio_split);
+		split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
 		bio_chain(split, raid_bio);
 		generic_make_request(raid_bio);
 		raid_bio = split;
@@ -6658,9 +6664,9 @@ static int alloc_thread_groups(struct r5conf *conf, int cnt,
 	}
 	*group_cnt = num_possible_nodes();
 	size = sizeof(struct r5worker) * cnt;
-	workers = kzalloc(size * *group_cnt, GFP_NOIO);
-	*worker_groups = kzalloc(sizeof(struct r5worker_group) *
-				*group_cnt, GFP_NOIO);
+	workers = kcalloc(size, *group_cnt, GFP_NOIO);
+	*worker_groups = kcalloc(*group_cnt, sizeof(struct r5worker_group),
+				 GFP_NOIO);
 	if (!*worker_groups || !workers) {
 		kfree(workers);
 		kfree(*worker_groups);
@@ -6773,8 +6779,7 @@ static void free_conf(struct r5conf *conf)
 		if (conf->disks[i].extra_page)
 			put_page(conf->disks[i].extra_page);
 	kfree(conf->disks);
-	if (conf->bio_split)
-		bioset_free(conf->bio_split);
+	bioset_exit(&conf->bio_split);
 	kfree(conf->stripe_hashtbl);
 	kfree(conf->pending_data);
 	kfree(conf);
@@ -6853,6 +6858,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 	int i;
 	int group_cnt, worker_cnt_per_group;
 	struct r5worker_group *new_group;
+	int ret;
 
 	if (mddev->new_level != 5
 	    && mddev->new_level != 4
@@ -6888,8 +6894,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 		goto abort;
 	INIT_LIST_HEAD(&conf->free_list);
 	INIT_LIST_HEAD(&conf->pending_list);
-	conf->pending_data = kzalloc(sizeof(struct r5pending_data) *
-		PENDING_IO_MAX, GFP_KERNEL);
+	conf->pending_data = kcalloc(PENDING_IO_MAX,
+				     sizeof(struct r5pending_data),
+				     GFP_KERNEL);
 	if (!conf->pending_data)
 		goto abort;
 	for (i = 0; i < PENDING_IO_MAX; i++)
@@ -6938,7 +6945,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
 	max_disks = max(conf->raid_disks, conf->previous_raid_disks);
 
-	conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
+	conf->disks = kcalloc(max_disks, sizeof(struct disk_info),
 			      GFP_KERNEL);
 
 	if (!conf->disks)
@@ -6950,8 +6957,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 			goto abort;
 	}
 
-	conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
-	if (!conf->bio_split)
+	ret = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
+	if (ret)
 		goto abort;
 	conf->mddev = mddev;
 
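
The kcalloc() conversions in the hunks above replace open-coded "n * size" multiplications with the two-argument allocator, which checks the multiplication for overflow before handing back zeroed memory. A minimal user-space sketch of the same idea, with calloc() standing in for kcalloc() and a hypothetical element count chosen only for illustration:

#include <stdio.h>
#include <stdlib.h>

struct disk_info { char pad[64]; };	/* stand-in for the kernel's struct disk_info */

int main(void)
{
	size_t n = 1000;	/* hypothetical element count */

	/* Open-coded multiplication: if n * sizeof() overflows,
	 * the allocation silently becomes too small. */
	struct disk_info *a = malloc(n * sizeof(struct disk_info));

	/* calloc(n, size) checks the multiplication for overflow and
	 * returns zeroed memory, mirroring kcalloc(n, size, GFP_NOIO). */
	struct disk_info *b = calloc(n, sizeof(struct disk_info));

	printf("malloc: %p, calloc: %p\n", (void *)a, (void *)b);
	free(a);
	free(b);
	return 0;
}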