Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--	drivers/md/bcache/request.c	61
1 file changed, 28 insertions(+), 33 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 709c9cc34369..681b4f12b05a 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -81,7 +81,7 @@ static void bch_data_insert_keys(struct closure *cl)
 	if (ret == -ESRCH) {
 		op->replace_collision = true;
 	} else if (ret) {
-		op->error		= -ENOMEM;
+		op->status		= BLK_STS_RESOURCE;
 		op->insert_data_done	= true;
 	}
 
@@ -178,17 +178,17 @@ static void bch_data_insert_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		/* TODO: We could try to recover from this. */
 		if (op->writeback)
-			op->error = bio->bi_error;
+			op->status = bio->bi_status;
 		else if (!op->replace)
 			set_closure_fn(cl, bch_data_insert_error, op->wq);
 		else
 			set_closure_fn(cl, NULL, NULL);
 	}
 
-	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
+	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 }
 
 static void bch_data_insert_start(struct closure *cl)
@@ -196,12 +196,12 @@ static void bch_data_insert_start(struct closure *cl)
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 	struct bio *bio = op->bio, *n;
 
-	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
-		wake_up_gc(op->c);
-
 	if (op->bypass)
 		return bch_data_invalidate(cl);
 
+	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
+		wake_up_gc(op->c);
+
 	/*
 	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
 	 * flush, it'll wait on the journal write.
@@ -400,12 +400,6 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 	if (!congested && !dc->sequential_cutoff)
 		goto rescale;
 
-	if (!congested &&
-	    mode == CACHE_MODE_WRITEBACK &&
-	    op_is_write(bio->bi_opf) &&
-	    op_is_sync(bio->bi_opf))
-		goto rescale;
-
 	spin_lock(&dc->io_lock);
 
 	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
@@ -488,15 +482,15 @@ static void bch_cache_read_endio(struct bio *bio)
 	 * from the backing device.
 	 */
 
-	if (bio->bi_error)
-		s->iop.error = bio->bi_error;
+	if (bio->bi_status)
+		s->iop.status = bio->bi_status;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(s->iop.c, &b->key, 0)) {
 		atomic_long_inc(&s->iop.c->cache_read_races);
-		s->iop.error = -EINTR;
+		s->iop.status = BLK_STS_IOERR;
 	}
 
-	bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
+	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
 }
 
 /*
@@ -593,9 +587,9 @@ static void request_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		struct search *s = container_of(cl, struct search, cl);
-		s->iop.error = bio->bi_error;
+		s->iop.status = bio->bi_status;
 		/* Only cache read errors are recoverable */
 		s->recoverable = false;
 	}
@@ -607,11 +601,12 @@ static void request_endio(struct bio *bio)
 static void bio_complete(struct search *s)
 {
 	if (s->orig_bio) {
-		generic_end_io_acct(bio_data_dir(s->orig_bio),
+		struct request_queue *q = s->orig_bio->bi_disk->queue;
+		generic_end_io_acct(q, bio_data_dir(s->orig_bio),
 				    &s->d->disk->part0, s->start_time);
 
 		trace_bcache_request_end(s->d, s->orig_bio);
-		s->orig_bio->bi_error = s->iop.error;
+		s->orig_bio->bi_status = s->iop.status;
 		bio_endio(s->orig_bio);
 		s->orig_bio = NULL;
 	}
@@ -664,7 +659,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.inode		= d->id;
 	s->iop.write_point	= hash_long((unsigned long) current, 16);
 	s->iop.write_prio	= 0;
-	s->iop.error		= 0;
+	s->iop.status		= 0;
 	s->iop.flags		= 0;
 	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
 	s->iop.wq		= bcache_wq;
@@ -707,7 +702,7 @@ static void cached_dev_read_error(struct closure *cl)
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
 
-		s->iop.error = 0;
+		s->iop.status = 0;
 		do_bio_hook(s, s->orig_bio);
 
 		/* XXX: invalidate cache */
@@ -734,7 +729,7 @@ static void cached_dev_read_done(struct closure *cl)
 	if (s->iop.bio) {
 		bio_reset(s->iop.bio);
 		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
-		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
+		bio_copy_dev(s->iop.bio, s->cache_miss);
 		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 		bch_bio_map(s->iop.bio, NULL);
 
@@ -767,7 +762,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
 				  !s->cache_miss, s->iop.bypass);
 	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
 
-	if (s->iop.error)
+	if (s->iop.status)
 		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
 	else if (s->iop.bio || verify(dc, &s->bio.bio))
 		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
@@ -793,7 +788,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	    !(bio->bi_opf & REQ_META) &&
 	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
 		reada = min_t(sector_t, dc->readahead >> 9,
-			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
+			      get_capacity(bio->bi_disk) - bio_end_sector(bio));
 
 	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
@@ -819,7 +814,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 		goto out_submit;
 
 	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
-	cache_bio->bi_bdev		= miss->bi_bdev;
+	bio_copy_dev(cache_bio, miss);
 	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;
 
 	cache_bio->bi_end_io	= request_endio;
@@ -918,7 +913,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
 							     dc->disk.bio_split);
 
-			flush->bi_bdev	= bio->bi_bdev;
+			bio_copy_dev(flush, bio);
 			flush->bi_end_io = request_endio;
 			flush->bi_private = cl;
 			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
@@ -955,13 +950,13 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
 					struct bio *bio)
 {
 	struct search *s;
-	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+	struct bcache_device *d = bio->bi_disk->private_data;
 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
 	int rw = bio_data_dir(bio);
 
-	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);
+	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
 
-	bio->bi_bdev = dc->bdev;
+	bio_set_dev(bio, dc->bdev);
 	bio->bi_iter.bi_sector += dc->sb.data_offset;
 
 	if (cached_dev_get(dc)) {
@@ -1071,10 +1066,10 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
 {
 	struct search *s;
 	struct closure *cl;
-	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+	struct bcache_device *d = bio->bi_disk->private_data;
 	int rw = bio_data_dir(bio);
 
-	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);
+	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
 
 	s = search_alloc(bio, d);
 	cl = &s->cl;
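
Note: the error-handling hunks above all follow one pattern: the raw errno in bio->bi_error (and bcache's op->error / s->iop.error) becomes the block layer's blk_status_t in bio->bi_status, with -ENOMEM mapping to BLK_STS_RESOURCE and -EINTR to BLK_STS_IOERR. A minimal sketch of the resulting completion-handler idiom, assuming the 4.13+ block API this diff targets; struct my_op and my_endio() are hypothetical names, not bcache code:

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Illustrative per-request state, standing in for bcache's data_insert_op. */
struct my_op {
	struct bio	*orig_bio;
	blk_status_t	status;	/* BLK_STS_OK, BLK_STS_IOERR, BLK_STS_RESOURCE, ... */
};

static void my_endio(struct bio *bio)
{
	struct my_op *op = bio->bi_private;

	/* bi_status replaces the old errno-valued bi_error field. */
	if (bio->bi_status)
		op->status = bio->bi_status;

	bio_put(bio);

	/* Propagate the recorded status when completing the original bio. */
	op->orig_bio->bi_status = op->status;
	bio_endio(op->orig_bio);
}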
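
Note: the remaining hunks track the replacement of bio->bi_bdev with a gendisk pointer: bio_set_dev() replaces direct bi_bdev assignment, bio_copy_dev() clones another bio's disk/partition, and generic_start_io_acct()/generic_end_io_acct() gain a struct request_queue argument. A sketch of the remapping idiom under the same 4.13-era API assumption; struct my_dev and my_make_request() are hypothetical stand-ins for bcache's cached_dev path:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Illustrative stacked device, standing in for bcache's cached_dev. */
struct my_dev {
	struct gendisk		*disk;		/* the virtual disk we expose */
	struct block_device	*backing_bdev;	/* where I/O is redirected */
	sector_t		data_offset;
};

static blk_qc_t my_make_request(struct request_queue *q, struct bio *bio)
{
	/* bi_disk->private_data replaces bi_bdev->bd_disk->private_data. */
	struct my_dev *d = bio->bi_disk->private_data;

	/* The accounting helpers now take the request_queue as their first argument. */
	generic_start_io_acct(q, bio_data_dir(bio), bio_sectors(bio),
			      &d->disk->part0);

	bio_set_dev(bio, d->backing_bdev);	/* was: bio->bi_bdev = ... */
	bio->bi_iter.bi_sector += d->data_offset;

	return generic_make_request(bio);
}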