Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--   drivers/md/bcache/request.c   |   70
1 file changed, 39 insertions(+), 31 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 7dbe8b6316a0..3bf35914bb57 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -25,9 +25,9 @@
 
 struct kmem_cache *bch_search_cache;
 
-static void bch_data_insert_start(struct closure *);
+static void bch_data_insert_start(struct closure *cl);
 
-static unsigned cache_mode(struct cached_dev *dc)
+static unsigned int cache_mode(struct cached_dev *dc)
 {
 	return BDEV_CACHE_MODE(&dc->sb);
 }
@@ -45,6 +45,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
 	bio_for_each_segment(bv, bio, iter) {
 		void *d = kmap(bv.bv_page) + bv.bv_offset;
+
 		csum = bch_crc64_update(csum, d, bv.bv_len);
 		kunmap(bv.bv_page);
 	}
@@ -98,7 +99,7 @@ static void bch_data_insert_keys(struct closure *cl)
 	closure_return(cl);
 }
 
-static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
+static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
 			       struct cache_set *c)
 {
 	size_t oldsize = bch_keylist_nkeys(l);
@@ -125,7 +126,7 @@ static void bch_data_invalidate(struct closure *cl)
 		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 
 	while (bio_sectors(bio)) {
-		unsigned sectors = min(bio_sectors(bio),
+		unsigned int sectors = min(bio_sectors(bio),
 				       1U << (KEY_SIZE_BITS - 1));
 
 		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
@@ -135,7 +136,9 @@ static void bch_data_invalidate(struct closure *cl)
 		bio->bi_iter.bi_size	-= sectors << 9;
 
 		bch_keylist_add(&op->insert_keys,
-				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
+				&KEY(op->inode,
+				     bio->bi_iter.bi_sector,
+				     sectors));
 	}
 
 	op->insert_data_done = true;
@@ -151,7 +154,7 @@ static void bch_data_insert_error(struct closure *cl)
 
 	/*
 	 * Our data write just errored, which means we've got a bunch of keys to
-	 * insert that point to data that wasn't succesfully written.
+	 * insert that point to data that wasn't successfully written.
 	 *
 	 * We don't have to insert those keys but we still have to invalidate
 	 * that region of the cache - so, if we just strip off all the pointers
@@ -211,7 +214,7 @@ static void bch_data_insert_start(struct closure *cl)
 	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
 
 	do {
-		unsigned i;
+		unsigned int i;
 		struct bkey *k;
 		struct bio_set *split = &op->c->bio_split;
@@ -328,7 +331,7 @@ void bch_data_insert(struct closure *cl)
 
 /* Congested? */
 
-unsigned bch_get_congested(struct cache_set *c)
+unsigned int bch_get_congested(struct cache_set *c)
 {
 	int i;
 	long rand;
@@ -372,8 +375,8 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 {
 	struct cache_set *c = dc->disk.c;
-	unsigned mode = cache_mode(dc);
-	unsigned sectors, congested = bch_get_congested(c);
+	unsigned int mode = cache_mode(dc);
+	unsigned int sectors, congested = bch_get_congested(c);
 	struct task_struct *task = current;
 	struct io *i;
@@ -392,7 +395,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 	 * unless the read-ahead request is for metadata (eg, for gfs2).
 	 */
 	if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
-	    !(bio->bi_opf & REQ_META))
+	    !(bio->bi_opf & REQ_PRIO))
 		goto skip;
 
 	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
@@ -469,11 +472,11 @@ struct search {
 	struct bio		*cache_miss;
 	struct bcache_device	*d;
 
-	unsigned		insert_bio_sectors;
-	unsigned		recoverable:1;
-	unsigned		write:1;
-	unsigned		read_dirty_data:1;
-	unsigned		cache_missed:1;
+	unsigned int		insert_bio_sectors;
+	unsigned int		recoverable:1;
+	unsigned int		write:1;
+	unsigned int		read_dirty_data:1;
+	unsigned int		cache_missed:1;
 
 	unsigned long		start_time;
@@ -514,20 +517,20 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 	struct search *s = container_of(op, struct search, op);
 	struct bio *n, *bio = &s->bio.bio;
 	struct bkey *bio_key;
-	unsigned ptr;
+	unsigned int ptr;
 
 	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
 		return MAP_CONTINUE;
 
 	if (KEY_INODE(k) != s->iop.inode ||
 	    KEY_START(k) > bio->bi_iter.bi_sector) {
-		unsigned bio_sectors = bio_sectors(bio);
-		unsigned sectors = KEY_INODE(k) == s->iop.inode
+		unsigned int bio_sectors = bio_sectors(bio);
+		unsigned int sectors = KEY_INODE(k) == s->iop.inode
 			? min_t(uint64_t, INT_MAX,
 				KEY_START(k) - bio->bi_iter.bi_sector)
 			: INT_MAX;
-
 		int ret = s->d->cache_miss(b, s, bio, sectors);
+
 		if (ret != MAP_CONTINUE)
 			return ret;
@@ -623,6 +626,7 @@ static void request_endio(struct bio *bio)
 
 	if (bio->bi_status) {
 		struct search *s = container_of(cl, struct search, cl);
+
 		s->iop.status = bio->bi_status;
 		/* Only cache read errors are recoverable */
 		s->recoverable = false;
@@ -813,7 +817,8 @@ static void cached_dev_read_done(struct closure *cl)
 
 	if (s->iop.bio) {
 		bio_reset(s->iop.bio);
-		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
+		s->iop.bio->bi_iter.bi_sector =
+			s->cache_miss->bi_iter.bi_sector;
 		bio_copy_dev(s->iop.bio, s->cache_miss);
 		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 		bch_bio_map(s->iop.bio, NULL);
@@ -845,7 +850,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
 	bch_mark_cache_accounting(s->iop.c, s->d,
 				  !s->cache_missed, s->iop.bypass);
 
-	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
+	trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
 
 	if (s->iop.status)
 		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
@@ -856,10 +861,10 @@ static void cached_dev_read_done_bh(struct closure *cl)
 }
 
 static int cached_dev_cache_miss(struct btree *b, struct search *s,
-				 struct bio *bio, unsigned sectors)
+				 struct bio *bio, unsigned int sectors)
 {
 	int ret = MAP_CONTINUE;
-	unsigned reada = 0;
+	unsigned int reada = 0;
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	struct bio *miss, *cache_bio;
@@ -872,7 +877,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	}
 
 	if (!(bio->bi_opf & REQ_RAHEAD) &&
-	    !(bio->bi_opf & REQ_META) &&
+	    !(bio->bi_opf & REQ_PRIO) &&
 	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
 		reada = min_t(sector_t, dc->readahead >> 9,
 			      get_capacity(bio->bi_disk) - bio_end_sector(bio));
@@ -1212,6 +1217,10 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
 			    unsigned int cmd, unsigned long arg)
 {
 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+
+	if (dc->io_disable)
+		return -EIO;
+
 	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
 }
@@ -1226,7 +1235,7 @@ static int cached_dev_congested(void *data, int bits)
 		return 1;
 
 	if (cached_dev_get(dc)) {
-		unsigned i;
+		unsigned int i;
 		struct cache *ca;
 
 		for_each_cache(ca, d->c, i) {
@@ -1253,9 +1262,9 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 /* Flash backed devices */
 
 static int flash_dev_cache_miss(struct btree *b, struct search *s,
-				struct bio *bio, unsigned sectors)
+				struct bio *bio, unsigned int sectors)
 {
-	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;
+	unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
 
 	swap(bio->bi_iter.bi_size, bytes);
 	zero_fill_bio(bio);
@@ -1338,7 +1347,7 @@ static int flash_dev_congested(void *data, int bits)
 	struct bcache_device *d = data;
 	struct request_queue *q;
 	struct cache *ca;
-	unsigned i;
+	unsigned int i;
 	int ret = 0;
 
 	for_each_cache(ca, d->c, i) {
@@ -1361,8 +1370,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
 
 void bch_request_exit(void)
 {
-	if (bch_search_cache)
-		kmem_cache_destroy(bch_search_cache);
+	kmem_cache_destroy(bch_search_cache);
 }
 
 int __init bch_request_init(void)