Diffstat (limited to 'drivers/lightnvm')
-rw-r--r--	drivers/lightnvm/core.c	 26
-rw-r--r--	drivers/lightnvm/gennvm.c	 91
-rw-r--r--	drivers/lightnvm/gennvm.h	  6
-rw-r--r--	drivers/lightnvm/rrpc.c	203
-rw-r--r--	drivers/lightnvm/rrpc.h	 16
5 files changed, 249 insertions, 93 deletions
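
One small building block recurs throughout the diff below and is worth reading first: nvm_core_init() now allocates a dev->lun_map bitmap, and gennvm claims or releases a LUN for a target with test_and_set_bit()/test_and_clear_bit() (gennvm_reserve_lun()/gennvm_release_lun()), so two targets can no longer be instantiated on top of the same LUN. The following userspace sketch mirrors that reservation pattern; it is illustrative only (the function and variable names are invented here, and C11 atomics stand in for the kernel's bitops):

/*
 * Sketch of the dev->lun_map reservation pattern from the diff below.
 * atomic_fetch_or/atomic_fetch_and emulate the kernel's
 * test_and_set_bit()/test_and_clear_bit(); names are illustrative.
 */
#include <assert.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_LUNS		128
#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)
#define MAP_WORDS	((NR_LUNS + BITS_PER_WORD - 1) / BITS_PER_WORD)

static _Atomic unsigned long lun_map[MAP_WORDS];

/* returns nonzero if the LUN was already taken, like test_and_set_bit() */
static int reserve_lun(int lunid)
{
	unsigned long mask = 1UL << (lunid % BITS_PER_WORD);
	unsigned long old = atomic_fetch_or(&lun_map[lunid / BITS_PER_WORD], mask);

	return (old & mask) != 0;
}

static void release_lun(int lunid)
{
	unsigned long mask = 1UL << (lunid % BITS_PER_WORD);
	unsigned long old = atomic_fetch_and(&lun_map[lunid / BITS_PER_WORD], ~mask);

	assert(old & mask);	/* kernel: WARN_ON(!test_and_clear_bit(...)) */
}

int main(void)
{
	printf("%d\n", reserve_lun(5));	/* 0: first claim succeeds */
	printf("%d\n", reserve_lun(5));	/* 1: already allocated */
	release_lun(5);
	printf("%d\n", reserve_lun(5));	/* 0: free again */
	return 0;
}

rrpc_luns_init() below uses exactly this handshake: reserve_lun() before get_lun(), and release_lun() from rrpc_luns_free() on teardown.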
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 9f6acd5d1d2e..0dc9a80adb94 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -250,7 +250,7 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
 		return 0;
 	}
 
-	plane_cnt = (1 << dev->plane_mode);
+	plane_cnt = dev->plane_mode;
 	rqd->nr_pages = plane_cnt * nr_ppas;
 
 	if (dev->ops->max_phys_sect < rqd->nr_pages)
@@ -463,13 +463,14 @@ static int nvm_core_init(struct nvm_dev *dev)
 	dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
 	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
 
-	dev->total_blocks = dev->nr_planes *
-				dev->blks_per_lun *
-				dev->luns_per_chnl *
-				dev->nr_chnls;
-	dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
+	dev->total_secs = dev->nr_luns * dev->sec_per_lun;
+	dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
+					sizeof(unsigned long), GFP_KERNEL);
+	if (!dev->lun_map)
+		return -ENOMEM;
 	INIT_LIST_HEAD(&dev->online_targets);
 	mutex_init(&dev->mlock);
+	spin_lock_init(&dev->lock);
 
 	return 0;
 }
@@ -589,6 +590,7 @@ int nvm_register(struct request_queue *q, char *disk_name,
 
 	return 0;
 err_init:
+	kfree(dev->lun_map);
 	kfree(dev);
 	return ret;
 }
@@ -611,6 +613,7 @@ void nvm_unregister(char *disk_name)
 	up_write(&nvm_lock);
 
 	nvm_exit(dev);
+	kfree(dev->lun_map);
 	kfree(dev);
 }
 EXPORT_SYMBOL(nvm_unregister);
@@ -872,20 +875,19 @@ static int nvm_configure_by_str_event(const char *val,
 
 static int nvm_configure_get(char *buf, const struct kernel_param *kp)
 {
-	int sz = 0;
-	char *buf_start = buf;
+	int sz;
 	struct nvm_dev *dev;
 
-	buf += sprintf(buf, "available devices:\n");
+	sz = sprintf(buf, "available devices:\n");
 	down_write(&nvm_lock);
 	list_for_each_entry(dev, &nvm_devices, devices) {
-		if (sz > 4095 - DISK_NAME_LEN)
+		if (sz > 4095 - DISK_NAME_LEN - 2)
 			break;
-		buf += sprintf(buf, " %32s\n", dev->name);
+		sz += sprintf(buf + sz, " %32s\n", dev->name);
 	}
 	up_write(&nvm_lock);
 
-	return buf - buf_start - 1;
+	return sz;
 }
 
 static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 7fb725b16148..72e124a3927d 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -20,6 +20,68 @@
 
 #include "gennvm.h"
 
+static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
+{
+	struct gen_nvm *gn = dev->mp;
+	struct gennvm_area *area, *prev, *next;
+	sector_t begin = 0;
+	sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;
+
+	if (len > max_sectors)
+		return -EINVAL;
+
+	area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+	if (!area)
+		return -ENOMEM;
+
+	prev = NULL;
+
+	spin_lock(&dev->lock);
+	list_for_each_entry(next, &gn->area_list, list) {
+		if (begin + len > next->begin) {
+			begin = next->end;
+			prev = next;
+			continue;
+		}
+		break;
+	}
+
+	if ((begin + len) > max_sectors) {
+		spin_unlock(&dev->lock);
+		kfree(area);
+		return -EINVAL;
+	}
+
+	area->begin = *lba = begin;
+	area->end = begin + len;
+
+	if (prev) /* insert into sorted order */
+		list_add(&area->list, &prev->list);
+	else
+		list_add(&area->list, &gn->area_list);
+	spin_unlock(&dev->lock);
+
+	return 0;
+}
+
+static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
+{
+	struct gen_nvm *gn = dev->mp;
+	struct gennvm_area *area;
+
+	spin_lock(&dev->lock);
+	list_for_each_entry(area, &gn->area_list, list) {
+		if (area->begin != begin)
+			continue;
+
+		list_del(&area->list);
+		spin_unlock(&dev->lock);
+		kfree(area);
+		return;
+	}
+	spin_unlock(&dev->lock);
+}
+
 static void gennvm_blocks_free(struct nvm_dev *dev)
 {
 	struct gen_nvm *gn = dev->mp;
@@ -100,14 +162,13 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
 {
 	struct nvm_dev *dev = private;
 	struct gen_nvm *gn = dev->mp;
-	sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
 	u64 elba = slba + nlb;
 	struct gen_lun *lun;
 	struct nvm_block *blk;
 	u64 i;
 	int lun_id;
 
-	if (unlikely(elba > dev->total_pages)) {
+	if (unlikely(elba > dev->total_secs)) {
 		pr_err("gennvm: L2P data from device is out of bounds!\n");
 		return -EINVAL;
 	}
@@ -115,7 +176,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
 	for (i = 0; i < nlb; i++) {
 		u64 pba = le64_to_cpu(entries[i]);
 
-		if (unlikely(pba >= max_pages && pba != U64_MAX)) {
+		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
 			pr_err("gennvm: L2P data entry is out of bounds!\n");
 			return -EINVAL;
 		}
@@ -196,8 +257,8 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
 		}
 	}
 
-	if (dev->ops->get_l2p_tbl) {
-		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
+	if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
+		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
 							gennvm_block_map, dev);
 		if (ret) {
 			pr_err("gennvm: could not read L2P table.\n");
@@ -230,6 +291,7 @@ static int gennvm_register(struct nvm_dev *dev)
 
 	gn->dev = dev;
 	gn->nr_luns = dev->nr_luns;
+	INIT_LIST_HEAD(&gn->area_list);
 	dev->mp = gn;
 
 	ret = gennvm_luns_init(dev, gn);
@@ -420,10 +482,23 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
 	return nvm_erase_ppa(dev, &addr, 1);
 }
 
+static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
+{
+	return test_and_set_bit(lunid, dev->lun_map);
+}
+
+static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
+{
+	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+}
+
 static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
 {
 	struct gen_nvm *gn = dev->mp;
 
+	if (unlikely(lunid >= dev->nr_luns))
+		return NULL;
+
 	return &gn->luns[lunid].vlun;
 }
@@ -465,7 +540,13 @@ static struct nvmm_type gennvm = {
 	.erase_blk		= gennvm_erase_blk,
 
 	.get_lun		= gennvm_get_lun,
+	.reserve_lun		= gennvm_reserve_lun,
+	.release_lun		= gennvm_release_lun,
 	.lun_info_print		= gennvm_lun_info_print,
+
+	.get_area		= gennvm_get_area,
+	.put_area		= gennvm_put_area,
+
 };
 
 static int __init gennvm_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 9c24b5b32dac..04d7c23cfc61 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -39,8 +39,14 @@ struct gen_nvm {
 
 	int nr_luns;
 	struct gen_lun *luns;
+	struct list_head area_list;
 };
 
+struct gennvm_area {
+	struct list_head list;
+	sector_t begin;
+	sector_t end;	/* end is excluded */
+};
 #define gennvm_for_each_lun(bm, lun, i) \
 		for ((i) = 0, lun = &(bm)->luns[0]; \
 			(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 307db1ea22de..3ab6495c3fd8 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -38,7 +38,7 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
 
 	spin_lock(&rblk->lock);
 
-	div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
+	div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
 	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
 	rblk->nr_invalid_pages++;
 
@@ -113,14 +113,24 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
 
 static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-	return (rblk->next_page == rrpc->dev->pgs_per_blk);
+	return (rblk->next_page == rrpc->dev->sec_per_blk);
 }
 
+/* Calculate relative addr for the given block, considering instantiated LUNs */
+static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+	struct nvm_block *blk = rblk->parent;
+	int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
+
+	return lun_blk * rrpc->dev->sec_per_blk;
+}
+
+/* Calculate global addr for the given block */
 static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
 	struct nvm_block *blk = rblk->parent;
 
-	return blk->id * rrpc->dev->pgs_per_blk;
+	return blk->id * rrpc->dev->sec_per_blk;
 }
 
 static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
@@ -136,7 +146,7 @@ static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
 	l.g.sec = secs;
 
 	sector_div(ppa, dev->sec_per_pg);
-	div_u64_rem(ppa, dev->sec_per_blk, &pgs);
+	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
 	l.g.pg = pgs;
 
 	sector_div(ppa, dev->pgs_per_blk);
@@ -191,12 +201,12 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
 		return NULL;
 	}
 
-	rblk = &rlun->blocks[blk->id];
+	rblk = rrpc_get_rblk(rlun, blk->id);
 	list_add_tail(&rblk->list, &rlun->open_list);
 	spin_unlock(&lun->lock);
 
 	blk->priv = rblk;
-	bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
+	bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
 	rblk->next_page = 0;
 	rblk->nr_invalid_pages = 0;
 	atomic_set(&rblk->data_cmnt_size, 0);
@@ -286,11 +296,11 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
 	struct bio *bio;
 	struct page *page;
 	int slot;
-	int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
+	int nr_sec_per_blk = rrpc->dev->sec_per_blk;
 	u64 phys_addr;
 	DECLARE_COMPLETION_ONSTACK(wait);
 
-	if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
+	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
 		return 0;
 
 	bio = bio_alloc(GFP_NOIO, 1);
@@ -306,10 +316,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
 	}
 
 	while ((slot = find_first_zero_bit(rblk->invalid_pages,
-					    nr_pgs_per_blk)) < nr_pgs_per_blk) {
+					    nr_sec_per_blk)) < nr_sec_per_blk) {
 
 		/* Lock laddr */
-		phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
+		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
 
 try:
 		spin_lock(&rrpc->rev_lock);
@@ -381,7 +391,7 @@ finished:
 	mempool_free(page, rrpc->page_pool);
 	bio_put(bio);
 
-	if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
+	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
 		pr_err("nvm: failed to garbage collect block\n");
 		return -EIO;
 	}
@@ -499,12 +509,21 @@ static void rrpc_gc_queue(struct work_struct *work)
 	struct rrpc *rrpc = gcb->rrpc;
 	struct rrpc_block *rblk = gcb->rblk;
 	struct nvm_lun *lun = rblk->parent->lun;
+	struct nvm_block *blk = rblk->parent;
 	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
 
 	spin_lock(&rlun->lock);
 	list_add_tail(&rblk->prio, &rlun->prio_list);
 	spin_unlock(&rlun->lock);
 
+	spin_lock(&lun->lock);
+	lun->nr_open_blocks--;
+	lun->nr_closed_blocks++;
+	blk->state &= ~NVM_BLK_ST_OPEN;
+	blk->state |= NVM_BLK_ST_CLOSED;
+	list_move_tail(&rblk->list, &rlun->closed_list);
+	spin_unlock(&lun->lock);
+
 	mempool_free(gcb, rrpc->gcb_pool);
 	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
 							rblk->parent->id);
@@ -545,7 +564,7 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
 	struct rrpc_addr *gp;
 	struct rrpc_rev_addr *rev;
 
-	BUG_ON(laddr >= rrpc->nr_pages);
+	BUG_ON(laddr >= rrpc->nr_sects);
 
 	gp = &rrpc->trans_map[laddr];
 	spin_lock(&rrpc->rev_lock);
@@ -668,20 +687,8 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
 		lun = rblk->parent->lun;
 
 		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
-		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
-			struct nvm_block *blk = rblk->parent;
-			struct rrpc_lun *rlun = rblk->rlun;
-
-			spin_lock(&lun->lock);
-			lun->nr_open_blocks--;
-			lun->nr_closed_blocks++;
-			blk->state &= ~NVM_BLK_ST_OPEN;
-			blk->state |= NVM_BLK_ST_CLOSED;
-			list_move_tail(&rblk->list, &rlun->closed_list);
-			spin_unlock(&lun->lock);
-
+		if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
 			rrpc_run_gc(rrpc, rblk);
-		}
 	}
 }
@@ -726,7 +733,7 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
 
 	for (i = 0; i < npages; i++) {
 		/* We assume that mapping occurs at 4KB granularity */
-		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages));
+		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
 		gp = &rrpc->trans_map[laddr + i];
 
 		if (gp->rblk) {
@@ -757,7 +764,7 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
 	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
 		return NVM_IO_REQUEUE;
 
-	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages));
+	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
 	gp = &rrpc->trans_map[laddr];
 
 	if (gp->rblk) {
@@ -958,25 +965,11 @@ static void rrpc_requeue(struct work_struct *work)
 
 static void rrpc_gc_free(struct rrpc *rrpc)
 {
-	struct rrpc_lun *rlun;
-	int i;
-
 	if (rrpc->krqd_wq)
 		destroy_workqueue(rrpc->krqd_wq);
 
 	if (rrpc->kgc_wq)
 		destroy_workqueue(rrpc->kgc_wq);
-
-	if (!rrpc->luns)
-		return;
-
-	for (i = 0; i < rrpc->nr_luns; i++) {
-		rlun = &rrpc->luns[i];
-
-		if (!rlun->blocks)
-			break;
-		vfree(rlun->blocks);
-	}
 }
 
 static int rrpc_gc_init(struct rrpc *rrpc)
@@ -1007,21 +1000,21 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
 	struct nvm_dev *dev = rrpc->dev;
 	struct rrpc_addr *addr = rrpc->trans_map + slba;
 	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
-	sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
 	u64 elba = slba + nlb;
 	u64 i;
 
-	if (unlikely(elba > dev->total_pages)) {
+	if (unlikely(elba > dev->total_secs)) {
 		pr_err("nvm: L2P data from device is out of bounds!\n");
 		return -EINVAL;
 	}
 
 	for (i = 0; i < nlb; i++) {
 		u64 pba = le64_to_cpu(entries[i]);
+		unsigned int mod;
 
 		/* LNVM treats address-spaces as silos, LBA and PBA are
 		 * equally large and zero-indexed.
 		 */
-		if (unlikely(pba >= max_pages && pba != U64_MAX)) {
+		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
 			pr_err("nvm: L2P data entry is out of bounds!\n");
 			return -EINVAL;
 		}
@@ -1033,8 +1026,10 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
 		if (!pba)
 			continue;
 
+		div_u64_rem(pba, rrpc->nr_sects, &mod);
+
 		addr[i].addr = pba;
-		raddr[pba].addr = slba + i;
+		raddr[mod].addr = slba + i;
 	}
 
 	return 0;
@@ -1044,18 +1039,21 @@ static int rrpc_map_init(struct rrpc *rrpc)
 {
 	struct nvm_dev *dev = rrpc->dev;
 	sector_t i;
+	u64 slba;
 	int ret;
 
-	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
+	slba = rrpc->soffset >> (ilog2(dev->sec_size) - 9);
+
+	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
 	if (!rrpc->trans_map)
 		return -ENOMEM;
 
 	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
-							* rrpc->nr_pages);
+							* rrpc->nr_sects);
 	if (!rrpc->rev_trans_map)
 		return -ENOMEM;
 
-	for (i = 0; i < rrpc->nr_pages; i++) {
+	for (i = 0; i < rrpc->nr_sects; i++) {
 		struct rrpc_addr *p = &rrpc->trans_map[i];
 		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
@@ -1067,8 +1065,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
 		return 0;
 
 	/* Bring up the mapping table from device */
-	ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
-							rrpc_l2p_update, rrpc);
+	ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_sects, rrpc_l2p_update,
+									rrpc);
 	if (ret) {
 		pr_err("nvm: rrpc: could not read L2P table.\n");
 		return -EINVAL;
@@ -1077,7 +1075,6 @@ static int rrpc_map_init(struct rrpc *rrpc)
 	return 0;
 }
 
-
 /* Minimum pages needed within a lun */
 #define PAGE_POOL_SIZE 16
 #define ADDR_POOL_SIZE 64
@@ -1132,6 +1129,23 @@ static void rrpc_core_free(struct rrpc *rrpc)
 
 static void rrpc_luns_free(struct rrpc *rrpc)
 {
+	struct nvm_dev *dev = rrpc->dev;
+	struct nvm_lun *lun;
+	struct rrpc_lun *rlun;
+	int i;
+
+	if (!rrpc->luns)
+		return;
+
+	for (i = 0; i < rrpc->nr_luns; i++) {
+		rlun = &rrpc->luns[i];
+		lun = rlun->parent;
+		if (!lun)
+			break;
+		dev->mt->release_lun(dev, lun->id);
+		vfree(rlun->blocks);
+	}
+
 	kfree(rrpc->luns);
 }
@@ -1139,9 +1153,9 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 {
 	struct nvm_dev *dev = rrpc->dev;
 	struct rrpc_lun *rlun;
-	int i, j;
+	int i, j, ret = -EINVAL;
 
-	if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+	if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
 		pr_err("rrpc: number of pages per block too high.");
 		return -EINVAL;
 	}
@@ -1155,25 +1169,26 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 
 	/* 1:1 mapping */
 	for (i = 0; i < rrpc->nr_luns; i++) {
-		struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
+		int lunid = lun_begin + i;
+		struct nvm_lun *lun;
 
-		rlun = &rrpc->luns[i];
-		rlun->rrpc = rrpc;
-		rlun->parent = lun;
-		INIT_LIST_HEAD(&rlun->prio_list);
-		INIT_LIST_HEAD(&rlun->open_list);
-		INIT_LIST_HEAD(&rlun->closed_list);
-
-		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
-		spin_lock_init(&rlun->lock);
+		if (dev->mt->reserve_lun(dev, lunid)) {
+			pr_err("rrpc: lun %u is already allocated\n", lunid);
+			goto err;
+		}
 
-		rrpc->total_blocks += dev->blks_per_lun;
-		rrpc->nr_pages += dev->sec_per_lun;
+		lun = dev->mt->get_lun(dev, lunid);
+		if (!lun)
+			goto err;
 
+		rlun = &rrpc->luns[i];
+		rlun->parent = lun;
 		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
 						rrpc->dev->blks_per_lun);
-		if (!rlun->blocks)
+		if (!rlun->blocks) {
+			ret = -ENOMEM;
 			goto err;
+		}
 
 		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
 			struct rrpc_block *rblk = &rlun->blocks[j];
@@ -1184,11 +1199,43 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 			INIT_LIST_HEAD(&rblk->prio);
 			spin_lock_init(&rblk->lock);
 		}
+
+		rlun->rrpc = rrpc;
+		INIT_LIST_HEAD(&rlun->prio_list);
+		INIT_LIST_HEAD(&rlun->open_list);
+		INIT_LIST_HEAD(&rlun->closed_list);
+
+		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
+		spin_lock_init(&rlun->lock);
+
+		rrpc->total_blocks += dev->blks_per_lun;
+		rrpc->nr_sects += dev->sec_per_lun;
+
 	}
 
 	return 0;
 err:
-	return -ENOMEM;
+	return ret;
+}
+
+/* returns 0 on success and stores the beginning address in *begin */
+static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
+{
+	struct nvm_dev *dev = rrpc->dev;
+	struct nvmm_type *mt = dev->mt;
+	sector_t size = rrpc->nr_sects * dev->sec_size;
+
+	size >>= 9;
+
+	return mt->get_area(dev, begin, size);
+}
+
+static void rrpc_area_free(struct rrpc *rrpc)
+{
+	struct nvm_dev *dev = rrpc->dev;
+	struct nvmm_type *mt = dev->mt;
+
+	mt->put_area(dev, rrpc->soffset);
 }
 
 static void rrpc_free(struct rrpc *rrpc)
@@ -1197,6 +1244,7 @@ static void rrpc_free(struct rrpc *rrpc)
 	rrpc_map_free(rrpc);
 	rrpc_core_free(rrpc);
 	rrpc_luns_free(rrpc);
+	rrpc_area_free(rrpc);
 	kfree(rrpc);
 }
@@ -1221,9 +1269,9 @@ static sector_t rrpc_capacity(void *private)
 
 	/* cur, gc, and two emergency blocks for each lun */
 	reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
-	provisioned = rrpc->nr_pages - reserved;
+	provisioned = rrpc->nr_sects - reserved;
 
-	if (reserved > rrpc->nr_pages) {
+	if (reserved > rrpc->nr_sects) {
 		pr_err("rrpc: not enough space available to expose storage.\n");
 		return 0;
 	}
@@ -1242,10 +1290,11 @@ static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
 	struct nvm_dev *dev = rrpc->dev;
 	int offset;
 	struct rrpc_addr *laddr;
-	u64 paddr, pladdr;
+	u64 bpaddr, paddr, pladdr;
 
-	for (offset = 0; offset < dev->pgs_per_blk; offset++) {
-		paddr = block_to_addr(rrpc, rblk) + offset;
+	bpaddr = block_to_rel_addr(rrpc, rblk);
+	for (offset = 0; offset < dev->sec_per_blk; offset++) {
+		paddr = bpaddr + offset;
 
 		pladdr = rrpc->rev_trans_map[paddr].addr;
 		if (pladdr == ADDR_EMPTY)
@@ -1317,6 +1366,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
 	struct request_queue *bqueue = dev->q;
 	struct request_queue *tqueue = tdisk->queue;
 	struct rrpc *rrpc;
+	sector_t soffset;
 	int ret;
 
 	if (!(dev->identity.dom & NVM_RSP_L2P)) {
@@ -1342,6 +1392,13 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
 	/* simple round-robin strategy */
 	atomic_set(&rrpc->next_lun, -1);
 
+	ret = rrpc_area_init(rrpc, &soffset);
+	if (ret < 0) {
+		pr_err("nvm: rrpc: could not initialize area\n");
+		return ERR_PTR(ret);
+	}
+	rrpc->soffset = soffset;
+
 	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
 	if (ret) {
 		pr_err("nvm: rrpc: could not initialize luns\n");
@@ -1386,7 +1443,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
 	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
 
 	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
-			rrpc->nr_luns, (unsigned long long)rrpc->nr_pages);
+			rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
 
 	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index f7b37336353f..2653484a3b40 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -97,6 +97,7 @@ struct rrpc {
 	struct nvm_dev *dev;
 	struct gendisk *disk;
 
+	sector_t soffset; /* logical sector offset */
 	u64 poffset; /* physical page offset */
 	int lun_offset;
@@ -104,7 +105,7 @@ struct rrpc {
 	struct rrpc_lun *luns;
 
 	/* calculated values */
-	unsigned long long nr_pages;
+	unsigned long long nr_sects;
 	unsigned long total_blocks;
 
 	/* Write strategy variables. Move these into each for structure for each
@@ -156,6 +157,15 @@ struct rrpc_rev_addr {
 	u64 addr;
 };
 
+static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
+								int blk_id)
+{
+	struct rrpc *rrpc = rlun->rrpc;
+	int lun_blk = blk_id % rrpc->dev->blks_per_lun;
+
+	return &rlun->blocks[lun_blk];
+}
+
 static inline sector_t rrpc_get_laddr(struct bio *bio)
 {
 	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
@@ -206,7 +216,7 @@ static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
 				 unsigned pages,
 				 struct rrpc_inflight_rq *r)
 {
-	BUG_ON((laddr + pages) > rrpc->nr_pages);
+	BUG_ON((laddr + pages) > rrpc->nr_sects);
 
 	return __rrpc_lock_laddr(rrpc, laddr, pages, r);
 }
@@ -243,7 +253,7 @@ static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
 	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
 	uint8_t pages = rqd->nr_pages;
 
-	BUG_ON((r->l_start + pages) > rrpc->nr_pages);
+	BUG_ON((r->l_start + pages) > rrpc->nr_sects);
 
 	rrpc_unlock_laddr(rrpc, r);
 }
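
The other new building block above is the logical address area allocator that gennvm_get_area()/gennvm_put_area() implement over gn->area_list: areas are kept sorted by start sector and a request is placed first-fit into the lowest gap that can hold it, which is how each rrpc target obtains its disjoint slice (rrpc->soffset) of the device's logical address space. Below is a minimal userspace sketch of that same first-fit walk; all names are invented for illustration, the list is a plain singly-linked list, and locking and kernel error codes are omitted (the kernel version holds dev->lock and returns -EINVAL/-ENOMEM):

/* Userspace sketch of the first-fit allocator in gennvm_get_area(). */
#include <stdio.h>
#include <stdlib.h>

struct area {
	unsigned long begin;
	unsigned long end;		/* end is excluded, as in gennvm */
	struct area *next;
};

static struct area *area_list;		/* sorted by ->begin */

/* Returns 0 and stores the chosen start in *begin, or -1 if no room. */
static int area_alloc(unsigned long *begin, unsigned long len,
		      unsigned long max_sectors)
{
	struct area *a, *prev = NULL, *next;
	unsigned long b = 0;

	if (len > max_sectors)
		return -1;

	/* walk existing areas; b tracks the first candidate start */
	for (next = area_list; next; next = next->next) {
		if (b + len > next->begin) {	/* would overlap: skip past it */
			b = next->end;
			prev = next;
			continue;
		}
		break;				/* gap found before 'next' */
	}

	if (b + len > max_sectors)
		return -1;

	a = malloc(sizeof(*a));
	if (!a)
		return -1;
	a->begin = *begin = b;
	a->end = b + len;

	/* insert after prev so the list stays sorted */
	if (prev) {
		a->next = prev->next;
		prev->next = a;
	} else {
		a->next = area_list;
		area_list = a;
	}
	return 0;
}

static void area_free(unsigned long begin)
{
	struct area **pp = &area_list, *a;

	for (a = area_list; a; pp = &a->next, a = a->next) {
		if (a->begin != begin)
			continue;
		*pp = a->next;
		free(a);
		return;
	}
}

int main(void)
{
	unsigned long s1, s2, s3;

	area_alloc(&s1, 100, 1000);	/* -> 0   */
	area_alloc(&s2, 200, 1000);	/* -> 100 */
	area_free(s1);			/* release [0,100) */
	area_alloc(&s3, 50, 1000);	/* -> 0, reuses the freed gap */
	printf("%lu %lu %lu\n", s1, s2, s3);	/* prints: 0 100 0 */
	return 0;
}

Keeping the list sorted is what makes both the overlap check and the insertion a single forward walk; that is the same reason the patch can insert with list_add(&area->list, &prev->list) instead of re-sorting.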