Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	157
1 file changed, 90 insertions, 67 deletions
@@ -709,26 +709,26 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 	return __dax_invalidate_entry(mapping, index, false);
 }
 
-static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
-			     sector_t sector, struct page *to, unsigned long vaddr)
+static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
 {
+	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
+}
+
+static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
+{
+	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
 	void *vto, *kaddr;
-	pgoff_t pgoff;
 	long rc;
 	int id;
 
-	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
-	if (rc)
-		return rc;
-
 	id = dax_read_lock();
-	rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
+	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, &kaddr, NULL);
 	if (rc < 0) {
 		dax_read_unlock(id);
 		return rc;
 	}
-	vto = kmap_atomic(to);
-	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
+	vto = kmap_atomic(vmf->cow_page);
+	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
 	kunmap_atomic(vto);
 	dax_read_unlock(id);
 	return 0;
@@ -1005,22 +1005,13 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
 
-static sector_t dax_iomap_sector(const struct iomap *iomap, loff_t pos)
-{
-	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
-}
-
 static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
 			 pfn_t *pfnp)
 {
-	const sector_t sector = dax_iomap_sector(iomap, pos);
-	pgoff_t pgoff;
+	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
 	int id, rc;
 	long length;
 
-	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
-	if (rc)
-		return rc;
 	id = dax_read_lock();
 	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
 				   NULL, pfnp);
@@ -1126,42 +1117,87 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 }
 #endif /* CONFIG_FS_DAX_PMD */
 
-s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
+static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
+		unsigned int offset, size_t size)
 {
-	sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
-	pgoff_t pgoff;
-	long rc, id;
 	void *kaddr;
-	bool page_aligned = false;
-	unsigned offset = offset_in_page(pos);
-	unsigned size = min_t(u64, PAGE_SIZE - offset, length);
+	long ret;
 
-	if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
-	    (size == PAGE_SIZE))
-		page_aligned = true;
+	ret = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
+	if (ret > 0) {
+		memset(kaddr + offset, 0, size);
+		dax_flush(dax_dev, kaddr + offset, size);
+	}
+	return ret;
+}
 
-	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
-	if (rc)
-		return rc;
+static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
+{
+	const struct iomap *iomap = &iter->iomap;
+	const struct iomap *srcmap = iomap_iter_srcmap(iter);
+	loff_t pos = iter->pos;
+	u64 length = iomap_length(iter);
+	s64 written = 0;
+
+	/* already zeroed?  we're done. */
+	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
+		return length;
+
+	do {
+		unsigned offset = offset_in_page(pos);
+		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
+		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
+		long rc;
+		int id;
+
+		id = dax_read_lock();
+		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
+			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
+		else
+			rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
+		dax_read_unlock(id);
 
-	id = dax_read_lock();
+		if (rc < 0)
+			return rc;
+		pos += size;
+		length -= size;
+		written += size;
+		if (did_zero)
+			*did_zero = true;
+	} while (length > 0);
 
-	if (page_aligned)
-		rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
-	else
-		rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
-	if (rc < 0) {
-		dax_read_unlock(id);
-		return rc;
-	}
+	return written;
+}
 
-	if (!page_aligned) {
-		memset(kaddr + offset, 0, size);
-		dax_flush(iomap->dax_dev, kaddr + offset, size);
-	}
-	dax_read_unlock(id);
-	return size;
+int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+		const struct iomap_ops *ops)
+{
+	struct iomap_iter iter = {
+		.inode		= inode,
+		.pos		= pos,
+		.len		= len,
+		.flags		= IOMAP_DAX | IOMAP_ZERO,
+	};
+	int ret;
+
+	while ((ret = iomap_iter(&iter, ops)) > 0)
+		iter.processed = dax_zero_iter(&iter, did_zero);
+	return ret;
 }
+EXPORT_SYMBOL_GPL(dax_zero_range);
+
+int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+		const struct iomap_ops *ops)
+{
+	unsigned int blocksize = i_blocksize(inode);
+	unsigned int off = pos & (blocksize - 1);
+
+	/* Block boundary? Nothing to do */
+	if (!off)
+		return 0;
+	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
+}
+EXPORT_SYMBOL_GPL(dax_truncate_page);
 
 static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
 		struct iov_iter *iter)
@@ -1169,7 +1205,6 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
 	const struct iomap *iomap = &iomi->iomap;
 	loff_t length = iomap_length(iomi);
 	loff_t pos = iomi->pos;
-	struct block_device *bdev = iomap->bdev;
 	struct dax_device *dax_dev = iomap->dax_dev;
 	loff_t end = pos + length, done = 0;
 	ssize_t ret = 0;
@@ -1203,9 +1238,8 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
 	while (pos < end) {
 		unsigned offset = pos & (PAGE_SIZE - 1);
 		const size_t size = ALIGN(length + offset, PAGE_SIZE);
-		const sector_t sector = dax_iomap_sector(iomap, pos);
+		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
 		ssize_t map_len;
-		pgoff_t pgoff;
 		void *kaddr;
 
 		if (fatal_signal_pending(current)) {
@@ -1213,10 +1247,6 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
 			break;
 		}
 
-		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
-		if (ret)
-			break;
-
 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
 				&kaddr, NULL);
 		if (map_len < 0) {
@@ -1230,11 +1260,6 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
 		if (map_len > end - pos)
 			map_len = end - pos;
 
-		/*
-		 * The userspace address for the memory copy has already been
-		 * validated via access_ok() in either vfs_read() or
-		 * vfs_write(), depending on which operation we are doing.
-		 */
 		if (iov_iter_rw(iter) == WRITE)
 			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
 					map_len, iter);
@@ -1274,6 +1299,7 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		.inode		= iocb->ki_filp->f_mapping->host,
 		.pos		= iocb->ki_pos,
 		.len		= iov_iter_count(iter),
+		.flags		= IOMAP_DAX,
 	};
 	loff_t done = 0;
 	int ret;
@@ -1332,19 +1358,16 @@ static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
 static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
 		const struct iomap_iter *iter)
 {
-	sector_t sector = dax_iomap_sector(&iter->iomap, iter->pos);
-	unsigned long vaddr = vmf->address;
 	vm_fault_t ret;
 	int error = 0;
 
 	switch (iter->iomap.type) {
 	case IOMAP_HOLE:
 	case IOMAP_UNWRITTEN:
-		clear_user_highpage(vmf->cow_page, vaddr);
+		clear_user_highpage(vmf->cow_page, vmf->address);
 		break;
 	case IOMAP_MAPPED:
-		error = copy_cow_page_dax(iter->iomap.bdev, iter->iomap.dax_dev,
-					  sector, vmf->cow_page, vaddr);
+		error = copy_cow_page_dax(vmf, iter);
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -1430,7 +1453,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		.inode		= mapping->host,
 		.pos		= (loff_t)vmf->pgoff << PAGE_SHIFT,
 		.len		= PAGE_SIZE,
-		.flags		= IOMAP_FAULT,
+		.flags		= IOMAP_DAX | IOMAP_FAULT,
 	};
 	vm_fault_t ret = 0;
 	void *entry;
@@ -1539,7 +1562,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	struct iomap_iter iter = {
 		.inode		= mapping->host,
 		.len		= PMD_SIZE,
-		.flags		= IOMAP_FAULT,
+		.flags		= IOMAP_DAX | IOMAP_FAULT,
 	};
 	vm_fault_t ret = VM_FAULT_FALLBACK;
 	pgoff_t max_pgoff;
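The hunks above replace the old dax_iomap_zero() entry point with two exported helpers, dax_zero_range() and dax_truncate_page(). As a rough sketch of how a filesystem caller might use them: the filesystem supplies its own iomap_ops table and picks the DAX variant for DAX inodes. Note that myfs_zero_partial_block() and myfs_iomap_ops below are hypothetical names for illustration, not part of this patch.

/*
 * Hypothetical caller of the helpers exported above.  "myfs_iomap_ops"
 * is an assumed filesystem-provided iomap_ops table; the patch itself
 * only defines dax_zero_range() and dax_truncate_page().
 */
static int myfs_zero_partial_block(struct inode *inode, loff_t pos)
{
	bool did_zero = false;

	/* DAX inodes bypass the page cache, so use the DAX helper. */
	if (IS_DAX(inode))
		return dax_truncate_page(inode, pos, &did_zero,
					 &myfs_iomap_ops);
	/* Otherwise zero through the regular buffered iomap path. */
	return iomap_truncate_page(inode, pos, &did_zero,
				   &myfs_iomap_ops);
}

Both paths walk the file with the same iomap_iter() loop; the DAX variant additionally sets IOMAP_DAX in iter.flags and zeroes through dax_zero_iter() instead of the page cache.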