diff options
Diffstat (limited to 'fs/dax.c')
-rw-r--r--  fs/dax.c  38
1 file changed, 19 insertions, 19 deletions
@@ -244,7 +244,6 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,   * @iocb: The control block for this I/O   * @inode: The file which the I/O is directed at   * @iter: The addresses to do I/O from or to - * @pos: The file offset where the I/O starts   * @get_block: The filesystem method used to translate file offsets to blocks   * @end_io: A filesystem callback for I/O completion   * @flags: See below @@ -257,11 +256,12 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,   * is in progress.   */  ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode, -		  struct iov_iter *iter, loff_t pos, get_block_t get_block, +		  struct iov_iter *iter, get_block_t get_block,  		  dio_iodone_t end_io, int flags)  {  	struct buffer_head bh;  	ssize_t retval = -EINVAL; +	loff_t pos = iocb->ki_pos;  	loff_t end = pos + iov_iter_count(iter);  	memset(&bh, 0, sizeof(bh)); @@ -323,7 +323,7 @@ static int dax_load_hole(struct address_space *mapping, struct page *page,  	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;  	if (vmf->pgoff >= size) {  		unlock_page(page); -		page_cache_release(page); +		put_page(page);  		return VM_FAULT_SIGBUS;  	} @@ -351,7 +351,7 @@ static int copy_user_bh(struct page *to, struct inode *inode,  }  #define NO_SECTOR -1 -#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT)) +#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))  static int dax_radix_entry(struct address_space *mapping, pgoff_t index,  		sector_t sector, bool pmd_entry, bool dirty) @@ -506,8 +506,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,  	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)  		return 0; -	start_index = wbc->range_start >> PAGE_CACHE_SHIFT; -	end_index = wbc->range_end >> PAGE_CACHE_SHIFT; +	start_index = wbc->range_start >> PAGE_SHIFT; +	end_index = wbc->range_end >> PAGE_SHIFT;  	pmd_index = DAX_PMD_INDEX(start_index);  	rcu_read_lock(); @@ 
-642,12 +642,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,  	page = find_get_page(mapping, vmf->pgoff);  	if (page) {  		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { -			page_cache_release(page); +			put_page(page);  			return VM_FAULT_RETRY;  		}  		if (unlikely(page->mapping != mapping)) {  			unlock_page(page); -			page_cache_release(page); +			put_page(page);  			goto repeat;  		}  		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; @@ -711,10 +711,10 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,  	if (page) {  		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT, -							PAGE_CACHE_SIZE, 0); +							PAGE_SIZE, 0);  		delete_from_page_cache(page);  		unlock_page(page); -		page_cache_release(page); +		put_page(page);  		page = NULL;  	} @@ -747,7 +747,7 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,   unlock_page:  	if (page) {  		unlock_page(page); -		page_cache_release(page); +		put_page(page);  	}  	goto out;  } @@ -1094,7 +1094,7 @@ EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);   * you are truncating a file, the helper function dax_truncate_page() may be   * more convenient.   * - * We work in terms of PAGE_CACHE_SIZE here for commonality with + * We work in terms of PAGE_SIZE here for commonality with   * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem   * took care of disposing of the unnecessary blocks.  Even if the filesystem   * block size is smaller than PAGE_SIZE, we have to zero the rest of the page @@ -1104,18 +1104,18 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,  							get_block_t get_block)  {  	struct buffer_head bh; -	pgoff_t index = from >> PAGE_CACHE_SHIFT; -	unsigned offset = from & (PAGE_CACHE_SIZE-1); +	pgoff_t index = from >> PAGE_SHIFT; +	unsigned offset = from & (PAGE_SIZE-1);  	int err;  	/* Block boundary? 
Nothing to do */  	if (!length)  		return 0; -	BUG_ON((offset + length) > PAGE_CACHE_SIZE); +	BUG_ON((offset + length) > PAGE_SIZE);  	memset(&bh, 0, sizeof(bh));  	bh.b_bdev = inode->i_sb->s_bdev; -	bh.b_size = PAGE_CACHE_SIZE; +	bh.b_size = PAGE_SIZE;  	err = get_block(inode, index, &bh, 0);  	if (err < 0)  		return err; @@ -1123,7 +1123,7 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,  		struct block_device *bdev = bh.b_bdev;  		struct blk_dax_ctl dax = {  			.sector = to_sector(&bh, inode), -			.size = PAGE_CACHE_SIZE, +			.size = PAGE_SIZE,  		};  		if (dax_map_atomic(bdev, &dax) < 0) @@ -1146,7 +1146,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);   * Similar to block_truncate_page(), this function can be called by a   * filesystem when it is truncating a DAX file to handle the partial page.   * - * We work in terms of PAGE_CACHE_SIZE here for commonality with + * We work in terms of PAGE_SIZE here for commonality with   * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem   * took care of disposing of the unnecessary blocks.  Even if the filesystem   * block size is smaller than PAGE_SIZE, we have to zero the rest of the page @@ -1154,7 +1154,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);   */  int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)  { -	unsigned length = PAGE_CACHE_ALIGN(from) - from; +	unsigned length = PAGE_ALIGN(from) - from;  	return dax_zero_page_range(inode, from, length, get_block);  }  EXPORT_SYMBOL_GPL(dax_truncate_page);  |