diff options
Diffstat (limited to 'fs/ntfs/aops.c')
| -rw-r--r-- | fs/ntfs/aops.c | 50 | 
1 file changed, 25 insertions, 25 deletions
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index 7521e11db728..97768a1379f2 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -74,7 +74,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)  		set_buffer_uptodate(bh); -		file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + +		file_ofs = ((s64)page->index << PAGE_SHIFT) +  				bh_offset(bh);  		read_lock_irqsave(&ni->size_lock, flags);  		init_size = ni->initialized_size; @@ -142,7 +142,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)  		u32 rec_size;  		rec_size = ni->itype.index.block_size; -		recs = PAGE_CACHE_SIZE / rec_size; +		recs = PAGE_SIZE / rec_size;  		/* Should have been verified before we got here... */  		BUG_ON(!recs);  		local_irq_save(flags); @@ -229,7 +229,7 @@ static int ntfs_read_block(struct page *page)  	 * fully truncated, truncate will throw it away as soon as we unlock  	 * it so no need to worry what we do with it.  	 */ -	iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); +	iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);  	read_lock_irqsave(&ni->size_lock, flags);  	lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;  	init_size = ni->initialized_size; @@ -412,9 +412,9 @@ retry_readpage:  	vi = page->mapping->host;  	i_size = i_size_read(vi);  	/* Is the page fully outside i_size? (truncate in progress) */ -	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> -			PAGE_CACHE_SHIFT)) { -		zero_user(page, 0, PAGE_CACHE_SIZE); +	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >> +			PAGE_SHIFT)) { +		zero_user(page, 0, PAGE_SIZE);  		ntfs_debug("Read outside i_size - truncated?");  		goto done;  	} @@ -463,7 +463,7 @@ retry_readpage:  	 * ok to ignore the compressed flag here.  	 
*/  	if (unlikely(page->index > 0)) { -		zero_user(page, 0, PAGE_CACHE_SIZE); +		zero_user(page, 0, PAGE_SIZE);  		goto done;  	}  	if (!NInoAttr(ni)) @@ -509,7 +509,7 @@ retry_readpage:  			le16_to_cpu(ctx->attr->data.resident.value_offset),  			attr_len);  	/* Zero the remainder of the page. */ -	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); +	memset(addr + attr_len, 0, PAGE_SIZE - attr_len);  	flush_dcache_page(page);  	kunmap_atomic(addr);  put_unm_err_out: @@ -599,7 +599,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)  	/* NOTE: Different naming scheme to ntfs_read_block()! */  	/* The first block in the page. */ -	block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); +	block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);  	read_lock_irqsave(&ni->size_lock, flags);  	i_size = i_size_read(vi); @@ -674,7 +674,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)  				// in the inode.  				// Again, for each page do:  				//	__set_page_dirty_buffers(); -				// page_cache_release() +				// put_page()  				// We don't need to wait on the writes.  				// Update iblock.  			
} @@ -925,7 +925,7 @@ static int ntfs_write_mst_block(struct page *page,  	ntfs_volume *vol = ni->vol;  	u8 *kaddr;  	unsigned int rec_size = ni->itype.index.block_size; -	ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size]; +	ntfs_inode *locked_nis[PAGE_SIZE / rec_size];  	struct buffer_head *bh, *head, *tbh, *rec_start_bh;  	struct buffer_head *bhs[MAX_BUF_PER_PAGE];  	runlist_element *rl; @@ -949,7 +949,7 @@ static int ntfs_write_mst_block(struct page *page,  			(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));  	bh_size = vol->sb->s_blocksize;  	bh_size_bits = vol->sb->s_blocksize_bits; -	max_bhs = PAGE_CACHE_SIZE / bh_size; +	max_bhs = PAGE_SIZE / bh_size;  	BUG_ON(!max_bhs);  	BUG_ON(max_bhs > MAX_BUF_PER_PAGE); @@ -961,13 +961,13 @@ static int ntfs_write_mst_block(struct page *page,  	BUG_ON(!bh);  	rec_size_bits = ni->itype.index.block_size_bits; -	BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits)); +	BUG_ON(!(PAGE_SIZE >> rec_size_bits));  	bhs_per_rec = rec_size >> bh_size_bits;  	BUG_ON(!bhs_per_rec);  	/* The first block in the page. */  	rec_block = block = (sector_t)page->index << -			(PAGE_CACHE_SHIFT - bh_size_bits); +			(PAGE_SHIFT - bh_size_bits);  	/* The first out of bounds block for the data size. */  	dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits; @@ -1133,7 +1133,7 @@ lock_retry_remap:  			unsigned long mft_no;  			/* Get the mft record number. */ -			mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) +			mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)  					>> rec_size_bits;  			/* Check whether to write this mft record. */  			tni = NULL; @@ -1249,7 +1249,7 @@ do_mirror:  				continue;  			ofs = bh_offset(tbh);  			/* Get the mft record number. 
*/ -			mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) +			mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)  					>> rec_size_bits;  			if (mft_no < vol->mftmirr_size)  				ntfs_sync_mft_mirror(vol, mft_no, @@ -1300,7 +1300,7 @@ done:  		 * Set page error if there is only one ntfs record in the page.  		 * Otherwise we would loose per-record granularity.  		 */ -		if (ni->itype.index.block_size == PAGE_CACHE_SIZE) +		if (ni->itype.index.block_size == PAGE_SIZE)  			SetPageError(page);  		NVolSetErrors(vol);  	} @@ -1308,7 +1308,7 @@ done:  		ntfs_debug("Page still contains one or more dirty ntfs "  				"records.  Redirtying the page starting at "  				"record 0x%lx.", page->index << -				(PAGE_CACHE_SHIFT - rec_size_bits)); +				(PAGE_SHIFT - rec_size_bits));  		redirty_page_for_writepage(wbc, page);  		unlock_page(page);  	} else { @@ -1365,13 +1365,13 @@ retry_writepage:  	BUG_ON(!PageLocked(page));  	i_size = i_size_read(vi);  	/* Is the page fully outside i_size? (truncate in progress) */ -	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> -			PAGE_CACHE_SHIFT)) { +	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >> +			PAGE_SHIFT)) {  		/*  		 * The page may have dirty, unmapped buffers.  Make them  		 * freeable here, so the page does not leak.  		 */ -		block_invalidatepage(page, 0, PAGE_CACHE_SIZE); +		block_invalidatepage(page, 0, PAGE_SIZE);  		unlock_page(page);  		ntfs_debug("Write outside i_size - truncated?");  		return 0; @@ -1414,10 +1414,10 @@ retry_writepage:  	/* NInoNonResident() == NInoIndexAllocPresent() */  	if (NInoNonResident(ni)) {  		/* We have to zero every time due to mmap-at-end-of-file. */ -		if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { +		if (page->index >= (i_size >> PAGE_SHIFT)) {  			/* The page straddles i_size. 
*/ -			unsigned int ofs = i_size & ~PAGE_CACHE_MASK; -			zero_user_segment(page, ofs, PAGE_CACHE_SIZE); +			unsigned int ofs = i_size & ~PAGE_MASK; +			zero_user_segment(page, ofs, PAGE_SIZE);  		}  		/* Handle mst protected attributes. */  		if (NInoMstProtected(ni)) @@ -1500,7 +1500,7 @@ retry_writepage:  			le16_to_cpu(ctx->attr->data.resident.value_offset),  			addr, attr_len);  	/* Zero out of bounds area in the page cache page. */ -	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); +	memset(addr + attr_len, 0, PAGE_SIZE - attr_len);  	kunmap_atomic(addr);  	flush_dcache_page(page);  	flush_dcache_mft_record_page(ctx->ntfs_ino);  |