Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c | 23 +++++++++++------------
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index a8251a8d3457..867d40222ec7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -78,10 +78,7 @@
  *  ->i_mutex			(generic_file_buffered_write)
  *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
  *
- *  ->i_mutex
- *    ->i_alloc_sem             (various)
- *
- *  inode_wb_list_lock
+ *  bdi->wb.list_lock
  *    sb_lock			(fs/fs-writeback.c)
  *    ->mapping->tree_lock	(__sync_single_inode)
  *
@@ -99,9 +96,9 @@
  *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
  *    ->private_lock		(page_remove_rmap->set_page_dirty)
  *    ->tree_lock		(page_remove_rmap->set_page_dirty)
- *    inode_wb_list_lock	(page_remove_rmap->set_page_dirty)
+ *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
  *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
- *    inode_wb_list_lock	(zap_pte_range->set_page_dirty)
+ *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
  *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
  *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
  *
@@ -131,6 +128,7 @@ void __delete_from_page_cache(struct page *page)
 
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
+	/* Leave page->index set: truncation lookup relies upon it */
 	mapping->nrpages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	if (PageSwapBacked(page))
@@ -486,6 +484,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 			spin_unlock_irq(&mapping->tree_lock);
 		} else {
 			page->mapping = NULL;
+			/* Leave page->index set: truncation relies upon it */
 			spin_unlock_irq(&mapping->tree_lock);
 			mem_cgroup_uncharge_cache_page(page);
 			page_cache_release(page);
@@ -1795,7 +1794,7 @@ EXPORT_SYMBOL(generic_file_readonly_mmap);
 
 static struct page *__read_cache_page(struct address_space *mapping,
 				pgoff_t index,
-				int (*filler)(void *,struct page*),
+				int (*filler)(void *, struct page *),
 				void *data,
 				gfp_t gfp)
 {
@@ -1826,7 +1825,7 @@ repeat:
 
 static struct page *do_read_cache_page(struct address_space *mapping,
 				pgoff_t index,
-				int (*filler)(void *,struct page*),
+				int (*filler)(void *, struct page *),
 				void *data,
 				gfp_t gfp)
 
@@ -1866,7 +1865,7 @@ out:
  * @mapping:	the page's address_space
  * @index:	the page index
  * @filler:	function to perform the read
- * @data:	destination for read data
+ * @data:	first arg to filler(data, page) function, often left as NULL
  *
  * Same as read_cache_page, but don't wait for page to become unlocked
  * after submitting it to the filler.
@@ -1878,7 +1877,7 @@ out:
  */
 struct page *read_cache_page_async(struct address_space *mapping,
 				pgoff_t index,
-				int (*filler)(void *,struct page*),
+				int (*filler)(void *, struct page *),
 				void *data)
 {
 	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
@@ -1926,7 +1925,7 @@ EXPORT_SYMBOL(read_cache_page_gfp);
  * @mapping:	the page's address_space
  * @index:	the page index
  * @filler:	function to perform the read
- * @data:	destination for read data
+ * @data:	first arg to filler(data, page) function, often left as NULL
  *
  * Read into the page cache. If a page already exists, and PageUptodate() is
  * not set, try to fill the page then wait for it to become unlocked.
@@ -1935,7 +1934,7 @@ EXPORT_SYMBOL(read_cache_page_gfp);
  */
 struct page *read_cache_page(struct address_space *mapping,
 				pgoff_t index,
-				int (*filler)(void *,struct page*),
+				int (*filler)(void *, struct page *),
 				void *data)
 {
 	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
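
The @data kernel-doc fix in the hunks above matches how the callback is actually used: read_cache_page() simply forwards @data as the first argument of filler(data, page). Below is a hypothetical, minimal sketch of that contract, not part of this patch; demo_filler() and demo_read_page() are invented names, while read_cache_page(), clear_highpage(), SetPageUptodate() and unlock_page() are the real interfaces of this kernel vintage.

#include <linux/pagemap.h>
#include <linux/highmem.h>

/*
 * Filler matching int (*filler)(void *, struct page *).  The first
 * argument is whatever the caller handed to read_cache_page() as @data;
 * the filler must leave the page uptodate and unlocked, as ->readpage()
 * would.
 */
static int demo_filler(void *data, struct page *page)
{
	/*
	 * A real filler would start I/O against whatever context @data
	 * describes; zero-filling keeps this sketch self-contained.
	 */
	clear_highpage(page);
	SetPageUptodate(page);
	unlock_page(page);		/* readers wait on this unlock */
	return 0;
}

static struct page *demo_read_page(struct address_space *mapping, pgoff_t index)
{
	/* @data is only forwarded to the filler, so NULL is common here */
	return read_cache_page(mapping, index, demo_filler, NULL);
}

Most in-tree callers reach this path through read_mapping_page(), which passes the mapping's ->readpage() as the filler, which is why the comment says @data is "often left as NULL".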
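The two "Leave page->index set" comments added above record an invariant rather than a behaviour change: page-cache deletion clears page->mapping but deliberately keeps page->index, because truncation-style lookups consult the index on pages they find even when those pages race with removal. The fragment below is a rough, hypothetical sketch of that lookup pattern, loosely modelled on the truncate_inode_pages_range() loop of this era (demo_truncate_from() is an invented name); it only illustrates why the index must survive deletion.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>

static void demo_truncate_from(struct address_space *mapping, pgoff_t start)
{
	struct pagevec pvec;
	pgoff_t index = start;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/*
			 * page->index is trusted here to resume the scan,
			 * even if the page was just deleted from the radix
			 * tree on another CPU - hence the comments added in
			 * __delete_from_page_cache() above.
			 */
			index = page->index + 1;

			if (!trylock_page(page))
				continue;
			/* Skip pages that were removed or remapped meanwhile */
			if (page->mapping == mapping)
				truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}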