Diffstat (limited to 'Documentation/filesystems/locking.rst')
-rw-r--r-- | Documentation/filesystems/locking.rst | 36
1 file changed, 18 insertions, 18 deletions
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index c26d854275a0..515bc48ab58b 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -237,20 +237,20 @@ address_space_operations
 prototypes::
 
         int (*writepage)(struct page *page, struct writeback_control *wbc);
-        int (*readpage)(struct file *, struct page *);
+        int (*read_folio)(struct file *, struct folio *);
         int (*writepages)(struct address_space *, struct writeback_control *);
         bool (*dirty_folio)(struct address_space *, struct folio *folio);
         void (*readahead)(struct readahead_control *);
         int (*write_begin)(struct file *, struct address_space *mapping,
-                                loff_t pos, unsigned len, unsigned flags,
+                                loff_t pos, unsigned len,
                                 struct page **pagep, void **fsdata);
         int (*write_end)(struct file *, struct address_space *mapping,
                                 loff_t pos, unsigned len, unsigned copied,
                                 struct page *page, void *fsdata);
         sector_t (*bmap)(struct address_space *, sector_t);
         void (*invalidate_folio) (struct folio *, size_t start, size_t len);
-        int (*releasepage) (struct page *, int);
-        void (*freepage)(struct page *);
+        bool (*release_folio)(struct folio *, gfp_t);
+        void (*free_folio)(struct folio *);
         int (*direct_IO)(struct kiocb *, struct iov_iter *iter);
         bool (*isolate_page) (struct page *, isolate_mode_t);
         int (*migratepage)(struct address_space *, struct page *, struct page *);
@@ -262,22 +262,22 @@ prototypes::
         int (*swap_deactivate)(struct file *);
 
 locking rules:
-        All except dirty_folio and freepage may block
+        All except dirty_folio and free_folio may block
 
 ======================  ========================  =========  ===============
-ops                     PageLocked(page)          i_rwsem    invalidate_lock
+ops                     folio locked              i_rwsem    invalidate_lock
 ======================  ========================  =========  ===============
 writepage:              yes, unlocks (see below)
-readpage:               yes, unlocks                         shared
+read_folio:             yes, unlocks                         shared
 writepages:
-dirty_folio             maybe
+dirty_folio:            maybe
 readahead:              yes, unlocks                         shared
 write_begin:            locks the page            exclusive
 write_end:              yes, unlocks              exclusive
 bmap:
 invalidate_folio:       yes                                  exclusive
-releasepage:            yes
-freepage:               yes
+release_folio:          yes
+free_folio:             yes
 direct_IO:
 isolate_page:           yes
 migratepage:            yes (both)
@@ -289,13 +289,13 @@ swap_activate:          no
 swap_deactivate:        no
 ======================  ========================  =========  ===============
 
-->write_begin(), ->write_end() and ->readpage() may be called from
+->write_begin(), ->write_end() and ->read_folio() may be called from
 the request handler (/dev/loop).
 
-->readpage() unlocks the page, either synchronously or via I/O
+->read_folio() unlocks the folio, either synchronously or via I/O
 completion.
 
-->readahead() unlocks the pages that I/O is attempted on like ->readpage().
+->readahead() unlocks the folios that I/O is attempted on like ->read_folio().
 
 ->writepage() is used for two purposes: for "memory cleansing" and for
 "sync".  These are quite different operations and the behaviour may differ
@@ -372,12 +372,12 @@ invalidate_lock before invalidating page cache in truncate / hole punch path
 (and thus calling into ->invalidate_folio) to block races between page cache
 invalidation and page cache filling functions (fault, read, ...).
 
-->releasepage() is called when the kernel is about to try to drop the
-buffers from the page in preparation for freeing it.  It returns zero to
-indicate that the buffers are (or may be) freeable.  If ->releasepage is zero,
-the kernel assumes that the fs has no private interest in the buffers.
+->release_folio() is called when the kernel is about to try to drop the
+buffers from the folio in preparation for freeing it.  It returns false to
+indicate that the buffers are (or may be) freeable.  If ->release_folio is
+NULL, the kernel assumes that the fs has no private interest in the buffers.
 
-->freepage() is called when the kernel is done dropping the page
+->free_folio() is called when the kernel has dropped the folio
 from the page cache.
 
 ->launder_folio() may be called prior to releasing a folio if
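As a point of reference (not part of the patch), the following is a minimal sketch of what a filesystem's address_space_operations could look like after this conversion. The name "examplefs" and the trivial method bodies are hypothetical; only the hook names, signatures, and locking expectations follow the prototypes and rules documented above.

/*
 * Illustrative only: "examplefs" is a hypothetical filesystem used to show
 * the post-conversion address_space_operations hooks.
 */
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/*
 * ->read_folio() is entered with the folio locked and must unlock it,
 * either here or from I/O completion.  This sketch treats every read as
 * a hole and hands back zeroes.
 */
static int examplefs_read_folio(struct file *file, struct folio *folio)
{
	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

/*
 * ->release_folio() reports whether the fs's private buffers attached to
 * the folio can be dropped (see the notes above on the return value).
 * This sketch never attaches private data, so there is nothing to keep.
 */
static bool examplefs_release_folio(struct folio *folio, gfp_t gfp)
{
	return true;
}

/*
 * ->free_folio() runs after the folio has been dropped from the page
 * cache; per the locking rules above it must not block.
 */
static void examplefs_free_folio(struct folio *folio)
{
}

static const struct address_space_operations examplefs_aops = {
	.read_folio	= examplefs_read_folio,
	.release_folio	= examplefs_release_folio,
	.free_folio	= examplefs_free_folio,
};

An existing filesystem would typically convert the same way: rename its .readpage/.releasepage/.freepage methods, switch the struct page arguments to struct folio, and change the ->release_folio() return type from int to bool.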