author | Ingo Molnar <mingo@kernel.org> | 2024-01-26 10:26:50 +0100
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2024-01-26 10:26:50 +0100
commit | 42ac0be18bfa09c03f52244f7c3e15c89b38532f (patch) |
tree | 601fd5e6da2ca0b77aa8cb22f0e81a29ba6fe1d6 /mm/filemap.c |
parent | 8e5647a723c49d73b9f108a8bb38e8c29d3948ea (diff) |
parent | ecb1b8288dc7ccbdcb3b9df005fa1c0e0c0388a7 (diff) |
Merge branch 'linus' into x86/mm, to refresh the branch and pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r-- | mm/filemap.c | 28 |
1 file changed, 20 insertions(+), 8 deletions(-)
```diff
diff --git a/mm/filemap.c b/mm/filemap.c
index 32eedf3afd45..750e779c23db 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -45,6 +45,7 @@
 #include <linux/migrate.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/splice.h>
+#include <linux/rcupdate_wait.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include "internal.h"
@@ -113,11 +114,11 @@
  *    ->i_pages lock          (try_to_unmap_one)
  *    ->lruvec->lru_lock      (follow_page->mark_page_accessed)
  *    ->lruvec->lru_lock      (check_pte_range->isolate_lru_page)
- *    ->private_lock          (page_remove_rmap->set_page_dirty)
- *    ->i_pages lock          (page_remove_rmap->set_page_dirty)
- *    bdi.wb->list_lock       (page_remove_rmap->set_page_dirty)
- *    ->inode->i_lock         (page_remove_rmap->set_page_dirty)
- *    ->memcg->move_lock      (page_remove_rmap->folio_memcg_lock)
+ *    ->private_lock          (folio_remove_rmap_pte->set_page_dirty)
+ *    ->i_pages lock          (folio_remove_rmap_pte->set_page_dirty)
+ *    bdi.wb->list_lock       (folio_remove_rmap_pte->set_page_dirty)
+ *    ->inode->i_lock         (folio_remove_rmap_pte->set_page_dirty)
+ *    ->memcg->move_lock      (folio_remove_rmap_pte->folio_memcg_lock)
  *    bdi.wb->list_lock       (zap_pte_range->set_page_dirty)
  *    ->inode->i_lock         (zap_pte_range->set_page_dirty)
  *    ->private_lock          (zap_pte_range->block_dirty_folio)
@@ -1623,7 +1624,7 @@ EXPORT_SYMBOL_GPL(__folio_lock_killable);
 static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
 {
 	struct wait_queue_head *q = folio_waitqueue(folio);
-	int ret = 0;
+	int ret;
 
 	wait->folio = folio;
 	wait->bit_nr = PG_locked;
@@ -2173,7 +2174,7 @@ update_start:
 
 	if (nr) {
 		folio = fbatch->folios[nr - 1];
-		*start = folio->index + folio_nr_pages(folio);
+		*start = folio_next_index(folio);
 	}
 out:
 	rcu_read_unlock();
@@ -2608,6 +2609,15 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
 		end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
 
 		/*
+		 * Pairs with a barrier in
+		 * block_write_end()->mark_buffer_dirty() or other page
+		 * dirtying routines like iomap_write_end() to ensure
+		 * changes to page contents are visible before we see
+		 * increased inode size.
+		 */
+		smp_rmb();
+
+		/*
 		 * Once we start copying data, we don't want to be touching any
 		 * cachelines that might be contended:
 		 */
@@ -2678,6 +2688,7 @@ int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
 
 	return filemap_write_and_wait_range(mapping, pos, end);
 }
+EXPORT_SYMBOL_GPL(kiocb_write_and_wait);
 
 int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
 {
@@ -2705,6 +2716,7 @@ int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
 	return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
 					     end >> PAGE_SHIFT);
 }
+EXPORT_SYMBOL_GPL(kiocb_invalidate_pages);
 
 /**
  * generic_file_read_iter - generic filesystem read routine
@@ -3371,7 +3383,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
 		}
 	}
 
-	if (pmd_none(*vmf->pmd))
+	if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
 		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
 
 	return false;
```
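One hunk above replaces an open-coded index computation with the folio_next_index() helper. As the removed line itself shows, the helper is just a named form of the same arithmetic (its definition lives in include/linux/pagemap.h), so the change is behavior-preserving:

```c
/*
 * The helper swapped in by the fbatch hunk: the page-cache index one
 * past the end of @folio -- exactly what the removed line computed
 * by hand.
 */
static inline pgoff_t folio_next_index(struct folio *folio)
{
	return folio->index + folio_nr_pages(folio);
}
```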
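The most substantive hunk is the smp_rmb() added to filemap_read(). As the in-diff comment explains, it pairs with a write-side barrier in the page-dirtying paths, so a reader that observes an increased inode size is guaranteed to also observe the page contents written before that size update. A minimal userspace C11 sketch of the same publish/observe pattern (all names illustrative, not kernel APIs):

```c
/*
 * Userspace C11 analogue of the ordering the new smp_rmb() enforces in
 * filemap_read(): the writer publishes data *before* the size, and the
 * reader loads the size *before* the data.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static char page_contents[4096];
static atomic_long inode_size;	/* plays the role of i_size */

static void *writer(void *arg)
{
	(void)arg;
	page_contents[0] = 'x';			   /* 1. write new contents */
	atomic_thread_fence(memory_order_release); /* ~ write-side barrier  */
	atomic_store_explicit(&inode_size, 1,	   /* 2. publish larger size */
			      memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	long n = atomic_load_explicit(&inode_size,  /* 1. observe the size */
				      memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);  /* ~ smp_rmb()         */
	if (n > 0)	/* 2. contents are now guaranteed to be visible    */
		printf("read: %c\n", page_contents[0]);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
```

Here the release fence stands in for the write-side barrier the comment cites (e.g. in block_write_end()->mark_buffer_dirty()), and the acquire fence stands in for the new smp_rmb(): if the reader sees inode_size == 1, the fence pairing guarantees it also sees page_contents[0] == 'x'.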