author	John Hubbard <[email protected]>	2022-03-22 14:39:46 -0700
committer	Linus Torvalds <[email protected]>	2022-03-22 15:57:01 -0700
commit	f728b9c48d567b3f79b5fba8b993dd4f3e36bab2 (patch)
tree	94858b717f6c0859951f54ccd0480b6f6478cb50
parent	ad6c441266dcd50be080a47e1178a1b15369923c (diff)
mm: change lookup_node() to use get_user_pages_fast()
The purpose of calling get_user_pages_locked() from lookup_node() was to allow for unlocking the mmap_lock when reading a page from the disk during a page fault (hidden behind VM_FAULT_RETRY). The idea was to reduce contention on the heavily-used mmap_lock. (Thanks to Jan Kara for clearly pointing that out, and in fact I've used some of his wording here.)

However, it is unlikely for lookup_node() to take a page fault. With that in mind, change over to calling get_user_pages_fast(). This simplifies the code, runs a little faster in the expected case, and allows removing get_user_pages_locked() entirely, in a subsequent patch.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: John Hubbard <[email protected]>
Reviewed-by: Jan Kara <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Reviewed-by: Claudio Imbrenda <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: Alex Williamson <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Lukas Bulwahn <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
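For context, the two GUP calling conventions that this patch trades between look roughly as follows. This is an illustrative sketch, not part of the patch: the helper names are made up, and it assumes the get_user_pages_locked()/get_user_pages_fast() signatures of this kernel era. The _locked variant is called with mmap_lock held and reports, via the locked flag, whether GUP had to drop the lock to service a fault (VM_FAULT_RETRY); the _fast variant is called without mmap_lock held and only takes the lock internally if its lockless page-table walk cannot pin the page.

#include <linux/mm.h>

/* Hypothetical helper: the pre-patch pattern built on get_user_pages_locked(). */
static int nid_of_user_page_locked(struct mm_struct *mm, unsigned long addr)
{
	struct page *page = NULL;
	int locked = 1;
	int ret;

	mmap_read_lock(mm);
	ret = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &page, &locked);
	if (ret > 0) {
		ret = page_to_nid(page);
		put_page(page);
	}
	if (locked)		/* GUP may already have dropped the lock */
		mmap_read_unlock(mm);
	return ret;
}

/* Hypothetical helper: the post-patch pattern built on get_user_pages_fast(). */
static int nid_of_user_page_fast(unsigned long addr)
{
	struct page *page = NULL;
	int ret;

	/* No mmap_lock held here; gup_fast only takes it internally on fallback. */
	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &page);
	if (ret > 0) {
		ret = page_to_nid(page);
		put_page(page);
	}
	return ret;
}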
-rw-r--r--	mm/mempolicy.c	21
1 file changed, 9 insertions, 12 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 69284d3b5e53..340c22c62193 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -907,17 +907,14 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 static int lookup_node(struct mm_struct *mm, unsigned long addr)
 {
 	struct page *p = NULL;
-	int err;
+	int ret;
 
-	int locked = 1;
-	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
-	if (err > 0) {
-		err = page_to_nid(p);
+	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
+	if (ret > 0) {
+		ret = page_to_nid(p);
 		put_page(p);
 	}
-	if (locked)
-		mmap_read_unlock(mm);
-	return err;
+	return ret;
 }
 
 /* Retrieve NUMA policy */
@@ -968,14 +965,14 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 	if (flags & MPOL_F_NODE) {
 		if (flags & MPOL_F_ADDR) {
 			/*
-			 * Take a refcount on the mpol, lookup_node()
-			 * will drop the mmap_lock, so after calling
-			 * lookup_node() only "pol" remains valid, "vma"
-			 * is stale.
+			 * Take a refcount on the mpol, because we are about to
+			 * drop the mmap_lock, after which only "pol" remains
+			 * valid, "vma" is stale.
 			 */
 			pol_refcount = pol;
 			vma = NULL;
 			mpol_get(pol);
+			mmap_read_unlock(mm);
 			err = lookup_node(mm, addr);
 			if (err < 0)
 				goto out;
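A brief note on the second hunk (my reading, not part of the changelog): since lookup_node() no longer drops the mmap_lock on behalf of its caller, do_get_mempolicy() now drops the lock itself, right after taking the mpol refcount and before calling lookup_node(). That keeps the unlock unconditional and in one place, and it matches how get_user_pages_fast() is meant to be used: the caller does not hold mmap_lock, and the fast path only falls back to taking it internally when the lockless walk cannot pin the page. Condensed, the MPOL_F_NODE | MPOL_F_ADDR path after this patch reads roughly as below (surrounding code and error handling omitted):

	pol_refcount = pol;		/* keep the mempolicy alive across the unlock */
	vma = NULL;			/* the vma must be treated as stale from here on */
	mpol_get(pol);
	mmap_read_unlock(mm);		/* drop the lock before the gup_fast-based lookup */
	err = lookup_node(mm, addr);
	if (err < 0)
		goto out;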