author     Aneesh Kumar K.V <[email protected]>   2021-07-07 18:10:18 -0700
committer  Linus Torvalds <[email protected]>     2021-07-08 11:48:23 -0700
commit     3bbda69c48d27474a9e6a90cf4680b295a7efa46 (patch)
tree       e7c9acf3a396ef49be10a2385b38e232824e318c
parent     97113eb39fa7972722ff490b947d8af023e1f6a2 (diff)
mm/mremap: allow arch runtime override
Patch series "Speedup mremap on ppc64", v8.

This patchset enables MOVE_PMD/MOVE_PUD support on power. This requires the platform to support updating higher-level page tables without updating page table entries. This also needs to invalidate the Page Walk Cache on architectures supporting the same.

This patch (of 3):

Architectures like ppc64 support faster mremap only with radix translation. Hence allow a runtime check w.r.t. support for fast mremap.

Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Aneesh Kumar K.V <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Kalesh Singh <[email protected]>
Cc: Nicholas Piggin <[email protected]>
Cc: Joel Fernandes <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: "Aneesh Kumar K . V" <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Stephen Rothwell <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
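The override mechanism is the kernel's usual weak-default idiom: the architecture header defines a macro with the same name as the function, and the generic code compiles its fallback only when that macro is absent. A minimal standalone sketch of the pattern, not kernel code; the return values here are stand-ins for radix_enabled() and the IS_ENABLED() checks:

/*
 * Userspace illustration of the arch-override pattern used by this
 * patch. Defining the macro to its own name lets generic code detect
 * the override at preprocessing time.
 */
#include <stdbool.h>
#include <stdio.h>

/* "Architecture" side: defining the macro signals an override. */
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return true;	/* stand-in for radix_enabled() */
}

/* "Generic" side: skipped here because the macro is already defined. */
#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return false;	/* stand-in for the IS_ENABLED() default */
}
#endif

int main(void)
{
	printf("fast mremap supported: %d\n", arch_supports_page_table_move());
	return 0;
}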
-rw-r--r--  arch/powerpc/include/asm/tlb.h   6
-rw-r--r--  mm/mremap.c                     15
2 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 160422a439aa..09a9ae5f3656 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -83,5 +83,11 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
 }
 #endif
 
+#define arch_supports_page_table_move arch_supports_page_table_move
+static inline bool arch_supports_page_table_move(void)
+{
+	return radix_enabled();
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_TLB_H */
diff --git a/mm/mremap.c b/mm/mremap.c
index 52369139f1b9..5989d3990020 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -25,7 +25,7 @@
 #include <linux/userfaultfd_k.h>
 
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
+#include <asm/tlb.h>
 #include <asm/pgalloc.h>
 
 #include "internal.h"
@@ -210,6 +210,15 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		drop_rmap_locks(vma);
 }
 
+#ifndef arch_supports_page_table_move
+#define arch_supports_page_table_move arch_supports_page_table_move
+static inline bool arch_supports_page_table_move(void)
+{
+	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
+		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
+}
+#endif
+
 #ifdef CONFIG_HAVE_MOVE_PMD
 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
@@ -218,6 +227,8 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 	struct mm_struct *mm = vma->vm_mm;
 	pmd_t pmd;
 
+	if (!arch_supports_page_table_move())
+		return false;
 	/*
 	 * The destination pmd shouldn't be established, free_pgtables()
 	 * should have released it.
@@ -284,6 +295,8 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
 	struct mm_struct *mm = vma->vm_mm;
 	pud_t pud;
 
+	if (!arch_supports_page_table_move())
+		return false;
 	/*
 	 * The destination pud shouldn't be established, free_pgtables()
 	 * should have released it.
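When the runtime check fails, move_normal_pmd() and move_normal_pud() return false, and the caller falls back to the slower per-PTE path. A hypothetical userspace demo of that fallback shape; the function names mirror the kernel's, but the bodies are stand-ins, not the real mm/mremap.c logic:

/*
 * Demo: a failing arch check makes the PMD-level move report failure,
 * and the caller falls back to copying individual PTEs.
 */
#include <stdbool.h>
#include <stdio.h>

static bool arch_supports_page_table_move(void)
{
	return false;	/* pretend we run on, e.g., hash-MMU ppc64 */
}

/* Stand-in for move_normal_pmd(): bail out early, as in the patch. */
static bool move_normal_pmd(void)
{
	if (!arch_supports_page_table_move())
		return false;
	puts("moved a whole PMD in one shot");
	return true;
}

/* Stand-in for the per-PTE fallback, move_ptes(). */
static void move_ptes(void)
{
	puts("fell back to copying individual PTEs");
}

int main(void)
{
	if (!move_normal_pmd())
		move_ptes();
	return 0;
}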