path: root/arch/x86/mm/highmem_32.c
author    Ira Weiny <[email protected]>    2020-06-04 16:47:42 -0700
committer Linus Torvalds <[email protected]>    2020-06-04 19:06:22 -0700
commit    78b6d91ec7bbfc5bcc2dd05bb2cf13c9de1dc7cd (patch)
tree      63a7267f0fd2bac5dabc6cfd189cbc9cbd820f6c /arch/x86/mm/highmem_32.c
parent    ee9bc5fdf5b6d24875fc55d43d5a0728bc2add21 (diff)
arch/kmap_atomic: consolidate duplicate code
Every arch has the same code to ensure atomic operations and a check for !HIGHMEM page. Remove the duplicate code by defining a core kmap_atomic() which only calls the arch-specific kmap_atomic_high() when the page is high memory.

[[email protected]: coding style fixes]
Signed-off-by: Ira Weiny <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Christian König <[email protected]>
Cc: Chris Zankel <[email protected]>
Cc: Daniel Vetter <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: Helge Deller <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: "James E.J. Bottomley" <[email protected]>
Cc: Max Filippov <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Bogendoerfer <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
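For context, a minimal sketch of the consolidated core helper the message describes, assuming it lives as a static inline in the generic highmem header; only kmap_atomic_high() is named in the commit message, the rest of the body is illustrative:

static inline void *kmap_atomic(struct page *page)
{
	/* The part every arch duplicated: enter atomic context. */
	preempt_disable();
	pagefault_disable();
	/* Lowmem pages are always directly mapped; no arch help needed. */
	if (!PageHighMem(page))
		return page_address(page);
	/* Only highmem pages reach the arch-specific mapping code. */
	return kmap_atomic_high(page);
}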
Diffstat (limited to 'arch/x86/mm/highmem_32.c')
-rw-r--r--  arch/x86/mm/highmem_32.c | 14
1 file changed, 0 insertions(+), 14 deletions(-)
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 48b56b1af902..c3e272a759e0 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -4,14 +4,6 @@
#include <linux/swap.h> /* for totalram_pages */
#include <linux/memblock.h>
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
@@ -28,12 +20,6 @@ void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
}
EXPORT_SYMBOL(kmap_atomic_high_prot);
-void *kmap_atomic(struct page *page)
-{
- return kmap_atomic_prot(page, kmap_prot);
-}
-EXPORT_SYMBOL(kmap_atomic);
-
/*
* This is the same as kmap_atomic() but can map memory that doesn't
* have a struct page associated with it.
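For reference, a minimal usage sketch of the atomic-kmap pattern the removed comment alludes to; dst, page and len are hypothetical, and no sleeping is allowed while the mapping is held:

	void *vaddr = kmap_atomic(page);	/* disables pagefaults/preemption */
	memcpy(dst, vaddr, len);		/* short, non-sleeping work only */
	kunmap_atomic(vaddr);			/* releases the atomic mapping */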