author     Tom Lendacky <[email protected]>    2017-07-17 16:10:30 -0500
committer  Ingo Molnar <[email protected]>         2017-07-18 11:38:05 +0200
commit     8458bf94b0399cd1bca6c437366bcafb29c230c5 (patch)
tree       2a4f11621971ca1e9402bdcf2bf2b3cad33a16bb
parent     f2f931c6819467af5260a21c59fb787ce2863f92 (diff)
x86/mm: Use proper encryption attributes with /dev/mem
When accessing memory through /dev/mem (or /dev/kmem), use the proper
encryption attributes when mapping the memory.

To ensure the proper attributes are applied when reading or writing
/dev/mem, update the xlate_dev_mem_ptr() function to use memremap(),
which essentially performs the same steps: apply __va() for RAM, or
fall back to ioremap() otherwise.
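
For reference, the reworked helpers end up roughly as sketched below.
This is only a readable rendering of what the diff at the bottom does;
the pre-existing 'start' computation is hidden in the hunk context and
is assumed here to be phys & PAGE_MASK:

/*
 * Sketch of the reworked /dev/mem translation helpers (see the diff
 * below). memremap(..., MEMREMAP_WB) takes the __va() path for RAM and
 * falls back to an ioremap() variant otherwise, so the correct
 * encryption attributes are applied in both cases.
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;	/* assumed from context */
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success; return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}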
To ensure the proper attributes are applied when mmapping /dev/mem,
update phys_mem_access_prot() to call phys_mem_access_encrypted(), a
new function that checks whether the memory should be mapped encrypted.
If it should not be mapped encrypted, the VMA protection value is
updated to remove the encryption bit.
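
The mmap() side reduces to the following sketch of the two functions
touched by the diff below; phys_mem_access_encrypted() simply reuses
arch_memremap_can_ram_remap() to decide whether a range may keep the
encryption bit:

/* True if the range can be mapped with the encryption bit set */
bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	/* Clear the encryption bit for ranges that must be mapped decrypted */
	if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
		vma_prot = pgprot_decrypted(vma_prot);

	return vma_prot;
}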
Signed-off-by: Tom Lendacky <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brijesh Singh <[email protected]>
Cc: Dave Young <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Konrad Rzeszutek Wilk <[email protected]>
Cc: Larry Woodman <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Matt Fleming <[email protected]>
Cc: Michael S. Tsirkin <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Radim Krčmář <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Toshimitsu Kani <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/c917f403ab9f61cbfd455ad6425ed8429a5e7b54.1500319216.git.thomas.lendacky@amd.com
Signed-off-by: Ingo Molnar <[email protected]>
-rw-r--r--   arch/x86/include/asm/io.h |  3
-rw-r--r--   arch/x86/mm/ioremap.c     | 18
-rw-r--r--   arch/x86/mm/pat.c         |  3
3 files changed, 15 insertions, 9 deletions
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 09c5557b1454..e080a39b2108 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -386,4 +386,7 @@ extern bool arch_memremap_can_ram_remap(resource_size_t offset,
 					unsigned long flags);
 #define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
 
+extern bool phys_mem_access_encrypted(unsigned long phys_addr,
+				      unsigned long size);
+
 #endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 704fc081c104..34f0e1847dd6 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -400,12 +400,10 @@ void *xlate_dev_mem_ptr(phys_addr_t phys)
 	unsigned long offset = phys & ~PAGE_MASK;
 	void *vaddr;
 
-	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
-	if (page_is_ram(start >> PAGE_SHIFT))
-		return __va(phys);
+	/* memremap() maps if RAM, otherwise falls back to ioremap() */
+	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);
 
-	vaddr = ioremap_cache(start, PAGE_SIZE);
-	/* Only add the offset on success and return NULL if the ioremap() failed: */
+	/* Only add the offset on success and return NULL if memremap() failed */
 	if (vaddr)
 		vaddr += offset;
 
@@ -414,10 +412,7 @@ void *xlate_dev_mem_ptr(phys_addr_t phys)
 
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 {
-	if (page_is_ram(phys >> PAGE_SHIFT))
-		return;
-
-	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
+	memunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
 
 /*
@@ -626,6 +621,11 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
 	return prot;
 }
 
+bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
+{
+	return arch_memremap_can_ram_remap(phys_addr, size, 0);
+}
+
 #ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
 /* Remap memory with encryption */
 void __init *early_memremap_encrypted(resource_size_t phys_addr,
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 88990ab87961..fe7d57a8fb60 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -744,6 +744,9 @@ EXPORT_SYMBOL(arch_io_free_memtype_wc);
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t vma_prot)
 {
+	if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
+		vma_prot = pgprot_decrypted(vma_prot);
+
 	return vma_prot;
 }
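
For context, a minimal userspace sketch of the path this patch affects:
nothing changes for the caller, since the encryption attribute is now
chosen entirely inside phys_mem_access_prot() and xlate_dev_mem_ptr().
The physical address below is a hypothetical example, and the program
needs root plus a range permitted by CONFIG_STRICT_DEVMEM:

/*
 * Illustrative /dev/mem consumer. The kernel picks encrypted vs.
 * decrypted page protections in phys_mem_access_prot() when this
 * mapping is created; userspace does nothing SME-specific.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define PHYS_ADDR 0x000f0000UL	/* hypothetical example: legacy BIOS area */
#define MAP_LEN   4096UL

int main(void)
{
	int fd = open("/dev/mem", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	void *p = mmap(NULL, MAP_LEN, PROT_READ, MAP_SHARED, fd, PHYS_ADDR);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	printf("first bytes: %02x %02x %02x %02x\n",
	       ((uint8_t *)p)[0], ((uint8_t *)p)[1],
	       ((uint8_t *)p)[2], ((uint8_t *)p)[3]);

	munmap(p, MAP_LEN);
	close(fd);
	return 0;
}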