author:    Paolo Bonzini <pbonzini@redhat.com>  2017-02-07 18:18:13 +0100
committer: Paolo Bonzini <pbonzini@redhat.com>  2017-02-07 18:18:13 +0100
commit:    d9c0e59f92d491a7be5172eaf2d600b4953a0bd4
tree:      0823b289a65ae4a3fffa69571fe7d72f51aa2aa3  /arch/mips/mm/tlbex.c
parent:    d5b798c15fb97136dc13ac5a9629f91e88d5d565
parent:    12ed1faece3f141c2604b5b3a8377ba71d23ec9d
Merge tag 'kvm_mips_4.11_1' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/kvm-mips into HEAD
KVM: MIPS: GVA/GPA page tables, dirty logging, SYNC_MMU etc
Numerous MIPS KVM fixes, improvements, and features for 4.11, many of
which continue to pave the way for VZ support. The most interesting
are:
- Add GVA->HPA page tables for T&E, to cache GVA mappings.
- Generate fast-path TLB refill exception handler which loads host TLB
entries from GVA page table, avoiding repeated guest memory
translation and guest TLB lookups.
- Use uaccess macros when T&E needs to access guest memory, which with
GVA page tables and the Linux TLB refill handler improves robustness
against TLB faults and fixes EVA hosts.
- Use BadInstr/BadInstrP registers when available to obtain instruction
encodings after a synchronous trap.
- Add GPA->HPA page tables to replace the inflexible linear array,
allowing for multiple sparsely arranged memory regions.
- Properly implement dirty page logging.
- Add KVM_CAP_SYNC_MMU support so that changes in GPA mappings become
effective in guests even if they are already running, allowing for
copy-on-write, KSM, idle page tracking, swapping, and guest memory
ballooning.
- Add KVM_CAP_READONLY_MEM support, so writes to specified memory
regions are treated as MMIO.
- Implement proper CP0_EBase support in T&E.
- Expose a few more missing CP0 registers to userland.
- Add KVM_CAP_NR_VCPUS and KVM_CAP_MAX_VCPUS support (see the userland
sketch after this list), and allow up to 8 VCPUs to be created in a VM.
- Various cleanups and dropping of dead and duplicated code.
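
None of the following code appears in the series itself; it is a minimal
userland sketch, assuming a 4 KiB guest page size and a single anonymous
64 KiB memslot, of how the capabilities and dirty logging listed above are
reached through the generic KVM ioctls (KVM_CHECK_EXTENSION,
KVM_SET_USER_MEMORY_REGION, KVM_GET_DIRTY_LOG):

```c
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	/* Recommended and maximum VCPU counts, newly reported on MIPS. */
	printf("VCPUs: recommended %d, max %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS),
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS));

	/*
	 * One 64 KiB slot with dirty logging enabled; using
	 * KVM_MEM_READONLY instead would turn guest writes to the
	 * region into MMIO exits, as described above.
	 */
	void *mem = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.flags = KVM_MEM_LOG_DIRTY_PAGES,
		.guest_phys_addr = 0,
		.memory_size = 0x10000,
		.userspace_addr = (unsigned long)mem,
	};
	ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region);

	/*
	 * Later, once the guest has run: fetch the dirty bitmap
	 * (one bit per page; 16 pages fit in a single long here).
	 */
	unsigned long bitmap[1] = { 0 };
	struct kvm_dirty_log log = { .slot = 0, .dirty_bitmap = bitmap };
	if (ioctl(vm, KVM_GET_DIRTY_LOG, &log) < 0)
		perror("KVM_GET_DIRTY_LOG");
	return 0;
}
```

With this series, the bitmap on T&E hosts is maintained through the new
GPA->HPA page tables rather than the old linear array, so
KVM_GET_DIRTY_LOG now reports guest writes properly.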
Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r--  arch/mips/mm/tlbex.c | 38 ++++++++++++++++++++------------------
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 55ce39606cb8..2465f83c79c3 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -22,6 +22,7 @@
  */
 #include <linux/bug.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/smp.h>
@@ -34,6 +35,7 @@
 #include <asm/war.h>
 #include <asm/uasm.h>
 #include <asm/setup.h>
+#include <asm/tlbex.h>
 
 static int mips_xpa_disabled;
@@ -344,7 +346,8 @@ static int allocate_kscratch(void)
 }
 
 static int scratch_reg;
-static int pgd_reg;
+int pgd_reg;
+EXPORT_SYMBOL_GPL(pgd_reg);
 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 
 static struct work_registers build_get_work_registers(u32 **p)
@@ -496,15 +499,9 @@ static void __maybe_unused build_tlb_probe_entry(u32 **p)
 	}
 }
 
-/*
- * Write random or indexed TLB entry, and care about the hazards from
- * the preceding mtc0 and for the following eret.
- */
-enum tlb_write_entry { tlb_random, tlb_indexed };
-
-static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
-				  struct uasm_reloc **r,
-				  enum tlb_write_entry wmode)
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+			   struct uasm_reloc **r,
+			   enum tlb_write_entry wmode)
 {
 	void(*tlbw)(u32 **) = NULL;
@@ -627,6 +624,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(build_tlb_write_entry);
 
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 							unsigned int reg)
@@ -781,9 +779,8 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void
-build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-		 unsigned int tmp, unsigned int ptr)
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+		      unsigned int tmp, unsigned int ptr)
 {
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 	long pgdc = (long)pgd_current;
@@ -859,6 +856,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 #endif
 }
+EXPORT_SYMBOL_GPL(build_get_pmde64);
 
 /*
  * BVADDR is the faulting address, PTR is scratch.
@@ -934,8 +932,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pgd entry.
  */
-static void __maybe_unused
-build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	if (pgd_reg != -1) {
 		/* pgd is in pgd_reg */
@@ -960,6 +957,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
 }
+EXPORT_SYMBOL_GPL(build_get_pgde32);
 
 #endif /* !CONFIG_64BIT */
@@ -989,7 +987,7 @@ static void build_adjust_context(u32 **p, unsigned int ctx)
 	uasm_i_andi(p, ctx, ctx, mask);
 }
 
-static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
@@ -1013,8 +1011,9 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 	build_adjust_context(p, tmp);
 	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
+EXPORT_SYMBOL_GPL(build_get_ptep);
 
-static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
 	int pte_off_even = 0;
 	int pte_off_odd = sizeof(pte_t);
@@ -1063,6 +1062,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 	UASM_i_MTC0(p, 0, C0_ENTRYLO1);
 	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 }
+EXPORT_SYMBOL_GPL(build_update_entries);
 
 struct mips_huge_tlb_info {
 	int huge_pte;
@@ -1536,7 +1536,9 @@ static void build_loongson3_tlb_refill_handler(void)
 extern u32 handle_tlbl[], handle_tlbl_end[];
 extern u32 handle_tlbs[], handle_tlbs_end[];
 extern u32 handle_tlbm[], handle_tlbm_end[];
-extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[];
+extern u32 tlbmiss_handler_setup_pgd_start[];
+extern u32 tlbmiss_handler_setup_pgd[];
+EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd);
 extern u32 tlbmiss_handler_setup_pgd_end[];
 
 static void build_setup_pgd(void)
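
The exports in the diff above exist so that KVM can generate its own
exception handlers at run time instead of duplicating tlbex.c's knowledge
of CPU-specific TLB hazards. As a rough sketch only (the buffer, its size,
the local K0/K1 defines, and the icache flush are illustrative
assumptions; the real consumer lives under arch/mips/kvm/), the newly
exported uasm builders could be chained like this to emit a 64-bit TLB
refill fast path:

```c
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

#define K0 26	/* kernel scratch registers, mirroring tlbex.c's defines */
#define K1 27

static u32 refill_buf[128];	/* assumed destination for the handler */

static void build_example_refill_handler(void)
{
	u32 *p = refill_buf;
	struct uasm_label labels[4], *l = labels;
	struct uasm_reloc relocs[4], *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Walk the pgd/pmd for the faulting address; pmd ends up in K1. */
	build_get_pmde64(&p, &l, &r, K0, K1);
	/* Use CP0_Context to index the even/odd PTE pair. */
	build_get_ptep(&p, K0, K1);
	/* Load the pair into CP0_EntryLo0/CP0_EntryLo1. */
	build_update_entries(&p, K0, K1);
	/* Write a random TLB entry, with CPU-specific hazard handling. */
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_i_eret(&p);

	/* Fix up branches, then make the new code visible to ifetch. */
	uasm_resolve_relocs(relocs, labels);
	local_flush_icache_range((unsigned long)refill_buf,
				 (unsigned long)p);
}
```

The split of responsibilities is the point of the patch: tlbex.c keeps the
uasm idioms and hazard workarounds behind these five builders (plus the
pgd_reg and tlbmiss_handler_setup_pgd exports), while the caller decides
where the handler lives and which page tables it walks; in KVM's case, the
GVA page tables described in the merge message.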