From 17d489019c5249d059768184c80e9b2b0269b81e Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Wed, 29 Apr 2015 14:49:07 +1000
Subject: KVM: PPC: Book3S HV: Fix list traversal in error case

This fixes a regression introduced in commit 25fedfca94cf, "KVM: PPC:
Book3S HV: Move vcore preemption point up into kvmppc_run_vcpu", which
leads to a user-triggerable oops.

In the case where we try to run a vcore on a physical core that is not
in single-threaded mode, or the vcore has too many threads for the
physical core, we iterate the list of runnable vcpus to make each one
return an EBUSY error to userspace.  Since this involves taking each
vcpu off the runnable_threads list for the vcore, we need to use
list_for_each_entry_safe rather than list_for_each_entry to traverse
the list.  Otherwise the kernel will crash with an oops message like
this:

Unable to handle kernel paging request for data at address 0x000fff88
Faulting instruction address: 0xd00000001e635dc8
Oops: Kernel access of bad area, sig: 11 [#2]
SMP NR_CPUS=1024 NUMA PowerNV
...
CPU: 48 PID: 91256 Comm: qemu-system-ppc Tainted: G      D        3.18.0 #1
task: c00000274e507500 ti: c0000027d1924000 task.ti: c0000027d1924000
NIP: d00000001e635dc8 LR: d00000001e635df8 CTR: c00000000011ba50
REGS: c0000027d19275b0 TRAP: 0300   Tainted: G      D         (3.18.0)
MSR: 9000000000009033  CR: 22002824  XER: 00000000
CFAR: c000000000008468 DAR: 00000000000fff88 DSISR: 40000000 SOFTE: 1
GPR00: d00000001e635df8 c0000027d1927830 d00000001e64c850 0000000000000001
GPR04: 0000000000000001 0000000000000001 0000000000000000 0000000000000000
GPR08: 0000000000200200 0000000000000000 0000000000000000 d00000001e63e588
GPR12: 0000000000002200 c000000007dbc800 c000000fc7800000 000000000000000a
GPR16: fffffffffffffffc c000000fd5439690 c000000fc7801c98 0000000000000001
GPR20: 0000000000000003 c0000027d1927aa8 c000000fd543b348 c000000fd543b350
GPR24: 0000000000000000 c000000fa57f0000 0000000000000030 0000000000000000
GPR28: fffffffffffffff0 c000000fd543b328 00000000000fe468 c000000fd543b300
NIP [d00000001e635dc8] kvmppc_run_core+0x198/0x17c0 [kvm_hv]
LR [d00000001e635df8] kvmppc_run_core+0x1c8/0x17c0 [kvm_hv]
Call Trace:
[c0000027d1927830] [d00000001e635df8] kvmppc_run_core+0x1c8/0x17c0 [kvm_hv] (unreliable)
[c0000027d1927a30] [d00000001e638350] kvmppc_vcpu_run_hv+0x5b0/0xdd0 [kvm_hv]
[c0000027d1927b70] [d00000001e510504] kvmppc_vcpu_run+0x44/0x60 [kvm]
[c0000027d1927ba0] [d00000001e50d4a4] kvm_arch_vcpu_ioctl_run+0x64/0x170 [kvm]
[c0000027d1927be0] [d00000001e504be8] kvm_vcpu_ioctl+0x5e8/0x7a0 [kvm]
[c0000027d1927d40] [c0000000002d6720] do_vfs_ioctl+0x490/0x780
[c0000027d1927de0] [c0000000002d6ae4] SyS_ioctl+0xd4/0xf0
[c0000027d1927e30] [c000000000009358] syscall_exit+0x0/0x98
Instruction dump:
60000000 60420000 387e1b30 38800003 38a00001 38c00000 480087d9 e8410018
ebde1c98 7fbdf040 3bdee368 419e0048 <813e1b20> 939e1b18 2f890001 409effcc
---[ end trace 8cdf50251cca6680 ]---

Fixes: 25fedfca94cfbf2461314c6c34ef58e74a31b025
Signed-off-by: Paul Mackerras
Reviewed-by: Alexander Graf
Signed-off-by: Paolo Bonzini
---
 arch/powerpc/kvm/book3s_hv.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 48d3c5d2ecc9..df81caab7383 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1952,7 +1952,7 @@ static void post_guest_process(struct kvmppc_vcore *vc)
  */
 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu *vcpu, *vnext;
 	int i;
 	int srcu_idx;
 
@@ -1982,7 +1982,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 */
 	if ((threads_per_core > 1) &&
 	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
-		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+		list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+					 arch.run_list) {
 			vcpu->arch.ret = -EBUSY;
 			kvmppc_remove_runnable(vc, vcpu);
 			wake_up(&vcpu->arch.cpu_run);
-- 
cgit
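The bug class here is generic: any loop that unlinks and frees the
current element must fetch its successor before the unlink.  A minimal
user-space sketch of the difference, with a hand-rolled list standing in
for the kernel's list_for_each_entry{,_safe} macros (all names here are
illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

int main(void)
{
	struct node *head = NULL, *n, *tmp;
	int i;

	/* Build a three-entry list, ids 0..2. */
	for (i = 2; i >= 0; i--) {
		n = malloc(sizeof(*n));
		n->id = i;
		n->next = head;
		head = n;
	}

	/*
	 * Remove every entry while iterating.  The successor is cached
	 * in tmp *before* the current node is freed, which is exactly
	 * what the _safe variant does.  The plain form,
	 * "for (n = head; n; n = n->next)", would read n->next from
	 * freed memory here -- the analogue of the oops above.
	 */
	for (n = head; n; n = tmp) {
		tmp = n->next;
		printf("removing entry %d\n", n->id);
		free(n);
	}
	return 0;
}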
From 13bd817bb88499ce1dc1dfdaffcde17fa492aca5 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Mon, 11 May 2015 11:56:01 +0530
Subject: powerpc/thp: Serialize pmd clear against a linux page table walk.

Serialize against find_linux_pte_or_hugepte() which does lock-less
lookup in page tables with local interrupts disabled.  For huge pages it
casts pmd_t to pte_t.  Since the format of pte_t is different from pmd_t
we want to prevent transit from pmd pointing to page table to pmd
pointing to huge page (and back) while interrupts are disabled.  We
clear pmd to possibly replace it with page table pointer in different
code paths.  So make sure we wait for the parallel
find_linux_pte_or_hugepte() to finish.

Without this patch, a find_linux_pte_or_hugepte() running in parallel to
__split_huge_zero_page_pmd() or do_huge_pmd_wp_page_fallback() or
zap_huge_pmd() can run into the above issue.  With
__split_huge_zero_page_pmd() and do_huge_pmd_wp_page_fallback() we clear
the hugepage pte before inserting the pmd entry with a regular pgtable
address.  Such a clear needs to wait for the parallel
find_linux_pte_or_hugepte() to finish.

With zap_huge_pmd(), we can run into issues with a hugepage pte getting
zapped due to a MADV_DONTNEED while another CPU faults it in as small
pages.

Reported-by: Kirill A. Shutemov
Signed-off-by: Aneesh Kumar K.V
Reviewed-by: Kirill A. Shutemov
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/pgtable_64.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 59daa5eeec25..6bfadf1aa5cb 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -839,6 +839,17 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
 	 * hash fault look at them.
 	 */
 	memset(pgtable, 0, PTE_FRAG_SIZE);
+	/*
+	 * Serialize against find_linux_pte_or_hugepte which does lock-less
+	 * lookup in page tables with local interrupts disabled. For huge pages
+	 * it casts pmd_t to pte_t. Since format of pte_t is different from
+	 * pmd_t we want to prevent transit from pmd pointing to page table
+	 * to pmd pointing to huge page (and back) while interrupts are disabled.
+	 * We clear pmd to possibly replace it with page table pointer in
+	 * different code paths. So make sure we wait for the parallel
+	 * find_linux_pte_or_hugepage to finish.
+	 */
+	kick_all_cpus_sync();
 	return old_pmd;
 }
-- 
cgit
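The serialization trick works because the lock-less walker runs its
whole lookup with local interrupts off: kick_all_cpus_sync() sends an
IPI to every CPU, and the IPI can only be delivered once a walker has
re-enabled interrupts, so waiting for the IPIs to complete proves every
in-flight walk has finished.  A single-threaded user-space caricature of
that protocol, with atomics standing in for IRQ state and the IPI wait
(all names hypothetical, not the kernel mechanism itself):

#include <stdio.h>
#include <stdatomic.h>

static atomic_int walker_irqs_off;		/* models "a CPU has IRQs off" */
static int page_table = 42;
static _Atomic(int *) pmd = &page_table;	/* models the pmd slot */

/* Lock-less walker: the whole lookup happens inside the IRQs-off window. */
static int lockless_lookup(void)
{
	int val = 0;
	int *p;

	atomic_store(&walker_irqs_off, 1);	/* local_irq_save() */
	p = atomic_load(&pmd);			/* find_linux_pte_or_hugepte() */
	if (p)
		val = *p;			/* safe: the writer is still waiting */
	atomic_store(&walker_irqs_off, 0);	/* local_irq_restore() */
	return val;
}

/* Writer: clear the slot, then wait out every walker before reusing it. */
static void clear_and_sync(void)
{
	atomic_store(&pmd, (int *)NULL);	/* pmdp_get_and_clear() */
	while (atomic_load(&walker_irqs_off))	/* kick_all_cpus_sync(): IPIs */
		;				/* land only after IRQs-on */
	/* only now is it safe to repoint the pmd or free the old table */
}

int main(void)
{
	printf("walker saw %d\n", lockless_lookup());
	clear_and_sync();
	printf("walker saw %d\n", lockless_lookup());
	return 0;
}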
From 7b868e81be38d5ad4f4aa4be819a5fa543cc5ee8 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Mon, 11 May 2015 11:58:29 +0530
Subject: powerpc/mm: Return NULL for not present hugetlb page

We need to check whether the pte is present in follow_huge_addr() and
properly return NULL if the mapping is not present.  Also use
READ_ONCE() when dereferencing the pte_t address.

Without this patch, we may wrongly return a zero pfn page in
follow_huge_addr().

Reviewed-by: David Gibson
Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/hugetlbpage.c | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0ce968b00b7c..3385e3d0506e 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -689,27 +689,34 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
-	pte_t *ptep;
-	struct page *page;
+	pte_t *ptep, pte;
 	unsigned shift;
 	unsigned long mask, flags;
+	struct page *page = ERR_PTR(-EINVAL);
+
+	local_irq_save(flags);
+	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
+	if (!ptep)
+		goto no_page;
+	pte = READ_ONCE(*ptep);
 	/*
+	 * Verify it is a huge page else bail.
 	 * Transparent hugepages are handled by generic code. We can skip them
 	 * here.
 	 */
-	local_irq_save(flags);
-	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
+	if (!shift || pmd_trans_huge(__pmd(pte_val(pte))))
+		goto no_page;
 
-	/* Verify it is a huge page else bail. */
-	if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) {
-		local_irq_restore(flags);
-		return ERR_PTR(-EINVAL);
+	if (!pte_present(pte)) {
+		page = NULL;
+		goto no_page;
 	}
 	mask = (1UL << shift) - 1;
-	page = pte_page(*ptep);
+	page = pte_page(pte);
 	if (page)
 		page += (address & mask) / PAGE_SIZE;
+no_page:
 	local_irq_restore(flags);
 	return page;
 }
-- 
cgit
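The READ_ONCE() change matters because the walk is lock-less: the pte
can change underneath the reader, so the present check and the final
pte_page() must all operate on one snapshot rather than on repeated
dereferences of *ptep.  A user-space sketch of that pattern (the bit
layout and helper names are made up for illustration):

#include <stdio.h>

/* User-space stand-in for the kernel's READ_ONCE(). */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

typedef unsigned long pte_val_t;

#define PTE_PRESENT 0x1UL		/* hypothetical present bit */

static unsigned long lookup_pfn(pte_val_t *ptep)
{
	/*
	 * Snapshot the entry exactly once.  The present check and the
	 * pfn extraction both use this snapshot; re-reading *ptep for
	 * each step could mix two different ptes if a concurrent
	 * update landed between the reads.
	 */
	pte_val_t pte = READ_ONCE(*ptep);

	if (!(pte & PTE_PRESENT))
		return 0;		/* like returning NULL upstream */
	return pte >> 12;		/* hypothetical pfn field */
}

int main(void)
{
	pte_val_t slot = (0x1234UL << 12) | PTE_PRESENT;

	printf("pfn = 0x%lx\n", lookup_pfn(&slot));
	slot &= ~PTE_PRESENT;		/* entry goes not-present */
	printf("pfn = 0x%lx\n", lookup_pfn(&slot));
	return 0;
}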
From ffb2d78eca08a1451137583d4e435aecfd6af809 Mon Sep 17 00:00:00 2001
From: Daniel Axtens
Date: Tue, 12 May 2015 13:23:59 +1000
Subject: powerpc/mce: fix off by one errors in mce event handling

Before 69111bac42f5 ("powerpc: Replace __get_cpu_var uses"), in
save_mce_event, index got the value of mce_nest_count, and
mce_nest_count was incremented *after* index was set.

However, that patch changed the behaviour so that mce_nest_count was
incremented *before* setting index.  This causes an off-by-one error,
as get_mce_event sets index as mce_nest_count - 1 before reading
mce_event.  Thus get_mce_event reads bogus data, causing warnings like
"Machine Check Exception, Unknown event version 0 !" and breaking MCE
handling.

Restore the old behaviour and unbreak MCE handling by subtracting one
from the newly incremented value.

The same broken change occurred in machine_check_queue_event (which
sets a queue read by machine_check_process_queued_event).  Fix that
too, unbreaking the printing of MCE information.

Fixes: 69111bac42f5 ("powerpc: Replace __get_cpu_var uses")
CC: stable@vger.kernel.org
CC: Mahesh Salgaonkar
CC: Christoph Lameter
Signed-off-by: Daniel Axtens
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/mce.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 15c99b649b04..b2eb4686bd8f 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
 		    uint64_t nip, uint64_t addr)
 {
 	uint64_t srr1;
-	int index = __this_cpu_inc_return(mce_nest_count);
+	int index = __this_cpu_inc_return(mce_nest_count) - 1;
 	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
 
 	/*
@@ -184,7 +184,7 @@ void machine_check_queue_event(void)
 
 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
 		return;
-	index = __this_cpu_inc_return(mce_queue_count);
+	index = __this_cpu_inc_return(mce_queue_count) - 1;
 	/* If queue is full, just return for now. */
 	if (index >= MAX_MC_EVT) {
 		__this_cpu_dec(mce_queue_count);
-- 
cgit

From 5e95235ccd5442d4a4fe11ec4eb99ba1b7959368 Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Thu, 14 May 2015 14:45:40 +1000
Subject: powerpc: Align TOC to 256 bytes

Recent toolchains force the TOC to be 256 byte aligned.  We need to
enforce this alignment in our linker script, otherwise pointers to our
TOC variables (__toc_start, __prom_init_toc_start) could be incorrect.
If they are bad, we die a few hundred instructions into boot.

Cc: stable@vger.kernel.org
Signed-off-by: Anton Blanchard
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/vmlinux.lds.S | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index f096e72262f4..1db685104ffc 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -213,6 +213,7 @@ SECTIONS
 		*(.opd)
 	}
 
+	. = ALIGN(256);
 	.got : AT(ADDR(.got) - LOAD_OFFSET) {
 		__toc_start = .;
 #ifndef CONFIG_RELOCATABLE
-- 
cgit
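Circling back to the MCE fix above: the subtlety is that
__this_cpu_inc_return() returns the value *after* incrementing, whereas
the old open-coded sequence read the counter first and incremented it
afterwards.  A user-space sketch of the off-by-one (the helper and queue
are hypothetical stand-ins, not the kernel per-cpu API):

#include <stdio.h>

#define MAX_EVT 4

static int nest_count;
static int events[MAX_EVT];

/* Stand-in for __this_cpu_inc_return(): increment, return the *new* value. */
static int inc_return(int *counter)
{
	return ++*counter;
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		/*
		 * Buggy form: int index = inc_return(&nest_count);
		 * That skips slot 0 and, once the counter reaches
		 * MAX_EVT, indexes one past the array -- the same
		 * bogus-slot read behind "Unknown event version 0 !".
		 */
		int index = inc_return(&nest_count) - 1;	/* the fix */

		events[index] = 100 + i;
		printf("event %d stored at slot %d\n", events[index], index);
	}
	return 0;
}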