Diffstat (limited to 'arch/powerpc/mm/pgtable_64.c')
-rw-r--r--   arch/powerpc/mm/pgtable_64.c | 46
1 file changed, 30 insertions, 16 deletions
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index f6ce1f111f5b..c8d709ab489d 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -54,6 +54,9 @@
 
 #include "mmu_decl.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/thp.h>
+
 /* Some sanity checking */
 #if TASK_SIZE_USER64 > PGTABLE_RANGE
 #error TASK_SIZE_USER64 exceeds pagetable range
@@ -68,7 +71,7 @@
 unsigned long ioremap_bot = IOREMAP_BASE;
 
 #ifdef CONFIG_PPC_MMU_NOHASH
-static void *early_alloc_pgtable(unsigned long size)
+static __ref void *early_alloc_pgtable(unsigned long size)
 {
 	void *pt;
 
@@ -537,8 +540,9 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
 	old = pmd_val(*pmdp);
 	*pmdp = __pmd((old & ~clr) | set);
 #endif
+	trace_hugepage_update(addr, old, clr, set);
 	if (old & _PAGE_HASHPTE)
-		hpte_do_hugepage_flush(mm, addr, pmdp);
+		hpte_do_hugepage_flush(mm, addr, pmdp, old);
 	return old;
 }
 
@@ -642,10 +646,11 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
 	 * If we didn't had the splitting flag set, go and flush the
 	 * HPTE entries.
 	 */
+	trace_hugepage_splitting(address, old);
 	if (!(old & _PAGE_SPLITTING)) {
 		/* We need to flush the hpte */
 		if (old & _PAGE_HASHPTE)
-			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
+			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
 	}
 	/*
 	 * This ensures that generic code that rely on IRQ disabling
@@ -709,6 +714,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	assert_spin_locked(&mm->page_table_lock);
 	WARN_ON(!pmd_trans_huge(pmd));
 #endif
+	trace_hugepage_set_pmd(addr, pmd);
 	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
 }
 
@@ -723,7 +729,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
  * neesd to be flushed.
  */
 void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
-			    pmd_t *pmdp)
+			    pmd_t *pmdp, unsigned long old_pmd)
 {
 	int ssize, i;
 	unsigned long s_addr;
@@ -745,12 +751,29 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 	if (!hpte_slot_array)
 		return;
 
-	/* get the base page size */
+	/* get the base page size,vsid and segment size */
+#ifdef CONFIG_DEBUG_VM
 	psize = get_slice_psize(mm, s_addr);
+	BUG_ON(psize == MMU_PAGE_16M);
+#endif
+	if (old_pmd & _PAGE_COMBO)
+		psize = MMU_PAGE_4K;
+	else
+		psize = MMU_PAGE_64K;
+
+	if (!is_kernel_addr(s_addr)) {
+		ssize = user_segment_size(s_addr);
+		vsid = get_vsid(mm->context.id, s_addr, ssize);
+		WARN_ON(vsid == 0);
+	} else {
+		vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
+		ssize = mmu_kernel_ssize;
+	}
 
 	if (ppc_md.hugepage_invalidate)
-		return ppc_md.hugepage_invalidate(mm, hpte_slot_array,
-						  s_addr, psize);
+		return ppc_md.hugepage_invalidate(vsid, s_addr,
+						  hpte_slot_array,
+						  psize, ssize);
 	/*
 	 * No bluk hpte removal support, invalidate each entry
 	 */
@@ -768,15 +791,6 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 		/* get the vpn */
 		addr = s_addr + (i * (1ul << shift));
 
-		if (!is_kernel_addr(addr)) {
-			ssize = user_segment_size(addr);
-			vsid = get_vsid(mm->context.id, addr, ssize);
-			WARN_ON(vsid == 0);
-		} else {
-			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
-			ssize = mmu_kernel_ssize;
-		}
-
 		vpn = hpt_vpn(addr, vsid, ssize);
 		hash = hpt_hash(vpn, shift, ssize);
 		if (hidx & _PTEIDX_SECONDARY)
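
Note on the hpte_do_hugepage_flush() change above: the base page size backing the hugepage is now derived from the old PMD value (a _PAGE_COMBO hugepage is backed by 4K HPTEs, otherwise by 64K HPTEs), and the VSID/segment size are computed once from the segment start address instead of per subpage inside the invalidation loop. The small userspace sketch below mirrors only that selection logic; PAGE_COMBO, PSIZE_4K and PSIZE_64K are illustrative stand-ins, not the kernel's _PAGE_COMBO/MMU_PAGE_* definitions.

/* sketch.c - illustrative stand-alone mirror of the psize selection, not kernel code */
#include <stdio.h>

#define PAGE_COMBO 0x1ul                    /* stand-in for _PAGE_COMBO */
enum psize { PSIZE_4K, PSIZE_64K };         /* stand-ins for MMU_PAGE_4K / MMU_PAGE_64K */

/* A "combo" hugepage is backed by 4K HPTEs, otherwise by 64K HPTEs, so the
 * base page size can be read from the old PMD value instead of doing a
 * slice lookup (the slice lookup survives only under CONFIG_DEBUG_VM). */
static enum psize base_psize_from_old_pmd(unsigned long old_pmd)
{
	return (old_pmd & PAGE_COMBO) ? PSIZE_4K : PSIZE_64K;
}

int main(void)
{
	printf("combo pmd  -> %s\n",
	       base_psize_from_old_pmd(PAGE_COMBO) == PSIZE_4K ? "4K" : "64K");
	printf("normal pmd -> %s\n",
	       base_psize_from_old_pmd(0) == PSIZE_4K ? "4K" : "64K");
	return 0;
}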