author	Rodrigo Vivi <[email protected]>	2018-07-23 09:13:12 -0700
committer	Rodrigo Vivi <[email protected]>	2018-07-23 09:13:12 -0700
commit	c74a7469f97c0f40b46e82ee979f9fb1bb6e847c (patch)
tree	f2690a1a916b73ef94657fbf0e0141ae57701825 /arch/powerpc/include/asm/nohash/pgtable.h
parent	6f15a7de86c8cf2dc09fc9e6d07047efa40ef809 (diff)
parent	500775074f88d9cf5416bed2ca19592812d62c41 (diff)
Merge drm/drm-next into drm-intel-next-queued
We need a backmerge to get DP_DPCD_REV_14 before we push other
i915 changes to dinq that could break compilation.
Signed-off-by: Rodrigo Vivi <[email protected]>
Diffstat (limited to 'arch/powerpc/include/asm/nohash/pgtable.h')
-rw-r--r--	arch/powerpc/include/asm/nohash/pgtable.h	58
1 file changed, 10 insertions(+), 48 deletions(-)
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index c56de1e8026f..2160be2e4339 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -17,7 +17,6 @@ static inline int pte_write(pte_t pte)
 }
 static inline int pte_read(pte_t pte)		{ return 1; }
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
 static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
 static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
@@ -148,70 +147,33 @@ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, int percpu)
 {
-#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
-	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
-	 * helper pte_update() which does an atomic update. We need to do that
-	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
-	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
-	 * the hash bits instead (ie, same as the non-SMP case)
-	 */
-	if (percpu)
-		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-			      | (pte_val(pte) & ~_PAGE_HASHPTE));
-	else
-		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
-
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
 	/* Second case is 32-bit with 64-bit PTE.  In this case, we
 	 * can just store as long as we do the two halves in the right order
-	 * with a barrier in between. This is possible because we take care,
-	 * in the hash code, to pre-invalidate if the PTE was already hashed,
-	 * which synchronizes us with any concurrent invalidation.
-	 * In the percpu case, we also fallback to the simple update preserving
-	 * the hash bits
+	 * with a barrier in between.
+	 * In the percpu case, we also fallback to the simple update
 	 */
-	if (percpu) {
-		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-			      | (pte_val(pte) & ~_PAGE_HASHPTE));
+	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
+		__asm__ __volatile__("\
+			stw%U0%X0 %2,%0\n\
+			eieio\n\
+			stw%U0%X0 %L2,%1"
+		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+		: "r" (pte) : "memory");
 		return;
 	}
-#if _PAGE_HASHPTE != 0
-	if (pte_val(*ptep) & _PAGE_HASHPTE)
-		flush_hash_entry(mm, ptep, addr);
-#endif
-	__asm__ __volatile__("\
-		stw%U0%X0 %2,%0\n\
-		eieio\n\
-		stw%U0%X0 %L2,%1"
-	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
-	: "r" (pte) : "memory");
-
-#elif defined(CONFIG_PPC_STD_MMU_32)
-	/* Third case is 32-bit hash table in UP mode, we need to preserve
-	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
-	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
-	 * and see we need to keep track that this PTE needs invalidating
-	 */
-	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-		      | (pte_val(pte) & ~_PAGE_HASHPTE));
-
-#else
 	/* Anything else just stores the PTE normally. That covers all 64-bit
 	 * cases, and 32-bit non-hash with 32-bit PTEs.
 	 */
 	*ptep = pte;
 
-#ifdef CONFIG_PPC_BOOK3E_64
 	/*
 	 * With hardware tablewalk, a sync is needed to ensure that
 	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
 	 * mappings, we can't tolerate spurious faults, so make sure
 	 * the new PTE will be seen the first time.
 	 */
-	if (is_kernel_addr(addr))
+	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
 		mb();
-#endif
-#endif
 }
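
The recurring pattern in this hunk is the conversion of preprocessor conditionals to IS_ENABLED(). IS_ENABLED(CONFIG_X) expands to the compile-time constant 1 when CONFIG_X is built-in or modular and 0 otherwise, so the compiler parses and type-checks every branch and then discards the dead ones during constant folding. A minimal sketch of the idea, with a hypothetical CONFIG_FOO and stand-in helpers do_foo()/do_fallback() that are not part of the patch above:

#include <linux/kconfig.h>	/* IS_ENABLED() */

/* Hypothetical helpers standing in for the real per-config paths. */
static void do_foo(void)      { }
static void do_fallback(void) { }

/* Old style: the disabled branch is invisible to the compiler and
 * can bit-rot silently until someone builds with CONFIG_FOO flipped.
 */
static void frob_ifdef(void)
{
#ifdef CONFIG_FOO
	do_foo();
#else
	do_fallback();
#endif
}

/* New style, as in the hunk above: both branches are always compiled;
 * IS_ENABLED() folds to 0 or 1, so the unused branch is eliminated
 * with no runtime cost.
 */
static void frob_is_enabled(void)
{
	if (IS_ENABLED(CONFIG_FOO))
		do_foo();
	else
		do_fallback();
}

The one precondition is that every branch must compile in every configuration, which is why the hunk can switch to IS_ENABLED() only for the surviving cases: the hash-MMU branches, which referenced symbols unavailable here such as flush_hash_entry() and _PAGE_HASHPTE, are deleted outright rather than converted.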