Diffstat (limited to 'arch/powerpc/mm/hash_native_64.c')
-rw-r--r--  arch/powerpc/mm/hash_native_64.c  78
1 file changed, 41 insertions, 37 deletions
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 1d049c78c82a..729f02df8290 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -23,13 +23,13 @@
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
-#include <asm/tlbflush.h>
 #include <asm/trace.h>
 #include <asm/tlb.h>
 #include <asm/cputable.h>
 #include <asm/udbg.h>
 #include <asm/kexec.h>
 #include <asm/ppc-opcode.h>
+#include <asm/feature-fixups.h>
 
 #include <misc/cxl-base.h>
 
@@ -423,9 +423,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
 		vpn, want_v & HPTE_V_AVPN, slot, newpp);
 
-	hpte_v = be64_to_cpu(hptep->v);
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+	hpte_v = hpte_get_old_v(hptep);
 	/*
 	 * We need to invalidate the TLB always because hpte_remove doesn't do
 	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -439,9 +437,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	} else {
 		native_lock_hpte(hptep);
 		/* recheck with locks held */
-		hpte_v = be64_to_cpu(hptep->v);
-		if (cpu_has_feature(CPU_FTR_ARCH_300))
-			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+		hpte_v = hpte_get_old_v(hptep);
 		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
 			     !(hpte_v & HPTE_V_VALID))) {
 			ret = -1;
@@ -481,11 +477,9 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
 	/* Bolted mappings are only ever in the primary group */
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 	for (i = 0; i < HPTES_PER_GROUP; i++) {
-		hptep = htab_address + slot;
-		hpte_v = be64_to_cpu(hptep->v);
-		if (cpu_has_feature(CPU_FTR_ARCH_300))
-			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
 
+		hptep = htab_address + slot;
+		hpte_v = hpte_get_old_v(hptep);
 		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
 			/* HPTE matches */
 			return slot;
@@ -574,11 +568,19 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
 
 	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
-	native_lock_hpte(hptep);
-	hpte_v = be64_to_cpu(hptep->v);
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+	hpte_v = hpte_get_old_v(hptep);
+	if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+		native_lock_hpte(hptep);
+		/* recheck with locks held */
+		hpte_v = hpte_get_old_v(hptep);
+
+		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
+			/* Invalidate the hpte. NOTE: this also unlocks it */
+			hptep->v = 0;
+		else
+			native_unlock_hpte(hptep);
+	}
 	/*
 	 * We need to invalidate the TLB always because hpte_remove doesn't do
 	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -586,13 +588,6 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 	 * (hpte_remove) because we assume the old translation is still
 	 * technically "valid".
 	 */
-	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
-		native_unlock_hpte(hptep);
-	else
-		/* Invalidate the hpte. NOTE: this also unlocks it */
-		hptep->v = 0;
-
-	/* Invalidate the TLB */
 	tlbie(vpn, bpsize, apsize, ssize, local);
 
 	local_irq_restore(flags);
@@ -634,17 +629,23 @@ static void native_hugepage_invalidate(unsigned long vsid,
 
 		hptep = htab_address + slot;
 		want_v = hpte_encode_avpn(vpn, psize, ssize);
-		native_lock_hpte(hptep);
-		hpte_v = be64_to_cpu(hptep->v);
-		if (cpu_has_feature(CPU_FTR_ARCH_300))
-			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+		hpte_v = hpte_get_old_v(hptep);
 
 		/* Even if we miss, we need to invalidate the TLB */
-		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
-			native_unlock_hpte(hptep);
-		else
-			/* Invalidate the hpte. NOTE: this also unlocks it */
-			hptep->v = 0;
+		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+			/* recheck with locks held */
+			native_lock_hpte(hptep);
+			hpte_v = hpte_get_old_v(hptep);
+
+			if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+				/*
+				 * Invalidate the hpte. NOTE: this also unlocks it
+				 */
+
+				hptep->v = 0;
+			} else
+				native_unlock_hpte(hptep);
+		}
 		/*
 		 * We need to do tlb invalidate for all the address, tlbie
 		 * instruction compares entry_VA in tlb with the VA specified
@@ -812,16 +813,19 @@ static void native_flush_hash_range(unsigned long number, int local)
 			slot += hidx & _PTEIDX_GROUP_IX;
 			hptep = htab_address + slot;
 			want_v = hpte_encode_avpn(vpn, psize, ssize);
+			hpte_v = hpte_get_old_v(hptep);
+
+			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
+				continue;
+			/* lock and try again */
 			native_lock_hpte(hptep);
-			hpte_v = be64_to_cpu(hptep->v);
-			if (cpu_has_feature(CPU_FTR_ARCH_300))
-				hpte_v = hpte_new_to_old_v(hpte_v,
-						be64_to_cpu(hptep->r));
-			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
-			    !(hpte_v & HPTE_V_VALID))
+			hpte_v = hpte_get_old_v(hptep);
+
+			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
 				native_unlock_hpte(hptep);
 			else
 				hptep->v = 0;
+
 		} pte_iterate_hashed_end();
 	}
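
Every hunk above folds the same open-coded read of an HPTE's valid/AVPN word into the hpte_get_old_v() helper. A minimal sketch of what such a helper looks like, reconstructed from the exact sequence the diff removes (the kernel's own definition lives in the book3s hash-MMU headers; the comments below are explanatory and not from the source), assuming the existing hash_pte type, cpu_has_feature() and hpte_new_to_old_v():

static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
{
	unsigned long hpte_v;

	/* First doubleword of the HPTE: valid bit, AVPN, etc. (big-endian in memory) */
	hpte_v = be64_to_cpu(hptep->v);
	/* On ISA 3.0 (POWER9), convert the new HPTE layout back to the old one */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	return hpte_v;
}

Because this read takes no lock, callers such as native_hpte_invalidate(), native_hugepage_invalidate() and native_flush_hash_range() can first compare hpte_v against want_v and only take the per-HPTE lock (and recheck under it) when the entry actually matches, which is the locking restructure visible in the later hunks.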