Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--	arch/arm/mm/context.c	| 32 +++++++++++++++++++++-----------
1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index bc4a5e9ebb78..a5a4b2bc42ba 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -34,6 +34,9 @@
  * The ASID is used to tag entries in the CPU caches and TLBs.
  * The context ID is used by debuggers and trace logic, and
  * should be unique within all running processes.
+ *
+ * In big endian operation, the two 32 bit words are swapped if accessed by
+ * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
@@ -149,9 +152,9 @@ static int is_reserved_asid(u64 asid)
 	return 0;
 }
 
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
-	u64 asid = mm->context.id;
+	u64 asid = atomic64_read(&mm->context.id);
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0 && is_reserved_asid(asid)) {
@@ -178,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
 		cpumask_clear(mm_cpumask(mm));
 	}
 
-	mm->context.id = asid;
+	return asid;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
+	u64 asid;
 
 	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
 		__check_vmalloc_seq(mm);
@@ -195,20 +199,26 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	 */
 	cpu_set_reserved_ttbr0();
 
-	if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-	    && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+	asid = atomic64_read(&mm->context.id);
+	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
 		goto switch_mm_fastpath;
 
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 	/* Check that our ASID belongs to the current generation. */
-	if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-		new_context(mm, cpu);
-
-	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
-	cpumask_set_cpu(cpu, mm_cpumask(mm));
+	asid = atomic64_read(&mm->context.id);
+	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+		asid = new_context(mm, cpu);
+		atomic64_set(&mm->context.id, asid);
+	}
 
-	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+		local_flush_bp_all();
 		local_flush_tlb_all();
+	}
+
+	atomic64_set(&per_cpu(active_asids, cpu), asid);
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
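In summary, the diff makes mm->context.id an atomic64_t: every read now goes through atomic64_read(), and new_context() returns the freshly allocated ASID instead of storing it itself, so the caller publishes it with atomic64_set() under cpu_asid_lock. As the new comment notes, on a big endian kernel the two 32-bit halves of the ID are swapped when accessed by non-64-bit operations, so a plain read racing with a rollover could observe a torn value. The patch also pairs the pending TLB flush with local_flush_bp_all(), invalidating the branch predictor alongside the TLB. The fastpath hinges on the generation check, (asid ^ asid_generation) >> ASID_BITS, which is zero only when the task's ASID was allocated in the current generation. Below is a minimal user-space sketch of that check. It is an illustration only, assuming C11 <stdatomic.h> and an invented asid_is_current() helper with an arbitrary ASID_BITS value; the kernel itself uses atomic64_t, per-CPU active_asids, and its own constants.

/*
 * Sketch of the ASID generation check, not the kernel code: context.id
 * packs [generation | asid], with the hardware ASID in the low ASID_BITS
 * and the rollover generation above it.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS		8		/* illustrative value */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)

/* Bumped by ASID_FIRST_VERSION on every rollover of the ASID space. */
static _Atomic uint64_t asid_generation = ASID_FIRST_VERSION;

/*
 * XOR cancels matching generation bits and the shift discards the ASID
 * bits, so the result is zero exactly when the ASID belongs to the live
 * generation.
 */
static int asid_is_current(uint64_t id)
{
	return ((id ^ atomic_load(&asid_generation)) >> ASID_BITS) == 0;
}

int main(void)
{
	uint64_t id = ASID_FIRST_VERSION | 42;	/* generation 1, ASID 42 */

	printf("before rollover: %d\n", asid_is_current(id));	/* 1 */

	/* A rollover moves every CPU to the next generation... */
	atomic_fetch_add(&asid_generation, ASID_FIRST_VERSION);

	/* ...so the old id fails the check and must take the slowpath. */
	printf("after rollover:  %d\n", asid_is_current(id));	/* 0 */
	return 0;
}

The atomic64_xchg() against per_cpu(active_asids, cpu) in the real fastpath adds a second guard that the sketch omits: the rollover path clears each CPU's active ASID, so a zero returned from the xchg forces the locked slowpath even when the generation bits still happen to match.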