Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/hugetlbpage.c  |  2
-rw-r--r--  arch/sparc/mm/init_32.c      |  2
-rw-r--r--  arch/sparc/mm/init_64.c      | 89
-rw-r--r--  arch/sparc/mm/tsb.c          |  7
-rw-r--r--  arch/sparc/mm/ultra.S        |  5
5 files changed, 67 insertions, 38 deletions
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 7c29d38e6b99..88855e383b34 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index c6afe98de4d9..3bd0d513bddb 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -290,7 +290,7 @@ void __init mem_init(void)
 
 
 	/* Saves us work later. */
-	memset((void *)&empty_zero_page, 0, PAGE_SIZE);
+	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 
 	i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
 	i += 1;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 0cda653ae007..3c40ebd50f92 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
 	}
 
 	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
-		pr_warn("hugepagesz=%llu not supported by MMU.\n",
+		hugetlb_bad_size();
+		pr_err("hugepagesz=%llu not supported by MMU.\n",
 			hugepage_size);
 		goto out;
 	}
@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
 
 /* get_new_mmu_context() uses "cache + 1".  */
 DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 #define MAX_CTX_NR	(1UL << CTX_NR_BITS)
 #define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+	unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+	unsigned long new_ver, new_ctx, old_ctx;
+	struct mm_struct *mm;
+	int cpu;
+
+	bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+	/* Reserve kernel context */
+	set_bit(0, mmu_context_bmap);
+
+	new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+	if (unlikely(new_ver == 0))
+		new_ver = CTX_FIRST_VERSION;
+	tlb_context_cache = new_ver;
+
+	/*
+	 * Make sure that any new mm that are added into per_cpu_secondary_mm,
+	 * are going to go through get_new_mmu_context() path.
+	 */
+	mb();
+
+	/*
+	 * Updated versions to current on those CPUs that had valid secondary
+	 * contexts
+	 */
+	for_each_online_cpu(cpu) {
+		/*
+		 * If a new mm is stored after we took this mm from the array,
+		 * it will go into get_new_mmu_context() path, because we
+		 * already bumped the version in tlb_context_cache.
+		 */
+		mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+		if (unlikely(!mm || mm == &init_mm))
+			continue;
+
+		old_ctx = mm->context.sparc64_ctx_val;
+		if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+			new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+			set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+			mm->context.sparc64_ctx_val = new_ctx;
+		}
+	}
+}
 
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
-	int new_version;
 
 	spin_lock(&ctx_alloc_lock);
+retry:
+	/* wrap might have happened, test again if our context became valid */
+	if (unlikely(CTX_VALID(mm->context)))
+		goto out;
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-	new_version = 0;
 	if (new_ctx >= (1 << CTX_NR_BITS)) {
 		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 		if (new_ctx >= ctx) {
-			int i;
-			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-				CTX_FIRST_VERSION;
-			if (new_ctx == 1)
-				new_ctx = CTX_FIRST_VERSION;
-
-			/* Don't call memset, for 16 entries that's just
-			 * plain silly...
-			 */
-			mmu_context_bmap[0] = 3;
-			mmu_context_bmap[1] = 0;
-			mmu_context_bmap[2] = 0;
-			mmu_context_bmap[3] = 0;
-			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-				mmu_context_bmap[i + 0] = 0;
-				mmu_context_bmap[i + 1] = 0;
-				mmu_context_bmap[i + 2] = 0;
-				mmu_context_bmap[i + 3] = 0;
-			}
-			new_version = 1;
-			goto out;
+			mmu_context_wrap();
+			goto retry;
 		}
 	}
+	if (mm->context.sparc64_ctx_val)
+		cpumask_clear(mm_cpumask(mm));
 	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
 	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
 	spin_unlock(&ctx_alloc_lock);
-
-	if (unlikely(new_version))
-		smp_new_mmu_context_version();
 }
 
 static int numa_enabled = 1;
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index bedf08b22a47..0d4b998c7d7b 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -496,7 +496,8 @@ retry_tsb_alloc:
 		extern void copy_tsb(unsigned long old_tsb_base,
 				     unsigned long old_tsb_size,
 				     unsigned long new_tsb_base,
-				     unsigned long new_tsb_size);
+				     unsigned long new_tsb_size,
+				     unsigned long page_size_shift);
 		unsigned long old_tsb_base = (unsigned long) old_tsb;
 		unsigned long new_tsb_base = (unsigned long) new_tsb;
 
@@ -504,7 +505,9 @@ retry_tsb_alloc:
 			old_tsb_base = __pa(old_tsb_base);
 			new_tsb_base = __pa(new_tsb_base);
 		}
-		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+			tsb_index == MM_TSB_BASE ?
+			PAGE_SHIFT : REAL_HPAGE_SHIFT);
 	}
 
 	mm->context.tsb_block[tsb_index].tsb = new_tsb;
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5d2fd6cd3189..fcf4d27a38fb 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -971,11 +971,6 @@ xcall_capture:
 	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
 	retry
 
-	.globl		xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
-	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
-	retry
-
 #ifdef CONFIG_KGDB
 	.globl		xcall_kgdb_capture
 xcall_kgdb_capture:
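As a rough illustration of the wrap scheme the init_64.c changes rely on, the stand-alone C sketch below models how a context value splits into a version (upper bits) and a context number (lower bits), and why bumping the version in tlb_context_cache invalidates every previously issued context. The 13-bit number width and the mask definitions here are assumptions chosen for the example; the kernel's real CTX_NR_MASK / CTX_VERSION_MASK / CTX_FIRST_VERSION constants live in arch/sparc/include/asm/mmu_64.h.

/*
 * Illustrative sketch only, not kernel code: version/number split used by
 * the new mmu_context_wrap() path.  CTX_NR_BITS = 13 is an assumed width.
 */
#include <stdio.h>

#define CTX_NR_BITS       13UL
#define CTX_NR_MASK       ((1UL << CTX_NR_BITS) - 1)   /* low bits: context number */
#define CTX_VERSION_MASK  (~CTX_NR_MASK)               /* high bits: generation    */
#define CTX_FIRST_VERSION (1UL << CTX_NR_BITS)

int main(void)
{
	/* Analogue of tlb_context_cache: current version + last number issued. */
	unsigned long cache = CTX_FIRST_VERSION;

	/* Hand out context number 1, tagged with the current version. */
	unsigned long ctx = (cache & CTX_VERSION_MASK) | 1UL;

	/* A context stays usable while its version bits match the cache;
	 * this mirrors the CTX_VERSION_MASK comparison mmu_context_wrap()
	 * performs on mm->context.sparc64_ctx_val in the diff above. */
	printf("valid before wrap: %d\n",
	       ((ctx ^ cache) & CTX_VERSION_MASK) == 0);

	/* Wrapping bumps the version, implicitly invalidating old contexts;
	 * if the version field itself overflows to zero, restart it. */
	cache = (cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
	if (cache == 0)
		cache = CTX_FIRST_VERSION;

	printf("valid after wrap:  %d\n",
	       ((ctx ^ cache) & CTX_VERSION_MASK) == 0);
	return 0;
}

Run as a normal user-space program, it prints "valid before wrap: 1" and "valid after wrap: 0", which is the property get_new_mmu_context() depends on when it retries after mmu_context_wrap().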