Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--   arch/sparc/mm/fault_64.c |  9
-rw-r--r--   arch/sparc/mm/init_64.c  | 86
-rw-r--r--   arch/sparc/mm/tlb.c      | 11
-rw-r--r--   arch/sparc/mm/tsb.c      |  2
4 files changed, 69 insertions(+), 39 deletions(-)
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 097aee763af3..5062ff389e83 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -472,8 +472,13 @@ good_area:
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	mm_rss = mm->context.huge_pte_count;
 	if (unlikely(mm_rss >
-		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
-		tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
+		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
+			tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+		else
+			hugetlb_setup(regs);
+
+	}
 #endif
 
 	return;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index c3b72423c846..5c2c6e61facb 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -314,16 +314,31 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
 	unsigned long tag;
 
+	if (unlikely(!tsb))
+		return;
+
 	tsb += ((address >> tsb_hash_shift) &
 		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, tte);
 }
 
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	if ((tlb_type == hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+	    (tlb_type != hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
+		return true;
+	return false;
+}
+#endif
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	unsigned long tsb_index, tsb_hash_shift, flags;
 	struct mm_struct *mm;
+	unsigned long flags;
 	pte_t pte = *ptep;
 
 	if (tlb_type != hypervisor) {
@@ -335,25 +350,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 	mm = vma->vm_mm;
 
-	tsb_index = MM_TSB_BASE;
-	tsb_hash_shift = PAGE_SHIFT;
-
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
-		if ((tlb_type == hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
-		    (tlb_type != hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
-			tsb_index = MM_TSB_HUGE;
-			tsb_hash_shift = HPAGE_SHIFT;
-		}
-	}
+	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+					address, pte_val(pte));
+	else
 #endif
-
-	__update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift,
-				address, pte_val(pte));
+		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+					address, pte_val(pte));
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
@@ -2021,6 +2027,16 @@ static void __init patch_tlb_miss_handler_bitmap(void)
 	flushi(&valid_addr_bitmap_insn[0]);
 }
 
+static void __init register_page_bootmem_info(void)
+{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int i;
+
+	for_each_online_node(i)
+		if (NODE_DATA(i)->node_spanned_pages)
+			register_page_bootmem_info_node(NODE_DATA(i));
+#endif
+}
 void __init mem_init(void)
 {
 	unsigned long codepages, datapages, initpages;
@@ -2038,20 +2054,8 @@ void __init mem_init(void)
 
 	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
 
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-	{
-		int i;
-		for_each_online_node(i) {
-			if (NODE_DATA(i)->node_spanned_pages != 0) {
-				totalram_pages +=
-					free_all_bootmem_node(NODE_DATA(i));
-			}
-		}
-		totalram_pages += free_low_memory_core_early(MAX_NUMNODES);
-	}
-#else
+	register_page_bootmem_info();
 	totalram_pages = free_all_bootmem();
-#endif
 
 	/* We subtract one to account for the mem_map_zero page
 	 * allocated below.
@@ -2712,14 +2716,28 @@ static void context_reload(void *__data)
 		load_secondary_context(mm);
 }
 
-void hugetlb_setup(struct mm_struct *mm)
+void hugetlb_setup(struct pt_regs *regs)
 {
-	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+	struct mm_struct *mm = current->mm;
+	struct tsb_config *tp;
 
-	if (likely(tp->tsb != NULL))
-		return;
+	if (in_atomic() || !mm) {
+		const struct exception_table_entry *entry;
+
+		entry = search_exception_tables(regs->tpc);
+		if (entry) {
+			regs->tpc = entry->fixup;
+			regs->tnpc = regs->tpc + 4;
+			return;
+		}
+		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
+		die_if_kernel("HugeTSB in atomic", regs);
+	}
+
+	tp = &mm->context.tsb_block[MM_TSB_HUGE];
+	if (likely(tp->tsb == NULL))
+		tsb_grow(mm, MM_TSB_HUGE, 0);
 
-	tsb_grow(mm, MM_TSB_HUGE, 0);
 	tsb_context_switch(mm);
 	smp_tsb_sync(mm);
 }
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 3e8fec391fe0..ba6ae7ffdc2c 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -135,8 +135,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			mm->context.huge_pte_count++;
 		else
 			mm->context.huge_pte_count--;
-		if (mm->context.huge_pte_count == 1)
-			hugetlb_setup(mm);
+
+		/* Do not try to allocate the TSB hash table if we
+		 * don't have one already.  We have various locks held
+		 * and thus we'll end up doing a GFP_KERNEL allocation
+		 * in an atomic context.
+		 *
+		 * Instead, we let the first TLB miss on a hugepage
+		 * take care of this.
+		 */
 	}
 
 	if (!pmd_none(orig)) {
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 7f6474347491..428982b9becf 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -314,7 +314,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 
 retry_tsb_alloc:
 	gfp_flags = GFP_KERNEL;
 	if (new_size > (PAGE_SIZE * 2))
-		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;
+		gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;
 	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
 					gfp_flags, numa_node_id());
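
The substantive change above is moving the huge-TSB allocation out of set_pmd_at(), which runs with page-table locks held, and deferring it to the first huge-page TLB miss. There, hugetlb_setup() can either sleep safely in tsb_grow() or, if the miss arrived from atomic context after all, bail out through the exception tables. The following is a minimal, compilable userspace sketch of that pattern; struct pt_regs, the atomic-context flag, and search_fixup() are simplified stand-ins for the kernel's real interfaces, not copies of them.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for kernel types and state. */
struct pt_regs { unsigned long tpc, tnpc; };

static bool atomic_ctx;   /* stand-in for in_atomic()                */
static void *huge_tsb;    /* stand-in for tsb_block[MM_TSB_HUGE].tsb */

/* Stand-in for search_exception_tables(); 0 means "no fixup entry". */
static unsigned long search_fixup(unsigned long pc)
{
	return pc == 0x1000 ? 0x2000 : 0;
}

static void tsb_grow_huge(void)
{
	/* In the kernel this is a GFP_KERNEL allocation, so it is
	 * only legal where sleeping is allowed. */
	static char tsb[64];
	huge_tsb = tsb;
}

static void hugetlb_setup(struct pt_regs *regs)
{
	if (atomic_ctx) {
		unsigned long fixup = search_fixup(regs->tpc);
		if (fixup) {
			/* Redirect the trapping PC to the fixup stub
			 * rather than allocating here. */
			regs->tpc = fixup;
			regs->tnpc = fixup + 4;
			return;
		}
		fprintf(stderr, "unexpected atomic hugetlb_setup\n");
		return; /* the kernel would die_if_kernel() here */
	}
	if (!huge_tsb)
		tsb_grow_huge(); /* process context: safe to sleep */
}

int main(void)
{
	struct pt_regs regs = { .tpc = 0x1000, .tnpc = 0x1004 };

	atomic_ctx = true;   /* first miss arrives in atomic context */
	hugetlb_setup(&regs);
	printf("fixed up to tpc=%#lx, tsb=%p\n", regs.tpc, huge_tsb);

	atomic_ctx = false;  /* retried from process context */
	hugetlb_setup(&regs);
	printf("after retry, tsb=%p\n", huge_tsb);
	return 0;
}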
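The tsb.c hunk is a two-character bug fix worth spelling out: the old assignment gfp_flags = __GFP_NOWARN | __GFP_NORETRY discarded the GFP_KERNEL base flags for multi-page TSB allocations, so those allocations lost their reclaim permissions entirely, while the |= form keeps GFP_KERNEL and merely adds the fail-fast-and-quietly modifiers. The toy program below demonstrates the difference using mock flag values; the real bit definitions live in linux/gfp.h and differ.

/* Illustration only: mock flag bits standing in for <linux/gfp.h>. */
#include <stdio.h>

#define __GFP_WAIT    0x01u  /* may sleep (part of GFP_KERNEL)        */
#define __GFP_IO      0x02u  /* may start disk I/O                    */
#define __GFP_FS      0x04u  /* may call into the filesystem          */
#define GFP_KERNEL    (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define __GFP_NOWARN  0x08u  /* suppress allocation-failure warnings  */
#define __GFP_NORETRY 0x10u  /* fail quickly instead of retrying hard */

int main(void)
{
	unsigned int buggy = GFP_KERNEL;
	unsigned int fixed = GFP_KERNEL;

	/* Old code: '=' clobbers GFP_KERNEL, leaving only the two
	 * modifier bits, i.e. an accidental no-reclaim allocation. */
	buggy = __GFP_NOWARN | __GFP_NORETRY;

	/* Fixed code: '|=' keeps GFP_KERNEL and adds the modifiers. */
	fixed |= __GFP_NOWARN | __GFP_NORETRY;

	printf("clobbered: %#x (GFP_KERNEL bits lost)\n", buggy);
	printf("or-ed in:  %#x (GFP_KERNEL bits kept)\n", fixed);
	return 0;
}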