Diffstat (limited to 'arch/csky/abiv2/cacheflush.c')
 arch/csky/abiv2/cacheflush.c | 84 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 59 insertions(+), 25 deletions(-)
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index 5bb887b275e1..790f1ebfba44 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -6,46 +6,80 @@
 #include <linux/mm.h>
 #include <asm/cache.h>
 
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+		      pte_t *pte)
 {
-	unsigned long start;
+	unsigned long addr;
+	struct page *page;
 
-	start = (unsigned long) kmap_atomic(page);
+	page = pfn_to_page(pte_pfn(*pte));
+	if (page == ZERO_PAGE(0))
+		return;
 
-	cache_wbinv_range(start, start + PAGE_SIZE);
+	if (test_and_set_bit(PG_dcache_clean, &page->flags))
+		return;
 
-	kunmap_atomic((void *)start);
-}
+	addr = (unsigned long) kmap_atomic(page);
 
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
-			     unsigned long vaddr, int len)
-{
-	unsigned long kaddr;
+	dcache_wb_range(addr, addr + PAGE_SIZE);
 
-	kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);
+	if (vma->vm_flags & VM_EXEC)
+		icache_inv_range(addr, addr + PAGE_SIZE);
+
+	kunmap_atomic((void *) addr);
+}
 
-	cache_wbinv_range(kaddr, kaddr + len);
+void flush_icache_deferred(struct mm_struct *mm)
+{
+	unsigned int cpu = smp_processor_id();
+	cpumask_t *mask = &mm->context.icache_stale_mask;
 
-	kunmap_atomic((void *)kaddr);
+	if (cpumask_test_cpu(cpu, mask)) {
+		cpumask_clear_cpu(cpu, mask);
+		/*
+		 * Ensure the remote hart's writes are visible to this hart.
+		 * This pairs with a barrier in flush_icache_mm.
+		 */
+		smp_mb();
+		local_icache_inv_all(NULL);
+	}
 }
 
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *pte)
+void flush_icache_mm_range(struct mm_struct *mm,
+		unsigned long start, unsigned long end)
 {
-	unsigned long addr, pfn;
-	struct page *page;
+	unsigned int cpu;
+	cpumask_t others, *mask;
 
-	pfn = pte_pfn(*pte);
-	if (unlikely(!pfn_valid(pfn)))
-		return;
+	preempt_disable();
 
-	page = pfn_to_page(pfn);
-	if (page == ZERO_PAGE(0))
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
+	if (mm == current->mm) {
+		icache_inv_range(start, end);
+		preempt_enable();
 		return;
+	}
+#endif
 
-	addr = (unsigned long) kmap_atomic(page);
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
 
-	cache_wbinv_range(addr, addr + PAGE_SIZE);
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_icache_inv_all(NULL);
 
-	kunmap_atomic((void *) addr);
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+
+	if (mm != current->active_mm || !cpumask_empty(&others)) {
+		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
+		cpumask_clear(mask);
+	}
+
+	preempt_enable();
 }
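
The new update_mmu_cache() replaces the eager flush_icache_page() path with a lazy scheme keyed on PG_dcache_clean: the first time a page is mapped, its D-cache lines are written back (and the I-cache invalidated for VM_EXEC mappings), and the set bit then suppresses the flush for every later mapping of the same page; writes to the page would clear the bit again and re-arm the flush, but that side is outside this diff. Below is a minimal userspace sketch of just that bookkeeping: struct page_sim and update_mmu_cache_sim() are invented names, and printf stands in for dcache_wb_range()/icache_inv_range(), so no real cache maintenance happens here.

/*
 * Userspace model of the PG_dcache_clean scheme above. Invented names;
 * no real cache operations are performed.
 */
#include <stdbool.h>
#include <stdio.h>

struct page_sim {
	bool dcache_clean;		/* models the PG_dcache_clean bit */
};

/* Runs whenever a PTE for @page is installed, like update_mmu_cache(). */
static void update_mmu_cache_sim(struct page_sim *page, bool executable)
{
	/* test_and_set_bit(): only the first mapping pays for a flush. */
	if (page->dcache_clean)
		return;
	page->dcache_clean = true;

	printf("writeback dcache range for page\n");
	if (executable)			/* VM_EXEC in the real code */
		printf("invalidate icache range for page\n");
}

int main(void)
{
	struct page_sim page = { .dcache_clean = false };

	update_mmu_cache_sim(&page, true);	/* first mapping: flushes */
	update_mmu_cache_sim(&page, true);	/* remap: bit already set, no-op */
	return 0;
}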
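
flush_icache_mm_range() and flush_icache_deferred() together implement a deferred broadcast: the modifying hart marks every hart's I-cache stale, invalidates its own immediately, and sends an immediate IPI flush only to harts currently running the MM; any hart that attaches later invalidates lazily via the stale mask. The diff does not show the caller of flush_icache_deferred(), which would run from the context-switch path. The following compilable userspace sketch models only the stale-mask protocol, assuming a fixed 4-hart system; the cpumask and IPI machinery is reduced to a plain bitmask and printf, so no hardware caches are touched.

/*
 * Userspace model of the icache_stale_mask protocol above, assuming
 * NR_CPUS harts. Invented *_sim names; local_icache_inv_all() here
 * just logs instead of invalidating anything.
 */
#include <stdio.h>

#define NR_CPUS 4

struct mm_sim {
	unsigned long icache_stale_mask;	/* one "stale" bit per hart */
};

static void local_icache_inv_all(int cpu)
{
	printf("hart %d: invalidate entire local icache\n", cpu);
}

/* Writer side, mirroring flush_icache_mm_range() without ICACHE_INS. */
static void flush_icache_mm_sim(struct mm_sim *mm, int this_cpu)
{
	/* Mark every hart's icache as needing a flush for this MM. */
	mm->icache_stale_mask = (1UL << NR_CPUS) - 1;

	/* Flush this hart's icache now, and mark it as flushed. */
	mm->icache_stale_mask &= ~(1UL << this_cpu);
	local_icache_inv_all(this_cpu);
}

/* Reader side, mirroring flush_icache_deferred(); runs at MM switch. */
static void flush_icache_deferred_sim(struct mm_sim *mm, int cpu)
{
	if (mm->icache_stale_mask & (1UL << cpu)) {
		mm->icache_stale_mask &= ~(1UL << cpu);
		/* smp_mb() belongs here, pairing with the writer side. */
		local_icache_inv_all(cpu);
	}
}

int main(void)
{
	struct mm_sim mm = { 0 };

	flush_icache_mm_sim(&mm, 0);		/* hart 0 rewrote code */
	flush_icache_deferred_sim(&mm, 2);	/* hart 2 enters the MM: flush */
	flush_icache_deferred_sim(&mm, 2);	/* already clean: no-op */
	return 0;
}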