author    | Peter Zijlstra <[email protected]>    | 2011-05-24 17:11:50 -0700
committer | Linus Torvalds <[email protected]> | 2011-05-25 08:39:13 -0700
commit    | 90f08e399d054d017c0e2c5089a0f44a76418271
tree      | 771d8a9ff675e3fe5deda53d0701e41f1f15e9a1 /arch/sparc/mm/tlb.c
parent    | d6bf29b44ddf3ca915f77b9383bee8b7a209f3fd
sparc: mmu_gather rework
Rework the sparc mmu_gather usage to conform to the new world order :-)
Sparc mmu_gather does two things:
- tracks vaddrs to unhash
- tracks pages to free
Split these two things, as powerpc has done: keep the vaddrs in a
per-cpu data structure and flush them on context switch.
The remaining bits can then use the generic mmu_gather.
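For reference, the vaddr side now lives in a small per-cpu batch
structure. It is added outside the file shown below (in
arch/sparc/include/asm/tlbflush_64.h), so its exact definition is not
part of this diff, but it looks roughly like:

	/* Pending vaddrs to unhash, batched per CPU and flushed on
	 * context switch, on overflow, or when the mm changes. */
	struct tlb_batch {
		struct mm_struct *mm;	/* mm the queued vaddrs belong to */
		unsigned long tlb_nr;	/* how many vaddrs are queued */
		unsigned long vaddrs[TLB_BATCH_NR];
	};

Keeping this per-cpu (rather than inside each mmu_gather) is what
forces the flush on context switch: the queued vaddrs belong to
whichever mm was last active on that CPU.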
Signed-off-by: Peter Zijlstra <[email protected]>
Acked-by: David Miller <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Martin Schwidefsky <[email protected]>
Cc: Russell King <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: Jeff Dike <[email protected]>
Cc: Richard Weinberger <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: Nick Piggin <[email protected]>
Cc: Namhyung Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Diffstat (limited to 'arch/sparc/mm/tlb.c')
-rw-r--r-- | arch/sparc/mm/tlb.c | 43
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index d8f21e24a82f..b1f279cd00bf 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -19,33 +19,34 @@
 /* Heavily inspired by the ppc64 code. */
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 
 void flush_tlb_pending(void)
 {
-	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 
-	if (mp->tlb_nr) {
-		flush_tsb_user(mp);
+	if (tb->tlb_nr) {
+		flush_tsb_user(tb);
 
-		if (CTX_VALID(mp->mm->context)) {
+		if (CTX_VALID(tb->mm->context)) {
 #ifdef CONFIG_SMP
-			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
-					      &mp->vaddrs[0]);
+			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
+					      &tb->vaddrs[0]);
 #else
-			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
-					    mp->tlb_nr, &mp->vaddrs[0]);
+			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
+					    tb->tlb_nr, &tb->vaddrs[0]);
 #endif
 		}
 
-		mp->tlb_nr = 0;
+		tb->tlb_nr = 0;
 	}
 
-	put_cpu_var(mmu_gathers);
+	put_cpu_var(tlb_batch);
 }
 
-void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+		   pte_t *ptep, pte_t orig, int fullmm)
 {
-	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
 
 	vaddr &= PAGE_MASK;
@@ -77,21 +78,25 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
 
 no_cache_flush:
 
-	if (mp->fullmm)
+	if (fullmm) {
+		put_cpu_var(tlb_batch);
 		return;
+	}
 
-	nr = mp->tlb_nr;
+	nr = tb->tlb_nr;
 
-	if (unlikely(nr != 0 && mm != mp->mm)) {
+	if (unlikely(nr != 0 && mm != tb->mm)) {
 		flush_tlb_pending();
 		nr = 0;
 	}
 
 	if (nr == 0)
-		mp->mm = mm;
+		tb->mm = mm;
 
-	mp->vaddrs[nr] = vaddr;
-	mp->tlb_nr = ++nr;
+	tb->vaddrs[nr] = vaddr;
+	tb->tlb_nr = ++nr;
 
 	if (nr >= TLB_BATCH_NR)
 		flush_tlb_pending();
+
+	put_cpu_var(tlb_batch);
 }
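Note that tlb_batch_add() now uses get_cpu_var(), which disables
preemption, instead of the old __get_cpu_var(), so every exit path must
pair it with put_cpu_var(); that is why the fullmm case grows an
early-return branch. The batching logic itself is simple: queue vaddrs
until the batch fills or the mm changes, then flush. A minimal
userspace C sketch of the same idea (all names here are illustrative
stand-ins, not kernel API):

	/* Userspace sketch of the batching pattern above; "ctx" stands
	 * in for the mm pointer, BATCH_NR for TLB_BATCH_NR. */
	#include <stdio.h>

	#define BATCH_NR 4		/* small, to keep the demo short */

	struct batch {
		int ctx;		/* context the queued vaddrs belong to */
		unsigned long nr;	/* number of queued vaddrs */
		unsigned long vaddrs[BATCH_NR];
	};

	static struct batch batch;	/* one instance per CPU in the kernel */

	static void flush_pending(void)
	{
		if (!batch.nr)
			return;
		printf("flush ctx=%d:", batch.ctx);
		for (unsigned long i = 0; i < batch.nr; i++)
			printf(" %#lx", batch.vaddrs[i]);
		printf("\n");
		batch.nr = 0;
	}

	static void batch_add(int ctx, unsigned long vaddr)
	{
		/* A pending batch for a different context must be drained
		 * first, like the mm != tb->mm check in tlb_batch_add(). */
		if (batch.nr != 0 && ctx != batch.ctx)
			flush_pending();
		if (batch.nr == 0)
			batch.ctx = ctx;
		batch.vaddrs[batch.nr++] = vaddr;
		if (batch.nr >= BATCH_NR)	/* batch full: drain it */
			flush_pending();
	}

	int main(void)
	{
		for (unsigned long v = 1; v <= 6; v++)
			batch_add(1, v << 13);	/* queue six 8K-aligned vaddrs */
		batch_add(2, 0x2000);		/* new context forces a flush */
		flush_pending();		/* drain whatever is left */
		return 0;
	}

Running this flushes once on overflow (four vaddrs for ctx 1), once on
the context change (the remaining two), and once at the end (the single
ctx 2 vaddr), mirroring the three flush triggers in the patched code.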