author | Peter Zijlstra <[email protected]> | 2011-05-24 17:11:48 -0700 |
---|---|---|
committer | Linus Torvalds <[email protected]> | 2011-05-25 08:39:13 -0700 |
commit | d6bf29b44ddf3ca915f77b9383bee8b7a209f3fd (patch) | |
tree | 777e98ebcbf207ea8442e977bd93053bb23a8df8 /arch/powerpc/mm/pgtable.c | |
parent | d16dfc550f5326a4000f3322582a7c05dec91d7a (diff) | |
powerpc: mmu_gather rework
Fix up powerpc for the new mmu_gather infrastructure.
PPC has an extra batching queue to RCU-free the actual pagetable
allocations; use the ARCH extensions for that for now.
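As a rough sketch of what that ARCH extension amounts to, the queue of to-be-freed page tables hangs off the gather itself rather than off the CPU. Only the tlb->arch.batch member is confirmed by the diff below; the rest of the layout here is assumed purely for illustration:

```c
/* Minimal sketch of the per-gather arch extension; only `arch.batch`
 * is taken from the diff below, everything else is a stand-in so the
 * snippet compiles on its own. */
struct mm_struct;			/* opaque here */
struct pte_freelist_batch;		/* queue of page tables awaiting RCU free */

struct arch_mmu_gather {
	struct pte_freelist_batch *batch;	/* replaces the per-CPU pte_freelist_cur */
};

struct mmu_gather {
	struct mm_struct *mm;
	/* ... generic batching state elided ... */
	struct arch_mmu_gather arch;		/* powerpc-specific extension */
};
```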
For the ppc64_tlb_batch, which tracks the vaddrs to unhash from the
hardware hash table, keep using per-CPU arrays but flush them at context
switch, and use a TLF bit to track the lazy_mmu state.
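The lazy_mmu side can be pictured along these lines; the flag and helper names below are illustrative stand-ins rather than the kernel's actual identifiers, and the per-CPU ppc64_tlb_batch flush is reduced to a stub:

```c
/* Hedged sketch of the idea above: lazy-MMU batching is tracked with a
 * thread-local flag instead of relying on preemption being off, and the
 * pending hash-table invalidations are flushed when the task is switched
 * out. */
#define TLF_LAZY_MMU	(1UL << 0)

struct thread_info {
	unsigned long local_flags;
};

struct thread_info current_ti;		/* stands in for current_thread_info() */

void flush_pending_hashes(void)
{
	/* the real code drains the per-CPU ppc64_tlb_batch here */
}

/* Entering lazy MMU mode just marks the task. */
void enter_lazy_mmu(void)
{
	current_ti.local_flags |= TLF_LAZY_MMU;
}

/* Leaving it flushes whatever was batched and clears the mark. */
void leave_lazy_mmu(void)
{
	flush_pending_hashes();
	current_ti.local_flags &= ~TLF_LAZY_MMU;
}

/* At context switch the batch is flushed if the outgoing task was batching. */
void switch_out(struct thread_info *prev)
{
	if (prev->local_flags & TLF_LAZY_MMU)
		flush_pending_hashes();
}
```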
Signed-off-by: Peter Zijlstra <[email protected]>
Acked-by: Benjamin Herrenschmidt <[email protected]>
Cc: David Miller <[email protected]>
Cc: Martin Schwidefsky <[email protected]>
Cc: Russell King <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: Jeff Dike <[email protected]>
Cc: Richard Weinberger <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: Nick Piggin <[email protected]>
Cc: Namhyung Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Diffstat (limited to 'arch/powerpc/mm/pgtable.c')
-rw-r--r-- | arch/powerpc/mm/pgtable.c | 14 |
1 file changed, 4 insertions, 10 deletions
```diff
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 6a3997f98dfb..6e72788598f8 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -33,8 +33,6 @@
 
 #include "mmu_decl.h"
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 #ifdef CONFIG_SMP
 
 /*
@@ -43,7 +41,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
  * freeing a page table page that is being walked without locks
  */
 
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 static unsigned long pte_freelist_forced_free;
 
 struct pte_freelist_batch
@@ -97,12 +94,10 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
 
 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 {
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+	struct pte_freelist_batch **batchp = &tlb->arch.batch;
 	unsigned long pgf;
 
-	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
+	if (atomic_read(&tlb->mm->mm_users) < 2) {
 		pgtable_free(table, shift);
 		return;
 	}
@@ -124,10 +119,9 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 	}
 }
 
-void pte_free_finish(void)
+void pte_free_finish(struct mmu_gather *tlb)
 {
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+	struct pte_freelist_batch **batchp = &tlb->arch.batch;
 
 	if (*batchp == NULL)
 		return;
```
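The net effect of the hunks above is that the pending free list travels with the mmu_gather instead of with the CPU, which is why the old "safe since tlb_gather_mmu has disabled preemption" comments can be dropped. Below is a self-contained model of that flow; queue_table() and drain_batch() are hypothetical stand-ins for pgtable_free_tlb() and pte_free_finish(), and plain free() stands in for the RCU-deferred freeing:

```c
#include <stdlib.h>

#define BATCH_CAPACITY 16	/* small model value; the kernel sizes this from PAGE_SIZE */

struct pte_freelist_batch {
	unsigned int index;
	void *tables[BATCH_CAPACITY];
};

struct mmu_gather {
	struct pte_freelist_batch *batch;	/* per-gather, no longer per-CPU */
};

/* Models pte_free_finish(tlb): drain whatever this particular gather queued. */
static void drain_batch(struct mmu_gather *tlb)
{
	struct pte_freelist_batch *batch = tlb->batch;
	unsigned int i;

	if (batch == NULL)
		return;
	for (i = 0; i < batch->index; i++)
		free(batch->tables[i]);	/* the real code defers this through RCU */
	free(batch);
	tlb->batch = NULL;
}

/* Models pgtable_free_tlb(): allocate the batch lazily, then queue the table. */
static void queue_table(struct mmu_gather *tlb, void *table)
{
	struct pte_freelist_batch *batch = tlb->batch;

	if (batch == NULL) {
		batch = calloc(1, sizeof(*batch));
		if (batch == NULL) {
			free(table);	/* no memory for batching: free immediately */
			return;
		}
		tlb->batch = batch;
	}
	batch->tables[batch->index++] = table;
	if (batch->index == BATCH_CAPACITY)
		drain_batch(tlb);	/* a full batch is submitted right away */
}

int main(void)
{
	struct mmu_gather tlb = { .batch = NULL };

	queue_table(&tlb, malloc(4096));	/* stand-ins for freed page tables */
	queue_table(&tlb, malloc(4096));
	drain_batch(&tlb);			/* end of the unmap: flush the queue */
	return 0;
}
```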