Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 46 +++++++++++-----------------------------------
-rw-r--r--  arch/powerpc/mm/mmu_context.c   |  3 +--
-rw-r--r--  arch/powerpc/mm/slb.c           | 20 ++++++++++++++++++--
-rw-r--r--  arch/powerpc/mm/slice.c         | 29 +++++++++--------------------
4 files changed, 39 insertions(+), 59 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f23a89d8e4ce..88c95dc8b141 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1088,16 +1088,16 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 }
 
 #ifdef CONFIG_PPC_MM_SLICES
-static unsigned int get_paca_psize(unsigned long addr)
+static unsigned int get_psize(struct mm_struct *mm, unsigned long addr)
 {
 	unsigned char *psizes;
 	unsigned long index, mask_index;
 
 	if (addr < SLICE_LOW_TOP) {
-		psizes = get_paca()->mm_ctx_low_slices_psize;
+		psizes = mm->context.low_slices_psize;
 		index = GET_LOW_SLICE_INDEX(addr);
 	} else {
-		psizes = get_paca()->mm_ctx_high_slices_psize;
+		psizes = mm->context.high_slices_psize;
 		index = GET_HIGH_SLICE_INDEX(addr);
 	}
 	mask_index = index & 0x1;
@@ -1105,9 +1105,9 @@ static unsigned int get_paca_psize(unsigned long addr)
 }
 
 #else
-unsigned int get_paca_psize(unsigned long addr)
+unsigned int get_psize(struct mm_struct *mm, unsigned long addr)
 {
-	return get_paca()->mm_ctx_user_psize;
+	return mm->context.user_psize;
 }
 #endif
 
@@ -1118,15 +1118,11 @@ unsigned int get_paca_psize(unsigned long addr)
 #ifdef CONFIG_PPC_64K_PAGES
 void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
 {
-	if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
+	if (get_psize(mm, addr) == MMU_PAGE_4K)
 		return;
 	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
 	copro_flush_all_slbs(mm);
-	if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
-
-		copy_mm_to_paca(mm);
-		slb_flush_and_rebolt();
-	}
+	core_flush_all_slbs(mm);
 }
 #endif /* CONFIG_PPC_64K_PAGES */
 
@@ -1191,22 +1187,6 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
 		trap, vsid, ssize, psize, lpsize, pte);
 }
 
-static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
-			     int psize, bool user_region)
-{
-	if (user_region) {
-		if (psize != get_paca_psize(ea)) {
-			copy_mm_to_paca(mm);
-			slb_flush_and_rebolt();
-		}
-	} else if (get_paca()->vmalloc_sllp !=
-		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
-		get_paca()->vmalloc_sllp =
-			mmu_psize_defs[mmu_vmalloc_psize].sllp;
-		slb_vmalloc_update();
-	}
-}
-
 /* Result code is:
  * 0 - handled
  * 1 - normal page fault
@@ -1239,7 +1219,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 			rc = 1;
 			goto bail;
 		}
-		psize = get_slice_psize(mm, ea);
+		psize = get_psize(mm, ea);
 		ssize = user_segment_size(ea);
 		vsid = get_user_vsid(&mm->context, ea, ssize);
 		break;
@@ -1327,9 +1307,6 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 			WARN_ON(1);
 		}
 #endif
-		if (current->mm == mm)
-			check_paca_psize(ea, mm, psize, user_region);
-
 		goto bail;
 	}
 
@@ -1364,15 +1341,14 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 			       "to 4kB pages because of "
 			       "non-cacheable mapping\n");
 			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
+			slb_vmalloc_update();
 			copro_flush_all_slbs(mm);
+			core_flush_all_slbs(mm);
 		}
 	}
 #endif /* CONFIG_PPC_64K_PAGES */
 
-	if (current->mm == mm)
-		check_paca_psize(ea, mm, psize, user_region);
-
 #ifdef CONFIG_PPC_64K_PAGES
 	if (psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
 				     flags, ssize);
@@ -1460,7 +1436,7 @@ int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
 #ifdef CONFIG_PPC_MM_SLICES
 static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
 {
-	int psize = get_slice_psize(mm, ea);
+	int psize = get_psize(mm, ea);
 
 	/* We only prefault standard pages for now */
 	if (unlikely(psize != mm->context.user_psize))
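
A note on the get_psize() hunks above: the function's final statement, just past the hunk context, decodes two 4-bit page-size indices packed into each byte of the low_slices_psize/high_slices_psize arrays. The standalone sketch below shows that packing scheme; the helper names and sample values are illustrative, not kernel API.

    #include <assert.h>

    /* Two psize nibbles per byte: index >> 1 picks the byte, the low
     * bit of the index picks the nibble, mirroring the mask_index
     * arithmetic in get_psize(). */
    static unsigned int decode_psize(const unsigned char *psizes,
                                     unsigned long index)
    {
        unsigned long mask_index = index & 0x1;

        return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
    }

    static void encode_psize(unsigned char *psizes, unsigned long index,
                             unsigned int psize)
    {
        unsigned long mask_index = index & 0x1;

        psizes[index >> 1] &= ~(0xf << (mask_index * 4));
        psizes[index >> 1] |= (psize & 0xf) << (mask_index * 4);
    }

    int main(void)
    {
        unsigned char psizes[8] = { 0 };

        encode_psize(psizes, 2, 0x4);   /* illustrative psize indices, */
        encode_psize(psizes, 3, 0x1);   /* not actual MMU_PAGE_* values */
        assert(decode_psize(psizes, 2) == 0x4);
        assert(decode_psize(psizes, 3) == 0x1);
        return 0;
    }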
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c
index f84e14f23e50..28ae2835db3d 100644
--- a/arch/powerpc/mm/mmu_context.c
+++ b/arch/powerpc/mm/mmu_context.c
@@ -54,8 +54,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		 * MMU context id, which is then moved to SPRN_PID.
 		 *
 		 * For the hash MMU it is either the first load from slb_cache
-		 * in switch_slb(), and/or the store of paca->mm_ctx_id in
-		 * copy_mm_to_paca().
+		 * in switch_slb(), and/or load of MMU context id.
 		 *
 		 * On the other side, the barrier is in mm/tlb-radix.c for
 		 * radix which orders earlier stores to clear the PTEs vs
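
The reworded comment above describes this ordering requirement: the store that publishes this CPU in mm_cpumask() must be ordered before the first operation that starts loading translations for the new mm; with copy_mm_to_paca() gone, the hash side now relies on the slb_cache load in switch_slb() and/or the context-id load. A hedged sketch of that pattern, using the real barrier and cpumask primitives but with the surrounding switch_mm_irqs_off() logic elided:

    #include <linux/mm_types.h>
    #include <linux/smp.h>
    #include <asm/barrier.h>

    static void publish_mm_use(struct mm_struct *next)
    {
        unsigned long id;

        /* Store: mark this CPU as a user of 'next'. */
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

        smp_mb();   /* the full barrier the comment refers to */

        /* Load: first step that begins translations for 'next'. */
        id = next->context.id;
        (void)id;
    }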
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 6fec2ce3ccf4..1347ab86d32e 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -347,8 +347,6 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		get_paca()->slb_cache_ptr = 0;
 	}
 
-	copy_mm_to_paca(mm);
-
 	/*
 	 * preload some userspace segments into the SLB.
 	 * Almost all 32 and 64bit PowerPC executables are linked at
@@ -375,6 +373,24 @@ void slb_set_size(u16 size)
 	mmu_slb_size = size;
 }
 
+static void cpu_flush_slb(void *parm)
+{
+	struct mm_struct *mm = parm;
+	unsigned long flags;
+
+	if (mm != current->active_mm)
+		return;
+
+	local_irq_save(flags);
+	slb_flush_and_rebolt();
+	local_irq_restore(flags);
+}
+
+void core_flush_all_slbs(struct mm_struct *mm)
+{
+	on_each_cpu(cpu_flush_slb, mm, 1);
+}
+
 void slb_initialize(void)
 {
 	unsigned long linear_llp, vmalloc_llp, io_llp;
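
core_flush_all_slbs() is the new primitive here: on_each_cpu() (include/linux/smp.h) runs the callback on every online CPU, via IPI on remote ones, and with the final argument wait=1 it returns only after every callback has finished, so the flush is synchronous. A hedged sketch of the same broadcast pattern with hypothetical names; the callback must not sleep, since it runs in interrupt context on remote CPUs:

    #include <linux/smp.h>
    #include <linux/sched.h>
    #include <linux/mm_types.h>

    /* Runs on each CPU; CPUs not running the target mm skip the work. */
    static void refresh_segments(void *info)
    {
        struct mm_struct *mm = info;

        if (mm != current->active_mm)
            return;     /* no live SLB state for this mm here */

        /* per-CPU work goes here (in this patch: slb_flush_and_rebolt()) */
    }

    static void broadcast_refresh(struct mm_struct *mm)
    {
        on_each_cpu(refresh_segments, mm, 1);   /* wait for all CPUs */
    }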
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 205fe557ca10..606f424aac47 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -207,23 +207,6 @@ static bool slice_check_range_fits(struct mm_struct *mm,
 	return true;
 }
 
-static void slice_flush_segments(void *parm)
-{
-#ifdef CONFIG_PPC64
-	struct mm_struct *mm = parm;
-	unsigned long flags;
-
-	if (mm != current->active_mm)
-		return;
-
-	copy_mm_to_paca(current->active_mm);
-
-	local_irq_save(flags);
-	slb_flush_and_rebolt();
-	local_irq_restore(flags);
-#endif
-}
-
 static void slice_convert(struct mm_struct *mm,
 			  const struct slice_mask *mask, int psize)
 {
@@ -289,6 +272,9 @@ static void slice_convert(struct mm_struct *mm,
 	spin_unlock_irqrestore(&slice_convert_lock, flags);
 
 	copro_flush_all_slbs(mm);
+#ifdef CONFIG_PPC64
+	core_flush_all_slbs(mm);
+#endif
 }
 
 /*
@@ -502,8 +488,9 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 		 * be already initialised beyond the old address limit.
 		 */
 		mm->context.slb_addr_limit = high_limit;
-
-		on_each_cpu(slice_flush_segments, mm, 1);
+#ifdef CONFIG_PPC64
+		core_flush_all_slbs(mm);
+#endif
 	}
 
 	/* Sanity checks */
@@ -665,8 +652,10 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	    (SLICE_NUM_HIGH &&
 	     !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
 		slice_convert(mm, &potential_mask, psize);
+#ifdef CONFIG_PPC64
 		if (psize > MMU_PAGE_BASE)
-			on_each_cpu(slice_flush_segments, mm, 1);
+			core_flush_all_slbs(mm);
+#endif
 	}
 
 	return newaddr;
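
For orientation on the low/high slice indexing used throughout slice.c and by get_psize(): a standalone sketch, assuming the book3s64 geometry of 256MB slices below SLICE_LOW_TOP = 4GB (SLICE_LOW_SHIFT = 28) and 1TB slices above it (SLICE_HIGH_SHIFT = 40). These constants are taken from the book3s64 headers as an assumption, not from this patch.

    #include <stdio.h>

    #define SLICE_LOW_SHIFT   28              /* 256MB low slices */
    #define SLICE_HIGH_SHIFT  40              /* 1TB high slices */
    #define SLICE_LOW_TOP     0x100000000ul   /* 4GB boundary */

    #define GET_LOW_SLICE_INDEX(addr)   ((addr) >> SLICE_LOW_SHIFT)
    #define GET_HIGH_SLICE_INDEX(addr)  ((addr) >> SLICE_HIGH_SHIFT)

    int main(void)
    {
        unsigned long ea[] = { 0x10000000ul, 0xf0000000ul, 0x8000000000ul };

        for (int i = 0; i < 3; i++) {
            /* Same split as get_psize(): below SLICE_LOW_TOP use the
             * low-slice index, otherwise the high-slice index. */
            if (ea[i] < SLICE_LOW_TOP)
                printf("%#lx -> low slice %lu\n", ea[i],
                       GET_LOW_SLICE_INDEX(ea[i]));
            else
                printf("%#lx -> high slice %lu\n", ea[i],
                       GET_HIGH_SLICE_INDEX(ea[i]));
        }
        return 0;
    }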