Diffstat (limited to 'arch/csky/mm')
-rw-r--r--  arch/csky/mm/cachev2.c | 45
-rw-r--r--  arch/csky/mm/fault.c   | 13
2 files changed, 51 insertions, 7 deletions
diff --git a/arch/csky/mm/cachev2.c b/arch/csky/mm/cachev2.c
index bc419f8039d3..7a9664adce43 100644
--- a/arch/csky/mm/cachev2.c
+++ b/arch/csky/mm/cachev2.c
@@ -7,8 +7,12 @@
 #include <asm/cache.h>
 #include <asm/barrier.h>
 
+/* for L1-cache */
 #define INS_CACHE		(1 << 0)
+#define DATA_CACHE		(1 << 1)
 #define CACHE_INV		(1 << 4)
+#define CACHE_CLR		(1 << 5)
+#define CACHE_OMS		(1 << 6)
 
 void local_icache_inv_all(void *priv)
 {
@@ -16,11 +20,6 @@ void local_icache_inv_all(void *priv)
 	sync_is();
 }
 
-void icache_inv_all(void)
-{
-	on_each_cpu(local_icache_inv_all, NULL, 1);
-}
-
 #ifdef CONFIG_CPU_HAS_ICACHE_INS
 void icache_inv_range(unsigned long start, unsigned long end)
 {
@@ -31,9 +30,43 @@ void icache_inv_range(unsigned long start, unsigned long end)
 	sync_is();
 }
 #else
+struct cache_range {
+	unsigned long start;
+	unsigned long end;
+};
+
+static DEFINE_SPINLOCK(cache_lock);
+
+static inline void cache_op_line(unsigned long i, unsigned int val)
+{
+	mtcr("cr22", i);
+	mtcr("cr17", val);
+}
+
+void local_icache_inv_range(void *priv)
+{
+	struct cache_range *param = priv;
+	unsigned long i = param->start & ~(L1_CACHE_BYTES - 1);
+	unsigned long flags;
+
+	spin_lock_irqsave(&cache_lock, flags);
+
+	for (; i < param->end; i += L1_CACHE_BYTES)
+		cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS);
+
+	spin_unlock_irqrestore(&cache_lock, flags);
+
+	sync_is();
+}
+
 void icache_inv_range(unsigned long start, unsigned long end)
 {
-	icache_inv_all();
+	struct cache_range param = { start, end };
+
+	if (irqs_disabled())
+		local_icache_inv_range(&param);
+	else
+		on_each_cpu(local_icache_inv_range, &param, 1);
 }
 #endif
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index f76618b630f9..4e6dc68f3258 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -18,6 +18,7 @@
 #include <linux/extable.h>
 #include <linux/uaccess.h>
 #include <linux/perf_event.h>
+#include <linux/kprobes.h>
 
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
@@ -53,6 +54,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	int fault;
 	unsigned long address = mmu_meh & PAGE_MASK;
 
+	if (kprobe_page_fault(regs, tsk->thread.trap_no))
+		return;
+
 	si_code = SEGV_MAPERR;
 
 #ifndef CONFIG_CPU_HAS_TLBI
@@ -137,7 +141,7 @@ good_area:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
-		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+		if (unlikely(!vma_is_accessible(vma)))
 			goto bad_area;
 	}
 
@@ -179,11 +183,14 @@ bad_area:
 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
+		tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
 		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 		return;
 	}
 
 no_context:
+	tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
+
 	/* Are we prepared to handle this kernel fault? */
 	if (fixup_exception(regs))
 		return;
@@ -198,6 +205,8 @@ no_context:
 	die_if_kernel("Oops", regs, write);
 
 out_of_memory:
+	tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
+
 	/*
 	 * We ran out of memory, call the OOM killer, and return the userspace
 	 * (which will retry the fault, or kill us if we got oom-killed).
@@ -206,6 +215,8 @@ out_of_memory:
 	return;
 
 do_sigbus:
+	tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
+
 	up_read(&mm->mmap_sem);
 
 	/* Kernel mode? Handle exceptions or die */
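
Note on the #else path above: when the core lacks the icache.iva instruction, the patch invalidates the range one L1 line at a time by writing the line address to cr22 and the operation bits (INS_CACHE | CACHE_INV | CACHE_OMS) to cr17, broadcasting that walk with on_each_cpu() unless interrupts are already disabled. The stand-alone sketch below only illustrates the address alignment and per-line iteration; cache_op_line_stub() and the 64-byte line size are assumptions made for the example, not kernel APIs, and no control registers are touched.

/*
 * Hypothetical user-space sketch of the per-line walk done by the new
 * local_icache_inv_range(); the stub and line size are illustrative only.
 */
#include <stdio.h>

#define L1_CACHE_BYTES	64	/* assumed line size for the sketch */

/* Stands in for the mtcr("cr22", i); mtcr("cr17", val); pair in the patch. */
static void cache_op_line_stub(unsigned long addr)
{
	printf("invalidate I-cache line at 0x%lx\n", addr);
}

/* Align start down to a line boundary, then hit every line in [start, end). */
static void icache_inv_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		cache_op_line_stub(i);
}

int main(void)
{
	/* A 100-byte region starting mid-line spans two 64-byte lines here. */
	icache_inv_range_sketch(0x1010, 0x1010 + 100);
	return 0;
}

Aligning start down to a line boundary before the loop is what guarantees the first, partially covered cache line is invalidated as well.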