Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/asm-offsets.c        |  2
-rw-r--r--  arch/mips/kernel/cpu-probe.c          | 12
-rw-r--r--  arch/mips/kernel/octeon_switch.S      |  2
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c  | 91
-rw-r--r--  arch/mips/kernel/process.c            |  2
-rw-r--r--  arch/mips/kernel/ptrace.c             |  4
-rw-r--r--  arch/mips/kernel/ptrace32.c           |  4
-rw-r--r--  arch/mips/kernel/r2300_switch.S       |  2
-rw-r--r--  arch/mips/kernel/r4k_switch.S         |  2
-rw-r--r--  arch/mips/kernel/setup.c              |  4
-rw-r--r--  arch/mips/kernel/time.c               | 15
-rw-r--r--  arch/mips/kernel/vpe.c                |  2
12 files changed, 64 insertions(+), 78 deletions(-)
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index c1cd41456d42..cbe4742d2fff 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -83,7 +83,7 @@ void output_task_defines(void)
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_MM, task_struct, mm);
OFFSET(TASK_PID, task_struct, pid);
-#if defined(CONFIG_CC_STACKPROTECTOR)
+#if defined(CONFIG_STACKPROTECTOR)
OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
#endif
DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 6b07b739f914..b2509c19cfb5 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -414,6 +414,14 @@ static int __init ftlb_disable(char *s)
__setup("noftlb", ftlb_disable);
+/*
+ * Check if the CPU has per tc perf counters
+ */
+static inline void cpu_set_mt_per_tc_perf(struct cpuinfo_mips *c)
+{
+ if (read_c0_config7() & MTI_CONF7_PTC)
+ c->options |= MIPS_CPU_MT_PER_TC_PERF_COUNTERS;
+}
static inline void check_errata(void)
{
@@ -1572,6 +1580,7 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_34K;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 34Kc";
+ cpu_set_mt_per_tc_perf(c);
break;
case PRID_IMP_74K:
c->cputype = CPU_74K;
@@ -1592,6 +1601,7 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_1004K;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 1004Kc";
+ cpu_set_mt_per_tc_perf(c);
break;
case PRID_IMP_1074K:
c->cputype = CPU_1074K;
@@ -1601,10 +1611,12 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_INTERAPTIV_UP:
c->cputype = CPU_INTERAPTIV;
__cpu_name[cpu] = "MIPS interAptiv";
+ cpu_set_mt_per_tc_perf(c);
break;
case PRID_IMP_INTERAPTIV_MP:
c->cputype = CPU_INTERAPTIV;
__cpu_name[cpu] = "MIPS interAptiv (multi)";
+ cpu_set_mt_per_tc_perf(c);
break;
case PRID_IMP_PROAPTIV_UP:
c->cputype = CPU_PROAPTIV;
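
The cpu-probe.c hunks only record the capability: cpu_set_mt_per_tc_perf()
tests the PTC bit in Config7 and sets MIPS_CPU_MT_PER_TC_PERF_COUNTERS in
c->options for the MT-capable cores (34K, 1004K, interAptiv). The perf code
further down consumes it through the cpu_has_mipsmt_pertccounters feature
macro, which is not part of this diff; assuming the usual cpu-features.h
convention for option bits, it would look roughly like:

    /* Sketch only: feature test presumed to live in cpu-features.h,
     * following the standard "options bit -> cpu_has_*" pattern. */
    #ifndef cpu_has_mipsmt_pertccounters
    # define cpu_has_mipsmt_pertccounters \
            (cpu_data[0].options & MIPS_CPU_MT_PER_TC_PERF_COUNTERS)
    #endif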
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index e42113fe2762..896080b445c2 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -61,7 +61,7 @@
#endif
3:
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_LA t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
LONG_S t9, 0(t8)
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index ee73550f0b9a..413863508f6f 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -129,20 +129,14 @@ static struct mips_pmu mipspmu;
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
-static int cpu_has_mipsmt_pertccounters;
-
static DEFINE_RWLOCK(pmuint_rwlock);
#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
-/*
- * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
- * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
- */
#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
- 0 : smp_processor_id())
+ 0 : cpu_vpe_id(&current_cpu_data))
#endif
/* Copied from op_model_mipsxx.c */
@@ -329,7 +323,11 @@ static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
+ struct perf_event *event = container_of(evt, struct perf_event, hw);
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+#ifdef CONFIG_MIPS_MT_SMP
+ unsigned int range = evt->event_base >> 24;
+#endif /* CONFIG_MIPS_MT_SMP */
WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
@@ -337,11 +335,37 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
(evt->config_base & M_PERFCTL_CONFIG_MASK) |
/* Make sure interrupt enabled. */
MIPS_PERFCTRL_IE;
- if (IS_ENABLED(CONFIG_CPU_BMIPS5000))
+
+#ifdef CONFIG_CPU_BMIPS5000
+ {
/* enable the counter for the calling thread */
cpuc->saved_ctrl[idx] |=
(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
+ }
+#else
+#ifdef CONFIG_MIPS_MT_SMP
+ if (range > V) {
+ /* The counter is processor wide. Set it up to count all TCs. */
+ pr_debug("Enabling perf counter for all TCs\n");
+ cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
+ } else
+#endif /* CONFIG_MIPS_MT_SMP */
+ {
+ unsigned int cpu, ctrl;
+
+ /*
+ * Set up the counter for a particular CPU when event->cpu is
+ * a valid CPU number. Otherwise set up the counter for the CPU
+ * scheduling this thread.
+ */
+ cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
+ ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
+ ctrl |= M_TC_EN_VPE;
+ cpuc->saved_ctrl[idx] |= ctrl;
+ pr_debug("Enabling perf counter for CPU%d\n", cpu);
+ }
+#endif /* CONFIG_CPU_BMIPS5000 */
/*
* We do not actually let the counter run. Leave it until start().
*/
@@ -655,13 +679,14 @@ static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
* event_id.
*/
#ifdef CONFIG_MIPS_MT_SMP
- return ((unsigned int)pev->range << 24) |
- (pev->cntr_mask & 0xffff00) |
- (pev->event_id & 0xff);
-#else
- return (pev->cntr_mask & 0xffff00) |
- (pev->event_id & 0xff);
-#endif
+ if (num_possible_cpus() > 1)
+ return ((unsigned int)pev->range << 24) |
+ (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+ else
+#endif /* CONFIG_MIPS_MT_SMP */
+ return ((pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff));
}
static const struct mips_perf_event *mipspmu_map_general_event(int idx)
@@ -1265,37 +1290,6 @@ static const struct mips_perf_event xlp_cache_map
},
};
-#ifdef CONFIG_MIPS_MT_SMP
-static void check_and_calc_range(struct perf_event *event,
- const struct mips_perf_event *pev)
-{
- struct hw_perf_event *hwc = &event->hw;
-
- if (event->cpu >= 0) {
- if (pev->range > V) {
- /*
- * The user selected an event that is processor
- * wide, while expecting it to be VPE wide.
- */
- hwc->config_base |= M_TC_EN_ALL;
- } else {
- /*
- * FIXME: cpu_data[event->cpu].vpe_id reports 0
- * for both CPUs.
- */
- hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
- hwc->config_base |= M_TC_EN_VPE;
- }
- } else
- hwc->config_base |= M_TC_EN_ALL;
-}
-#else
-static void check_and_calc_range(struct perf_event *event,
- const struct mips_perf_event *pev)
-{
-}
-#endif
-
static int __hw_perf_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
@@ -1331,10 +1325,6 @@ static int __hw_perf_event_init(struct perf_event *event)
*/
hwc->config_base = MIPS_PERFCTRL_IE;
- /* Calculate range bits and validate it. */
- if (num_possible_cpus() > 1)
- check_and_calc_range(event, pev);
-
hwc->event_base = mipspmu_perf_event_encode(pev);
if (PERF_TYPE_RAW == event->attr.type)
mutex_unlock(&raw_event_mutex);
@@ -1723,7 +1713,6 @@ init_hw_perf_events(void)
}
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
- cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
if (!cpu_has_mipsmt_pertccounters)
counters = counters_total_to_per_cpu(counters);
#endif
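
Taken together, the perf_event_mipsxx.c changes move the VPE targeting from
event setup (the removed check_and_calc_range()) into
mipsxx_pmu_enable_event(), and make vpe_id() use cpu_vpe_id() instead of
smp_processor_id(). The range selected by the event is still carried in the
top byte of hwc->event_base, which is why enable_event() recovers it with
"evt->event_base >> 24". A minimal sketch of that packing, using the field
names from struct mips_perf_event in this file:

    /* Sketch only: layout of the 32-bit event_base after this change.
     * On single-CPU MT configurations the range byte is omitted.
     *
     *   31       24 23             8 7           0
     *  +-----------+----------------+-------------+
     *  |   range   |   cntr_mask    |  event_id   |
     *  +-----------+----------------+-------------+
     */
    static inline unsigned int example_encode(unsigned int range,
                                              unsigned int cntr_mask,
                                              unsigned int event_id)
    {
            return (range << 24) | (cntr_mask & 0xffff00) | (event_id & 0xff);
    }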
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 3775a8d694fb..8d85046adcc8 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -180,7 +180,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
return 0;
}
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 0c0c23c9c9f5..9f6c3f2aa2e2 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -811,7 +811,7 @@ long arch_ptrace(struct task_struct *child, long request,
/*
* The odd registers are actually the high
* order bits of the values stored in the even
- * registers - unless we're using r2k_switch.S.
+ * registers.
*/
tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1);
@@ -906,7 +906,7 @@ long arch_ptrace(struct task_struct *child, long request,
/*
* The odd registers are actually the high
* order bits of the values stored in the even
- * registers - unless we're using r2k_switch.S.
+ * registers.
*/
set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1, data);
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index f30c381d3e1c..7edc629304c8 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -103,7 +103,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
/*
* The odd registers are actually the high
* order bits of the values stored in the even
- * registers - unless we're using r2k_switch.S.
+ * registers.
*/
tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1);
@@ -216,7 +216,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
/*
* The odd registers are actually the high
* order bits of the values stored in the even
- * registers - unless we're using r2k_switch.S.
+ * registers.
*/
set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1, data);
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 665897139f30..71b1aafae1bb 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -36,7 +36,7 @@ LEAF(resume)
cpu_save_nonscratch a0
sw ra, THREAD_REG31(a0)
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_LA t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
LONG_S t9, 0(t8)
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 17cf9341c1cf..58232ae6cfae 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -31,7 +31,7 @@
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_LA t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
LONG_S t9, 0(t8)
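
The octeon_switch.S, r2300_switch.S and r4k_switch.S hunks are the same
rename of CONFIG_CC_STACKPROTECTOR to CONFIG_STACKPROTECTOR around the code
that refreshes the stack-protector canary during a context switch on UP
kernels. In C terms the guarded assembly amounts to roughly the following,
where "next" is the task being switched to (register a1 in the assembly):

    /* Sketch only: C equivalent of the guarded assembly in resume(). */
    #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
            __stack_chk_guard = next->stack_canary;
    #endif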
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 563188ac6fa2..2c96c0c68116 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -93,7 +93,7 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
* If the region reaches the top of the physical address space, adjust
* the size slightly so that (start + size) doesn't overflow
*/
- if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
+ if (start + size - 1 == PHYS_ADDR_MAX)
--size;
/* Sanity check */
@@ -376,7 +376,7 @@ static void __init bootmem_init(void)
unsigned long reserved_end;
unsigned long mapstart = ~0UL;
unsigned long bootmap_size;
- phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX;
+ phys_addr_t ramstart = PHYS_ADDR_MAX;
bool bootmap_valid = false;
int i;
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index a6ebc8135112..bfe02ded25d1 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -34,21 +34,6 @@
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
-int __weak rtc_mips_set_time(unsigned long sec)
-{
- return -ENODEV;
-}
-
-int __weak rtc_mips_set_mmss(unsigned long nowtime)
-{
- return rtc_mips_set_time(nowtime);
-}
-
-int update_persistent_clock(struct timespec now)
-{
- return rtc_mips_set_mmss(now.tv_sec);
-}
-
static int null_perf_irq(void)
{
return 0;
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 544ea21bfef9..0bef238d2c0c 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -872,7 +872,7 @@ static ssize_t vpe_write(struct file *file, const char __user *buffer,
return -ENODEV;
if ((count + v->len) > v->plen) {
- pr_warn("VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
+ pr_warn("VPE loader: elf size too big. Perhaps strip unneeded symbols\n");
return -ENOMEM;
}