Diffstat (limited to 'arch/avr32/kernel')
-rw-r--r--  arch/avr32/kernel/Makefile        |   2
-rw-r--r--  arch/avr32/kernel/entry-avr32b.S  |  20
-rw-r--r--  arch/avr32/kernel/process.c       |   6
-rw-r--r--  arch/avr32/kernel/semaphore.c     | 148
-rw-r--r--  arch/avr32/kernel/setup.c         |   2
-rw-r--r--  arch/avr32/kernel/time.c          | 248
6 files changed, 92 insertions, 334 deletions
diff --git a/arch/avr32/kernel/Makefile b/arch/avr32/kernel/Makefile
index e4b6d122b033..18229d0d1861 100644
--- a/arch/avr32/kernel/Makefile
+++ b/arch/avr32/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o vmlinux.lds
obj-$(CONFIG_SUBARCH_AVR32B) += entry-avr32b.o
obj-y += syscall_table.o syscall-stubs.o irq.o
-obj-y += setup.o traps.o semaphore.o ocd.o ptrace.o
+obj-y += setup.o traps.o ocd.o ptrace.o
obj-y += signal.o sys_avr32.o process.o time.o
obj-y += init_task.o switch_to.o cpu.o
obj-$(CONFIG_MODULES) += module.o avr32_ksyms.o
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 8cf16d7a7040..5f31702d6b1c 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -741,26 +741,6 @@ irq_level\level:
.section .irq.text,"ax",@progbits
-.global cpu_idle_sleep
-cpu_idle_sleep:
- mask_interrupts
- get_thread_info r8
- ld.w r9, r8[TI_flags]
- bld r9, TIF_NEED_RESCHED
- brcs cpu_idle_enable_int_and_exit
- sbr r9, TIF_CPU_GOING_TO_SLEEP
- st.w r8[TI_flags], r9
- unmask_interrupts
- sleep 0
-cpu_idle_skip_sleep:
- mask_interrupts
- ld.w r9, r8[TI_flags]
- cbr r9, TIF_CPU_GOING_TO_SLEEP
- st.w r8[TI_flags], r9
-cpu_idle_enable_int_and_exit:
- unmask_interrupts
- retal r12
-
.global irq_level0
.global irq_level1
.global irq_level2
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 7f4af0b1e111..6cf9df176274 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -18,11 +18,11 @@
#include <asm/sysreg.h>
#include <asm/ocd.h>
+#include <asm/arch/pm.h>
+
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
-extern void cpu_idle_sleep(void);
-
/*
* This file handles the architecture-dependent parts of process handling..
*/
@@ -54,6 +54,8 @@ void machine_halt(void)
void machine_power_off(void)
{
+ if (pm_power_off)
+ pm_power_off();
}
void machine_restart(char *cmd)
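
Aside, purely illustrative and not part of the patch: machine_power_off() now defers to the pm_power_off hook declared above it. A minimal sketch, with hypothetical names and a commented-out register write, of how board or PM code might install such a handler:

/*
 * Illustrative only: installing the hook that machine_power_off() now
 * calls.  The handler and the register write are hypothetical; real
 * boards talk to their own power-management controller here.
 */
#include <linux/init.h>
#include <linux/pm.h>			/* declares pm_power_off */

static void my_board_power_off(void)
{
	/* ask the board's power controller to cut power, e.g.: */
	/* writel(SHUTDOWN_CMD, pm_controller_base + CTRL_REG); */
	for (;;)
		;			/* must not return */
}

static int __init my_board_pm_init(void)
{
	pm_power_off = my_board_power_off;
	return 0;
}
arch_initcall(my_board_pm_init);
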
diff --git a/arch/avr32/kernel/semaphore.c b/arch/avr32/kernel/semaphore.c
deleted file mode 100644
index 1e2705a05016..000000000000
--- a/arch/avr32/kernel/semaphore.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * AVR32 sempahore implementation.
- *
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * Based on linux/arch/i386/kernel/semaphore.c
- * Copyright (C) 1999 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <asm/semaphore.h>
-#include <asm/atomic.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- * - only on a boundary condition do we need to care. When we go
- * from a negative count to a non-negative, we wake people up.
- * - when we go from a non-negative count to a negative do we
- * (a) synchronize with the "sleeper" count and (b) make sure
- * that we're on the wakeup list before we synchronize so that
- * we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
- wake_up(&sem->wait);
-}
-EXPORT_SYMBOL(__up);
-
-void __sched __down(struct semaphore *sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- unsigned long flags;
-
- tsk->state = TASK_UNINTERRUPTIBLE;
- spin_lock_irqsave(&sem->wait.lock, flags);
- add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock in
- * the wait_queue_head.
- */
- if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- schedule();
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- tsk->state = TASK_UNINTERRUPTIBLE;
- }
- remove_wait_queue_locked(&sem->wait, &wait);
- wake_up_locked(&sem->wait);
- spin_unlock_irqrestore(&sem->wait.lock, flags);
- tsk->state = TASK_RUNNING;
-}
-EXPORT_SYMBOL(__down);
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
- int retval = 0;
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- unsigned long flags;
-
- tsk->state = TASK_INTERRUPTIBLE;
- spin_lock_irqsave(&sem->wait.lock, flags);
- add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * With signals pending, this turns into the trylock
- * failure case - we won't be sleeping, and we can't
- * get the lock as it has contention. Just correct the
- * count and exit.
- */
- if (signal_pending(current)) {
- retval = -EINTR;
- sem->sleepers = 0;
- atomic_add(sleepers, &sem->count);
- break;
- }
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock in
- * the wait_queue_head.
- */
- if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- schedule();
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- tsk->state = TASK_INTERRUPTIBLE;
- }
- remove_wait_queue_locked(&sem->wait, &wait);
- wake_up_locked(&sem->wait);
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- tsk->state = TASK_RUNNING;
- return retval;
-}
-EXPORT_SYMBOL(__down_interruptible);
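
Aside, purely illustrative and not part of the patch: with the arch-specific contended slow path removed, AVR32 falls back on the common semaphore code, and driver-side usage is unchanged. A minimal sketch of that usage, with hypothetical names, assuming the generic <linux/semaphore.h> header that replaces <asm/semaphore.h>:

/*
 * Illustrative only: typical semaphore usage from a driver's point of
 * view, unaffected by dropping the arch-specific __down()/__up().
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/semaphore.h>

static struct semaphore my_sem;			/* hypothetical lock */

static int __init my_driver_init(void)
{
	sema_init(&my_sem, 1);			/* binary semaphore */
	return 0;
}

static int my_critical_op(void)
{
	if (down_interruptible(&my_sem))	/* sleeps; fails on a signal */
		return -ERESTARTSYS;

	/* ... exclusive section ... */

	up(&my_sem);				/* release; wakes one waiter */
	return 0;
}
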
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index 2687b730e2d0..ce48c14f4349 100644
--- a/arch/avr32/kernel/setup.c
+++ b/arch/avr32/kernel/setup.c
@@ -274,6 +274,8 @@ static int __init early_parse_fbmem(char *p)
printk(KERN_WARNING
"Failed to allocate framebuffer memory\n");
fbmem_size = 0;
+ } else {
+ memset(__va(fbmem_start), 0, fbmem_size);
}
}
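
Aside, purely illustrative and not part of the patch: the hunk above sits inside setup.c's early_parse_fbmem() handler, and the new else branch zeroes the freshly reserved framebuffer memory. For context, a minimal sketch of the early_param pattern such a handler follows, with made-up option and variable names:

/*
 * Illustrative only: the early_param pattern that early_parse_fbmem()
 * follows.  Option and variable names here are hypothetical.
 */
#include <linux/init.h>
#include <linux/kernel.h>		/* memparse() */

static unsigned long long my_opt_size;

static int __init early_parse_my_opt(char *p)
{
	my_opt_size = memparse(p, &p);	/* accepts K/M/G suffixes */
	return 0;
}
early_param("my_opt", early_parse_my_opt);
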
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 36a46c3ae308..00a9862380ff 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -1,16 +1,12 @@
/*
* Copyright (C) 2004-2007 Atmel Corporation
*
- * Based on MIPS implementation arch/mips/kernel/time.c
- * Copyright 2001 MontaVista Software Inc.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
#include <linux/clk.h>
-#include <linux/clocksource.h>
+#include <linux/clockchips.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -27,207 +23,133 @@
#include <asm/io.h>
#include <asm/sections.h>
-/* how many counter cycles in a jiffy? */
-static u32 cycles_per_jiffy;
+#include <asm/arch/pm.h>
-/* the count value for the next timer interrupt */
-static u32 expirelo;
-cycle_t __weak read_cycle_count(void)
+static cycle_t read_cycle_count(void)
{
return (cycle_t)sysreg_read(COUNT);
}
-struct clocksource __weak clocksource_avr32 = {
- .name = "avr32",
- .rating = 350,
+/*
+ * The architectural cycle count registers are a fine clocksource unless
+ * the system idle loop uses sleep states like "idle": the CPU cycles
+ * measured by COUNT (and COMPARE) don't happen during sleep states.
+ * Their duration also changes if cpufreq changes the CPU clock rate.
+ * So we rate the clocksource using COUNT as very low quality.
+ */
+static struct clocksource counter = {
+ .name = "avr32_counter",
+ .rating = 50,
.read = read_cycle_count,
.mask = CLOCKSOURCE_MASK(32),
.shift = 16,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-irqreturn_t __weak timer_interrupt(int irq, void *dev_id);
-
-struct irqaction timer_irqaction = {
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED,
- .name = "timer",
-};
-
-/*
- * By default we provide the null RTC ops
- */
-static unsigned long null_rtc_get_time(void)
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
- return mktime(2007, 1, 1, 0, 0, 0);
-}
-
-static int null_rtc_set_time(unsigned long sec)
-{
- return 0;
-}
+ struct clock_event_device *evdev = dev_id;
-static unsigned long (*rtc_get_time)(void) = null_rtc_get_time;
-static int (*rtc_set_time)(unsigned long) = null_rtc_set_time;
-
-static void avr32_timer_ack(void)
-{
- u32 count;
-
- /* Ack this timer interrupt and set the next one */
- expirelo += cycles_per_jiffy;
- /* setting COMPARE to 0 stops the COUNT-COMPARE */
- if (expirelo == 0) {
- sysreg_write(COMPARE, expirelo + 1);
- } else {
- sysreg_write(COMPARE, expirelo);
- }
+ /*
+ * Disable the interrupt until the clockevent subsystem
+ * reprograms it.
+ */
+ sysreg_write(COMPARE, 0);
- /* Check to see if we have missed any timer interrupts */
- count = sysreg_read(COUNT);
- if ((count - expirelo) < 0x7fffffff) {
- expirelo = count + cycles_per_jiffy;
- sysreg_write(COMPARE, expirelo);
- }
+ evdev->event_handler(evdev);
+ return IRQ_HANDLED;
}
-int __weak avr32_hpt_init(void)
-{
- int ret;
- unsigned long mult, shift, count_hz;
-
- count_hz = clk_get_rate(boot_cpu_data.clk);
- shift = clocksource_avr32.shift;
- mult = clocksource_hz2mult(count_hz, shift);
- clocksource_avr32.mult = mult;
-
- {
- u64 tmp;
-
- tmp = TICK_NSEC;
- tmp <<= shift;
- tmp += mult / 2;
- do_div(tmp, mult);
-
- cycles_per_jiffy = tmp;
- }
+static struct irqaction timer_irqaction = {
+ .handler = timer_interrupt,
+ .flags = IRQF_TIMER | IRQF_DISABLED,
+ .name = "avr32_comparator",
+};
- ret = setup_irq(0, &timer_irqaction);
- if (ret) {
- pr_debug("timer: could not request IRQ 0: %d\n", ret);
- return -ENODEV;
- }
+static int comparator_next_event(unsigned long delta,
+ struct clock_event_device *evdev)
+{
+ unsigned long flags;
- printk(KERN_INFO "timer: AT32AP COUNT-COMPARE at irq 0, "
- "%lu.%03lu MHz\n",
- ((count_hz + 500) / 1000) / 1000,
- ((count_hz + 500) / 1000) % 1000);
+ raw_local_irq_save(flags);
- return 0;
-}
+ /* The time to read COUNT then update COMPARE must be less
+ * than the min_delta_ns value for this clockevent source.
+ */
+ sysreg_write(COMPARE, (sysreg_read(COUNT) + delta) ? : 1);
-/*
- * Taken from MIPS c0_hpt_timer_init().
- *
- * The reason COUNT is written twice is probably to make sure we don't get any
- * timer interrupts while we are messing with the counter.
- */
-int __weak avr32_hpt_start(void)
-{
- u32 count = sysreg_read(COUNT);
- expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
- sysreg_write(COUNT, expirelo - cycles_per_jiffy);
- sysreg_write(COMPARE, expirelo);
- sysreg_write(COUNT, count);
+ raw_local_irq_restore(flags);
return 0;
}
-/*
- * local_timer_interrupt() does profiling and process accounting on a
- * per-CPU basis.
- *
- * In UP mode, it is invoked from the (global) timer_interrupt.
- */
-void local_timer_interrupt(int irq, void *dev_id)
+static void comparator_mode(enum clock_event_mode mode,
+ struct clock_event_device *evdev)
{
- if (current->pid)
- profile_tick(CPU_PROFILING);
- update_process_times(user_mode(get_irq_regs()));
+ switch (mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ pr_debug("%s: start\n", evdev->name);
+ /* FALLTHROUGH */
+ case CLOCK_EVT_MODE_RESUME:
+ cpu_disable_idle_sleep();
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ sysreg_write(COMPARE, 0);
+ pr_debug("%s: stop\n", evdev->name);
+ cpu_enable_idle_sleep();
+ break;
+ default:
+ BUG();
+ }
}
-irqreturn_t __weak timer_interrupt(int irq, void *dev_id)
-{
- /* ack timer interrupt and try to set next interrupt */
- avr32_timer_ack();
-
- /*
- * Call the generic timer interrupt handler
- */
- write_seqlock(&xtime_lock);
- do_timer(1);
- write_sequnlock(&xtime_lock);
-
- /*
- * In UP mode, we call local_timer_interrupt() to do profiling
- * and process accounting.
- *
- * SMP is not supported yet.
- */
- local_timer_interrupt(irq, dev_id);
-
- return IRQ_HANDLED;
-}
+static struct clock_event_device comparator = {
+ .name = "avr32_comparator",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 16,
+ .rating = 50,
+ .cpumask = CPU_MASK_CPU0,
+ .set_next_event = comparator_next_event,
+ .set_mode = comparator_mode,
+};
void __init time_init(void)
{
+ unsigned long counter_hz;
int ret;
- /*
- * Make sure we don't get any COMPARE interrupts before we can
- * handle them.
- */
- sysreg_write(COMPARE, 0);
-
- xtime.tv_sec = rtc_get_time();
+ xtime.tv_sec = mktime(2007, 1, 1, 0, 0, 0);
xtime.tv_nsec = 0;
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
- ret = avr32_hpt_init();
- if (ret) {
- pr_debug("timer: failed setup: %d\n", ret);
- return;
- }
+ /* figure rate for counter */
+ counter_hz = clk_get_rate(boot_cpu_data.clk);
+ counter.mult = clocksource_hz2mult(counter_hz, counter.shift);
- ret = clocksource_register(&clocksource_avr32);
+ ret = clocksource_register(&counter);
if (ret)
pr_debug("timer: could not register clocksource: %d\n", ret);
- ret = avr32_hpt_start();
- if (ret) {
- pr_debug("timer: failed starting: %d\n", ret);
- return;
- }
-}
+ /* setup COMPARE clockevent */
+ comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
+ comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
+ comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;
-static struct sysdev_class timer_class = {
- .name = "timer",
-};
+ sysreg_write(COMPARE, 0);
+ timer_irqaction.dev_id = &comparator;
-static struct sys_device timer_device = {
- .id = 0,
- .cls = &timer_class,
-};
+ ret = setup_irq(0, &timer_irqaction);
+ if (ret)
+ pr_debug("timer: could not request IRQ 0: %d\n", ret);
+ else {
+ clockevents_register_device(&comparator);
-static int __init init_timer_sysfs(void)
-{
- int err = sysdev_class_register(&timer_class);
- if (!err)
- err = sysdev_register(&timer_device);
- return err;
+ pr_info("%s: irq 0, %lu.%03lu MHz\n", comparator.name,
+ ((counter_hz + 500) / 1000) / 1000,
+ ((counter_hz + 500) / 1000) % 1000);
+ }
}
-
-device_initcall(init_timer_sysfs);
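
Aside, purely illustrative and not part of the patch: the new time.c leans on the kernel's clocksource/clockevent mult-shift arithmetic (clocksource_hz2mult(), div_sc(), clockevent_delta2ns()). A stand-alone user-space sketch of that arithmetic, assuming a hypothetical 60 MHz COUNT clock; the real rate comes from clk_get_rate(boot_cpu_data.clk):

/*
 * Stand-alone sketch (user space, not kernel code) of the mult/shift
 * conversions used above.  The 60 MHz clock rate is an assumption for
 * illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define SHIFT        16		/* same shift as counter/comparator above */

int main(void)
{
	uint64_t hz = 60000000;	/* assumed COUNT/COMPARE rate */

	/* clocksource_hz2mult(): mult/2^shift = nanoseconds per cycle */
	uint64_t cs_mult = ((NSEC_PER_SEC << SHIFT) + hz / 2) / hz;

	/* div_sc(): mult/2^shift = cycles per nanosecond */
	uint64_t ce_mult = (hz << SHIFT) / NSEC_PER_SEC;

	/* clockevent_delta2ns(): longest/shortest programmable delay */
	uint64_t max_ns = (0xffffffffULL << SHIFT) / ce_mult;
	uint64_t min_ns = (50ULL << SHIFT) / ce_mult + 1;

	/* programming a 1 ms event: ticks = delta_ns * mult >> shift */
	uint64_t ticks = (1000000ULL * ce_mult) >> SHIFT;

	printf("clocksource mult %llu, clockevent mult %llu\n",
	       (unsigned long long)cs_mult, (unsigned long long)ce_mult);
	printf("max_delta_ns %llu, min_delta_ns %llu, 1 ms -> %llu ticks\n",
	       (unsigned long long)max_ns, (unsigned long long)min_ns,
	       (unsigned long long)ticks);
	return 0;
}

With these assumed numbers the clockevent mult comes out to 3932, a 1 ms delta programs roughly 60 000 COMPARE ticks, and the 32-bit comparator can be scheduled about 71.6 seconds ahead.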