Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--   arch/arm/include/asm/atomic.h   | 26 +++++++-------------------
-rw-r--r--   arch/arm/include/asm/cache.h    |  2 +-
-rw-r--r--   arch/arm/include/asm/cputype.h  | 10 +++++++++-
-rw-r--r--   arch/arm/include/asm/mach/mmc.h | 17 -----------------
-rw-r--r--   arch/arm/include/asm/tcm.h      | 31 +++++++++++++++++++++++++++
-rw-r--r--   arch/arm/include/asm/unified.h  |  4 ++++
6 files changed, 52 insertions, 38 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9ed2377fe8e5..d0daeab2234e 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -19,31 +19,21 @@
 
 #ifdef __KERNEL__
 
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
 #define atomic_read(v)	((v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
 
 /*
  * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
  * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.  Writing to 'v->counter'
- * without using the following operations WILL break the atomic
- * nature of these ops.
+ * to ensure that the update happens.
  */
-static inline void atomic_set(atomic_t *v, int i)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__("@ atomic_set\n"
-"1:	ldrex	%0, [%1]\n"
-"	strex	%0, %2, [%1]\n"
-"	teq	%0, #0\n"
-"	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long tmp;
@@ -163,8 +153,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#define atomic_set(v,i)	(((v)->counter) = (i))
-
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
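Note on the atomic.h hunks above: atomic_set() becomes a plain C store on ARMv6+ as well, because every exception return now clears the exclusive monitor (clrex, or a dummy strex), so an ordinary str can no longer pair with a stale ldrex reservation held by interrupted code. A minimal usage sketch built against this header follows; the counter and function names are hypothetical and not part of the patch.

#include <asm/atomic.h>	/* atomic_t, ATOMIC_INIT, atomic_*() */

static atomic_t demo_count = ATOMIC_INIT(0);	/* hypothetical counter */

static void demo_reset_and_bump(void)
{
	/*
	 * atomic_set() now compiles to an ordinary store; it stays safe
	 * against concurrent ldrex/strex users of demo_count because any
	 * intervening exception return clears the exclusive monitor.
	 */
	atomic_set(&demo_count, 0);
	atomic_add(1, &demo_count);
}

With this patch the same plain-store atomic_set() is used on every architecture level, which is why the pre-ARMv6 fallback definition at the bottom of the file can be removed.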
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index feaa75f0013e..66c160b8547f 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,7 +4,7 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H
 
-#define L1_CACHE_SHIFT		5
+#define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
 /*
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index b3e656c6fb78..20ae96cc0020 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -63,6 +63,11 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
 	return read_cpuid(CPUID_CACHETYPE);
 }
 
+static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
+{
+	return read_cpuid(CPUID_TCM);
+}
+
 /*
  * Intel's XScale3 core supports some v6 features (supersections, L2)
  * but advertises itself as v5 as it does not support the v6 ISA.  For
@@ -73,7 +78,10 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
 #else
 static inline int cpu_is_xsc3(void)
 {
-	if ((read_cpuid_id() & 0xffffe000) == 0x69056000)
+	unsigned int id;
+	id = read_cpuid_id() & 0xffffe000;
+	/* It covers both Intel ID and Marvell ID */
+	if ((id == 0x69056000) || (id == 0x56056000))
 		return 1;
 
 	return 0;
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
deleted file mode 100644
index b490ecc79def..000000000000
--- a/arch/arm/include/asm/mach/mmc.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * arch/arm/include/asm/mach/mmc.h
- */
-#ifndef ASMARM_MACH_MMC_H
-#define ASMARM_MACH_MMC_H
-
-#include <linux/mmc/host.h>
-
-struct mmc_platform_data {
-	unsigned int ocr_mask;			/* available voltages */
-	u32 (*translate_vdd)(struct device *, unsigned int);
-	unsigned int (*status)(struct device *);
-	int gpio_wp;
-	int gpio_cd;
-};
-
-#endif
diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h
new file mode 100644
index 000000000000..5929ef5d927a
--- /dev/null
+++ b/arch/arm/include/asm/tcm.h
@@ -0,0 +1,31 @@
+/*
+ *
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ */
+#ifndef __ASMARM_TCM_H
+#define __ASMARM_TCM_H
+
+#ifndef CONFIG_HAVE_TCM
+#error "You should not be including tcm.h unless you have a TCM!"
+#endif
+
+#include <linux/compiler.h>
+
+/* Tag variables with this */
+#define __tcmdata __section(.tcm.data)
+/* Tag constants with this */
+#define __tcmconst __section(.tcm.rodata)
+/* Tag functions inside TCM called from outside TCM with this */
+#define __tcmfunc __attribute__((long_call)) __section(.tcm.text) noinline
+/* Tag function inside TCM called from inside TCM with this */
+#define __tcmlocalfunc __section(.tcm.text)
+
+void *tcm_alloc(size_t len);
+void tcm_free(void *addr, size_t len);
+
+#endif
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index 073e85b9b961..bc631161e9c6 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -35,7 +35,9 @@
 
 #define ARM(x...)
 #define THUMB(x...)	x
+#ifdef __ASSEMBLY__
 #define W(instr)	instr.w
+#endif
 #define BSYM(sym)	sym + 1
 
 #else	/* !CONFIG_THUMB2_KERNEL */
@@ -45,7 +47,9 @@
 
 #define ARM(x...)	x
 #define THUMB(x...)
+#ifdef __ASSEMBLY__
 #define W(instr)	instr
+#endif
 #define BSYM(sym)	sym
 
 #endif	/* CONFIG_THUMB2_KERNEL */
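Note on the new tcm.h above: it offers two ways to use tightly coupled memory, link-time placement through the __tcmdata/__tcmconst/__tcmfunc/__tcmlocalfunc section annotations, and run-time allocation through tcm_alloc()/tcm_free(). A rough sketch of how a driver might use both follows, assuming CONFIG_HAVE_TCM is enabled; the buffer and function names are invented for illustration, and tcm_alloc() returning NULL on failure is an assumption, not something this header states.

#include <linux/errno.h>
#include <asm/tcm.h>

/* Placed in the data TCM by the linker */
static int fast_table[16] __tcmdata;

/* Placed in the instruction TCM; long_call lets ordinary kernel text reach it */
int __tcmfunc fast_sum(void)
{
	int i, sum = 0;

	for (i = 0; i < 16; i++)
		sum += fast_table[i];
	return sum;
}

/* Run-time allocation from the TCM pool */
static void *tcm_scratch;

static int fast_setup(void)
{
	tcm_scratch = tcm_alloc(64);	/* assumed to return NULL on failure */
	if (!tcm_scratch)
		return -ENOMEM;
	return 0;
}

static void fast_teardown(void)
{
	tcm_free(tcm_scratch, 64);
}

Per the header comments, __tcmfunc is for TCM-resident functions called from normal kernel text (hence long_call and noinline), while calls made from within TCM can use __tcmlocalfunc and a normal short call.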