Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/atomic-long.h      58
-rw-r--r--  include/asm-generic/atomic.h           47
-rw-r--r--  include/asm-generic/atomic64.h         15
-rw-r--r--  include/asm-generic/barrier.h          41
-rw-r--r--  include/asm-generic/cputime_nsecs.h     2
-rw-r--r--  include/asm-generic/futex.h             8
-rw-r--r--  include/asm-generic/io.h               75
-rw-r--r--  include/asm-generic/iomap.h             8
-rw-r--r--  include/asm-generic/mutex-dec.h         2
-rw-r--r--  include/asm-generic/mutex-xchg.h        6
-rw-r--r--  include/asm-generic/pgtable.h           8
-rw-r--r--  include/asm-generic/preempt.h           4
-rw-r--r--  include/asm-generic/qspinlock.h        37
-rw-r--r--  include/asm-generic/rtc.h             247
-rw-r--r--  include/asm-generic/rwsem.h            35
-rw-r--r--  include/asm-generic/seccomp.h          14
-rw-r--r--  include/asm-generic/siginfo.h          15
-rw-r--r--  include/asm-generic/tlb.h              59
-rw-r--r--  include/asm-generic/vmlinux.lds.h      20
19 files changed, 369 insertions, 332 deletions
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 5e1f345b58dd..288cc9e96395 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -112,6 +112,62 @@ static __always_inline void atomic_long_dec(atomic_long_t *l)
 	ATOMIC_LONG_PFX(_dec)(v);
 }
 
+#define ATOMIC_LONG_FETCH_OP(op, mo)					\
+static inline long							\
+atomic_long_fetch_##op##mo(long i, atomic_long_t *l)			\
+{									\
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
+									\
+	return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v);		\
+}
+
+ATOMIC_LONG_FETCH_OP(add, )
+ATOMIC_LONG_FETCH_OP(add, _relaxed)
+ATOMIC_LONG_FETCH_OP(add, _acquire)
+ATOMIC_LONG_FETCH_OP(add, _release)
+ATOMIC_LONG_FETCH_OP(sub, )
+ATOMIC_LONG_FETCH_OP(sub, _relaxed)
+ATOMIC_LONG_FETCH_OP(sub, _acquire)
+ATOMIC_LONG_FETCH_OP(sub, _release)
+ATOMIC_LONG_FETCH_OP(and, )
+ATOMIC_LONG_FETCH_OP(and, _relaxed)
+ATOMIC_LONG_FETCH_OP(and, _acquire)
+ATOMIC_LONG_FETCH_OP(and, _release)
+ATOMIC_LONG_FETCH_OP(andnot, )
+ATOMIC_LONG_FETCH_OP(andnot, _relaxed)
+ATOMIC_LONG_FETCH_OP(andnot, _acquire)
+ATOMIC_LONG_FETCH_OP(andnot, _release)
+ATOMIC_LONG_FETCH_OP(or, )
+ATOMIC_LONG_FETCH_OP(or, _relaxed)
+ATOMIC_LONG_FETCH_OP(or, _acquire)
+ATOMIC_LONG_FETCH_OP(or, _release)
+ATOMIC_LONG_FETCH_OP(xor, )
+ATOMIC_LONG_FETCH_OP(xor, _relaxed)
+ATOMIC_LONG_FETCH_OP(xor, _acquire)
+ATOMIC_LONG_FETCH_OP(xor, _release)
+
+#undef ATOMIC_LONG_FETCH_OP
+
+#define ATOMIC_LONG_FETCH_INC_DEC_OP(op, mo)				\
+static inline long							\
+atomic_long_fetch_##op##mo(atomic_long_t *l)				\
+{									\
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
+									\
+	return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(v);		\
+}
+
+ATOMIC_LONG_FETCH_INC_DEC_OP(inc,)
+ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _relaxed)
+ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _acquire)
+ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _release)
+ATOMIC_LONG_FETCH_INC_DEC_OP(dec,)
+ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _relaxed)
+ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _acquire)
+ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release)
+
+#undef ATOMIC_LONG_FETCH_INC_DEC_OP
+
 #define ATOMIC_LONG_OP(op)						\
 static __always_inline void						\
 atomic_long_##op(long i, atomic_long_t *l)				\
@@ -124,9 +180,9 @@ atomic_long_##op(long i, atomic_long_t *l)			\
 ATOMIC_LONG_OP(add)
 ATOMIC_LONG_OP(sub)
 ATOMIC_LONG_OP(and)
+ATOMIC_LONG_OP(andnot)
 ATOMIC_LONG_OP(or)
 ATOMIC_LONG_OP(xor)
-ATOMIC_LONG_OP(andnot)
 
 #undef ATOMIC_LONG_OP
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 74f1a3704d7a..9ed8b987185b 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -61,6 +61,18 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	return c c_op i;						\
 }
 
+#define ATOMIC_FETCH_OP(op, c_op)					\
+static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	int c, old;							\
+									\
+	c = v->counter;							\
+	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
+		c = old;						\
+									\
+	return c;							\
+}
+
 #else
 
 #include <linux/irqflags.h>
@@ -88,6 +100,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	return ret;							\
 }
 
+#define ATOMIC_FETCH_OP(op, c_op)					\
+static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	unsigned long flags;						\
+	int ret;							\
+									\
+	raw_local_irq_save(flags);					\
+	ret = v->counter;						\
+	v->counter = v->counter c_op i;					\
+	raw_local_irq_restore(flags);					\
+									\
+	return ret;							\
+}
+
 #endif /* CONFIG_SMP */
 
 #ifndef atomic_add_return
@@ -98,6 +124,26 @@ ATOMIC_OP_RETURN(add, +)
 ATOMIC_OP_RETURN(sub, -)
 #endif
 
+#ifndef atomic_fetch_add
+ATOMIC_FETCH_OP(add, +)
+#endif
+
+#ifndef atomic_fetch_sub
+ATOMIC_FETCH_OP(sub, -)
+#endif
+
+#ifndef atomic_fetch_and
+ATOMIC_FETCH_OP(and, &)
+#endif
+
+#ifndef atomic_fetch_or
+ATOMIC_FETCH_OP(or, |)
+#endif
+
+#ifndef atomic_fetch_xor
+ATOMIC_FETCH_OP(xor, ^)
+#endif
+
 #ifndef atomic_and
 ATOMIC_OP(and, &)
 #endif
@@ -110,6 +156,7 @@ ATOMIC_OP(or, |)
 ATOMIC_OP(xor, ^)
 #endif
 
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
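The new atomic_fetch_*() ops differ from the existing atomic_*_return() ops in what they hand back: fetch variants return the value the counter held before the operation, while *_return variants return the result. A minimal userspace sketch of that contract using C11 <stdatomic.h> (an analog for illustration, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int v = 10;

	/* fetch-op: returns the old value (10); v becomes 15 */
	int old = atomic_fetch_add(&v, 5);
	/* *_return semantics can be recovered as fetch_add(i) + i */
	int ret = atomic_fetch_add(&v, 5) + 5;

	printf("old=%d ret=%d final=%d\n", old, ret, atomic_load(&v));
	return 0;
}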
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index d48e78ccad3d..dad68bf46c77 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -27,16 +27,23 @@ extern void	 atomic64_##op(long long a, atomic64_t *v);
 #define ATOMIC64_OP_RETURN(op)						\
 extern long long atomic64_##op##_return(long long a, atomic64_t *v);
 
-#define ATOMIC64_OPS(op)	ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+#define ATOMIC64_FETCH_OP(op)						\
+extern long long atomic64_fetch_##op(long long a, atomic64_t *v);
+
+#define ATOMIC64_OPS(op)	ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
 
 ATOMIC64_OPS(add)
 ATOMIC64_OPS(sub)
 
-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op)	ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op)
+
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
 
 #undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 1cceca146905..fe297b599b0a 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -194,7 +194,7 @@ do {									\
 })
 #endif
 
-#endif
+#endif	/* CONFIG_SMP */
 
 /* Barriers for virtual machine guests when talking to an SMP host */
 #define virt_mb() __smp_mb()
@@ -207,5 +207,44 @@ do {									\
 #define virt_store_release(p, v) __smp_store_release(p, v)
 #define virt_load_acquire(p) __smp_load_acquire(p)
 
+/**
+ * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
+ *
+ * A control dependency provides a LOAD->STORE order, the additional RMB
+ * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
+ * aka. (load)-ACQUIRE.
+ *
+ * Architectures that do not do load speculation can have this be barrier().
+ */
+#ifndef smp_acquire__after_ctrl_dep
+#define smp_acquire__after_ctrl_dep()	smp_rmb()
+#endif
+
+/**
+ * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * @ptr: pointer to the variable to wait on
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using smp_load_acquire() on the condition variable but employs
+ * the control dependency of the wait to reduce the barrier on many platforms.
+ *
+ * Due to C lacking lambda expressions we load the value of *ptr into a
+ * pre-named variable @VAL to be used in @cond.
+ */
+#ifndef smp_cond_load_acquire
+#define smp_cond_load_acquire(ptr, cond_expr) ({		\
+	typeof(ptr) __PTR = (ptr);				\
+	typeof(*ptr) VAL;					\
+	for (;;) {						\
+		VAL = READ_ONCE(*__PTR);			\
+		if (cond_expr)					\
+			break;					\
+		cpu_relax();					\
+	}							\
+	smp_acquire__after_ctrl_dep();				\
+	VAL;							\
+})
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
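The smp_cond_load_acquire() loop above spins with plain loads and only pays for ACQUIRE ordering once the condition holds. A userspace sketch of the same shape in C11 atomics, where the explicit acquire fence stands in for smp_acquire__after_ctrl_dep() (an illustrative analog, not the kernel macro):

#include <stdatomic.h>

static int wait_for_nonzero(atomic_int *p)
{
	int val;

	for (;;) {
		val = atomic_load_explicit(p, memory_order_relaxed);
		if (val)	/* the cond_expr, evaluated on VAL */
			break;
		/* cpu_relax() would go here in the kernel version */
	}
	atomic_thread_fence(memory_order_acquire);
	return val;
}

int main(void)
{
	atomic_int flag = 1;	/* already "published" for the demo */
	return wait_for_nonzero(&flag) ? 0 : 1;
}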
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 0f1c6f315cdc..a84e28e0c634 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -50,6 +50,8 @@ typedef u64 __nocast cputime64_t;
 	(__force u64)(__ct)
 #define nsecs_to_cputime(__nsecs)	\
 	(__force cputime_t)(__nsecs)
+#define nsecs_to_cputime64(__nsecs)	\
+	(__force cputime64_t)(__nsecs)
 
 
 /*
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index e56272c919b5..bf2d34c9d804 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	u32 val;
 
 	preempt_disable();
-	if (unlikely(get_user(val, uaddr) != 0))
+	if (unlikely(get_user(val, uaddr) != 0)) {
+		preempt_enable();
 		return -EFAULT;
+	}
 
-	if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
+	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+		preempt_enable();
 		return -EFAULT;
+	}
 
 	*uval = val;
 	preempt_enable();
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index eed3bbe88c8a..7ef015eb3403 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -191,7 +191,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
 #define readl_relaxed readl
 #endif
 
-#ifndef readq_relaxed
+#if defined(readq) && !defined(readq_relaxed)
 #define readq_relaxed readq
 #endif
 
@@ -207,7 +207,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
 #define writel_relaxed writel
 #endif
 
-#ifndef writeq_relaxed
+#if defined(writeq) && !defined(writeq_relaxed)
 #define writeq_relaxed writeq
 #endif
 
@@ -585,6 +585,16 @@ static inline u32 ioread32(const volatile void __iomem *addr)
 }
 #endif
 
+#ifdef CONFIG_64BIT
+#ifndef ioread64
+#define ioread64 ioread64
+static inline u64 ioread64(const volatile void __iomem *addr)
+{
+	return readq(addr);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
 #ifndef iowrite8
 #define iowrite8 iowrite8
 static inline void iowrite8(u8 value, volatile void __iomem *addr)
@@ -609,11 +619,21 @@ static inline void iowrite32(u32 value, volatile void __iomem *addr)
 }
 #endif
 
+#ifdef CONFIG_64BIT
+#ifndef iowrite64
+#define iowrite64 iowrite64
+static inline void iowrite64(u64 value, volatile void __iomem *addr)
+{
+	writeq(value, addr);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
 #ifndef ioread16be
 #define ioread16be ioread16be
 static inline u16 ioread16be(const volatile void __iomem *addr)
 {
-	return __be16_to_cpu(__raw_readw(addr));
+	return swab16(readw(addr));
 }
 #endif
 
@@ -621,15 +641,25 @@ static inline u16 ioread16be(const volatile void __iomem *addr)
 #define ioread32be ioread32be
 static inline u32 ioread32be(const volatile void __iomem *addr)
 {
-	return __be32_to_cpu(__raw_readl(addr));
+	return swab32(readl(addr));
 }
 #endif
 
+#ifdef CONFIG_64BIT
+#ifndef ioread64be
+#define ioread64be ioread64be
+static inline u64 ioread64be(const volatile void __iomem *addr)
+{
+	return swab64(readq(addr));
+}
+#endif
+#endif /* CONFIG_64BIT */
+
 #ifndef iowrite16be
 #define iowrite16be iowrite16be
 static inline void iowrite16be(u16 value, void volatile __iomem *addr)
 {
-	__raw_writew(__cpu_to_be16(value), addr);
+	writew(swab16(value), addr);
 }
 #endif
 
@@ -637,10 +667,20 @@ static inline void iowrite16be(u16 value, void volatile __iomem *addr)
 #define iowrite32be iowrite32be
 static inline void iowrite32be(u32 value, volatile void __iomem *addr)
 {
-	__raw_writel(__cpu_to_be32(value), addr);
+	writel(swab32(value), addr);
 }
 #endif
 
+#ifdef CONFIG_64BIT
+#ifndef iowrite64be
+#define iowrite64be iowrite64be
+static inline void iowrite64be(u64 value, volatile void __iomem *addr)
+{
+	writeq(swab64(value), addr);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
 #ifndef ioread8_rep
 #define ioread8_rep ioread8_rep
 static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
@@ -668,6 +708,17 @@ static inline void ioread32_rep(const volatile void __iomem *addr,
 }
 #endif
 
+#ifdef CONFIG_64BIT
+#ifndef ioread64_rep
+#define ioread64_rep ioread64_rep
+static inline void ioread64_rep(const volatile void __iomem *addr,
+				void *buffer, unsigned int count)
+{
+	readsq(addr, buffer, count);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
 #ifndef iowrite8_rep
 #define iowrite8_rep iowrite8_rep
 static inline void iowrite8_rep(volatile void __iomem *addr,
@@ -697,6 +748,18 @@ static inline void iowrite32_rep(volatile void __iomem *addr,
 	writesl(addr, buffer, count);
 }
 #endif
+
+#ifdef CONFIG_64BIT
+#ifndef iowrite64_rep
+#define iowrite64_rep iowrite64_rep
+static inline void iowrite64_rep(volatile void __iomem *addr,
+				 const void *buffer,
+				 unsigned int count)
+{
+	writesq(addr, buffer, count);
+}
+#endif
+#endif /* CONFIG_64BIT */
 #endif /* CONFIG_GENERIC_IOMAP */
 
 #ifdef __KERNEL__
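The ioread32be()/iowrite32be() rewrite keeps the semantics "native-order access, then byte swap", so on a little-endian host a big-endian device register still comes out in CPU order. A small host-side C illustration, using __builtin_bswap32 to stand in for swab32() (not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* a device register holding big-endian 0x11223344 */
	uint8_t mmio[4] = { 0x11, 0x22, 0x33, 0x44 };
	uint32_t raw;

	memcpy(&raw, mmio, sizeof(raw));	/* what a LE readl() would fetch */
	printf("raw=0x%08x be=0x%08x\n", raw, __builtin_bswap32(raw));
	return 0;
}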
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index d8f8622fa044..650fede33c25 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -30,12 +30,20 @@ extern unsigned int ioread16(void __iomem *);
 extern unsigned int ioread16be(void __iomem *);
 extern unsigned int ioread32(void __iomem *);
 extern unsigned int ioread32be(void __iomem *);
+#ifdef CONFIG_64BIT
+extern u64 ioread64(void __iomem *);
+extern u64 ioread64be(void __iomem *);
+#endif
 
 extern void iowrite8(u8, void __iomem *);
 extern void iowrite16(u16, void __iomem *);
 extern void iowrite16be(u16, void __iomem *);
 extern void iowrite32(u32, void __iomem *);
 extern void iowrite32be(u32, void __iomem *);
+#ifdef CONFIG_64BIT
+extern void iowrite64(u64, void __iomem *);
+extern void iowrite64be(u64, void __iomem *);
+#endif
 
 /*
  * "string" versions of the above. Note that they
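Since the 64-bit accessors are only declared under CONFIG_64BIT, portable callers still need a 32-bit fallback. A hypothetical driver fragment sketching that pattern (dev_read_counter() and the register offsets are invented for illustration and are not part of this patch):

static u64 dev_read_counter(void __iomem *base)
{
#ifdef CONFIG_64BIT
	return ioread64be(base + 0x10);		/* hypothetical 64-bit register */
#else
	u64 hi = ioread32be(base + 0x10);	/* hypothetical high word */
	u32 lo = ioread32be(base + 0x14);	/* hypothetical low word */

	/* a real driver would re-read hi to guard against low-word wrap */
	return (hi << 32) | lo;
#endif
}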
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index fd694cfd678a..c54829d3de37 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1))
+	if (likely(atomic_read(count) == 1 && atomic_cmpxchg_acquire(count, 1, 0) == 1))
 		return 1;
 	return 0;
 }
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index a6b4a7bd6ac9..3269ec4e195f 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -91,8 +91,12 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	int prev = atomic_xchg_acquire(count, 0);
+	int prev;
 
+	if (atomic_read(count) != 1)
+		return 0;
+
+	prev = atomic_xchg_acquire(count, 0);
 	if (unlikely(prev < 0)) {
 		/*
 		 * The lock was marked contended so we must restore that
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 9401f4819891..d4458b6dbfb4 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -806,4 +806,12 @@ static inline int pmd_clear_huge(pmd_t *pmd)
 #define io_remap_pfn_range remap_pfn_range
 #endif
 
+#ifndef has_transparent_hugepage
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#else
+#define has_transparent_hugepage() 0
+#endif
+#endif
+
 #endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 5d8ffa3e6f8c..c1cde3577551 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -7,10 +7,10 @@
 
 static __always_inline int preempt_count(void)
 {
-	return current_thread_info()->preempt_count;
+	return READ_ONCE(current_thread_info()->preempt_count);
 }
 
-static __always_inline int *preempt_count_ptr(void)
+static __always_inline volatile int *preempt_count_ptr(void)
 {
 	return &current_thread_info()->preempt_count;
 }
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 35a52a880b2f..9f0681bf1e87 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -22,14 +22,33 @@
 #include <asm-generic/qspinlock_types.h>
 
 /**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
+/**
  * queued_spin_is_locked - is the spinlock locked?
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
  */
+#ifndef queued_spin_is_locked
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
+	/*
+	 * See queued_spin_unlock_wait().
+	 *
+	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+	 * isn't immediately observable.
+	 */
 	return atomic_read(&lock->val);
 }
+#endif
 
 /**
  * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -92,26 +111,12 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 {
 	/*
-	 * smp_mb__before_atomic() in order to guarantee release semantics
+	 * unlock() needs release semantics:
 	 */
-	smp_mb__before_atomic();
-	atomic_sub(_Q_LOCKED_VAL, &lock->val);
+	(void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
 }
 #endif
 
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
-	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
-		cpu_relax();
-}
-
 #ifndef virt_spin_lock
 static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
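The two mutex fastpath changes share one idea: probe the lock word with a plain read before attempting the atomic RMW, so contended waiters spin on a shared cacheline instead of bouncing it with failing xchg/cmpxchg attempts. A userspace model of that test-before-RMW trylock in C11 (an analog, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static bool fastpath_trylock(atomic_int *count)
{
	int expected = 1;

	/* cheap shared read first; only RMW when the lock looks free */
	if (atomic_load_explicit(count, memory_order_relaxed) != 1)
		return false;

	return atomic_compare_exchange_strong_explicit(count, &expected, 0,
						       memory_order_acquire,
						       memory_order_relaxed);
}

int main(void)
{
	atomic_int count = 1;	/* 1 == unlocked in this model */
	return fastpath_trylock(&count) ? 0 : 1;
}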
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
deleted file mode 100644
index 4e3b6558331e..000000000000
--- a/include/asm-generic/rtc.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * include/asm-generic/rtc.h
- *
- * Author: Tom Rini <trini@mvista.com>
- *
- * Based on:
- *         drivers/char/rtc.c
- *
- * Please read the COPYING file for all license details.
- */
-
-#ifndef __ASM_RTC_H__
-#define __ASM_RTC_H__
-
-#include <linux/mc146818rtc.h>
-#include <linux/rtc.h>
-#include <linux/bcd.h>
-#include <linux/delay.h>
-#ifdef CONFIG_ACPI
-#include <linux/acpi.h>
-#endif
-
-#define RTC_PIE 0x40		/* periodic interrupt enable */
-#define RTC_AIE 0x20		/* alarm interrupt enable */
-#define RTC_UIE 0x10		/* update-finished interrupt enable */
-
-/* some dummy definitions */
-#define RTC_BATT_BAD 0x100	/* battery bad */
-#define RTC_SQWE 0x08		/* enable square-wave output */
-#define RTC_DM_BINARY 0x04	/* all time/date values are BCD if clear */
-#define RTC_24H 0x02		/* 24 hour mode - else hours bit 7 means pm */
-#define RTC_DST_EN 0x01		/* auto switch DST - works f. USA only */
-
-/*
- * Returns true if a clock update is in progress
- */
-static inline unsigned char rtc_is_updating(void)
-{
-	unsigned char uip;
-	unsigned long flags;
-
-	spin_lock_irqsave(&rtc_lock, flags);
-	uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
-	spin_unlock_irqrestore(&rtc_lock, flags);
-	return uip;
-}
-
-static inline unsigned int __get_rtc_time(struct rtc_time *time)
-{
-	unsigned char ctrl;
-	unsigned long flags;
-	unsigned char century = 0;
-#ifdef CONFIG_MACH_DECSTATION
-	unsigned int real_year;
-#endif
-
-	/*
-	 * read RTC once any update in progress is done. The update
-	 * can take just over 2ms. We wait 20ms. There is no need to
-	 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
-	 * If you need to know *exactly* when a second has started, enable
-	 * periodic update complete interrupts, (via ioctl) and then
-	 * immediately read /dev/rtc which will block until you get the IRQ.
-	 * Once the read clears, read the RTC time (again via ioctl). Easy.
-	 */
-	if (rtc_is_updating())
-		mdelay(20);
-
-	/*
-	 * Only the values that we read from the RTC are set. We leave
-	 * tm_wday, tm_yday and tm_isdst untouched. Even though the
-	 * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
-	 * by the RTC when initially set to a non-zero value.
-	 */
-	spin_lock_irqsave(&rtc_lock, flags);
-	time->tm_sec = CMOS_READ(RTC_SECONDS);
-	time->tm_min = CMOS_READ(RTC_MINUTES);
-	time->tm_hour = CMOS_READ(RTC_HOURS);
-	time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
-	time->tm_mon = CMOS_READ(RTC_MONTH);
-	time->tm_year = CMOS_READ(RTC_YEAR);
-#ifdef CONFIG_MACH_DECSTATION
-	real_year = CMOS_READ(RTC_DEC_YEAR);
-#endif
-#ifdef CONFIG_ACPI
-	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
-	    acpi_gbl_FADT.century)
-		century = CMOS_READ(acpi_gbl_FADT.century);
-#endif
-	ctrl = CMOS_READ(RTC_CONTROL);
-	spin_unlock_irqrestore(&rtc_lock, flags);
-
-	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-	{
-		time->tm_sec = bcd2bin(time->tm_sec);
-		time->tm_min = bcd2bin(time->tm_min);
-		time->tm_hour = bcd2bin(time->tm_hour);
-		time->tm_mday = bcd2bin(time->tm_mday);
-		time->tm_mon = bcd2bin(time->tm_mon);
-		time->tm_year = bcd2bin(time->tm_year);
-		century = bcd2bin(century);
-	}
-
-#ifdef CONFIG_MACH_DECSTATION
-	time->tm_year += real_year - 72;
-#endif
-
-	if (century)
-		time->tm_year += (century - 19) * 100;
-
-	/*
-	 * Account for differences between how the RTC uses the values
-	 * and how they are defined in a struct rtc_time;
-	 */
-	if (time->tm_year <= 69)
-		time->tm_year += 100;
-
-	time->tm_mon--;
-
-	return RTC_24H;
-}
-
-#ifndef get_rtc_time
-#define get_rtc_time	__get_rtc_time
-#endif
-
-/* Set the current date and time in the real time clock. */
-static inline int __set_rtc_time(struct rtc_time *time)
-{
-	unsigned long flags;
-	unsigned char mon, day, hrs, min, sec;
-	unsigned char save_control, save_freq_select;
-	unsigned int yrs;
-#ifdef CONFIG_MACH_DECSTATION
-	unsigned int real_yrs, leap_yr;
-#endif
-	unsigned char century = 0;
-
-	yrs = time->tm_year;
-	mon = time->tm_mon + 1;   /* tm_mon starts at zero */
-	day = time->tm_mday;
-	hrs = time->tm_hour;
-	min = time->tm_min;
-	sec = time->tm_sec;
-
-	if (yrs > 255)	/* They are unsigned */
-		return -EINVAL;
-
-	spin_lock_irqsave(&rtc_lock, flags);
-#ifdef CONFIG_MACH_DECSTATION
-	real_yrs = yrs;
-	leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) ||
-			!((yrs + 1900) % 400));
-	yrs = 72;
-
-	/*
-	 * We want to keep the year set to 73 until March
-	 * for non-leap years, so that Feb, 29th is handled
-	 * correctly.
-	 */
-	if (!leap_yr && mon < 3) {
-		real_yrs--;
-		yrs = 73;
-	}
-#endif
-
-#ifdef CONFIG_ACPI
-	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
-	    acpi_gbl_FADT.century) {
-		century = (yrs + 1900) / 100;
-		yrs %= 100;
-	}
-#endif
-
-	/* These limits and adjustments are independent of
-	 * whether the chip is in binary mode or not.
-	 */
-	if (yrs > 169) {
-		spin_unlock_irqrestore(&rtc_lock, flags);
-		return -EINVAL;
-	}
-
-	if (yrs >= 100)
-		yrs -= 100;
-
-	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
-	    || RTC_ALWAYS_BCD) {
-		sec = bin2bcd(sec);
-		min = bin2bcd(min);
-		hrs = bin2bcd(hrs);
-		day = bin2bcd(day);
-		mon = bin2bcd(mon);
-		yrs = bin2bcd(yrs);
-		century = bin2bcd(century);
-	}
-
-	save_control = CMOS_READ(RTC_CONTROL);
-	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
-	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-
-#ifdef CONFIG_MACH_DECSTATION
-	CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
-#endif
-	CMOS_WRITE(yrs, RTC_YEAR);
-	CMOS_WRITE(mon, RTC_MONTH);
-	CMOS_WRITE(day, RTC_DAY_OF_MONTH);
-	CMOS_WRITE(hrs, RTC_HOURS);
-	CMOS_WRITE(min, RTC_MINUTES);
-	CMOS_WRITE(sec, RTC_SECONDS);
-#ifdef CONFIG_ACPI
-	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
-	    acpi_gbl_FADT.century)
-		CMOS_WRITE(century, acpi_gbl_FADT.century);
-#endif
-
-	CMOS_WRITE(save_control, RTC_CONTROL);
-	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-
-	spin_unlock_irqrestore(&rtc_lock, flags);
-
-	return 0;
-}
-
-#ifndef set_rtc_time
-#define set_rtc_time	__set_rtc_time
-#endif
-
-static inline unsigned int get_rtc_ss(void)
-{
-	struct rtc_time h;
-
-	get_rtc_time(&h);
-	return h.tm_sec;
-}
-
-static inline int get_rtc_pll(struct rtc_pll_info *pll)
-{
-	return -EINVAL;
-}
-static inline int set_rtc_pll(struct rtc_pll_info *pll)
-{
-	return -EINVAL;
-}
-
-#endif /* __ASM_RTC_H__ */
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index d6d5dc98d7da..5be122e3d326 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -41,8 +41,8 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
 	long tmp;
 
-	while ((tmp = sem->count) >= 0) {
-		if (tmp == cmpxchg_acquire(&sem->count, tmp,
+	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
+		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
 			   tmp + RWSEM_ACTIVE_READ_BIAS)) {
 			return 1;
 		}
@@ -53,7 +53,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline void __down_write(struct rw_semaphore *sem)
 {
 	long tmp;
 
@@ -63,16 +63,23 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 		rwsem_down_write_failed(sem);
 }
 
-static inline void __down_write(struct rw_semaphore *sem)
+static inline int __down_write_killable(struct rw_semaphore *sem)
 {
-	__down_write_nested(sem, 0);
+	long tmp;
+
+	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+				     (atomic_long_t *)&sem->count);
+	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+			return -EINTR;
+	return 0;
 }
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
 	long tmp;
 
-	tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
+	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
 		      RWSEM_ACTIVE_WRITE_BIAS);
 	return tmp == RWSEM_UNLOCKED_VALUE;
 }
@@ -100,14 +107,6 @@ static inline void __up_write(struct rw_semaphore *sem)
 }
 
 /*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-	atomic_long_add(delta, (atomic_long_t *)&sem->count);
-}
-
-/*
  * downgrade write lock to read lock
  */
 static inline void __downgrade_write(struct rw_semaphore *sem)
@@ -127,13 +126,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 	rwsem_downgrade_wake(sem);
 }
 
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
-}
-
 #endif	/* __KERNEL__ */
 #endif	/* _ASM_GENERIC_RWSEM_H */
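__down_write_killable() leans on the kernel's error-pointer convention: the slow path returns either the semaphore pointer on success or an encoded errno such as -EINTR when the waiter is killed, and IS_ERR() tells the two apart. A simplified userspace model of the ERR_PTR()/IS_ERR() helpers, which in the kernel live in include/linux/err.h (a sketch for illustration):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;		/* encode errno in the top of the address space */
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *ret = ERR_PTR(-EINTR);	/* what a killable slow path may return */

	printf("IS_ERR=%d -> caller would return -EINTR\n", IS_ERR(ret));
	return 0;
}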
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
index c9ccafa0d99a..e74072d23e69 100644
--- a/include/asm-generic/seccomp.h
+++ b/include/asm-generic/seccomp.h
@@ -29,4 +29,18 @@
 #define __NR_seccomp_sigreturn __NR_rt_sigreturn
 #endif
 
+#ifdef CONFIG_COMPAT
+#ifndef get_compat_mode1_syscalls
+static inline const int *get_compat_mode1_syscalls(void)
+{
+	static const int mode1_syscalls_32[] = {
+		__NR_seccomp_read_32, __NR_seccomp_write_32,
+		__NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
+		0, /* null terminated */
+	};
+	return mode1_syscalls_32;
+}
+#endif
+#endif /* CONFIG_COMPAT */
+
 #endif /* _ASM_GENERIC_SECCOMP_H */
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 3d1a3af5cf59..a2508a8f9a9c 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -17,21 +17,6 @@
 struct siginfo;
 void do_schedule_next_timer(struct siginfo *info);
 
-#ifndef HAVE_ARCH_COPY_SIGINFO
-
-#include <linux/string.h>
-
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-{
-	if (from->si_code < 0)
-		memcpy(to, from, sizeof(*to));
-	else
-		/* _sigchld is currently the largest know union member */
-		memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
-}
-
-#endif
-
 extern int copy_siginfo_to_user(struct siginfo __user *to,
 				const struct siginfo *from);
 
 #endif
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 9dbb739cafa0..c6d667187608 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -107,6 +107,12 @@ struct mmu_gather {
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
 	unsigned int		batch_count;
+	/*
+	 * __tlb_adjust_range will track the new addr here,
+	 * that that we can adjust the range after the flush
+	 */
+	unsigned long addr;
+	int page_size;
 };
 
 #define HAVE_GENERIC_MMU_GATHER
@@ -115,23 +121,20 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
 							unsigned long end);
-int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
-
-/* tlb_remove_page
- *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
- *	required.
- */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	if (!__tlb_remove_page(tlb, page))
-		tlb_flush_mmu(tlb);
-}
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+				   int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address)
 {
 	tlb->start = min(tlb->start, address);
 	tlb->end = max(tlb->end, address + PAGE_SIZE);
+	/*
+	 * Track the last address with which we adjusted the range. This
+	 * will be used later to adjust again after a mmu_flush due to
+	 * failed __tlb_remove_page
+	 */
+	tlb->addr = address;
 }
 
 static inline void __tlb_reset_range(struct mmu_gather *tlb)
@@ -144,6 +147,40 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 	}
 }
 
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	if (__tlb_remove_page_size(tlb, page, page_size)) {
+		tlb_flush_mmu(tlb);
+		tlb->page_size = page_size;
+		__tlb_adjust_range(tlb, tlb->addr);
+		__tlb_remove_page_size(tlb, page, page_size);
+	}
+}
+
+static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+
+/* tlb_remove_page
+ *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
+ *	required.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
+{
+	/* active->nr should be zero when we call this */
+	VM_BUG_ON_PAGE(tlb->active->nr, page);
+	tlb->page_size = PAGE_SIZE;
+	__tlb_adjust_range(tlb, tlb->addr);
+	return __tlb_remove_page(tlb, page);
+}
+
 /*
  * In the case of tlb vma handling, we can optimise these away in the
  * case where we're doing a full MM flush. When we're doing a munmap,
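The new tlb_remove_page_size() turns the old "caller flushes on failure" protocol inside out: it queues the page, and when the gather reports it is full it flushes and retries the same page itself. A toy userspace model of that batch-flush-retry flow, with a fixed-size array standing in for the mmu_gather machinery (illustration only):

#include <stdbool.h>
#include <stdio.h>

#define BATCH	4

static void *batch[BATCH];
static int nr;

static bool batch_add(void *page)	/* models __tlb_remove_page_size() */
{
	if (nr == BATCH)
		return true;		/* full: caller must flush and retry */
	batch[nr++] = page;
	return false;
}

static void batch_flush(void)		/* models tlb_flush_mmu() */
{
	printf("flushing %d pages\n", nr);
	nr = 0;
}

static void remove_page(void *page)	/* models tlb_remove_page_size() */
{
	if (batch_add(page)) {
		batch_flush();
		batch_add(page);	/* retry now that there is room */
	}
}

int main(void)
{
	for (long i = 1; i <= 10; i++)
		remove_page((void *)i);	/* fake page pointers for the demo */
	batch_flush();
	return 0;
}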
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 339125bb4d2c..24563970ff7b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -164,7 +164,7 @@
 #define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
 #define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
-#define OF_TABLE(cfg, name)	__OF_TABLE(config_enabled(cfg), name)
+#define OF_TABLE(cfg, name)	__OF_TABLE(IS_ENABLED(cfg), name)
 #define _OF_TABLE_0(name)
 #define _OF_TABLE_1(name)						\
 	. = ALIGN(8);							\
@@ -245,7 +245,17 @@
 
 #define INIT_TASK_DATA(align)						\
 	. = ALIGN(align);						\
-	*(.data..init_task)
+	VMLINUX_SYMBOL(__start_init_task) = .;				\
+	*(.data..init_task)						\
+	VMLINUX_SYMBOL(__end_init_task) = .;
+
+/*
+ * Allow architectures to handle ro_after_init data on their
+ * own by defining an empty RO_AFTER_INIT_DATA.
+ */
+#ifndef RO_AFTER_INIT_DATA
+#define RO_AFTER_INIT_DATA *(.data..ro_after_init)
+#endif
 
 /*
  * Read only Data
@@ -255,7 +265,7 @@
 	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start_rodata) = .;			\
 		*(.rodata) *(.rodata.*)					\
-		*(.data..ro_after_init)	/* Read only after init */	\
+		RO_AFTER_INIT_DATA	/* Read only after init */	\
 		*(__vermagic)		/* Kernel version magic */	\
 		. = ALIGN(8);						\
 		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
@@ -540,15 +550,19 @@
 
 #define INIT_TEXT							\
 	*(.init.text)							\
+	*(.text.startup)						\
 	MEM_DISCARD(init.text)
 
 #define EXIT_DATA							\
 	*(.exit.data)							\
+	*(.fini_array)							\
+	*(.dtors)							\
 	MEM_DISCARD(exit.data)						\
 	MEM_DISCARD(exit.rodata)
 
 #define EXIT_TEXT							\
 	*(.exit.text)							\
+	*(.text.exit)							\
 	MEM_DISCARD(exit.text)
 
 #define EXIT_CALL							\
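The new RO_AFTER_INIT_DATA hook lets an architecture pull .data..ro_after_init out of the generic .rodata placement and position it itself. A hypothetical arch linker-script sketch showing how the override would be used (file path and motivation assumed, not taken from this patch):

/* arch/<arch>/kernel/vmlinux.lds.S (hypothetical) */
#define RO_AFTER_INIT_DATA	/* empty: this arch places .data..ro_after_init itself */
#include <asm-generic/vmlinux.lds.h>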